Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.459
      1 /*	$NetBSD: if_wm.c,v 1.459 2016/12/26 07:55:00 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Advanced Receive Descriptor
     79  *	- EEE (Energy Efficiency Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.459 2016/12/26 07:55:00 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 
    138 #include <dev/pci/pcireg.h>
    139 #include <dev/pci/pcivar.h>
    140 #include <dev/pci/pcidevs.h>
    141 
    142 #include <dev/pci/if_wmreg.h>
    143 #include <dev/pci/if_wmvar.h>
    144 
    145 #ifdef WM_DEBUG
    146 #define	WM_DEBUG_LINK		__BIT(0)
    147 #define	WM_DEBUG_TX		__BIT(1)
    148 #define	WM_DEBUG_RX		__BIT(2)
    149 #define	WM_DEBUG_GMII		__BIT(3)
    150 #define	WM_DEBUG_MANAGE		__BIT(4)
    151 #define	WM_DEBUG_NVM		__BIT(5)
    152 #define	WM_DEBUG_INIT		__BIT(6)
    153 #define	WM_DEBUG_LOCK		__BIT(7)
    154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    156 
    157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    158 #else
    159 #define	DPRINTF(x, y)	/* nothing */
    160 #endif /* WM_DEBUG */
    161 
    162 #ifdef NET_MPSAFE
    163 #define WM_MPSAFE	1
    164 #endif
    165 
    166 /*
    167  * This device driver's max interrupt numbers.
    168  */
    169 #define WM_MAX_NQUEUEINTR	16
    170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    171 
    172 /*
    173  * Transmit descriptor list size.  Due to errata, we can only have
    174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    175  * on >= 82544.  We tell the upper layers that they can queue a lot
    176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    177  * of them at a time.
    178  *
    179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    180  * chains containing many small mbufs have been observed in zero-copy
    181  * situations with jumbo frames.
    182  */
    183 #define	WM_NTXSEGS		256
    184 #define	WM_IFQUEUELEN		256
    185 #define	WM_TXQUEUELEN_MAX	64
    186 #define	WM_TXQUEUELEN_MAX_82547	16
    187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    190 #define	WM_NTXDESC_82542	256
    191 #define	WM_NTXDESC_82544	4096
    192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    197 
    198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    199 
    200 #define	WM_TXINTERQSIZE		256
    201 
    202 /*
    203  * Receive descriptor list size.  We have one Rx buffer for normal
    204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    205  * packet.  We allocate 256 receive descriptors, each with a 2k
    206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    207  */
    208 #define	WM_NRXDESC		256
    209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    212 
    213 typedef union txdescs {
    214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    216 } txdescs_t;
    217 
    218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * x)
    220 
    221 /*
    222  * Software state for transmit jobs.
    223  */
    224 struct wm_txsoft {
    225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    227 	int txs_firstdesc;		/* first descriptor in packet */
    228 	int txs_lastdesc;		/* last descriptor in packet */
    229 	int txs_ndesc;			/* # of descriptors used */
    230 };
    231 
    232 /*
    233  * Software state for receive buffers.  Each descriptor gets a
    234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    235  * more than one buffer, we chain them together.
    236  */
    237 struct wm_rxsoft {
    238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    240 };
    241 
    242 #define WM_LINKUP_TIMEOUT	50
    243 
    244 static uint16_t swfwphysem[] = {
    245 	SWFW_PHY0_SM,
    246 	SWFW_PHY1_SM,
    247 	SWFW_PHY2_SM,
    248 	SWFW_PHY3_SM
    249 };
    250 
    251 static const uint32_t wm_82580_rxpbs_table[] = {
    252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    253 };
    254 
    255 struct wm_softc;
    256 
    257 #ifdef WM_EVENT_COUNTERS
    258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    260 	struct evcnt qname##_ev_##evname;
    261 
    262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    263 	do{								\
    264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    266 		    "%s%02d%s", #qname, (qnum), #evname);		\
    267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    268 		    (evtype), NULL, (xname),				\
    269 		    (q)->qname##_##evname##_evcnt_name);		\
    270 	}while(0)
    271 
    272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    274 
    275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    277 #endif /* WM_EVENT_COUNTERS */
    278 
    279 struct wm_txqueue {
    280 	kmutex_t *txq_lock;		/* lock for tx operations */
    281 
    282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    283 
    284 	/* Software state for the transmit descriptors. */
    285 	int txq_num;			/* must be a power of two */
    286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    287 
    288 	/* TX control data structures. */
    289 	int txq_ndesc;			/* must be a power of two */
    290 	size_t txq_descsize;		/* a tx descriptor size */
    291 	txdescs_t *txq_descs_u;
    292         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    294 	int txq_desc_rseg;		/* real number of control segment */
    295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    296 #define	txq_descs	txq_descs_u->sctxu_txdescs
    297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    298 
    299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    300 
    301 	int txq_free;			/* number of free Tx descriptors */
    302 	int txq_next;			/* next ready Tx descriptor */
    303 
    304 	int txq_sfree;			/* number of free Tx jobs */
    305 	int txq_snext;			/* next free Tx job */
    306 	int txq_sdirty;			/* dirty Tx jobs */
    307 
    308 	/* These 4 variables are used only on the 82547. */
    309 	int txq_fifo_size;		/* Tx FIFO size */
    310 	int txq_fifo_head;		/* current head of FIFO */
    311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    313 
    314 	/*
    315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    316 	 * CPUs. This queue intermediate them without block.
    317 	 */
    318 	pcq_t *txq_interq;
    319 
    320 	/*
    321 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    322 	 * to manage Tx H/W queue's busy flag.
    323 	 */
    324 	int txq_flags;			/* flags for H/W queue, see below */
    325 #define	WM_TXQ_NO_SPACE	0x1
    326 
    327 	bool txq_stopping;
    328 
    329 #ifdef WM_EVENT_COUNTERS
    330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    335 						/* XXX not used? */
    336 
    337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    338 	WM_Q_EVCNT_DEFINE(txq,txtusum)		/* TCP/UDP cksums comp. out-bound */
    339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    343 
    344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    345 
    346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    347 
    348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    350 #endif /* WM_EVENT_COUNTERS */
    351 };
    352 
    353 struct wm_rxqueue {
    354 	kmutex_t *rxq_lock;		/* lock for rx operations */
    355 
    356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    357 
    358 	/* Software state for the receive descriptors. */
    359 	wiseman_rxdesc_t *rxq_descs;
    360 
    361 	/* RX control data structures. */
    362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    365 	int rxq_desc_rseg;		/* real number of control segment */
    366 	size_t rxq_desc_size;		/* control data size */
    367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    368 
    369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    370 
    371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    372 	int rxq_discard;
    373 	int rxq_len;
    374 	struct mbuf *rxq_head;
    375 	struct mbuf *rxq_tail;
    376 	struct mbuf **rxq_tailp;
    377 
    378 	bool rxq_stopping;
    379 
    380 #ifdef WM_EVENT_COUNTERS
    381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    382 
    383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    385 #endif
    386 };
    387 
    388 struct wm_queue {
    389 	int wmq_id;			/* index of transmit and receive queues */
    390 	int wmq_intr_idx;		/* index of MSI-X tables */
    391 
    392 	struct wm_txqueue wmq_txq;
    393 	struct wm_rxqueue wmq_rxq;
    394 };
    395 
    396 struct wm_phyop {
    397 	int (*acquire)(struct wm_softc *);
    398 	void (*release)(struct wm_softc *);
    399 	int reset_delay_us;
    400 };
    401 
    402 /*
    403  * Software state per device.
    404  */
    405 struct wm_softc {
    406 	device_t sc_dev;		/* generic device information */
    407 	bus_space_tag_t sc_st;		/* bus space tag */
    408 	bus_space_handle_t sc_sh;	/* bus space handle */
    409 	bus_size_t sc_ss;		/* bus space size */
    410 	bus_space_tag_t sc_iot;		/* I/O space tag */
    411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    412 	bus_size_t sc_ios;		/* I/O space size */
    413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    415 	bus_size_t sc_flashs;		/* flash registers space size */
    416 	off_t sc_flashreg_offset;	/*
    417 					 * offset to flash registers from
    418 					 * start of BAR
    419 					 */
    420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    421 
    422 	struct ethercom sc_ethercom;	/* ethernet common data */
    423 	struct mii_data sc_mii;		/* MII/media information */
    424 
    425 	pci_chipset_tag_t sc_pc;
    426 	pcitag_t sc_pcitag;
    427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    429 
    430 	uint16_t sc_pcidevid;		/* PCI device ID */
    431 	wm_chip_type sc_type;		/* MAC type */
    432 	int sc_rev;			/* MAC revision */
    433 	wm_phy_type sc_phytype;		/* PHY type */
    434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    435 #define	WM_MEDIATYPE_UNKNOWN		0x00
    436 #define	WM_MEDIATYPE_FIBER		0x01
    437 #define	WM_MEDIATYPE_COPPER		0x02
    438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    440 	int sc_flags;			/* flags; see below */
    441 	int sc_if_flags;		/* last if_flags */
    442 	int sc_flowflags;		/* 802.3x flow control flags */
    443 	int sc_align_tweak;
    444 
    445 	void *sc_ihs[WM_MAX_NINTR];	/*
    446 					 * interrupt cookie.
    447 					 * legacy and msi use sc_ihs[0].
    448 					 */
    449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    450 	int sc_nintrs;			/* number of interrupts */
    451 
    452 	int sc_link_intr_idx;		/* index of MSI-X tables */
    453 
    454 	callout_t sc_tick_ch;		/* tick callout */
    455 	bool sc_core_stopping;
    456 
    457 	int sc_nvm_ver_major;
    458 	int sc_nvm_ver_minor;
    459 	int sc_nvm_ver_build;
    460 	int sc_nvm_addrbits;		/* NVM address bits */
    461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    462 	int sc_ich8_flash_base;
    463 	int sc_ich8_flash_bank_size;
    464 	int sc_nvm_k1_enabled;
    465 
    466 	int sc_nqueues;
    467 	struct wm_queue *sc_queue;
    468 
    469 	int sc_affinity_offset;
    470 
    471 #ifdef WM_EVENT_COUNTERS
    472 	/* Event counters. */
    473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    474 
    475         /* WM_T_82542_2_1 only */
    476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    481 #endif /* WM_EVENT_COUNTERS */
    482 
    483 	/* This variable are used only on the 82547. */
    484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    485 
    486 	uint32_t sc_ctrl;		/* prototype CTRL register */
    487 #if 0
    488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    489 #endif
    490 	uint32_t sc_icr;		/* prototype interrupt bits */
    491 	uint32_t sc_itr;		/* prototype intr throttling reg */
    492 	uint32_t sc_tctl;		/* prototype TCTL register */
    493 	uint32_t sc_rctl;		/* prototype RCTL register */
    494 	uint32_t sc_txcw;		/* prototype TXCW register */
    495 	uint32_t sc_tipg;		/* prototype TIPG register */
    496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    497 	uint32_t sc_pba;		/* prototype PBA register */
    498 
    499 	int sc_tbi_linkup;		/* TBI link status */
    500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    502 
    503 	int sc_mchash_type;		/* multicast filter offset */
    504 
    505 	krndsource_t rnd_source;	/* random source */
    506 
    507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    508 
    509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    510 	kmutex_t *sc_ich_phymtx;	/*
    511 					 * 82574/82583/ICH/PCH specific PHY
    512 					 * mutex. For 82574/82583, the mutex
    513 					 * is used for both PHY and NVM.
    514 					 */
    515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    516 
    517 	struct wm_phyop phy;
    518 };
    519 
    520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    523 
    524 #ifdef WM_MPSAFE
    525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    526 #else
    527 #define CALLOUT_FLAGS	0
    528 #endif
    529 
    530 #define	WM_RXCHAIN_RESET(rxq)						\
    531 do {									\
    532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    533 	*(rxq)->rxq_tailp = NULL;					\
    534 	(rxq)->rxq_len = 0;						\
    535 } while (/*CONSTCOND*/0)
    536 
    537 #define	WM_RXCHAIN_LINK(rxq, m)						\
    538 do {									\
    539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    540 	(rxq)->rxq_tailp = &(m)->m_next;				\
    541 } while (/*CONSTCOND*/0)
    542 
    543 #ifdef WM_EVENT_COUNTERS
    544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    546 
    547 #define WM_Q_EVCNT_INCR(qname, evname)			\
    548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    551 #else /* !WM_EVENT_COUNTERS */
    552 #define	WM_EVCNT_INCR(ev)	/* nothing */
    553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    554 
    555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    557 #endif /* !WM_EVENT_COUNTERS */
    558 
    559 #define	CSR_READ(sc, reg)						\
    560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    561 #define	CSR_WRITE(sc, reg, val)						\
    562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    563 #define	CSR_WRITE_FLUSH(sc)						\
    564 	(void) CSR_READ((sc), WMREG_STATUS)
    565 
    566 #define ICH8_FLASH_READ32(sc, reg)					\
    567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    568 	    (reg) + sc->sc_flashreg_offset)
    569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    571 	    (reg) + sc->sc_flashreg_offset, (data))
    572 
    573 #define ICH8_FLASH_READ16(sc, reg)					\
    574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    575 	    (reg) + sc->sc_flashreg_offset)
    576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    578 	    (reg) + sc->sc_flashreg_offset, (data))
    579 
    580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    582 
    583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    584 #define	WM_CDTXADDR_HI(txq, x)						\
    585 	(sizeof(bus_addr_t) == 8 ?					\
    586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    587 
    588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    589 #define	WM_CDRXADDR_HI(rxq, x)						\
    590 	(sizeof(bus_addr_t) == 8 ?					\
    591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    592 
    593 /*
    594  * Register read/write functions.
    595  * Other than CSR_{READ|WRITE}().
    596  */
    597 #if 0
    598 static inline uint32_t wm_io_read(struct wm_softc *, int);
    599 #endif
    600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    602 	uint32_t, uint32_t);
    603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    604 
    605 /*
    606  * Descriptor sync/init functions.
    607  */
    608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    611 
    612 /*
    613  * Device driver interface functions and commonly used functions.
    614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    615  */
    616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    617 static int	wm_match(device_t, cfdata_t, void *);
    618 static void	wm_attach(device_t, device_t, void *);
    619 static int	wm_detach(device_t, int);
    620 static bool	wm_suspend(device_t, const pmf_qual_t *);
    621 static bool	wm_resume(device_t, const pmf_qual_t *);
    622 static void	wm_watchdog(struct ifnet *);
    623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    624 static void	wm_tick(void *);
    625 static int	wm_ifflags_cb(struct ethercom *);
    626 static int	wm_ioctl(struct ifnet *, u_long, void *);
    627 /* MAC address related */
    628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    632 static void	wm_set_filter(struct wm_softc *);
    633 /* Reset and init related */
    634 static void	wm_set_vlan(struct wm_softc *);
    635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    636 static void	wm_get_auto_rd_done(struct wm_softc *);
    637 static void	wm_lan_init_done(struct wm_softc *);
    638 static void	wm_get_cfg_done(struct wm_softc *);
    639 static void	wm_initialize_hardware_bits(struct wm_softc *);
    640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    641 static void	wm_reset_phy(struct wm_softc *);
    642 static void	wm_flush_desc_rings(struct wm_softc *);
    643 static void	wm_reset(struct wm_softc *);
    644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    645 static void	wm_rxdrain(struct wm_rxqueue *);
    646 static void	wm_rss_getkey(uint8_t *);
    647 static void	wm_init_rss(struct wm_softc *);
    648 static void	wm_adjust_qnum(struct wm_softc *, int);
    649 static int	wm_setup_legacy(struct wm_softc *);
    650 static int	wm_setup_msix(struct wm_softc *);
    651 static int	wm_init(struct ifnet *);
    652 static int	wm_init_locked(struct ifnet *);
    653 static void	wm_turnon(struct wm_softc *);
    654 static void	wm_turnoff(struct wm_softc *);
    655 static void	wm_stop(struct ifnet *, int);
    656 static void	wm_stop_locked(struct ifnet *, int);
    657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    658 static void	wm_82547_txfifo_stall(void *);
    659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    660 /* DMA related */
    661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    665     struct wm_txqueue *);
    666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    669     struct wm_rxqueue *);
    670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    677     struct wm_txqueue *);
    678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    679     struct wm_rxqueue *);
    680 static int	wm_alloc_txrx_queues(struct wm_softc *);
    681 static void	wm_free_txrx_queues(struct wm_softc *);
    682 static int	wm_init_txrx_queues(struct wm_softc *);
    683 /* Start */
    684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    685     uint32_t *, uint8_t *);
    686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    687 static void	wm_start(struct ifnet *);
    688 static void	wm_start_locked(struct ifnet *);
    689 static int	wm_transmit(struct ifnet *, struct mbuf *);
    690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    694 static void	wm_nq_start(struct ifnet *);
    695 static void	wm_nq_start_locked(struct ifnet *);
    696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    699 static void	wm_deferred_start(struct ifnet *);
    700 /* Interrupt */
    701 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    702 static void	wm_rxeof(struct wm_rxqueue *);
    703 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    704 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    705 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    706 static void	wm_linkintr(struct wm_softc *, uint32_t);
    707 static int	wm_intr_legacy(void *);
    708 static int	wm_txrxintr_msix(void *);
    709 static int	wm_linkintr_msix(void *);
    710 
    711 /*
    712  * Media related.
    713  * GMII, SGMII, TBI, SERDES and SFP.
    714  */
    715 /* Common */
    716 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    717 /* GMII related */
    718 static void	wm_gmii_reset(struct wm_softc *);
    719 static int	wm_get_phy_id_82575(struct wm_softc *);
    720 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    721 static int	wm_gmii_mediachange(struct ifnet *);
    722 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    723 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    724 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    725 static int	wm_gmii_i82543_readreg(device_t, int, int);
    726 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    727 static int	wm_gmii_mdic_readreg(device_t, int, int);
    728 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    729 static int	wm_gmii_i82544_readreg(device_t, int, int);
    730 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    731 static int	wm_gmii_i80003_readreg(device_t, int, int);
    732 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    733 static int	wm_gmii_bm_readreg(device_t, int, int);
    734 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    735 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    736 static int	wm_gmii_hv_readreg(device_t, int, int);
    737 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    738 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    739 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    740 static int	wm_gmii_82580_readreg(device_t, int, int);
    741 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    742 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    743 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    744 static void	wm_gmii_statchg(struct ifnet *);
    745 /*
    746  * kumeran related (80003, ICH* and PCH*).
    747  * These functions are not for accessing MII registers but for accessing
    748  * kumeran specific registers.
    749  */
    750 static int	wm_kmrn_readreg(struct wm_softc *, int);
    751 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    752 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    753 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    754 /* SGMII */
    755 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    756 static int	wm_sgmii_readreg(device_t, int, int);
    757 static void	wm_sgmii_writereg(device_t, int, int, int);
    758 /* TBI related */
    759 static void	wm_tbi_mediainit(struct wm_softc *);
    760 static int	wm_tbi_mediachange(struct ifnet *);
    761 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    762 static int	wm_check_for_link(struct wm_softc *);
    763 static void	wm_tbi_tick(struct wm_softc *);
    764 /* SERDES related */
    765 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    766 static int	wm_serdes_mediachange(struct ifnet *);
    767 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    768 static void	wm_serdes_tick(struct wm_softc *);
    769 /* SFP related */
    770 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    771 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    772 
    773 /*
    774  * NVM related.
    775  * Microwire, SPI (w/wo EERD) and Flash.
    776  */
    777 /* Misc functions */
    778 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    779 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    780 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    781 /* Microwire */
    782 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    783 /* SPI */
    784 static int	wm_nvm_ready_spi(struct wm_softc *);
    785 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    786 /* Using with EERD */
    787 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    788 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    789 /* Flash */
    790 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    791     unsigned int *);
    792 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    793 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    794 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    795 	uint32_t *);
    796 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    797 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    798 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    799 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    800 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    801 /* iNVM */
    802 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    803 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    804 /* Lock, detecting NVM type, validate checksum and read */
    805 static int	wm_nvm_acquire(struct wm_softc *);
    806 static void	wm_nvm_release(struct wm_softc *);
    807 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    808 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    809 static int	wm_nvm_validate_checksum(struct wm_softc *);
    810 static void	wm_nvm_version_invm(struct wm_softc *);
    811 static void	wm_nvm_version(struct wm_softc *);
    812 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    813 
    814 /*
    815  * Hardware semaphores.
    816  * Very complexed...
    817  */
    818 static int	wm_get_null(struct wm_softc *);
    819 static void	wm_put_null(struct wm_softc *);
    820 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    821 static void	wm_put_swsm_semaphore(struct wm_softc *);
    822 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    823 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    824 static int	wm_get_phy_82575(struct wm_softc *);
    825 static void	wm_put_phy_82575(struct wm_softc *);
    826 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    827 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    828 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    829 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    830 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    831 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    832 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    833 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    834 
    835 /*
    836  * Management mode and power management related subroutines.
    837  * BMC, AMT, suspend/resume and EEE.
    838  */
    839 #if 0
    840 static int	wm_check_mng_mode(struct wm_softc *);
    841 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    842 static int	wm_check_mng_mode_82574(struct wm_softc *);
    843 static int	wm_check_mng_mode_generic(struct wm_softc *);
    844 #endif
    845 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    846 static bool	wm_phy_resetisblocked(struct wm_softc *);
    847 static void	wm_get_hw_control(struct wm_softc *);
    848 static void	wm_release_hw_control(struct wm_softc *);
    849 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    850 static void	wm_smbustopci(struct wm_softc *);
    851 static void	wm_init_manageability(struct wm_softc *);
    852 static void	wm_release_manageability(struct wm_softc *);
    853 static void	wm_get_wakeup(struct wm_softc *);
    854 static void	wm_ulp_disable(struct wm_softc *);
    855 static void	wm_enable_phy_wakeup(struct wm_softc *);
    856 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    857 static void	wm_enable_wakeup(struct wm_softc *);
    858 /* LPLU (Low Power Link Up) */
    859 static void	wm_lplu_d0_disable(struct wm_softc *);
    860 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    861 /* EEE */
    862 static void	wm_set_eee_i350(struct wm_softc *);
    863 
    864 /*
    865  * Workarounds (mainly PHY related).
    866  * Basically, PHY's workarounds are in the PHY drivers.
    867  */
    868 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    869 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    870 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    871 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    872 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    873 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    874 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    875 static void	wm_reset_init_script_82575(struct wm_softc *);
    876 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    877 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    878 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    879 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    880 static void	wm_pll_workaround_i210(struct wm_softc *);
    881 
    882 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    883     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    884 
    885 /*
    886  * Devices supported by this driver.
    887  */
    888 static const struct wm_product {
    889 	pci_vendor_id_t		wmp_vendor;
    890 	pci_product_id_t	wmp_product;
    891 	const char		*wmp_name;
    892 	wm_chip_type		wmp_type;
    893 	uint32_t		wmp_flags;
    894 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    895 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    896 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    897 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    898 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    899 } wm_products[] = {
    900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    901 	  "Intel i82542 1000BASE-X Ethernet",
    902 	  WM_T_82542_2_1,	WMP_F_FIBER },
    903 
    904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    905 	  "Intel i82543GC 1000BASE-X Ethernet",
    906 	  WM_T_82543,		WMP_F_FIBER },
    907 
    908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    909 	  "Intel i82543GC 1000BASE-T Ethernet",
    910 	  WM_T_82543,		WMP_F_COPPER },
    911 
    912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    913 	  "Intel i82544EI 1000BASE-T Ethernet",
    914 	  WM_T_82544,		WMP_F_COPPER },
    915 
    916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    917 	  "Intel i82544EI 1000BASE-X Ethernet",
    918 	  WM_T_82544,		WMP_F_FIBER },
    919 
    920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    921 	  "Intel i82544GC 1000BASE-T Ethernet",
    922 	  WM_T_82544,		WMP_F_COPPER },
    923 
    924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    925 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    926 	  WM_T_82544,		WMP_F_COPPER },
    927 
    928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    929 	  "Intel i82540EM 1000BASE-T Ethernet",
    930 	  WM_T_82540,		WMP_F_COPPER },
    931 
    932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    933 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    934 	  WM_T_82540,		WMP_F_COPPER },
    935 
    936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    937 	  "Intel i82540EP 1000BASE-T Ethernet",
    938 	  WM_T_82540,		WMP_F_COPPER },
    939 
    940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    941 	  "Intel i82540EP 1000BASE-T Ethernet",
    942 	  WM_T_82540,		WMP_F_COPPER },
    943 
    944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    945 	  "Intel i82540EP 1000BASE-T Ethernet",
    946 	  WM_T_82540,		WMP_F_COPPER },
    947 
    948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    949 	  "Intel i82545EM 1000BASE-T Ethernet",
    950 	  WM_T_82545,		WMP_F_COPPER },
    951 
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    953 	  "Intel i82545GM 1000BASE-T Ethernet",
    954 	  WM_T_82545_3,		WMP_F_COPPER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    957 	  "Intel i82545GM 1000BASE-X Ethernet",
    958 	  WM_T_82545_3,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    961 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    962 	  WM_T_82545_3,		WMP_F_SERDES },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    965 	  "Intel i82546EB 1000BASE-T Ethernet",
    966 	  WM_T_82546,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    969 	  "Intel i82546EB 1000BASE-T Ethernet",
    970 	  WM_T_82546,		WMP_F_COPPER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    973 	  "Intel i82545EM 1000BASE-X Ethernet",
    974 	  WM_T_82545,		WMP_F_FIBER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    977 	  "Intel i82546EB 1000BASE-X Ethernet",
    978 	  WM_T_82546,		WMP_F_FIBER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    981 	  "Intel i82546GB 1000BASE-T Ethernet",
    982 	  WM_T_82546_3,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    985 	  "Intel i82546GB 1000BASE-X Ethernet",
    986 	  WM_T_82546_3,		WMP_F_FIBER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    989 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    990 	  WM_T_82546_3,		WMP_F_SERDES },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    993 	  "i82546GB quad-port Gigabit Ethernet",
    994 	  WM_T_82546_3,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    997 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    998 	  WM_T_82546_3,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1001 	  "Intel PRO/1000MT (82546GB)",
   1002 	  WM_T_82546_3,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1005 	  "Intel i82541EI 1000BASE-T Ethernet",
   1006 	  WM_T_82541,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1009 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1010 	  WM_T_82541,		WMP_F_COPPER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1013 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1014 	  WM_T_82541,		WMP_F_COPPER },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1017 	  "Intel i82541ER 1000BASE-T Ethernet",
   1018 	  WM_T_82541_2,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1021 	  "Intel i82541GI 1000BASE-T Ethernet",
   1022 	  WM_T_82541_2,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1025 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1026 	  WM_T_82541_2,		WMP_F_COPPER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1029 	  "Intel i82541PI 1000BASE-T Ethernet",
   1030 	  WM_T_82541_2,		WMP_F_COPPER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1033 	  "Intel i82547EI 1000BASE-T Ethernet",
   1034 	  WM_T_82547,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1037 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1038 	  WM_T_82547,		WMP_F_COPPER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1041 	  "Intel i82547GI 1000BASE-T Ethernet",
   1042 	  WM_T_82547_2,		WMP_F_COPPER },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1045 	  "Intel PRO/1000 PT (82571EB)",
   1046 	  WM_T_82571,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1049 	  "Intel PRO/1000 PF (82571EB)",
   1050 	  WM_T_82571,		WMP_F_FIBER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1053 	  "Intel PRO/1000 PB (82571EB)",
   1054 	  WM_T_82571,		WMP_F_SERDES },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1057 	  "Intel PRO/1000 QT (82571EB)",
   1058 	  WM_T_82571,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1061 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1062 	  WM_T_82571,		WMP_F_COPPER, },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1065 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1066 	  WM_T_82571,		WMP_F_COPPER, },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1069 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1070 	  WM_T_82571,		WMP_F_SERDES, },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1073 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1074 	  WM_T_82571,		WMP_F_SERDES, },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1077 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1078 	  WM_T_82571,		WMP_F_FIBER, },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1081 	  "Intel i82572EI 1000baseT Ethernet",
   1082 	  WM_T_82572,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1085 	  "Intel i82572EI 1000baseX Ethernet",
   1086 	  WM_T_82572,		WMP_F_FIBER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1089 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1090 	  WM_T_82572,		WMP_F_SERDES },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1093 	  "Intel i82572EI 1000baseT Ethernet",
   1094 	  WM_T_82572,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1097 	  "Intel i82573E",
   1098 	  WM_T_82573,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1101 	  "Intel i82573E IAMT",
   1102 	  WM_T_82573,		WMP_F_COPPER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1105 	  "Intel i82573L Gigabit Ethernet",
   1106 	  WM_T_82573,		WMP_F_COPPER },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1109 	  "Intel i82574L",
   1110 	  WM_T_82574,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1113 	  "Intel i82574L",
   1114 	  WM_T_82574,		WMP_F_COPPER },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1117 	  "Intel i82583V",
   1118 	  WM_T_82583,		WMP_F_COPPER },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1121 	  "i80003 dual 1000baseT Ethernet",
   1122 	  WM_T_80003,		WMP_F_COPPER },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1125 	  "i80003 dual 1000baseX Ethernet",
   1126 	  WM_T_80003,		WMP_F_COPPER },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1129 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1130 	  WM_T_80003,		WMP_F_SERDES },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1133 	  "Intel i80003 1000baseT Ethernet",
   1134 	  WM_T_80003,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1137 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1138 	  WM_T_80003,		WMP_F_SERDES },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1141 	  "Intel i82801H (M_AMT) LAN Controller",
   1142 	  WM_T_ICH8,		WMP_F_COPPER },
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1144 	  "Intel i82801H (AMT) LAN Controller",
   1145 	  WM_T_ICH8,		WMP_F_COPPER },
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1147 	  "Intel i82801H LAN Controller",
   1148 	  WM_T_ICH8,		WMP_F_COPPER },
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1150 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1151 	  WM_T_ICH8,		WMP_F_COPPER },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1153 	  "Intel i82801H (M) LAN Controller",
   1154 	  WM_T_ICH8,		WMP_F_COPPER },
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1156 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1157 	  WM_T_ICH8,		WMP_F_COPPER },
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1159 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1160 	  WM_T_ICH8,		WMP_F_COPPER },
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1162 	  "82567V-3 LAN Controller",
   1163 	  WM_T_ICH8,		WMP_F_COPPER },
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1165 	  "82801I (AMT) LAN Controller",
   1166 	  WM_T_ICH9,		WMP_F_COPPER },
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1168 	  "82801I 10/100 LAN Controller",
   1169 	  WM_T_ICH9,		WMP_F_COPPER },
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1171 	  "82801I (G) 10/100 LAN Controller",
   1172 	  WM_T_ICH9,		WMP_F_COPPER },
   1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1174 	  "82801I (GT) 10/100 LAN Controller",
   1175 	  WM_T_ICH9,		WMP_F_COPPER },
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1177 	  "82801I (C) LAN Controller",
   1178 	  WM_T_ICH9,		WMP_F_COPPER },
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1180 	  "82801I mobile LAN Controller",
   1181 	  WM_T_ICH9,		WMP_F_COPPER },
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1183 	  "82801I mobile (V) LAN Controller",
   1184 	  WM_T_ICH9,		WMP_F_COPPER },
   1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1186 	  "82801I mobile (AMT) LAN Controller",
   1187 	  WM_T_ICH9,		WMP_F_COPPER },
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1189 	  "82567LM-4 LAN Controller",
   1190 	  WM_T_ICH9,		WMP_F_COPPER },
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1192 	  "82567LM-2 LAN Controller",
   1193 	  WM_T_ICH10,		WMP_F_COPPER },
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1195 	  "82567LF-2 LAN Controller",
   1196 	  WM_T_ICH10,		WMP_F_COPPER },
   1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1198 	  "82567LM-3 LAN Controller",
   1199 	  WM_T_ICH10,		WMP_F_COPPER },
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1201 	  "82567LF-3 LAN Controller",
   1202 	  WM_T_ICH10,		WMP_F_COPPER },
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1204 	  "82567V-2 LAN Controller",
   1205 	  WM_T_ICH10,		WMP_F_COPPER },
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1207 	  "82567V-3? LAN Controller",
   1208 	  WM_T_ICH10,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1210 	  "HANKSVILLE LAN Controller",
   1211 	  WM_T_ICH10,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1213 	  "PCH LAN (82577LM) Controller",
   1214 	  WM_T_PCH,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1216 	  "PCH LAN (82577LC) Controller",
   1217 	  WM_T_PCH,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1219 	  "PCH LAN (82578DM) Controller",
   1220 	  WM_T_PCH,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1222 	  "PCH LAN (82578DC) Controller",
   1223 	  WM_T_PCH,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1225 	  "PCH2 LAN (82579LM) Controller",
   1226 	  WM_T_PCH2,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1228 	  "PCH2 LAN (82579V) Controller",
   1229 	  WM_T_PCH2,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1231 	  "82575EB dual-1000baseT Ethernet",
   1232 	  WM_T_82575,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1234 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1235 	  WM_T_82575,		WMP_F_SERDES },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1237 	  "82575GB quad-1000baseT Ethernet",
   1238 	  WM_T_82575,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1240 	  "82575GB quad-1000baseT Ethernet (PM)",
   1241 	  WM_T_82575,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1243 	  "82576 1000BaseT Ethernet",
   1244 	  WM_T_82576,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1246 	  "82576 1000BaseX Ethernet",
   1247 	  WM_T_82576,		WMP_F_FIBER },
   1248 
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1250 	  "82576 gigabit Ethernet (SERDES)",
   1251 	  WM_T_82576,		WMP_F_SERDES },
   1252 
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1254 	  "82576 quad-1000BaseT Ethernet",
   1255 	  WM_T_82576,		WMP_F_COPPER },
   1256 
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1258 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1259 	  WM_T_82576,		WMP_F_COPPER },
   1260 
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1262 	  "82576 gigabit Ethernet",
   1263 	  WM_T_82576,		WMP_F_COPPER },
   1264 
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1266 	  "82576 gigabit Ethernet (SERDES)",
   1267 	  WM_T_82576,		WMP_F_SERDES },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1269 	  "82576 quad-gigabit Ethernet (SERDES)",
   1270 	  WM_T_82576,		WMP_F_SERDES },
   1271 
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1273 	  "82580 1000BaseT Ethernet",
   1274 	  WM_T_82580,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1276 	  "82580 1000BaseX Ethernet",
   1277 	  WM_T_82580,		WMP_F_FIBER },
   1278 
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1280 	  "82580 1000BaseT Ethernet (SERDES)",
   1281 	  WM_T_82580,		WMP_F_SERDES },
   1282 
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1284 	  "82580 gigabit Ethernet (SGMII)",
   1285 	  WM_T_82580,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1287 	  "82580 dual-1000BaseT Ethernet",
   1288 	  WM_T_82580,		WMP_F_COPPER },
   1289 
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1291 	  "82580 quad-1000BaseX Ethernet",
   1292 	  WM_T_82580,		WMP_F_FIBER },
   1293 
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1295 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1296 	  WM_T_82580,		WMP_F_COPPER },
   1297 
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1299 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1300 	  WM_T_82580,		WMP_F_SERDES },
   1301 
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1303 	  "DH89XXCC 1000BASE-KX Ethernet",
   1304 	  WM_T_82580,		WMP_F_SERDES },
   1305 
   1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1307 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1308 	  WM_T_82580,		WMP_F_SERDES },
   1309 
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1311 	  "I350 Gigabit Network Connection",
   1312 	  WM_T_I350,		WMP_F_COPPER },
   1313 
   1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1315 	  "I350 Gigabit Fiber Network Connection",
   1316 	  WM_T_I350,		WMP_F_FIBER },
   1317 
   1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1319 	  "I350 Gigabit Backplane Connection",
   1320 	  WM_T_I350,		WMP_F_SERDES },
   1321 
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1323 	  "I350 Quad Port Gigabit Ethernet",
   1324 	  WM_T_I350,		WMP_F_SERDES },
   1325 
   1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1327 	  "I350 Gigabit Connection",
   1328 	  WM_T_I350,		WMP_F_COPPER },
   1329 
   1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1331 	  "I354 Gigabit Ethernet (KX)",
   1332 	  WM_T_I354,		WMP_F_SERDES },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1335 	  "I354 Gigabit Ethernet (SGMII)",
   1336 	  WM_T_I354,		WMP_F_COPPER },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1339 	  "I354 Gigabit Ethernet (2.5G)",
   1340 	  WM_T_I354,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1343 	  "I210-T1 Ethernet Server Adapter",
   1344 	  WM_T_I210,		WMP_F_COPPER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1347 	  "I210 Ethernet (Copper OEM)",
   1348 	  WM_T_I210,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1351 	  "I210 Ethernet (Copper IT)",
   1352 	  WM_T_I210,		WMP_F_COPPER },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1355 	  "I210 Ethernet (FLASH less)",
   1356 	  WM_T_I210,		WMP_F_COPPER },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1359 	  "I210 Gigabit Ethernet (Fiber)",
   1360 	  WM_T_I210,		WMP_F_FIBER },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1363 	  "I210 Gigabit Ethernet (SERDES)",
   1364 	  WM_T_I210,		WMP_F_SERDES },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1367 	  "I210 Gigabit Ethernet (FLASH less)",
   1368 	  WM_T_I210,		WMP_F_SERDES },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1371 	  "I210 Gigabit Ethernet (SGMII)",
   1372 	  WM_T_I210,		WMP_F_COPPER },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1375 	  "I211 Ethernet (COPPER)",
   1376 	  WM_T_I211,		WMP_F_COPPER },
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1378 	  "I217 V Ethernet Connection",
   1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1381 	  "I217 LM Ethernet Connection",
   1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1384 	  "I218 V Ethernet Connection",
   1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1387 	  "I218 V Ethernet Connection",
   1388 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1390 	  "I218 V Ethernet Connection",
   1391 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1393 	  "I218 LM Ethernet Connection",
   1394 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1396 	  "I218 LM Ethernet Connection",
   1397 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1399 	  "I218 LM Ethernet Connection",
   1400 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1401 #if 0
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1403 	  "I219 V Ethernet Connection",
   1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1406 	  "I219 V Ethernet Connection",
   1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1409 	  "I219 V Ethernet Connection",
   1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1412 	  "I219 V Ethernet Connection",
   1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1415 	  "I219 LM Ethernet Connection",
   1416 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1418 	  "I219 LM Ethernet Connection",
   1419 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1421 	  "I219 LM Ethernet Connection",
   1422 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1424 	  "I219 LM Ethernet Connection",
   1425 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1427 	  "I219 LM Ethernet Connection",
   1428 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1429 #endif
   1430 	{ 0,			0,
   1431 	  NULL,
   1432 	  0,			0 },
   1433 };
   1434 
   1435 /*
   1436  * Register read/write functions.
   1437  * Other than CSR_{READ|WRITE}().
   1438  */
   1439 
   1440 #if 0 /* Not currently used */
   1441 static inline uint32_t
   1442 wm_io_read(struct wm_softc *sc, int reg)
   1443 {
   1444 
   1445 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1446 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1447 }
   1448 #endif
   1449 
   1450 static inline void
   1451 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1452 {
   1453 
   1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1456 }
   1457 
   1458 static inline void
   1459 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1460     uint32_t data)
   1461 {
   1462 	uint32_t regval;
   1463 	int i;
   1464 
   1465 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1466 
   1467 	CSR_WRITE(sc, reg, regval);
   1468 
   1469 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1470 		delay(5);
   1471 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1472 			break;
   1473 	}
   1474 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1475 		aprint_error("%s: WARNING:"
   1476 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1477 		    device_xname(sc->sc_dev), reg);
   1478 	}
   1479 }
   1480 
   1481 static inline void
   1482 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1483 {
   1484 	wa->wa_low = htole32(v & 0xffffffffU);
   1485 	if (sizeof(bus_addr_t) == 8)
   1486 		wa->wa_high = htole32((uint64_t) v >> 32);
   1487 	else
   1488 		wa->wa_high = 0;
   1489 }
   1490 
   1491 /*
   1492  * Descriptor sync/init functions.
   1493  */
   1494 static inline void
   1495 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1496 {
   1497 	struct wm_softc *sc = txq->txq_sc;
   1498 
   1499 	/* If it will wrap around, sync to the end of the ring. */
   1500 	if ((start + num) > WM_NTXDESC(txq)) {
   1501 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1502 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1503 		    (WM_NTXDESC(txq) - start), ops);
   1504 		num -= (WM_NTXDESC(txq) - start);
   1505 		start = 0;
   1506 	}
   1507 
   1508 	/* Now sync whatever is left. */
   1509 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1510 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1511 }
   1512 
   1513 static inline void
   1514 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1515 {
   1516 	struct wm_softc *sc = rxq->rxq_sc;
   1517 
   1518 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1519 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1520 }
   1521 
   1522 static inline void
   1523 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1524 {
   1525 	struct wm_softc *sc = rxq->rxq_sc;
   1526 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1527 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1528 	struct mbuf *m = rxs->rxs_mbuf;
   1529 
   1530 	/*
   1531 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1532 	 * so that the payload after the Ethernet header is aligned
   1533 	 * to a 4-byte boundary.
   1534 
   1535 	 * XXX BRAINDAMAGE ALERT!
   1536 	 * The stupid chip uses the same size for every buffer, which
   1537 	 * is set in the Receive Control register.  We are using the 2K
   1538 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1539 	 * reason, we can't "scoot" packets longer than the standard
   1540 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1541 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1542 	 * the upper layer copy the headers.
   1543 	 */
   1544 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1545 
   1546 	wm_set_dma_addr(&rxd->wrx_addr,
   1547 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1548 	rxd->wrx_len = 0;
   1549 	rxd->wrx_cksum = 0;
   1550 	rxd->wrx_status = 0;
   1551 	rxd->wrx_errors = 0;
   1552 	rxd->wrx_special = 0;
   1553 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1554 
   1555 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1556 }
   1557 
   1558 /*
   1559  * Device driver interface functions and commonly used functions.
   1560  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1561  */
   1562 
   1563 /* Lookup supported device table */
   1564 static const struct wm_product *
   1565 wm_lookup(const struct pci_attach_args *pa)
   1566 {
   1567 	const struct wm_product *wmp;
   1568 
   1569 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1570 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1571 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1572 			return wmp;
   1573 	}
   1574 	return NULL;
   1575 }
   1576 
   1577 /* The match function (ca_match) */
   1578 static int
   1579 wm_match(device_t parent, cfdata_t cf, void *aux)
   1580 {
   1581 	struct pci_attach_args *pa = aux;
   1582 
   1583 	if (wm_lookup(pa) != NULL)
   1584 		return 1;
   1585 
   1586 	return 0;
   1587 }
   1588 
   1589 /* The attach function (ca_attach) */
   1590 static void
   1591 wm_attach(device_t parent, device_t self, void *aux)
   1592 {
   1593 	struct wm_softc *sc = device_private(self);
   1594 	struct pci_attach_args *pa = aux;
   1595 	prop_dictionary_t dict;
   1596 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1597 	pci_chipset_tag_t pc = pa->pa_pc;
   1598 	int counts[PCI_INTR_TYPE_SIZE];
   1599 	pci_intr_type_t max_type;
   1600 	const char *eetype, *xname;
   1601 	bus_space_tag_t memt;
   1602 	bus_space_handle_t memh;
   1603 	bus_size_t memsize;
   1604 	int memh_valid;
   1605 	int i, error;
   1606 	const struct wm_product *wmp;
   1607 	prop_data_t ea;
   1608 	prop_number_t pn;
   1609 	uint8_t enaddr[ETHER_ADDR_LEN];
   1610 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1611 	pcireg_t preg, memtype;
   1612 	uint16_t eeprom_data, apme_mask;
   1613 	bool force_clear_smbi;
   1614 	uint32_t link_mode;
   1615 	uint32_t reg;
   1616 	void (*deferred_start_func)(struct ifnet *) = NULL;
   1617 
   1618 	sc->sc_dev = self;
   1619 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1620 	sc->sc_core_stopping = false;
   1621 
   1622 	wmp = wm_lookup(pa);
   1623 #ifdef DIAGNOSTIC
   1624 	if (wmp == NULL) {
   1625 		printf("\n");
   1626 		panic("wm_attach: impossible");
   1627 	}
   1628 #endif
   1629 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1630 
   1631 	sc->sc_pc = pa->pa_pc;
   1632 	sc->sc_pcitag = pa->pa_tag;
   1633 
   1634 	if (pci_dma64_available(pa))
   1635 		sc->sc_dmat = pa->pa_dmat64;
   1636 	else
   1637 		sc->sc_dmat = pa->pa_dmat;
   1638 
   1639 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1640 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1641 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1642 
   1643 	sc->sc_type = wmp->wmp_type;
   1644 
   1645 	/* Set default function pointers */
   1646 	sc->phy.acquire = wm_get_null;
   1647 	sc->phy.release = wm_put_null;
   1648 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1649 
   1650 	if (sc->sc_type < WM_T_82543) {
   1651 		if (sc->sc_rev < 2) {
   1652 			aprint_error_dev(sc->sc_dev,
   1653 			    "i82542 must be at least rev. 2\n");
   1654 			return;
   1655 		}
   1656 		if (sc->sc_rev < 3)
   1657 			sc->sc_type = WM_T_82542_2_0;
   1658 	}
   1659 
   1660 	/*
   1661 	 * Disable MSI for Errata:
   1662 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1663 	 *
   1664 	 *  82544: Errata 25
   1665 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1666 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1667 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1668 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1669 	 *
   1670 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1671 	 *
   1672 	 *  82571 & 82572: Errata 63
   1673 	 */
   1674 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1675 	    || (sc->sc_type == WM_T_82572))
   1676 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1677 
   1678 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1679 	    || (sc->sc_type == WM_T_82580)
   1680 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1681 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1682 		sc->sc_flags |= WM_F_NEWQUEUE;
   1683 
   1684 	/* Set device properties (mactype) */
   1685 	dict = device_properties(sc->sc_dev);
   1686 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1687 
   1688 	/*
   1689 	 * Map the device.  All devices support memory-mapped acccess,
   1690 	 * and it is really required for normal operation.
   1691 	 */
   1692 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1693 	switch (memtype) {
   1694 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1695 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1696 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1697 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1698 		break;
   1699 	default:
   1700 		memh_valid = 0;
   1701 		break;
   1702 	}
   1703 
   1704 	if (memh_valid) {
   1705 		sc->sc_st = memt;
   1706 		sc->sc_sh = memh;
   1707 		sc->sc_ss = memsize;
   1708 	} else {
   1709 		aprint_error_dev(sc->sc_dev,
   1710 		    "unable to map device registers\n");
   1711 		return;
   1712 	}
   1713 
   1714 	/*
   1715 	 * In addition, i82544 and later support I/O mapped indirect
   1716 	 * register access.  It is not desirable (nor supported in
   1717 	 * this driver) to use it for normal operation, though it is
   1718 	 * required to work around bugs in some chip versions.
   1719 	 */
   1720 	if (sc->sc_type >= WM_T_82544) {
   1721 		/* First we have to find the I/O BAR. */
   1722 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1723 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1724 			if (memtype == PCI_MAPREG_TYPE_IO)
   1725 				break;
   1726 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1727 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1728 				i += 4;	/* skip high bits, too */
   1729 		}
   1730 		if (i < PCI_MAPREG_END) {
   1731 			/*
   1732 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1733 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1734 			 * It's no problem because newer chips has no this
   1735 			 * bug.
   1736 			 *
   1737 			 * The i8254x doesn't apparently respond when the
   1738 			 * I/O BAR is 0, which looks somewhat like it's not
   1739 			 * been configured.
   1740 			 */
   1741 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1742 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1743 				aprint_error_dev(sc->sc_dev,
   1744 				    "WARNING: I/O BAR at zero.\n");
   1745 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1746 					0, &sc->sc_iot, &sc->sc_ioh,
   1747 					NULL, &sc->sc_ios) == 0) {
   1748 				sc->sc_flags |= WM_F_IOH_VALID;
   1749 			} else {
   1750 				aprint_error_dev(sc->sc_dev,
   1751 				    "WARNING: unable to map I/O space\n");
   1752 			}
   1753 		}
   1754 
   1755 	}
   1756 
   1757 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1758 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1759 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1760 	if (sc->sc_type < WM_T_82542_2_1)
   1761 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1762 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1763 
   1764 	/* power up chip */
   1765 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1766 	    NULL)) && error != EOPNOTSUPP) {
   1767 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1768 		return;
   1769 	}
   1770 
   1771 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1772 
   1773 	/* Allocation settings */
   1774 	max_type = PCI_INTR_TYPE_MSIX;
   1775 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1776 	counts[PCI_INTR_TYPE_MSI] = 1;
   1777 	counts[PCI_INTR_TYPE_INTX] = 1;
   1778 
   1779 alloc_retry:
   1780 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1781 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1782 		return;
   1783 	}
   1784 
   1785 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1786 		error = wm_setup_msix(sc);
   1787 		if (error) {
   1788 			pci_intr_release(pc, sc->sc_intrs,
   1789 			    counts[PCI_INTR_TYPE_MSIX]);
   1790 
   1791 			/* Setup for MSI: Disable MSI-X */
   1792 			max_type = PCI_INTR_TYPE_MSI;
   1793 			counts[PCI_INTR_TYPE_MSI] = 1;
   1794 			counts[PCI_INTR_TYPE_INTX] = 1;
   1795 			goto alloc_retry;
   1796 		}
   1797 	} else 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1798 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1799 		error = wm_setup_legacy(sc);
   1800 		if (error) {
   1801 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1802 			    counts[PCI_INTR_TYPE_MSI]);
   1803 
   1804 			/* The next try is for INTx: Disable MSI */
   1805 			max_type = PCI_INTR_TYPE_INTX;
   1806 			counts[PCI_INTR_TYPE_INTX] = 1;
   1807 			goto alloc_retry;
   1808 		}
   1809 	} else {
   1810 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1811 		error = wm_setup_legacy(sc);
   1812 		if (error) {
   1813 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1814 			    counts[PCI_INTR_TYPE_INTX]);
   1815 			return;
   1816 		}
   1817 	}
   1818 
   1819 	/*
   1820 	 * Check the function ID (unit number of the chip).
   1821 	 */
   1822 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1823 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1824 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1825 	    || (sc->sc_type == WM_T_82580)
   1826 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1827 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1828 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1829 	else
   1830 		sc->sc_funcid = 0;
   1831 
   1832 	/*
   1833 	 * Determine a few things about the bus we're connected to.
   1834 	 */
   1835 	if (sc->sc_type < WM_T_82543) {
   1836 		/* We don't really know the bus characteristics here. */
   1837 		sc->sc_bus_speed = 33;
   1838 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1839 		/*
   1840 		 * CSA (Communication Streaming Architecture) is about as fast
   1841 		 * a 32-bit 66MHz PCI Bus.
   1842 		 */
   1843 		sc->sc_flags |= WM_F_CSA;
   1844 		sc->sc_bus_speed = 66;
   1845 		aprint_verbose_dev(sc->sc_dev,
   1846 		    "Communication Streaming Architecture\n");
   1847 		if (sc->sc_type == WM_T_82547) {
   1848 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1849 			callout_setfunc(&sc->sc_txfifo_ch,
   1850 					wm_82547_txfifo_stall, sc);
   1851 			aprint_verbose_dev(sc->sc_dev,
   1852 			    "using 82547 Tx FIFO stall work-around\n");
   1853 		}
   1854 	} else if (sc->sc_type >= WM_T_82571) {
   1855 		sc->sc_flags |= WM_F_PCIE;
   1856 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1857 		    && (sc->sc_type != WM_T_ICH10)
   1858 		    && (sc->sc_type != WM_T_PCH)
   1859 		    && (sc->sc_type != WM_T_PCH2)
   1860 		    && (sc->sc_type != WM_T_PCH_LPT)
   1861 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1862 			/* ICH* and PCH* have no PCIe capability registers */
   1863 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1864 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1865 				NULL) == 0)
   1866 				aprint_error_dev(sc->sc_dev,
   1867 				    "unable to find PCIe capability\n");
   1868 		}
   1869 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1870 	} else {
   1871 		reg = CSR_READ(sc, WMREG_STATUS);
   1872 		if (reg & STATUS_BUS64)
   1873 			sc->sc_flags |= WM_F_BUS64;
   1874 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1875 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1876 
   1877 			sc->sc_flags |= WM_F_PCIX;
   1878 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1879 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1880 				aprint_error_dev(sc->sc_dev,
   1881 				    "unable to find PCIX capability\n");
   1882 			else if (sc->sc_type != WM_T_82545_3 &&
   1883 				 sc->sc_type != WM_T_82546_3) {
   1884 				/*
   1885 				 * Work around a problem caused by the BIOS
   1886 				 * setting the max memory read byte count
   1887 				 * incorrectly.
   1888 				 */
   1889 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1890 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1891 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1892 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1893 
   1894 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1895 				    PCIX_CMD_BYTECNT_SHIFT;
   1896 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1897 				    PCIX_STATUS_MAXB_SHIFT;
   1898 				if (bytecnt > maxb) {
   1899 					aprint_verbose_dev(sc->sc_dev,
   1900 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1901 					    512 << bytecnt, 512 << maxb);
   1902 					pcix_cmd = (pcix_cmd &
   1903 					    ~PCIX_CMD_BYTECNT_MASK) |
   1904 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1905 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1906 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1907 					    pcix_cmd);
   1908 				}
   1909 			}
   1910 		}
   1911 		/*
   1912 		 * The quad port adapter is special; it has a PCIX-PCIX
   1913 		 * bridge on the board, and can run the secondary bus at
   1914 		 * a higher speed.
   1915 		 */
   1916 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1917 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1918 								      : 66;
   1919 		} else if (sc->sc_flags & WM_F_PCIX) {
   1920 			switch (reg & STATUS_PCIXSPD_MASK) {
   1921 			case STATUS_PCIXSPD_50_66:
   1922 				sc->sc_bus_speed = 66;
   1923 				break;
   1924 			case STATUS_PCIXSPD_66_100:
   1925 				sc->sc_bus_speed = 100;
   1926 				break;
   1927 			case STATUS_PCIXSPD_100_133:
   1928 				sc->sc_bus_speed = 133;
   1929 				break;
   1930 			default:
   1931 				aprint_error_dev(sc->sc_dev,
   1932 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1933 				    reg & STATUS_PCIXSPD_MASK);
   1934 				sc->sc_bus_speed = 66;
   1935 				break;
   1936 			}
   1937 		} else
   1938 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1939 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1940 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1941 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1942 	}
   1943 
   1944 	/* clear interesting stat counters */
   1945 	CSR_READ(sc, WMREG_COLC);
   1946 	CSR_READ(sc, WMREG_RXERRC);
   1947 
   1948 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1949 	    || (sc->sc_type >= WM_T_ICH8))
   1950 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1951 	if (sc->sc_type >= WM_T_ICH8)
   1952 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1953 
   1954 	/* Set PHY, NVM mutex related stuff */
   1955 	switch (sc->sc_type) {
   1956 	case WM_T_82542_2_0:
   1957 	case WM_T_82542_2_1:
   1958 	case WM_T_82543:
   1959 	case WM_T_82544:
   1960 		/* Microwire */
   1961 		sc->sc_nvm_wordsize = 64;
   1962 		sc->sc_nvm_addrbits = 6;
   1963 		break;
   1964 	case WM_T_82540:
   1965 	case WM_T_82545:
   1966 	case WM_T_82545_3:
   1967 	case WM_T_82546:
   1968 	case WM_T_82546_3:
   1969 		/* Microwire */
   1970 		reg = CSR_READ(sc, WMREG_EECD);
   1971 		if (reg & EECD_EE_SIZE) {
   1972 			sc->sc_nvm_wordsize = 256;
   1973 			sc->sc_nvm_addrbits = 8;
   1974 		} else {
   1975 			sc->sc_nvm_wordsize = 64;
   1976 			sc->sc_nvm_addrbits = 6;
   1977 		}
   1978 		sc->sc_flags |= WM_F_LOCK_EECD;
   1979 		break;
   1980 	case WM_T_82541:
   1981 	case WM_T_82541_2:
   1982 	case WM_T_82547:
   1983 	case WM_T_82547_2:
   1984 		sc->sc_flags |= WM_F_LOCK_EECD;
   1985 		reg = CSR_READ(sc, WMREG_EECD);
   1986 		if (reg & EECD_EE_TYPE) {
   1987 			/* SPI */
   1988 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1989 			wm_nvm_set_addrbits_size_eecd(sc);
   1990 		} else {
   1991 			/* Microwire */
   1992 			if ((reg & EECD_EE_ABITS) != 0) {
   1993 				sc->sc_nvm_wordsize = 256;
   1994 				sc->sc_nvm_addrbits = 8;
   1995 			} else {
   1996 				sc->sc_nvm_wordsize = 64;
   1997 				sc->sc_nvm_addrbits = 6;
   1998 			}
   1999 		}
   2000 		break;
   2001 	case WM_T_82571:
   2002 	case WM_T_82572:
   2003 		/* SPI */
   2004 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2005 		wm_nvm_set_addrbits_size_eecd(sc);
   2006 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2007 		sc->phy.acquire = wm_get_swsm_semaphore;
   2008 		sc->phy.release = wm_put_swsm_semaphore;
   2009 		break;
   2010 	case WM_T_82573:
   2011 	case WM_T_82574:
   2012 	case WM_T_82583:
   2013 		if (sc->sc_type == WM_T_82573) {
   2014 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2015 			sc->phy.acquire = wm_get_swsm_semaphore;
   2016 			sc->phy.release = wm_put_swsm_semaphore;
   2017 		} else {
   2018 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2019 			/* Both PHY and NVM use the same semaphore. */
   2020 			sc->phy.acquire
   2021 			    = wm_get_swfwhw_semaphore;
   2022 			sc->phy.release
   2023 			    = wm_put_swfwhw_semaphore;
   2024 		}
   2025 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2026 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2027 			sc->sc_nvm_wordsize = 2048;
   2028 		} else {
   2029 			/* SPI */
   2030 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2031 			wm_nvm_set_addrbits_size_eecd(sc);
   2032 		}
   2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2034 		break;
   2035 	case WM_T_82575:
   2036 	case WM_T_82576:
   2037 	case WM_T_82580:
   2038 	case WM_T_I350:
   2039 	case WM_T_I354:
   2040 	case WM_T_80003:
   2041 		/* SPI */
   2042 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2043 		wm_nvm_set_addrbits_size_eecd(sc);
   2044 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2045 		    | WM_F_LOCK_SWSM;
   2046 		sc->phy.acquire = wm_get_phy_82575;
   2047 		sc->phy.release = wm_put_phy_82575;
   2048 		break;
   2049 	case WM_T_ICH8:
   2050 	case WM_T_ICH9:
   2051 	case WM_T_ICH10:
   2052 	case WM_T_PCH:
   2053 	case WM_T_PCH2:
   2054 	case WM_T_PCH_LPT:
   2055 		/* FLASH */
   2056 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2057 		sc->sc_nvm_wordsize = 2048;
   2058 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2059 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2060 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2061 			aprint_error_dev(sc->sc_dev,
   2062 			    "can't map FLASH registers\n");
   2063 			goto out;
   2064 		}
   2065 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2066 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2067 		    ICH_FLASH_SECTOR_SIZE;
   2068 		sc->sc_ich8_flash_bank_size =
   2069 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2070 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2071 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2072 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2073 		sc->sc_flashreg_offset = 0;
   2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2075 		sc->phy.release = wm_put_swflag_ich8lan;
   2076 		break;
   2077 	case WM_T_PCH_SPT:
   2078 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2079 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2080 		sc->sc_flasht = sc->sc_st;
   2081 		sc->sc_flashh = sc->sc_sh;
   2082 		sc->sc_ich8_flash_base = 0;
   2083 		sc->sc_nvm_wordsize =
   2084 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2085 			* NVM_SIZE_MULTIPLIER;
   2086 		/* It is size in bytes, we want words */
   2087 		sc->sc_nvm_wordsize /= 2;
   2088 		/* assume 2 banks */
   2089 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2090 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2091 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2092 		sc->phy.release = wm_put_swflag_ich8lan;
   2093 		break;
   2094 	case WM_T_I210:
   2095 	case WM_T_I211:
   2096 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2097 			wm_nvm_set_addrbits_size_eecd(sc);
   2098 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2099 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2100 		} else {
   2101 			sc->sc_nvm_wordsize = INVM_SIZE;
   2102 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2103 		}
   2104 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2105 		sc->phy.acquire = wm_get_phy_82575;
   2106 		sc->phy.release = wm_put_phy_82575;
   2107 		break;
   2108 	default:
   2109 		break;
   2110 	}
   2111 
   2112 	/* Reset the chip to a known state. */
   2113 	wm_reset(sc);
   2114 
   2115 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2116 	switch (sc->sc_type) {
   2117 	case WM_T_82571:
   2118 	case WM_T_82572:
   2119 		reg = CSR_READ(sc, WMREG_SWSM2);
   2120 		if ((reg & SWSM2_LOCK) == 0) {
   2121 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2122 			force_clear_smbi = true;
   2123 		} else
   2124 			force_clear_smbi = false;
   2125 		break;
   2126 	case WM_T_82573:
   2127 	case WM_T_82574:
   2128 	case WM_T_82583:
   2129 		force_clear_smbi = true;
   2130 		break;
   2131 	default:
   2132 		force_clear_smbi = false;
   2133 		break;
   2134 	}
   2135 	if (force_clear_smbi) {
   2136 		reg = CSR_READ(sc, WMREG_SWSM);
   2137 		if ((reg & SWSM_SMBI) != 0)
   2138 			aprint_error_dev(sc->sc_dev,
   2139 			    "Please update the Bootagent\n");
   2140 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2141 	}
   2142 
   2143 	/*
   2144 	 * Defer printing the EEPROM type until after verifying the checksum
   2145 	 * This allows the EEPROM type to be printed correctly in the case
   2146 	 * that no EEPROM is attached.
   2147 	 */
   2148 	/*
   2149 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2150 	 * this for later, so we can fail future reads from the EEPROM.
   2151 	 */
   2152 	if (wm_nvm_validate_checksum(sc)) {
   2153 		/*
   2154 		 * Read twice again because some PCI-e parts fail the
   2155 		 * first check due to the link being in sleep state.
   2156 		 */
   2157 		if (wm_nvm_validate_checksum(sc))
   2158 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2159 	}
   2160 
   2161 	/* Set device properties (macflags) */
   2162 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2163 
   2164 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2165 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2166 	else {
   2167 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2168 		    sc->sc_nvm_wordsize);
   2169 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2170 			aprint_verbose("iNVM");
   2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2172 			aprint_verbose("FLASH(HW)");
   2173 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2174 			aprint_verbose("FLASH");
   2175 		else {
   2176 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2177 				eetype = "SPI";
   2178 			else
   2179 				eetype = "MicroWire";
   2180 			aprint_verbose("(%d address bits) %s EEPROM",
   2181 			    sc->sc_nvm_addrbits, eetype);
   2182 		}
   2183 	}
   2184 	wm_nvm_version(sc);
   2185 	aprint_verbose("\n");
   2186 
   2187 	/* Check for I21[01] PLL workaround */
   2188 	if (sc->sc_type == WM_T_I210)
   2189 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2190 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2191 		/* NVM image release 3.25 has a workaround */
   2192 		if ((sc->sc_nvm_ver_major < 3)
   2193 		    || ((sc->sc_nvm_ver_major == 3)
   2194 			&& (sc->sc_nvm_ver_minor < 25))) {
   2195 			aprint_verbose_dev(sc->sc_dev,
   2196 			    "ROM image version %d.%d is older than 3.25\n",
   2197 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2198 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2199 		}
   2200 	}
   2201 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2202 		wm_pll_workaround_i210(sc);
   2203 
   2204 	wm_get_wakeup(sc);
   2205 
   2206 	/* Non-AMT based hardware can now take control from firmware */
   2207 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2208 		wm_get_hw_control(sc);
   2209 
   2210 	/*
   2211 	 * Read the Ethernet address from the EEPROM, if not first found
   2212 	 * in device properties.
   2213 	 */
   2214 	ea = prop_dictionary_get(dict, "mac-address");
   2215 	if (ea != NULL) {
   2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2219 	} else {
   2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2221 			aprint_error_dev(sc->sc_dev,
   2222 			    "unable to read Ethernet address\n");
   2223 			goto out;
   2224 		}
   2225 	}
   2226 
   2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2228 	    ether_sprintf(enaddr));
   2229 
   2230 	/*
   2231 	 * Read the config info from the EEPROM, and set up various
   2232 	 * bits in the control registers based on their contents.
   2233 	 */
   2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2235 	if (pn != NULL) {
   2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2238 	} else {
   2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2241 			goto out;
   2242 		}
   2243 	}
   2244 
   2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2246 	if (pn != NULL) {
   2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2249 	} else {
   2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2252 			goto out;
   2253 		}
   2254 	}
   2255 
   2256 	/* check for WM_F_WOL */
   2257 	switch (sc->sc_type) {
   2258 	case WM_T_82542_2_0:
   2259 	case WM_T_82542_2_1:
   2260 	case WM_T_82543:
   2261 		/* dummy? */
   2262 		eeprom_data = 0;
   2263 		apme_mask = NVM_CFG3_APME;
   2264 		break;
   2265 	case WM_T_82544:
   2266 		apme_mask = NVM_CFG2_82544_APM_EN;
   2267 		eeprom_data = cfg2;
   2268 		break;
   2269 	case WM_T_82546:
   2270 	case WM_T_82546_3:
   2271 	case WM_T_82571:
   2272 	case WM_T_82572:
   2273 	case WM_T_82573:
   2274 	case WM_T_82574:
   2275 	case WM_T_82583:
   2276 	case WM_T_80003:
   2277 	default:
   2278 		apme_mask = NVM_CFG3_APME;
   2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2281 		break;
   2282 	case WM_T_82575:
   2283 	case WM_T_82576:
   2284 	case WM_T_82580:
   2285 	case WM_T_I350:
   2286 	case WM_T_I354: /* XXX ok? */
   2287 	case WM_T_ICH8:
   2288 	case WM_T_ICH9:
   2289 	case WM_T_ICH10:
   2290 	case WM_T_PCH:
   2291 	case WM_T_PCH2:
   2292 	case WM_T_PCH_LPT:
   2293 	case WM_T_PCH_SPT:
   2294 		/* XXX The funcid should be checked on some devices */
   2295 		apme_mask = WUC_APME;
   2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2297 		break;
   2298 	}
   2299 
   2300 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2301 	if ((eeprom_data & apme_mask) != 0)
   2302 		sc->sc_flags |= WM_F_WOL;
   2303 #ifdef WM_DEBUG
   2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2305 		printf("WOL\n");
   2306 #endif
   2307 
   2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2309 		/* Check NVM for autonegotiation */
   2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2313 		}
   2314 	}
   2315 
   2316 	/*
   2317 	 * XXX need special handling for some multiple port cards
   2318 	 * to disable a paticular port.
   2319 	 */
   2320 
   2321 	if (sc->sc_type >= WM_T_82544) {
   2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2323 		if (pn != NULL) {
   2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2326 		} else {
   2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2328 				aprint_error_dev(sc->sc_dev,
   2329 				    "unable to read SWDPIN\n");
   2330 				goto out;
   2331 			}
   2332 		}
   2333 	}
   2334 
   2335 	if (cfg1 & NVM_CFG1_ILOS)
   2336 		sc->sc_ctrl |= CTRL_ILOS;
   2337 
   2338 	/*
   2339 	 * XXX
   2340 	 * This code isn't correct because pin 2 and 3 are located
   2341 	 * in different position on newer chips. Check all datasheet.
   2342 	 *
   2343 	 * Until resolve this problem, check if a chip < 82580
   2344 	 */
   2345 	if (sc->sc_type <= WM_T_82580) {
   2346 		if (sc->sc_type >= WM_T_82544) {
   2347 			sc->sc_ctrl |=
   2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2349 			    CTRL_SWDPIO_SHIFT;
   2350 			sc->sc_ctrl |=
   2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2352 			    CTRL_SWDPINS_SHIFT;
   2353 		} else {
   2354 			sc->sc_ctrl |=
   2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2356 			    CTRL_SWDPIO_SHIFT;
   2357 		}
   2358 	}
   2359 
   2360 	/* XXX For other than 82580? */
   2361 	if (sc->sc_type == WM_T_82580) {
   2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2363 		if (nvmword & __BIT(13))
   2364 			sc->sc_ctrl |= CTRL_ILOS;
   2365 	}
   2366 
   2367 #if 0
   2368 	if (sc->sc_type >= WM_T_82544) {
   2369 		if (cfg1 & NVM_CFG1_IPS0)
   2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2371 		if (cfg1 & NVM_CFG1_IPS1)
   2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2373 		sc->sc_ctrl_ext |=
   2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2375 		    CTRL_EXT_SWDPIO_SHIFT;
   2376 		sc->sc_ctrl_ext |=
   2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2378 		    CTRL_EXT_SWDPINS_SHIFT;
   2379 	} else {
   2380 		sc->sc_ctrl_ext |=
   2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2382 		    CTRL_EXT_SWDPIO_SHIFT;
   2383 	}
   2384 #endif
   2385 
   2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2387 #if 0
   2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2389 #endif
   2390 
   2391 	if (sc->sc_type == WM_T_PCH) {
   2392 		uint16_t val;
   2393 
   2394 		/* Save the NVM K1 bit setting */
   2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2396 
   2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2398 			sc->sc_nvm_k1_enabled = 1;
   2399 		else
   2400 			sc->sc_nvm_k1_enabled = 0;
   2401 	}
   2402 
   2403 	/*
   2404 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
   2405 	 * media structures accordingly.
   2406 	 */
   2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2414 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2415 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2416 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2417 	    || (sc->sc_type ==WM_T_I211)) {
   2418 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2419 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2420 		switch (link_mode) {
   2421 		case CTRL_EXT_LINK_MODE_1000KX:
   2422 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2423 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2424 			break;
   2425 		case CTRL_EXT_LINK_MODE_SGMII:
   2426 			if (wm_sgmii_uses_mdio(sc)) {
   2427 				aprint_verbose_dev(sc->sc_dev,
   2428 				    "SGMII(MDIO)\n");
   2429 				sc->sc_flags |= WM_F_SGMII;
   2430 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2431 				break;
   2432 			}
   2433 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2434 			/*FALLTHROUGH*/
   2435 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2436 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2437 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2438 				if (link_mode
   2439 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2440 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2441 					sc->sc_flags |= WM_F_SGMII;
   2442 				} else {
   2443 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2444 					aprint_verbose_dev(sc->sc_dev,
   2445 					    "SERDES\n");
   2446 				}
   2447 				break;
   2448 			}
   2449 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2450 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2451 
   2452 			/* Change current link mode setting */
   2453 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2454 			switch (sc->sc_mediatype) {
   2455 			case WM_MEDIATYPE_COPPER:
   2456 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2457 				break;
   2458 			case WM_MEDIATYPE_SERDES:
   2459 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2460 				break;
   2461 			default:
   2462 				break;
   2463 			}
   2464 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2465 			break;
   2466 		case CTRL_EXT_LINK_MODE_GMII:
   2467 		default:
   2468 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2469 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2470 			break;
   2471 		}
   2472 
   2473 		reg &= ~CTRL_EXT_I2C_ENA;
   2474 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2475 			reg |= CTRL_EXT_I2C_ENA;
   2476 		else
   2477 			reg &= ~CTRL_EXT_I2C_ENA;
   2478 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2479 
   2480 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2481 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2482 		else
   2483 			wm_tbi_mediainit(sc);
   2484 	} else if (sc->sc_type < WM_T_82543 ||
   2485 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2486 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2487 			aprint_error_dev(sc->sc_dev,
   2488 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2489 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2490 		}
   2491 		wm_tbi_mediainit(sc);
   2492 	} else {
   2493 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2494 			aprint_error_dev(sc->sc_dev,
   2495 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2496 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2497 		}
   2498 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2499 	}
   2500 
   2501 	ifp = &sc->sc_ethercom.ec_if;
   2502 	xname = device_xname(sc->sc_dev);
   2503 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2504 	ifp->if_softc = sc;
   2505 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2506 	ifp->if_extflags = IFEF_START_MPSAFE;
   2507 	ifp->if_ioctl = wm_ioctl;
   2508 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2509 		ifp->if_start = wm_nq_start;
   2510 		if (sc->sc_nqueues > 1) {
   2511 			ifp->if_transmit = wm_nq_transmit;
   2512 			deferred_start_func = wm_deferred_start;
   2513 		}
   2514 	} else {
   2515 		ifp->if_start = wm_start;
   2516 		if (sc->sc_nqueues > 1) {
   2517 			ifp->if_transmit = wm_transmit;
   2518 			deferred_start_func = wm_deferred_start;
   2519 		}
   2520 	}
   2521 	ifp->if_watchdog = wm_watchdog;
   2522 	ifp->if_init = wm_init;
   2523 	ifp->if_stop = wm_stop;
   2524 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2525 	IFQ_SET_READY(&ifp->if_snd);
   2526 
   2527 	/* Check for jumbo frame */
   2528 	switch (sc->sc_type) {
   2529 	case WM_T_82573:
   2530 		/* XXX limited to 9234 if ASPM is disabled */
   2531 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2532 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2533 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2534 		break;
   2535 	case WM_T_82571:
   2536 	case WM_T_82572:
   2537 	case WM_T_82574:
   2538 	case WM_T_82575:
   2539 	case WM_T_82576:
   2540 	case WM_T_82580:
   2541 	case WM_T_I350:
   2542 	case WM_T_I354: /* XXXX ok? */
   2543 	case WM_T_I210:
   2544 	case WM_T_I211:
   2545 	case WM_T_80003:
   2546 	case WM_T_ICH9:
   2547 	case WM_T_ICH10:
   2548 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2549 	case WM_T_PCH_LPT:
   2550 	case WM_T_PCH_SPT:
   2551 		/* XXX limited to 9234 */
   2552 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2553 		break;
   2554 	case WM_T_PCH:
   2555 		/* XXX limited to 4096 */
   2556 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2557 		break;
   2558 	case WM_T_82542_2_0:
   2559 	case WM_T_82542_2_1:
   2560 	case WM_T_82583:
   2561 	case WM_T_ICH8:
   2562 		/* No support for jumbo frame */
   2563 		break;
   2564 	default:
   2565 		/* ETHER_MAX_LEN_JUMBO */
   2566 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2567 		break;
   2568 	}
   2569 
   2570 	/* If we're a i82543 or greater, we can support VLANs. */
   2571 	if (sc->sc_type >= WM_T_82543)
   2572 		sc->sc_ethercom.ec_capabilities |=
   2573 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2574 
   2575 	/*
   2576 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2577 	 * on i82543 and later.
   2578 	 */
   2579 	if (sc->sc_type >= WM_T_82543) {
   2580 		ifp->if_capabilities |=
   2581 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2582 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2583 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2584 		    IFCAP_CSUM_TCPv6_Tx |
   2585 		    IFCAP_CSUM_UDPv6_Tx;
   2586 	}
   2587 
   2588 	/*
   2589 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2590 	 *
   2591 	 *	82541GI (8086:1076) ... no
   2592 	 *	82572EI (8086:10b9) ... yes
   2593 	 */
   2594 	if (sc->sc_type >= WM_T_82571) {
   2595 		ifp->if_capabilities |=
   2596 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2597 	}
   2598 
   2599 	/*
   2600 	 * If we're a i82544 or greater (except i82547), we can do
   2601 	 * TCP segmentation offload.
   2602 	 */
   2603 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2604 		ifp->if_capabilities |= IFCAP_TSOv4;
   2605 	}
   2606 
   2607 	if (sc->sc_type >= WM_T_82571) {
   2608 		ifp->if_capabilities |= IFCAP_TSOv6;
   2609 	}
   2610 
   2611 #ifdef WM_MPSAFE
   2612 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2613 #else
   2614 	sc->sc_core_lock = NULL;
   2615 #endif
   2616 
   2617 	/* Attach the interface. */
   2618 	if_initialize(ifp);
   2619 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2620 	if_deferred_start_init(ifp, deferred_start_func);
   2621 	ether_ifattach(ifp, enaddr);
   2622 	if_register(ifp);
   2623 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2624 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2625 			  RND_FLAG_DEFAULT);
   2626 
   2627 #ifdef WM_EVENT_COUNTERS
   2628 	/* Attach event counters. */
   2629 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2630 	    NULL, xname, "linkintr");
   2631 
   2632 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2633 	    NULL, xname, "tx_xoff");
   2634 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2635 	    NULL, xname, "tx_xon");
   2636 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2637 	    NULL, xname, "rx_xoff");
   2638 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2639 	    NULL, xname, "rx_xon");
   2640 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2641 	    NULL, xname, "rx_macctl");
   2642 #endif /* WM_EVENT_COUNTERS */
   2643 
   2644 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2645 		pmf_class_network_register(self, ifp);
   2646 	else
   2647 		aprint_error_dev(self, "couldn't establish power handler\n");
   2648 
   2649 	sc->sc_flags |= WM_F_ATTACHED;
   2650  out:
   2651 	return;
   2652 }
   2653 
   2654 /* The detach function (ca_detach) */
   2655 static int
   2656 wm_detach(device_t self, int flags __unused)
   2657 {
   2658 	struct wm_softc *sc = device_private(self);
   2659 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2660 	int i;
   2661 
   2662 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2663 		return 0;
   2664 
   2665 	/* Stop the interface. Callouts are stopped in it. */
   2666 	wm_stop(ifp, 1);
   2667 
   2668 	pmf_device_deregister(self);
   2669 
   2670 	/* Tell the firmware about the release */
   2671 	WM_CORE_LOCK(sc);
   2672 	wm_release_manageability(sc);
   2673 	wm_release_hw_control(sc);
   2674 	wm_enable_wakeup(sc);
   2675 	WM_CORE_UNLOCK(sc);
   2676 
   2677 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2678 
   2679 	/* Delete all remaining media. */
   2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2681 
   2682 	ether_ifdetach(ifp);
   2683 	if_detach(ifp);
   2684 	if_percpuq_destroy(sc->sc_ipq);
   2685 
   2686 	/* Unload RX dmamaps and free mbufs */
   2687 	for (i = 0; i < sc->sc_nqueues; i++) {
   2688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2689 		mutex_enter(rxq->rxq_lock);
   2690 		wm_rxdrain(rxq);
   2691 		mutex_exit(rxq->rxq_lock);
   2692 	}
   2693 	/* Must unlock here */
   2694 
   2695 	/* Disestablish the interrupt handler */
   2696 	for (i = 0; i < sc->sc_nintrs; i++) {
   2697 		if (sc->sc_ihs[i] != NULL) {
   2698 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2699 			sc->sc_ihs[i] = NULL;
   2700 		}
   2701 	}
   2702 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2703 
   2704 	wm_free_txrx_queues(sc);
   2705 
   2706 	/* Unmap the registers */
   2707 	if (sc->sc_ss) {
   2708 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2709 		sc->sc_ss = 0;
   2710 	}
   2711 	if (sc->sc_ios) {
   2712 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2713 		sc->sc_ios = 0;
   2714 	}
   2715 	if (sc->sc_flashs) {
   2716 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2717 		sc->sc_flashs = 0;
   2718 	}
   2719 
   2720 	if (sc->sc_core_lock)
   2721 		mutex_obj_free(sc->sc_core_lock);
   2722 	if (sc->sc_ich_phymtx)
   2723 		mutex_obj_free(sc->sc_ich_phymtx);
   2724 	if (sc->sc_ich_nvmmtx)
   2725 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2726 
   2727 	return 0;
   2728 }
   2729 
   2730 static bool
   2731 wm_suspend(device_t self, const pmf_qual_t *qual)
   2732 {
   2733 	struct wm_softc *sc = device_private(self);
   2734 
   2735 	wm_release_manageability(sc);
   2736 	wm_release_hw_control(sc);
   2737 	wm_enable_wakeup(sc);
   2738 
   2739 	return true;
   2740 }
   2741 
   2742 static bool
   2743 wm_resume(device_t self, const pmf_qual_t *qual)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 
   2747 	wm_init_manageability(sc);
   2748 
   2749 	return true;
   2750 }
   2751 
   2752 /*
   2753  * wm_watchdog:		[ifnet interface function]
   2754  *
   2755  *	Watchdog timer handler.
   2756  */
   2757 static void
   2758 wm_watchdog(struct ifnet *ifp)
   2759 {
   2760 	int qid;
   2761 	struct wm_softc *sc = ifp->if_softc;
   2762 
   2763 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2764 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2765 
   2766 		wm_watchdog_txq(ifp, txq);
   2767 	}
   2768 
   2769 	/* Reset the interface. */
   2770 	(void) wm_init(ifp);
   2771 
   2772 	/*
   2773 	 * There are still some upper layer processing which call
   2774 	 * ifp->if_start(). e.g. ALTQ
   2775 	 */
   2776 	/* Try to get more packets going. */
   2777 	ifp->if_start(ifp);
   2778 }
   2779 
   2780 static void
   2781 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2782 {
   2783 	struct wm_softc *sc = ifp->if_softc;
   2784 
   2785 	/*
   2786 	 * Since we're using delayed interrupts, sweep up
   2787 	 * before we report an error.
   2788 	 */
   2789 	mutex_enter(txq->txq_lock);
   2790 	wm_txeof(sc, txq);
   2791 	mutex_exit(txq->txq_lock);
   2792 
   2793 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2794 #ifdef WM_DEBUG
   2795 		int i, j;
   2796 		struct wm_txsoft *txs;
   2797 #endif
   2798 		log(LOG_ERR,
   2799 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2800 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2801 		    txq->txq_next);
   2802 		ifp->if_oerrors++;
   2803 #ifdef WM_DEBUG
   2804 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2805 		    i = WM_NEXTTXS(txq, i)) {
   2806 		    txs = &txq->txq_soft[i];
   2807 		    printf("txs %d tx %d -> %d\n",
   2808 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2809 		    for (j = txs->txs_firstdesc; ;
   2810 			j = WM_NEXTTX(txq, j)) {
   2811 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2812 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2813 			printf("\t %#08x%08x\n",
   2814 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2815 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2816 			if (j == txs->txs_lastdesc)
   2817 				break;
   2818 			}
   2819 		}
   2820 #endif
   2821 	}
   2822 }
   2823 
   2824 /*
   2825  * wm_tick:
   2826  *
   2827  *	One second timer, used to check link status, sweep up
   2828  *	completed transmit jobs, etc.
   2829  */
   2830 static void
   2831 wm_tick(void *arg)
   2832 {
   2833 	struct wm_softc *sc = arg;
   2834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2835 #ifndef WM_MPSAFE
   2836 	int s = splnet();
   2837 #endif
   2838 
   2839 	WM_CORE_LOCK(sc);
   2840 
   2841 	if (sc->sc_core_stopping)
   2842 		goto out;
   2843 
   2844 	if (sc->sc_type >= WM_T_82542_2_1) {
   2845 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2846 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2850 	}
   2851 
   2852 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2853 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
   2854 	    + CSR_READ(sc, WMREG_CRCERRS)
   2855 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2856 	    + CSR_READ(sc, WMREG_SYMERRC)
   2857 	    + CSR_READ(sc, WMREG_RXERRC)
   2858 	    + CSR_READ(sc, WMREG_SEC)
   2859 	    + CSR_READ(sc, WMREG_CEXTERR)
   2860 	    + CSR_READ(sc, WMREG_RLEC);
   2861 	/*
   2862 	 * WMREG_RNBC is incremented when there is no available buffers in host
   2863 	 * memory. It does not mean the number of dropped packet. Because
   2864 	 * ethernet controller can receive packets in such case if there is
   2865 	 * space in phy's FIFO.
   2866 	 *
   2867 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   2868 	 * own EVCNT instead of if_iqdrops.
   2869 	 */
   2870 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2871 
   2872 	if (sc->sc_flags & WM_F_HAS_MII)
   2873 		mii_tick(&sc->sc_mii);
   2874 	else if ((sc->sc_type >= WM_T_82575)
   2875 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2876 		wm_serdes_tick(sc);
   2877 	else
   2878 		wm_tbi_tick(sc);
   2879 
   2880 out:
   2881 	WM_CORE_UNLOCK(sc);
   2882 #ifndef WM_MPSAFE
   2883 	splx(s);
   2884 #endif
   2885 
   2886 	if (!sc->sc_core_stopping)
   2887 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2888 }
   2889 
   2890 static int
   2891 wm_ifflags_cb(struct ethercom *ec)
   2892 {
   2893 	struct ifnet *ifp = &ec->ec_if;
   2894 	struct wm_softc *sc = ifp->if_softc;
   2895 	int rc = 0;
   2896 
   2897 	WM_CORE_LOCK(sc);
   2898 
   2899 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2900 	sc->sc_if_flags = ifp->if_flags;
   2901 
   2902 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2903 		rc = ENETRESET;
   2904 		goto out;
   2905 	}
   2906 
   2907 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2908 		wm_set_filter(sc);
   2909 
   2910 	wm_set_vlan(sc);
   2911 
   2912 out:
   2913 	WM_CORE_UNLOCK(sc);
   2914 
   2915 	return rc;
   2916 }
   2917 
   2918 /*
   2919  * wm_ioctl:		[ifnet interface function]
   2920  *
   2921  *	Handle control requests from the operator.
   2922  */
   2923 static int
   2924 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2925 {
   2926 	struct wm_softc *sc = ifp->if_softc;
   2927 	struct ifreq *ifr = (struct ifreq *) data;
   2928 	struct ifaddr *ifa = (struct ifaddr *)data;
   2929 	struct sockaddr_dl *sdl;
   2930 	int s, error;
   2931 
   2932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2933 		device_xname(sc->sc_dev), __func__));
   2934 
   2935 #ifndef WM_MPSAFE
   2936 	s = splnet();
   2937 #endif
   2938 	switch (cmd) {
   2939 	case SIOCSIFMEDIA:
   2940 	case SIOCGIFMEDIA:
   2941 		WM_CORE_LOCK(sc);
   2942 		/* Flow control requires full-duplex mode. */
   2943 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2944 		    (ifr->ifr_media & IFM_FDX) == 0)
   2945 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2946 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2947 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2948 				/* We can do both TXPAUSE and RXPAUSE. */
   2949 				ifr->ifr_media |=
   2950 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2951 			}
   2952 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2953 		}
   2954 		WM_CORE_UNLOCK(sc);
   2955 #ifdef WM_MPSAFE
   2956 		s = splnet();
   2957 #endif
   2958 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2959 #ifdef WM_MPSAFE
   2960 		splx(s);
   2961 #endif
   2962 		break;
   2963 	case SIOCINITIFADDR:
   2964 		WM_CORE_LOCK(sc);
   2965 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2966 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2967 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2968 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2969 			/* unicast address is first multicast entry */
   2970 			wm_set_filter(sc);
   2971 			error = 0;
   2972 			WM_CORE_UNLOCK(sc);
   2973 			break;
   2974 		}
   2975 		WM_CORE_UNLOCK(sc);
   2976 		/*FALLTHROUGH*/
   2977 	default:
   2978 #ifdef WM_MPSAFE
   2979 		s = splnet();
   2980 #endif
   2981 		/* It may call wm_start, so unlock here */
   2982 		error = ether_ioctl(ifp, cmd, data);
   2983 #ifdef WM_MPSAFE
   2984 		splx(s);
   2985 #endif
   2986 		if (error != ENETRESET)
   2987 			break;
   2988 
   2989 		error = 0;
   2990 
   2991 		if (cmd == SIOCSIFCAP) {
   2992 			error = (*ifp->if_init)(ifp);
   2993 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2994 			;
   2995 		else if (ifp->if_flags & IFF_RUNNING) {
   2996 			/*
   2997 			 * Multicast list has changed; set the hardware filter
   2998 			 * accordingly.
   2999 			 */
   3000 			WM_CORE_LOCK(sc);
   3001 			wm_set_filter(sc);
   3002 			WM_CORE_UNLOCK(sc);
   3003 		}
   3004 		break;
   3005 	}
   3006 
   3007 #ifndef WM_MPSAFE
   3008 	splx(s);
   3009 #endif
   3010 	return error;
   3011 }
   3012 
   3013 /* MAC address related */
   3014 
   3015 /*
   3016  * Get the offset of MAC address and return it.
   3017  * If error occured, use offset 0.
   3018  */
   3019 static uint16_t
   3020 wm_check_alt_mac_addr(struct wm_softc *sc)
   3021 {
   3022 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3023 	uint16_t offset = NVM_OFF_MACADDR;
   3024 
   3025 	/* Try to read alternative MAC address pointer */
   3026 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3027 		return 0;
   3028 
   3029 	/* Check pointer if it's valid or not. */
   3030 	if ((offset == 0x0000) || (offset == 0xffff))
   3031 		return 0;
   3032 
   3033 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3034 	/*
   3035 	 * Check whether alternative MAC address is valid or not.
   3036 	 * Some cards have non 0xffff pointer but those don't use
   3037 	 * alternative MAC address in reality.
   3038 	 *
   3039 	 * Check whether the broadcast bit is set or not.
   3040 	 */
   3041 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3042 		if (((myea[0] & 0xff) & 0x01) == 0)
   3043 			return offset; /* Found */
   3044 
   3045 	/* Not found */
   3046 	return 0;
   3047 }
   3048 
   3049 static int
   3050 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3051 {
   3052 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3053 	uint16_t offset = NVM_OFF_MACADDR;
   3054 	int do_invert = 0;
   3055 
   3056 	switch (sc->sc_type) {
   3057 	case WM_T_82580:
   3058 	case WM_T_I350:
   3059 	case WM_T_I354:
   3060 		/* EEPROM Top Level Partitioning */
   3061 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3062 		break;
   3063 	case WM_T_82571:
   3064 	case WM_T_82575:
   3065 	case WM_T_82576:
   3066 	case WM_T_80003:
   3067 	case WM_T_I210:
   3068 	case WM_T_I211:
   3069 		offset = wm_check_alt_mac_addr(sc);
   3070 		if (offset == 0)
   3071 			if ((sc->sc_funcid & 0x01) == 1)
   3072 				do_invert = 1;
   3073 		break;
   3074 	default:
   3075 		if ((sc->sc_funcid & 0x01) == 1)
   3076 			do_invert = 1;
   3077 		break;
   3078 	}
   3079 
   3080 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3081 		goto bad;
   3082 
   3083 	enaddr[0] = myea[0] & 0xff;
   3084 	enaddr[1] = myea[0] >> 8;
   3085 	enaddr[2] = myea[1] & 0xff;
   3086 	enaddr[3] = myea[1] >> 8;
   3087 	enaddr[4] = myea[2] & 0xff;
   3088 	enaddr[5] = myea[2] >> 8;
   3089 
   3090 	/*
   3091 	 * Toggle the LSB of the MAC address on the second port
   3092 	 * of some dual port cards.
   3093 	 */
   3094 	if (do_invert != 0)
   3095 		enaddr[5] ^= 1;
   3096 
   3097 	return 0;
   3098 
   3099  bad:
   3100 	return -1;
   3101 }
   3102 
   3103 /*
   3104  * wm_set_ral:
   3105  *
   3106  *	Set an entery in the receive address list.
   3107  */
   3108 static void
   3109 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3110 {
   3111 	uint32_t ral_lo, ral_hi;
   3112 
   3113 	if (enaddr != NULL) {
   3114 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3115 		    (enaddr[3] << 24);
   3116 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3117 		ral_hi |= RAL_AV;
   3118 	} else {
   3119 		ral_lo = 0;
   3120 		ral_hi = 0;
   3121 	}
   3122 
   3123 	if (sc->sc_type >= WM_T_82544) {
   3124 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3125 		    ral_lo);
   3126 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3127 		    ral_hi);
   3128 	} else {
   3129 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3130 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3131 	}
   3132 }
   3133 
   3134 /*
   3135  * wm_mchash:
   3136  *
   3137  *	Compute the hash of the multicast address for the 4096-bit
   3138  *	multicast filter.
   3139  */
   3140 static uint32_t
   3141 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3142 {
   3143 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3144 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3145 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3146 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3147 	uint32_t hash;
   3148 
   3149 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3150 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3151 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3152 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3153 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3154 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3155 		return (hash & 0x3ff);
   3156 	}
   3157 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3158 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3159 
   3160 	return (hash & 0xfff);
   3161 }
   3162 
   3163 /*
   3164  * wm_set_filter:
   3165  *
   3166  *	Set up the receive filter.
   3167  */
   3168 static void
   3169 wm_set_filter(struct wm_softc *sc)
   3170 {
   3171 	struct ethercom *ec = &sc->sc_ethercom;
   3172 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3173 	struct ether_multi *enm;
   3174 	struct ether_multistep step;
   3175 	bus_addr_t mta_reg;
   3176 	uint32_t hash, reg, bit;
   3177 	int i, size, ralmax;
   3178 
   3179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3180 		device_xname(sc->sc_dev), __func__));
   3181 
   3182 	if (sc->sc_type >= WM_T_82544)
   3183 		mta_reg = WMREG_CORDOVA_MTA;
   3184 	else
   3185 		mta_reg = WMREG_MTA;
   3186 
   3187 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3188 
   3189 	if (ifp->if_flags & IFF_BROADCAST)
   3190 		sc->sc_rctl |= RCTL_BAM;
   3191 	if (ifp->if_flags & IFF_PROMISC) {
   3192 		sc->sc_rctl |= RCTL_UPE;
   3193 		goto allmulti;
   3194 	}
   3195 
   3196 	/*
   3197 	 * Set the station address in the first RAL slot, and
   3198 	 * clear the remaining slots.
   3199 	 */
   3200 	if (sc->sc_type == WM_T_ICH8)
   3201 		size = WM_RAL_TABSIZE_ICH8 -1;
   3202 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3203 	    || (sc->sc_type == WM_T_PCH))
   3204 		size = WM_RAL_TABSIZE_ICH8;
   3205 	else if (sc->sc_type == WM_T_PCH2)
   3206 		size = WM_RAL_TABSIZE_PCH2;
   3207 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3208 		size = WM_RAL_TABSIZE_PCH_LPT;
   3209 	else if (sc->sc_type == WM_T_82575)
   3210 		size = WM_RAL_TABSIZE_82575;
   3211 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3212 		size = WM_RAL_TABSIZE_82576;
   3213 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3214 		size = WM_RAL_TABSIZE_I350;
   3215 	else
   3216 		size = WM_RAL_TABSIZE;
   3217 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3218 
   3219 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3220 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3221 		switch (i) {
   3222 		case 0:
   3223 			/* We can use all entries */
   3224 			ralmax = size;
   3225 			break;
   3226 		case 1:
   3227 			/* Only RAR[0] */
   3228 			ralmax = 1;
   3229 			break;
   3230 		default:
   3231 			/* available SHRA + RAR[0] */
   3232 			ralmax = i + 1;
   3233 		}
   3234 	} else
   3235 		ralmax = size;
   3236 	for (i = 1; i < size; i++) {
   3237 		if (i < ralmax)
   3238 			wm_set_ral(sc, NULL, i);
   3239 	}
   3240 
   3241 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3242 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3243 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3244 	    || (sc->sc_type == WM_T_PCH_SPT))
   3245 		size = WM_ICH8_MC_TABSIZE;
   3246 	else
   3247 		size = WM_MC_TABSIZE;
   3248 	/* Clear out the multicast table. */
   3249 	for (i = 0; i < size; i++)
   3250 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3251 
   3252 	ETHER_FIRST_MULTI(step, ec, enm);
   3253 	while (enm != NULL) {
   3254 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3255 			/*
   3256 			 * We must listen to a range of multicast addresses.
   3257 			 * For now, just accept all multicasts, rather than
   3258 			 * trying to set only those filter bits needed to match
   3259 			 * the range.  (At this time, the only use of address
   3260 			 * ranges is for IP multicast routing, for which the
   3261 			 * range is big enough to require all bits set.)
   3262 			 */
   3263 			goto allmulti;
   3264 		}
   3265 
   3266 		hash = wm_mchash(sc, enm->enm_addrlo);
   3267 
   3268 		reg = (hash >> 5);
   3269 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3270 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3271 		    || (sc->sc_type == WM_T_PCH2)
   3272 		    || (sc->sc_type == WM_T_PCH_LPT)
   3273 		    || (sc->sc_type == WM_T_PCH_SPT))
   3274 			reg &= 0x1f;
   3275 		else
   3276 			reg &= 0x7f;
   3277 		bit = hash & 0x1f;
   3278 
   3279 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3280 		hash |= 1U << bit;
   3281 
   3282 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3283 			/*
   3284 			 * 82544 Errata 9: Certain register cannot be written
   3285 			 * with particular alignments in PCI-X bus operation
   3286 			 * (FCAH, MTA and VFTA).
   3287 			 */
   3288 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3289 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3290 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3291 		} else
   3292 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3293 
   3294 		ETHER_NEXT_MULTI(step, enm);
   3295 	}
   3296 
   3297 	ifp->if_flags &= ~IFF_ALLMULTI;
   3298 	goto setit;
   3299 
   3300  allmulti:
   3301 	ifp->if_flags |= IFF_ALLMULTI;
   3302 	sc->sc_rctl |= RCTL_MPE;
   3303 
   3304  setit:
   3305 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3306 }
   3307 
   3308 /* Reset and init related */
   3309 
   3310 static void
   3311 wm_set_vlan(struct wm_softc *sc)
   3312 {
   3313 
   3314 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3315 		device_xname(sc->sc_dev), __func__));
   3316 
   3317 	/* Deal with VLAN enables. */
   3318 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3319 		sc->sc_ctrl |= CTRL_VME;
   3320 	else
   3321 		sc->sc_ctrl &= ~CTRL_VME;
   3322 
   3323 	/* Write the control registers. */
   3324 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3325 }
   3326 
   3327 static void
   3328 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3329 {
   3330 	uint32_t gcr;
   3331 	pcireg_t ctrl2;
   3332 
   3333 	gcr = CSR_READ(sc, WMREG_GCR);
   3334 
   3335 	/* Only take action if timeout value is defaulted to 0 */
   3336 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3337 		goto out;
   3338 
   3339 	if ((gcr & GCR_CAP_VER2) == 0) {
   3340 		gcr |= GCR_CMPL_TMOUT_10MS;
   3341 		goto out;
   3342 	}
   3343 
   3344 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3345 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3346 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3347 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3348 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3349 
   3350 out:
   3351 	/* Disable completion timeout resend */
   3352 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3353 
   3354 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3355 }
   3356 
   3357 void
   3358 wm_get_auto_rd_done(struct wm_softc *sc)
   3359 {
   3360 	int i;
   3361 
   3362 	/* wait for eeprom to reload */
   3363 	switch (sc->sc_type) {
   3364 	case WM_T_82571:
   3365 	case WM_T_82572:
   3366 	case WM_T_82573:
   3367 	case WM_T_82574:
   3368 	case WM_T_82583:
   3369 	case WM_T_82575:
   3370 	case WM_T_82576:
   3371 	case WM_T_82580:
   3372 	case WM_T_I350:
   3373 	case WM_T_I354:
   3374 	case WM_T_I210:
   3375 	case WM_T_I211:
   3376 	case WM_T_80003:
   3377 	case WM_T_ICH8:
   3378 	case WM_T_ICH9:
   3379 		for (i = 0; i < 10; i++) {
   3380 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3381 				break;
   3382 			delay(1000);
   3383 		}
   3384 		if (i == 10) {
   3385 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3386 			    "complete\n", device_xname(sc->sc_dev));
   3387 		}
   3388 		break;
   3389 	default:
   3390 		break;
   3391 	}
   3392 }
   3393 
   3394 void
   3395 wm_lan_init_done(struct wm_softc *sc)
   3396 {
   3397 	uint32_t reg = 0;
   3398 	int i;
   3399 
   3400 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3401 		device_xname(sc->sc_dev), __func__));
   3402 
   3403 	/* Wait for eeprom to reload */
   3404 	switch (sc->sc_type) {
   3405 	case WM_T_ICH10:
   3406 	case WM_T_PCH:
   3407 	case WM_T_PCH2:
   3408 	case WM_T_PCH_LPT:
   3409 	case WM_T_PCH_SPT:
   3410 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3411 			reg = CSR_READ(sc, WMREG_STATUS);
   3412 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3413 				break;
   3414 			delay(100);
   3415 		}
   3416 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3417 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3418 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3419 		}
   3420 		break;
   3421 	default:
   3422 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3423 		    __func__);
   3424 		break;
   3425 	}
   3426 
   3427 	reg &= ~STATUS_LAN_INIT_DONE;
   3428 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3429 }
   3430 
   3431 void
   3432 wm_get_cfg_done(struct wm_softc *sc)
   3433 {
   3434 	int mask;
   3435 	uint32_t reg;
   3436 	int i;
   3437 
   3438 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3439 		device_xname(sc->sc_dev), __func__));
   3440 
   3441 	/* Wait for eeprom to reload */
   3442 	switch (sc->sc_type) {
   3443 	case WM_T_82542_2_0:
   3444 	case WM_T_82542_2_1:
   3445 		/* null */
   3446 		break;
   3447 	case WM_T_82543:
   3448 	case WM_T_82544:
   3449 	case WM_T_82540:
   3450 	case WM_T_82545:
   3451 	case WM_T_82545_3:
   3452 	case WM_T_82546:
   3453 	case WM_T_82546_3:
   3454 	case WM_T_82541:
   3455 	case WM_T_82541_2:
   3456 	case WM_T_82547:
   3457 	case WM_T_82547_2:
   3458 	case WM_T_82573:
   3459 	case WM_T_82574:
   3460 	case WM_T_82583:
   3461 		/* generic */
   3462 		delay(10*1000);
   3463 		break;
   3464 	case WM_T_80003:
   3465 	case WM_T_82571:
   3466 	case WM_T_82572:
   3467 	case WM_T_82575:
   3468 	case WM_T_82576:
   3469 	case WM_T_82580:
   3470 	case WM_T_I350:
   3471 	case WM_T_I354:
   3472 	case WM_T_I210:
   3473 	case WM_T_I211:
   3474 		if (sc->sc_type == WM_T_82571) {
   3475 			/* Only 82571 shares port 0 */
   3476 			mask = EEMNGCTL_CFGDONE_0;
   3477 		} else
   3478 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3479 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3480 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3481 				break;
   3482 			delay(1000);
   3483 		}
   3484 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3485 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3486 				device_xname(sc->sc_dev), __func__));
   3487 		}
   3488 		break;
   3489 	case WM_T_ICH8:
   3490 	case WM_T_ICH9:
   3491 	case WM_T_ICH10:
   3492 	case WM_T_PCH:
   3493 	case WM_T_PCH2:
   3494 	case WM_T_PCH_LPT:
   3495 	case WM_T_PCH_SPT:
   3496 		delay(10*1000);
   3497 		if (sc->sc_type >= WM_T_ICH10)
   3498 			wm_lan_init_done(sc);
   3499 		else
   3500 			wm_get_auto_rd_done(sc);
   3501 
   3502 		reg = CSR_READ(sc, WMREG_STATUS);
   3503 		if ((reg & STATUS_PHYRA) != 0)
   3504 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3505 		break;
   3506 	default:
   3507 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3508 		    __func__);
   3509 		break;
   3510 	}
   3511 }
   3512 
   3513 /* Init hardware bits */
   3514 void
   3515 wm_initialize_hardware_bits(struct wm_softc *sc)
   3516 {
   3517 	uint32_t tarc0, tarc1, reg;
   3518 
   3519 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3520 		device_xname(sc->sc_dev), __func__));
   3521 
   3522 	/* For 82571 variant, 80003 and ICHs */
   3523 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3524 	    || (sc->sc_type >= WM_T_80003)) {
   3525 
   3526 		/* Transmit Descriptor Control 0 */
   3527 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3528 		reg |= TXDCTL_COUNT_DESC;
   3529 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3530 
   3531 		/* Transmit Descriptor Control 1 */
   3532 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3533 		reg |= TXDCTL_COUNT_DESC;
   3534 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3535 
   3536 		/* TARC0 */
   3537 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3538 		switch (sc->sc_type) {
   3539 		case WM_T_82571:
   3540 		case WM_T_82572:
   3541 		case WM_T_82573:
   3542 		case WM_T_82574:
   3543 		case WM_T_82583:
   3544 		case WM_T_80003:
   3545 			/* Clear bits 30..27 */
   3546 			tarc0 &= ~__BITS(30, 27);
   3547 			break;
   3548 		default:
   3549 			break;
   3550 		}
   3551 
   3552 		switch (sc->sc_type) {
   3553 		case WM_T_82571:
   3554 		case WM_T_82572:
   3555 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3556 
   3557 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3558 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3559 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3560 			/* 8257[12] Errata No.7 */
   3561 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   3562 
   3563 			/* TARC1 bit 28 */
   3564 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3565 				tarc1 &= ~__BIT(28);
   3566 			else
   3567 				tarc1 |= __BIT(28);
   3568 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3569 
   3570 			/*
   3571 			 * 8257[12] Errata No.13
   3572 			 * Disable Dyamic Clock Gating.
   3573 			 */
   3574 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3575 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3576 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3577 			break;
   3578 		case WM_T_82573:
   3579 		case WM_T_82574:
   3580 		case WM_T_82583:
   3581 			if ((sc->sc_type == WM_T_82574)
   3582 			    || (sc->sc_type == WM_T_82583))
   3583 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3584 
   3585 			/* Extended Device Control */
   3586 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3587 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3588 			reg |= __BIT(22);	/* Set bit 22 */
   3589 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3590 
   3591 			/* Device Control */
   3592 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3593 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3594 
   3595 			/* PCIe Control Register */
   3596 			/*
   3597 			 * 82573 Errata (unknown).
   3598 			 *
   3599 			 * 82574 Errata 25 and 82583 Errata 12
   3600 			 * "Dropped Rx Packets":
   3601 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   3602 			 */
   3603 			reg = CSR_READ(sc, WMREG_GCR);
   3604 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3605 			CSR_WRITE(sc, WMREG_GCR, reg);
   3606 
   3607 			if ((sc->sc_type == WM_T_82574)
   3608 			    || (sc->sc_type == WM_T_82583)) {
   3609 				/*
   3610 				 * Document says this bit must be set for
   3611 				 * proper operation.
   3612 				 */
   3613 				reg = CSR_READ(sc, WMREG_GCR);
   3614 				reg |= __BIT(22);
   3615 				CSR_WRITE(sc, WMREG_GCR, reg);
   3616 
   3617 				/*
   3618 				 * Apply workaround for hardware errata
   3619 				 * documented in errata docs Fixes issue where
   3620 				 * some error prone or unreliable PCIe
   3621 				 * completions are occurring, particularly
   3622 				 * with ASPM enabled. Without fix, issue can
   3623 				 * cause Tx timeouts.
   3624 				 */
   3625 				reg = CSR_READ(sc, WMREG_GCR2);
   3626 				reg |= __BIT(0);
   3627 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3628 			}
   3629 			break;
   3630 		case WM_T_80003:
   3631 			/* TARC0 */
   3632 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3633 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3634 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   3635 
   3636 			/* TARC1 bit 28 */
   3637 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3638 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3639 				tarc1 &= ~__BIT(28);
   3640 			else
   3641 				tarc1 |= __BIT(28);
   3642 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3643 			break;
   3644 		case WM_T_ICH8:
   3645 		case WM_T_ICH9:
   3646 		case WM_T_ICH10:
   3647 		case WM_T_PCH:
   3648 		case WM_T_PCH2:
   3649 		case WM_T_PCH_LPT:
   3650 		case WM_T_PCH_SPT:
   3651 			/* TARC0 */
   3652 			if ((sc->sc_type == WM_T_ICH8)
   3653 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3654 				/* Set TARC0 bits 29 and 28 */
   3655 				tarc0 |= __BITS(29, 28);
   3656 			}
   3657 			/* Set TARC0 bits 23,24,26,27 */
   3658 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3659 
   3660 			/* CTRL_EXT */
   3661 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3662 			reg |= __BIT(22);	/* Set bit 22 */
   3663 			/*
   3664 			 * Enable PHY low-power state when MAC is at D3
   3665 			 * w/o WoL
   3666 			 */
   3667 			if (sc->sc_type >= WM_T_PCH)
   3668 				reg |= CTRL_EXT_PHYPDEN;
   3669 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3670 
   3671 			/* TARC1 */
   3672 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3673 			/* bit 28 */
   3674 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3675 				tarc1 &= ~__BIT(28);
   3676 			else
   3677 				tarc1 |= __BIT(28);
   3678 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3679 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3680 
   3681 			/* Device Status */
   3682 			if (sc->sc_type == WM_T_ICH8) {
   3683 				reg = CSR_READ(sc, WMREG_STATUS);
   3684 				reg &= ~__BIT(31);
   3685 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3686 
   3687 			}
   3688 
   3689 			/* IOSFPC */
   3690 			if (sc->sc_type == WM_T_PCH_SPT) {
   3691 				reg = CSR_READ(sc, WMREG_IOSFPC);
   3692 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   3693 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3694 			}
   3695 			/*
   3696 			 * Work-around descriptor data corruption issue during
   3697 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3698 			 * capability.
   3699 			 */
   3700 			reg = CSR_READ(sc, WMREG_RFCTL);
   3701 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3702 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3703 			break;
   3704 		default:
   3705 			break;
   3706 		}
   3707 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3708 
   3709 		/*
   3710 		 * 8257[12] Errata No.52 and some others.
   3711 		 * Avoid RSS Hash Value bug.
   3712 		 */
   3713 		switch (sc->sc_type) {
   3714 		case WM_T_82571:
   3715 		case WM_T_82572:
   3716 		case WM_T_82573:
   3717 		case WM_T_80003:
   3718 		case WM_T_ICH8:
   3719 			reg = CSR_READ(sc, WMREG_RFCTL);
   3720 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3721 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3722 			break;
   3723 		default:
   3724 			break;
   3725 		}
   3726 	}
   3727 }
   3728 
   3729 static uint32_t
   3730 wm_rxpbs_adjust_82580(uint32_t val)
   3731 {
   3732 	uint32_t rv = 0;
   3733 
   3734 	if (val < __arraycount(wm_82580_rxpbs_table))
   3735 		rv = wm_82580_rxpbs_table[val];
   3736 
   3737 	return rv;
   3738 }
   3739 
   3740 /*
   3741  * wm_reset_phy:
   3742  *
   3743  *	generic PHY reset function.
   3744  *	Same as e1000_phy_hw_reset_generic()
   3745  */
   3746 static void
   3747 wm_reset_phy(struct wm_softc *sc)
   3748 {
   3749 	uint32_t reg;
   3750 
   3751 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3752 		device_xname(sc->sc_dev), __func__));
   3753 	if (wm_phy_resetisblocked(sc))
   3754 		return;
   3755 
   3756 	sc->phy.acquire(sc);
   3757 
   3758 	reg = CSR_READ(sc, WMREG_CTRL);
   3759 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3760 	CSR_WRITE_FLUSH(sc);
   3761 
   3762 	delay(sc->phy.reset_delay_us);
   3763 
   3764 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3765 	CSR_WRITE_FLUSH(sc);
   3766 
   3767 	delay(150);
   3768 
   3769 	sc->phy.release(sc);
   3770 
   3771 	wm_get_cfg_done(sc);
   3772 }
   3773 
   3774 static void
   3775 wm_flush_desc_rings(struct wm_softc *sc)
   3776 {
   3777 	pcireg_t preg;
   3778 	uint32_t reg;
   3779 	int nexttx;
   3780 
   3781 	/* First, disable MULR fix in FEXTNVM11 */
   3782 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3783 	reg |= FEXTNVM11_DIS_MULRFIX;
   3784 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3785 
   3786 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3787 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3788 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3789 		struct wm_txqueue *txq;
   3790 		wiseman_txdesc_t *txd;
   3791 
   3792 		/* TX */
   3793 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3794 		    device_xname(sc->sc_dev), preg, reg);
   3795 		reg = CSR_READ(sc, WMREG_TCTL);
   3796 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3797 
   3798 		txq = &sc->sc_queue[0].wmq_txq;
   3799 		nexttx = txq->txq_next;
   3800 		txd = &txq->txq_descs[nexttx];
   3801 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   3802 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
   3803 		txd->wtx_fields.wtxu_status = 0;
   3804 		txd->wtx_fields.wtxu_options = 0;
   3805 		txd->wtx_fields.wtxu_vlan = 0;
   3806 
   3807 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3808 			BUS_SPACE_BARRIER_WRITE);
   3809 
   3810 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3811 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3812 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3813 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3814 		delay(250);
   3815 	}
   3816 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3817 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3818 		uint32_t rctl;
   3819 
   3820 		/* RX */
   3821 		printf("%s: Need RX flush (reg = %08x)\n",
   3822 		    device_xname(sc->sc_dev), preg);
   3823 		rctl = CSR_READ(sc, WMREG_RCTL);
   3824 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3825 		CSR_WRITE_FLUSH(sc);
   3826 		delay(150);
   3827 
   3828 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3829 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3830 		reg &= 0xffffc000;
   3831 		/*
   3832 		 * update thresholds: prefetch threshold to 31, host threshold
   3833 		 * to 1 and make sure the granularity is "descriptors" and not
   3834 		 * "cache lines"
   3835 		 */
   3836 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3837 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3838 
   3839 		/*
   3840 		 * momentarily enable the RX ring for the changes to take
   3841 		 * effect
   3842 		 */
   3843 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3844 		CSR_WRITE_FLUSH(sc);
   3845 		delay(150);
   3846 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3847 	}
   3848 }
   3849 
   3850 /*
   3851  * wm_reset:
   3852  *
   3853  *	Reset the i82542 chip.
   3854  */
   3855 static void
   3856 wm_reset(struct wm_softc *sc)
   3857 {
   3858 	int phy_reset = 0;
   3859 	int i, error = 0;
   3860 	uint32_t reg;
   3861 
   3862 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3863 		device_xname(sc->sc_dev), __func__));
   3864 	KASSERT(sc->sc_type != 0);
   3865 
   3866 	/*
   3867 	 * Allocate on-chip memory according to the MTU size.
   3868 	 * The Packet Buffer Allocation register must be written
   3869 	 * before the chip is reset.
   3870 	 */
   3871 	switch (sc->sc_type) {
   3872 	case WM_T_82547:
   3873 	case WM_T_82547_2:
   3874 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3875 		    PBA_22K : PBA_30K;
   3876 		for (i = 0; i < sc->sc_nqueues; i++) {
   3877 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3878 			txq->txq_fifo_head = 0;
   3879 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3880 			txq->txq_fifo_size =
   3881 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3882 			txq->txq_fifo_stall = 0;
   3883 		}
   3884 		break;
   3885 	case WM_T_82571:
   3886 	case WM_T_82572:
   3887 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   3888 	case WM_T_80003:
   3889 		sc->sc_pba = PBA_32K;
   3890 		break;
   3891 	case WM_T_82573:
   3892 		sc->sc_pba = PBA_12K;
   3893 		break;
   3894 	case WM_T_82574:
   3895 	case WM_T_82583:
   3896 		sc->sc_pba = PBA_20K;
   3897 		break;
   3898 	case WM_T_82576:
   3899 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3900 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3901 		break;
   3902 	case WM_T_82580:
   3903 	case WM_T_I350:
   3904 	case WM_T_I354:
   3905 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3906 		break;
   3907 	case WM_T_I210:
   3908 	case WM_T_I211:
   3909 		sc->sc_pba = PBA_34K;
   3910 		break;
   3911 	case WM_T_ICH8:
   3912 		/* Workaround for a bit corruption issue in FIFO memory */
   3913 		sc->sc_pba = PBA_8K;
   3914 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3915 		break;
   3916 	case WM_T_ICH9:
   3917 	case WM_T_ICH10:
   3918 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3919 		    PBA_14K : PBA_10K;
   3920 		break;
   3921 	case WM_T_PCH:
   3922 	case WM_T_PCH2:
   3923 	case WM_T_PCH_LPT:
   3924 	case WM_T_PCH_SPT:
   3925 		sc->sc_pba = PBA_26K;
   3926 		break;
   3927 	default:
   3928 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3929 		    PBA_40K : PBA_48K;
   3930 		break;
   3931 	}
   3932 	/*
   3933 	 * Only old or non-multiqueue devices have the PBA register
   3934 	 * XXX Need special handling for 82575.
   3935 	 */
   3936 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3937 	    || (sc->sc_type == WM_T_82575))
   3938 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3939 
   3940 	/* Prevent the PCI-E bus from sticking */
   3941 	if (sc->sc_flags & WM_F_PCIE) {
   3942 		int timeout = 800;
   3943 
   3944 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3945 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3946 
   3947 		while (timeout--) {
   3948 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3949 			    == 0)
   3950 				break;
   3951 			delay(100);
   3952 		}
   3953 	}
   3954 
   3955 	/* Set the completion timeout for interface */
   3956 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3957 	    || (sc->sc_type == WM_T_82580)
   3958 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3959 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3960 		wm_set_pcie_completion_timeout(sc);
   3961 
   3962 	/* Clear interrupt */
   3963 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3964 	if (sc->sc_nintrs > 1) {
   3965 		if (sc->sc_type != WM_T_82574) {
   3966 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3967 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3968 		} else {
   3969 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3970 		}
   3971 	}
   3972 
   3973 	/* Stop the transmit and receive processes. */
   3974 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3975 	sc->sc_rctl &= ~RCTL_EN;
   3976 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3977 	CSR_WRITE_FLUSH(sc);
   3978 
   3979 	/* XXX set_tbi_sbp_82543() */
   3980 
   3981 	delay(10*1000);
   3982 
   3983 	/* Must acquire the MDIO ownership before MAC reset */
   3984 	switch (sc->sc_type) {
   3985 	case WM_T_82573:
   3986 	case WM_T_82574:
   3987 	case WM_T_82583:
   3988 		error = wm_get_hw_semaphore_82573(sc);
   3989 		break;
   3990 	default:
   3991 		break;
   3992 	}
   3993 
   3994 	/*
   3995 	 * 82541 Errata 29? & 82547 Errata 28?
   3996 	 * See also the description about PHY_RST bit in CTRL register
   3997 	 * in 8254x_GBe_SDM.pdf.
   3998 	 */
   3999 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4000 		CSR_WRITE(sc, WMREG_CTRL,
   4001 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4002 		CSR_WRITE_FLUSH(sc);
   4003 		delay(5000);
   4004 	}
   4005 
   4006 	switch (sc->sc_type) {
   4007 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4008 	case WM_T_82541:
   4009 	case WM_T_82541_2:
   4010 	case WM_T_82547:
   4011 	case WM_T_82547_2:
   4012 		/*
   4013 		 * On some chipsets, a reset through a memory-mapped write
   4014 		 * cycle can cause the chip to reset before completing the
   4015 		 * write cycle.  This causes major headache that can be
   4016 		 * avoided by issuing the reset via indirect register writes
   4017 		 * through I/O space.
   4018 		 *
   4019 		 * So, if we successfully mapped the I/O BAR at attach time,
   4020 		 * use that.  Otherwise, try our luck with a memory-mapped
   4021 		 * reset.
   4022 		 */
   4023 		if (sc->sc_flags & WM_F_IOH_VALID)
   4024 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4025 		else
   4026 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4027 		break;
   4028 	case WM_T_82545_3:
   4029 	case WM_T_82546_3:
   4030 		/* Use the shadow control register on these chips. */
   4031 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4032 		break;
   4033 	case WM_T_80003:
   4034 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4035 		sc->phy.acquire(sc);
   4036 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4037 		sc->phy.release(sc);
   4038 		break;
   4039 	case WM_T_ICH8:
   4040 	case WM_T_ICH9:
   4041 	case WM_T_ICH10:
   4042 	case WM_T_PCH:
   4043 	case WM_T_PCH2:
   4044 	case WM_T_PCH_LPT:
   4045 	case WM_T_PCH_SPT:
   4046 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4047 		if (wm_phy_resetisblocked(sc) == false) {
   4048 			/*
   4049 			 * Gate automatic PHY configuration by hardware on
   4050 			 * non-managed 82579
   4051 			 */
   4052 			if ((sc->sc_type == WM_T_PCH2)
   4053 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4054 				== 0))
   4055 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4056 
   4057 			reg |= CTRL_PHY_RESET;
   4058 			phy_reset = 1;
   4059 		} else
   4060 			printf("XXX reset is blocked!!!\n");
   4061 		sc->phy.acquire(sc);
   4062 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4063 		/* Don't insert a completion barrier when reset */
   4064 		delay(20*1000);
   4065 		mutex_exit(sc->sc_ich_phymtx);
   4066 		break;
   4067 	case WM_T_82580:
   4068 	case WM_T_I350:
   4069 	case WM_T_I354:
   4070 	case WM_T_I210:
   4071 	case WM_T_I211:
   4072 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4073 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4074 			CSR_WRITE_FLUSH(sc);
   4075 		delay(5000);
   4076 		break;
   4077 	case WM_T_82542_2_0:
   4078 	case WM_T_82542_2_1:
   4079 	case WM_T_82543:
   4080 	case WM_T_82540:
   4081 	case WM_T_82545:
   4082 	case WM_T_82546:
   4083 	case WM_T_82571:
   4084 	case WM_T_82572:
   4085 	case WM_T_82573:
   4086 	case WM_T_82574:
   4087 	case WM_T_82575:
   4088 	case WM_T_82576:
   4089 	case WM_T_82583:
   4090 	default:
   4091 		/* Everything else can safely use the documented method. */
   4092 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4093 		break;
   4094 	}
   4095 
   4096 	/* Must release the MDIO ownership after MAC reset */
   4097 	switch (sc->sc_type) {
   4098 	case WM_T_82573:
   4099 	case WM_T_82574:
   4100 	case WM_T_82583:
   4101 		if (error == 0)
   4102 			wm_put_hw_semaphore_82573(sc);
   4103 		break;
   4104 	default:
   4105 		break;
   4106 	}
   4107 
   4108 	if (phy_reset != 0)
   4109 		wm_get_cfg_done(sc);
   4110 
   4111 	/* reload EEPROM */
   4112 	switch (sc->sc_type) {
   4113 	case WM_T_82542_2_0:
   4114 	case WM_T_82542_2_1:
   4115 	case WM_T_82543:
   4116 	case WM_T_82544:
   4117 		delay(10);
   4118 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4119 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4120 		CSR_WRITE_FLUSH(sc);
   4121 		delay(2000);
   4122 		break;
   4123 	case WM_T_82540:
   4124 	case WM_T_82545:
   4125 	case WM_T_82545_3:
   4126 	case WM_T_82546:
   4127 	case WM_T_82546_3:
   4128 		delay(5*1000);
   4129 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4130 		break;
   4131 	case WM_T_82541:
   4132 	case WM_T_82541_2:
   4133 	case WM_T_82547:
   4134 	case WM_T_82547_2:
   4135 		delay(20000);
   4136 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4137 		break;
   4138 	case WM_T_82571:
   4139 	case WM_T_82572:
   4140 	case WM_T_82573:
   4141 	case WM_T_82574:
   4142 	case WM_T_82583:
   4143 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4144 			delay(10);
   4145 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4146 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4147 			CSR_WRITE_FLUSH(sc);
   4148 		}
   4149 		/* check EECD_EE_AUTORD */
   4150 		wm_get_auto_rd_done(sc);
   4151 		/*
   4152 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4153 		 * is set.
   4154 		 */
   4155 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4156 		    || (sc->sc_type == WM_T_82583))
   4157 			delay(25*1000);
   4158 		break;
   4159 	case WM_T_82575:
   4160 	case WM_T_82576:
   4161 	case WM_T_82580:
   4162 	case WM_T_I350:
   4163 	case WM_T_I354:
   4164 	case WM_T_I210:
   4165 	case WM_T_I211:
   4166 	case WM_T_80003:
   4167 		/* check EECD_EE_AUTORD */
   4168 		wm_get_auto_rd_done(sc);
   4169 		break;
   4170 	case WM_T_ICH8:
   4171 	case WM_T_ICH9:
   4172 	case WM_T_ICH10:
   4173 	case WM_T_PCH:
   4174 	case WM_T_PCH2:
   4175 	case WM_T_PCH_LPT:
   4176 	case WM_T_PCH_SPT:
   4177 		break;
   4178 	default:
   4179 		panic("%s: unknown type\n", __func__);
   4180 	}
   4181 
   4182 	/* Check whether EEPROM is present or not */
   4183 	switch (sc->sc_type) {
   4184 	case WM_T_82575:
   4185 	case WM_T_82576:
   4186 	case WM_T_82580:
   4187 	case WM_T_I350:
   4188 	case WM_T_I354:
   4189 	case WM_T_ICH8:
   4190 	case WM_T_ICH9:
   4191 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4192 			/* Not found */
   4193 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4194 			if (sc->sc_type == WM_T_82575)
   4195 				wm_reset_init_script_82575(sc);
   4196 		}
   4197 		break;
   4198 	default:
   4199 		break;
   4200 	}
   4201 
   4202 	if ((sc->sc_type == WM_T_82580)
   4203 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4204 		/* clear global device reset status bit */
   4205 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4206 	}
   4207 
   4208 	/* Clear any pending interrupt events. */
   4209 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4210 	reg = CSR_READ(sc, WMREG_ICR);
   4211 	if (sc->sc_nintrs > 1) {
   4212 		if (sc->sc_type != WM_T_82574) {
   4213 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4214 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4215 		} else
   4216 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4217 	}
   4218 
   4219 	/* reload sc_ctrl */
   4220 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4221 
   4222 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4223 		wm_set_eee_i350(sc);
   4224 
   4225 	/* Clear the host wakeup bit after lcd reset */
   4226 	if (sc->sc_type >= WM_T_PCH) {
   4227 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4228 		    BM_PORT_GEN_CFG);
   4229 		reg &= ~BM_WUC_HOST_WU_BIT;
   4230 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4231 		    BM_PORT_GEN_CFG, reg);
   4232 	}
   4233 
   4234 	/*
   4235 	 * For PCH, this write will make sure that any noise will be detected
   4236 	 * as a CRC error and be dropped rather than show up as a bad packet
   4237 	 * to the DMA engine
   4238 	 */
   4239 	if (sc->sc_type == WM_T_PCH)
   4240 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4241 
   4242 	if (sc->sc_type >= WM_T_82544)
   4243 		CSR_WRITE(sc, WMREG_WUC, 0);
   4244 
   4245 	wm_reset_mdicnfg_82580(sc);
   4246 
   4247 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4248 		wm_pll_workaround_i210(sc);
   4249 }
   4250 
   4251 /*
   4252  * wm_add_rxbuf:
   4253  *
   4254  *	Add a receive buffer to the indiciated descriptor.
   4255  */
   4256 static int
   4257 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4258 {
   4259 	struct wm_softc *sc = rxq->rxq_sc;
   4260 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4261 	struct mbuf *m;
   4262 	int error;
   4263 
   4264 	KASSERT(mutex_owned(rxq->rxq_lock));
   4265 
   4266 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4267 	if (m == NULL)
   4268 		return ENOBUFS;
   4269 
   4270 	MCLGET(m, M_DONTWAIT);
   4271 	if ((m->m_flags & M_EXT) == 0) {
   4272 		m_freem(m);
   4273 		return ENOBUFS;
   4274 	}
   4275 
   4276 	if (rxs->rxs_mbuf != NULL)
   4277 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4278 
   4279 	rxs->rxs_mbuf = m;
   4280 
   4281 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4282 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4283 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4284 	if (error) {
   4285 		/* XXX XXX XXX */
   4286 		aprint_error_dev(sc->sc_dev,
   4287 		    "unable to load rx DMA map %d, error = %d\n",
   4288 		    idx, error);
   4289 		panic("wm_add_rxbuf");
   4290 	}
   4291 
   4292 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4293 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4294 
   4295 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4296 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4297 			wm_init_rxdesc(rxq, idx);
   4298 	} else
   4299 		wm_init_rxdesc(rxq, idx);
   4300 
   4301 	return 0;
   4302 }
   4303 
   4304 /*
   4305  * wm_rxdrain:
   4306  *
   4307  *	Drain the receive queue.
   4308  */
   4309 static void
   4310 wm_rxdrain(struct wm_rxqueue *rxq)
   4311 {
   4312 	struct wm_softc *sc = rxq->rxq_sc;
   4313 	struct wm_rxsoft *rxs;
   4314 	int i;
   4315 
   4316 	KASSERT(mutex_owned(rxq->rxq_lock));
   4317 
   4318 	for (i = 0; i < WM_NRXDESC; i++) {
   4319 		rxs = &rxq->rxq_soft[i];
   4320 		if (rxs->rxs_mbuf != NULL) {
   4321 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4322 			m_freem(rxs->rxs_mbuf);
   4323 			rxs->rxs_mbuf = NULL;
   4324 		}
   4325 	}
   4326 }
   4327 
   4328 
   4329 /*
   4330  * XXX copy from FreeBSD's sys/net/rss_config.c
   4331  */
   4332 /*
   4333  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4334  * effectiveness may be limited by algorithm choice and available entropy
   4335  * during the boot.
   4336  *
   4337  * XXXRW: And that we don't randomize it yet!
   4338  *
   4339  * This is the default Microsoft RSS specification key which is also
   4340  * the Chelsio T5 firmware default key.
   4341  */
   4342 #define RSS_KEYSIZE 40
   4343 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4344 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4345 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4346 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4347 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4348 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4349 };
   4350 
   4351 /*
   4352  * Caller must pass an array of size sizeof(rss_key).
   4353  *
   4354  * XXX
   4355  * As if_ixgbe may use this function, this function should not be
   4356  * if_wm specific function.
   4357  */
   4358 static void
   4359 wm_rss_getkey(uint8_t *key)
   4360 {
   4361 
   4362 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4363 }
   4364 
   4365 /*
   4366  * Setup registers for RSS.
   4367  *
   4368  * XXX not yet VMDq support
   4369  */
   4370 static void
   4371 wm_init_rss(struct wm_softc *sc)
   4372 {
   4373 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4374 	int i;
   4375 
   4376 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4377 
   4378 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4379 		int qid, reta_ent;
   4380 
   4381 		qid  = i % sc->sc_nqueues;
   4382 		switch(sc->sc_type) {
   4383 		case WM_T_82574:
   4384 			reta_ent = __SHIFTIN(qid,
   4385 			    RETA_ENT_QINDEX_MASK_82574);
   4386 			break;
   4387 		case WM_T_82575:
   4388 			reta_ent = __SHIFTIN(qid,
   4389 			    RETA_ENT_QINDEX1_MASK_82575);
   4390 			break;
   4391 		default:
   4392 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4393 			break;
   4394 		}
   4395 
   4396 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4397 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4398 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4399 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4400 	}
   4401 
   4402 	wm_rss_getkey((uint8_t *)rss_key);
   4403 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4404 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4405 
   4406 	if (sc->sc_type == WM_T_82574)
   4407 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4408 	else
   4409 		mrqc = MRQC_ENABLE_RSS_MQ;
   4410 
   4411 	/* XXXX
   4412 	 * The same as FreeBSD igb.
   4413 	 * Why doesn't use MRQC_RSS_FIELD_IPV6_EX?
   4414 	 */
   4415 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4416 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4417 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4418 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4419 
   4420 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4421 }
   4422 
   4423 /*
   4424  * Adjust TX and RX queue numbers which the system actulally uses.
   4425  *
   4426  * The numbers are affected by below parameters.
   4427  *     - The nubmer of hardware queues
   4428  *     - The number of MSI-X vectors (= "nvectors" argument)
   4429  *     - ncpu
   4430  */
   4431 static void
   4432 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4433 {
   4434 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4435 
   4436 	if (nvectors < 2) {
   4437 		sc->sc_nqueues = 1;
   4438 		return;
   4439 	}
   4440 
   4441 	switch(sc->sc_type) {
   4442 	case WM_T_82572:
   4443 		hw_ntxqueues = 2;
   4444 		hw_nrxqueues = 2;
   4445 		break;
   4446 	case WM_T_82574:
   4447 		hw_ntxqueues = 2;
   4448 		hw_nrxqueues = 2;
   4449 		break;
   4450 	case WM_T_82575:
   4451 		hw_ntxqueues = 4;
   4452 		hw_nrxqueues = 4;
   4453 		break;
   4454 	case WM_T_82576:
   4455 		hw_ntxqueues = 16;
   4456 		hw_nrxqueues = 16;
   4457 		break;
   4458 	case WM_T_82580:
   4459 	case WM_T_I350:
   4460 	case WM_T_I354:
   4461 		hw_ntxqueues = 8;
   4462 		hw_nrxqueues = 8;
   4463 		break;
   4464 	case WM_T_I210:
   4465 		hw_ntxqueues = 4;
   4466 		hw_nrxqueues = 4;
   4467 		break;
   4468 	case WM_T_I211:
   4469 		hw_ntxqueues = 2;
   4470 		hw_nrxqueues = 2;
   4471 		break;
   4472 		/*
   4473 		 * As below ethernet controllers does not support MSI-X,
   4474 		 * this driver let them not use multiqueue.
   4475 		 *     - WM_T_80003
   4476 		 *     - WM_T_ICH8
   4477 		 *     - WM_T_ICH9
   4478 		 *     - WM_T_ICH10
   4479 		 *     - WM_T_PCH
   4480 		 *     - WM_T_PCH2
   4481 		 *     - WM_T_PCH_LPT
   4482 		 */
   4483 	default:
   4484 		hw_ntxqueues = 1;
   4485 		hw_nrxqueues = 1;
   4486 		break;
   4487 	}
   4488 
   4489 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4490 
   4491 	/*
   4492 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   4493 	 * the number of queues used actually.
   4494 	 */
   4495 	if (nvectors < hw_nqueues + 1) {
   4496 		sc->sc_nqueues = nvectors - 1;
   4497 	} else {
   4498 		sc->sc_nqueues = hw_nqueues;
   4499 	}
   4500 
   4501 	/*
   4502 	 * As queues more then cpus cannot improve scaling, we limit
   4503 	 * the number of queues used actually.
   4504 	 */
   4505 	if (ncpu < sc->sc_nqueues)
   4506 		sc->sc_nqueues = ncpu;
   4507 }
   4508 
   4509 /*
   4510  * Both single interrupt MSI and INTx can use this function.
   4511  */
   4512 static int
   4513 wm_setup_legacy(struct wm_softc *sc)
   4514 {
   4515 	pci_chipset_tag_t pc = sc->sc_pc;
   4516 	const char *intrstr = NULL;
   4517 	char intrbuf[PCI_INTRSTR_LEN];
   4518 	int error;
   4519 
   4520 	error = wm_alloc_txrx_queues(sc);
   4521 	if (error) {
   4522 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4523 		    error);
   4524 		return ENOMEM;
   4525 	}
   4526 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4527 	    sizeof(intrbuf));
   4528 #ifdef WM_MPSAFE
   4529 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4530 #endif
   4531 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4532 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4533 	if (sc->sc_ihs[0] == NULL) {
   4534 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4535 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4536 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4537 		return ENOMEM;
   4538 	}
   4539 
   4540 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4541 	sc->sc_nintrs = 1;
   4542 	return 0;
   4543 }
   4544 
   4545 static int
   4546 wm_setup_msix(struct wm_softc *sc)
   4547 {
   4548 	void *vih;
   4549 	kcpuset_t *affinity;
   4550 	int qidx, error, intr_idx, txrx_established;
   4551 	pci_chipset_tag_t pc = sc->sc_pc;
   4552 	const char *intrstr = NULL;
   4553 	char intrbuf[PCI_INTRSTR_LEN];
   4554 	char intr_xname[INTRDEVNAMEBUF];
   4555 
   4556 	if (sc->sc_nqueues < ncpu) {
   4557 		/*
   4558 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   4559 		 * interrupts start from CPU#1.
   4560 		 */
   4561 		sc->sc_affinity_offset = 1;
   4562 	} else {
   4563 		/*
   4564 		 * In this case, this device use all CPUs. So, we unify
   4565 		 * affinitied cpu_index to msix vector number for readability.
   4566 		 */
   4567 		sc->sc_affinity_offset = 0;
   4568 	}
   4569 
   4570 	error = wm_alloc_txrx_queues(sc);
   4571 	if (error) {
   4572 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4573 		    error);
   4574 		return ENOMEM;
   4575 	}
   4576 
   4577 	kcpuset_create(&affinity, false);
   4578 	intr_idx = 0;
   4579 
   4580 	/*
   4581 	 * TX and RX
   4582 	 */
   4583 	txrx_established = 0;
   4584 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4585 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4586 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4587 
   4588 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4589 		    sizeof(intrbuf));
   4590 #ifdef WM_MPSAFE
   4591 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4592 		    PCI_INTR_MPSAFE, true);
   4593 #endif
   4594 		memset(intr_xname, 0, sizeof(intr_xname));
   4595 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4596 		    device_xname(sc->sc_dev), qidx);
   4597 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4598 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4599 		if (vih == NULL) {
   4600 			aprint_error_dev(sc->sc_dev,
   4601 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4602 			    intrstr ? " at " : "",
   4603 			    intrstr ? intrstr : "");
   4604 
   4605 			goto fail;
   4606 		}
   4607 		kcpuset_zero(affinity);
   4608 		/* Round-robin affinity */
   4609 		kcpuset_set(affinity, affinity_to);
   4610 		error = interrupt_distribute(vih, affinity, NULL);
   4611 		if (error == 0) {
   4612 			aprint_normal_dev(sc->sc_dev,
   4613 			    "for TX and RX interrupting at %s affinity to %u\n",
   4614 			    intrstr, affinity_to);
   4615 		} else {
   4616 			aprint_normal_dev(sc->sc_dev,
   4617 			    "for TX and RX interrupting at %s\n", intrstr);
   4618 		}
   4619 		sc->sc_ihs[intr_idx] = vih;
   4620 		wmq->wmq_id= qidx;
   4621 		wmq->wmq_intr_idx = intr_idx;
   4622 
   4623 		txrx_established++;
   4624 		intr_idx++;
   4625 	}
   4626 
   4627 	/*
   4628 	 * LINK
   4629 	 */
   4630 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4631 	    sizeof(intrbuf));
   4632 #ifdef WM_MPSAFE
   4633 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4634 #endif
   4635 	memset(intr_xname, 0, sizeof(intr_xname));
   4636 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4637 	    device_xname(sc->sc_dev));
   4638 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4639 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4640 	if (vih == NULL) {
   4641 		aprint_error_dev(sc->sc_dev,
   4642 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4643 		    intrstr ? " at " : "",
   4644 		    intrstr ? intrstr : "");
   4645 
   4646 		goto fail;
   4647 	}
   4648 	/* keep default affinity to LINK interrupt */
   4649 	aprint_normal_dev(sc->sc_dev,
   4650 	    "for LINK interrupting at %s\n", intrstr);
   4651 	sc->sc_ihs[intr_idx] = vih;
   4652 	sc->sc_link_intr_idx = intr_idx;
   4653 
   4654 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4655 	kcpuset_destroy(affinity);
   4656 	return 0;
   4657 
   4658  fail:
   4659 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4660 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4661 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4662 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4663 	}
   4664 
   4665 	kcpuset_destroy(affinity);
   4666 	return ENOMEM;
   4667 }
   4668 
   4669 static void
   4670 wm_turnon(struct wm_softc *sc)
   4671 {
   4672 	int i;
   4673 
   4674 	KASSERT(WM_CORE_LOCKED(sc));
   4675 
   4676 	for(i = 0; i < sc->sc_nqueues; i++) {
   4677 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4678 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4679 
   4680 		mutex_enter(txq->txq_lock);
   4681 		txq->txq_stopping = false;
   4682 		mutex_exit(txq->txq_lock);
   4683 
   4684 		mutex_enter(rxq->rxq_lock);
   4685 		rxq->rxq_stopping = false;
   4686 		mutex_exit(rxq->rxq_lock);
   4687 	}
   4688 
   4689 	sc->sc_core_stopping = false;
   4690 }
   4691 
   4692 static void
   4693 wm_turnoff(struct wm_softc *sc)
   4694 {
   4695 	int i;
   4696 
   4697 	KASSERT(WM_CORE_LOCKED(sc));
   4698 
   4699 	sc->sc_core_stopping = true;
   4700 
   4701 	for(i = 0; i < sc->sc_nqueues; i++) {
   4702 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4703 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4704 
   4705 		mutex_enter(rxq->rxq_lock);
   4706 		rxq->rxq_stopping = true;
   4707 		mutex_exit(rxq->rxq_lock);
   4708 
   4709 		mutex_enter(txq->txq_lock);
   4710 		txq->txq_stopping = true;
   4711 		mutex_exit(txq->txq_lock);
   4712 	}
   4713 }
   4714 
   4715 /*
   4716  * wm_init:		[ifnet interface function]
   4717  *
   4718  *	Initialize the interface.
   4719  */
   4720 static int
   4721 wm_init(struct ifnet *ifp)
   4722 {
   4723 	struct wm_softc *sc = ifp->if_softc;
   4724 	int ret;
   4725 
   4726 	WM_CORE_LOCK(sc);
   4727 	ret = wm_init_locked(ifp);
   4728 	WM_CORE_UNLOCK(sc);
   4729 
   4730 	return ret;
   4731 }
   4732 
   4733 static int
   4734 wm_init_locked(struct ifnet *ifp)
   4735 {
   4736 	struct wm_softc *sc = ifp->if_softc;
   4737 	int i, j, trynum, error = 0;
   4738 	uint32_t reg;
   4739 
   4740 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4741 		device_xname(sc->sc_dev), __func__));
   4742 	KASSERT(WM_CORE_LOCKED(sc));
   4743 
   4744 	/*
   4745 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   4746 	 * There is a small but measurable benefit to avoiding the adjusment
   4747 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4748 	 * on such platforms.  One possibility is that the DMA itself is
   4749 	 * slightly more efficient if the front of the entire packet (instead
   4750 	 * of the front of the headers) is aligned.
   4751 	 *
   4752 	 * Note we must always set align_tweak to 0 if we are using
   4753 	 * jumbo frames.
   4754 	 */
   4755 #ifdef __NO_STRICT_ALIGNMENT
   4756 	sc->sc_align_tweak = 0;
   4757 #else
   4758 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4759 		sc->sc_align_tweak = 0;
   4760 	else
   4761 		sc->sc_align_tweak = 2;
   4762 #endif /* __NO_STRICT_ALIGNMENT */
   4763 
   4764 	/* Cancel any pending I/O. */
   4765 	wm_stop_locked(ifp, 0);
   4766 
   4767 	/* update statistics before reset */
   4768 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4769 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4770 
   4771 	/* PCH_SPT hardware workaround */
   4772 	if (sc->sc_type == WM_T_PCH_SPT)
   4773 		wm_flush_desc_rings(sc);
   4774 
   4775 	/* Reset the chip to a known state. */
   4776 	wm_reset(sc);
   4777 
   4778 	/* AMT based hardware can now take control from firmware */
   4779 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4780 		wm_get_hw_control(sc);
   4781 
   4782 	/* Init hardware bits */
   4783 	wm_initialize_hardware_bits(sc);
   4784 
   4785 	/* Reset the PHY. */
   4786 	if (sc->sc_flags & WM_F_HAS_MII)
   4787 		wm_gmii_reset(sc);
   4788 
   4789 	/* Calculate (E)ITR value */
   4790 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4791 		sc->sc_itr = 450;	/* For EITR */
   4792 	} else if (sc->sc_type >= WM_T_82543) {
   4793 		/*
   4794 		 * Set up the interrupt throttling register (units of 256ns)
   4795 		 * Note that a footnote in Intel's documentation says this
   4796 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4797 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4798 		 * that that is also true for the 1024ns units of the other
   4799 		 * interrupt-related timer registers -- so, really, we ought
   4800 		 * to divide this value by 4 when the link speed is low.
   4801 		 *
   4802 		 * XXX implement this division at link speed change!
   4803 		 */
   4804 
   4805 		/*
   4806 		 * For N interrupts/sec, set this value to:
   4807 		 * 1000000000 / (N * 256).  Note that we set the
   4808 		 * absolute and packet timer values to this value
   4809 		 * divided by 4 to get "simple timer" behavior.
   4810 		 */
   4811 
   4812 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4813 	}
   4814 
   4815 	error = wm_init_txrx_queues(sc);
   4816 	if (error)
   4817 		goto out;
   4818 
   4819 	/*
   4820 	 * Clear out the VLAN table -- we don't use it (yet).
   4821 	 */
   4822 	CSR_WRITE(sc, WMREG_VET, 0);
   4823 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4824 		trynum = 10; /* Due to hw errata */
   4825 	else
   4826 		trynum = 1;
   4827 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4828 		for (j = 0; j < trynum; j++)
   4829 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4830 
   4831 	/*
   4832 	 * Set up flow-control parameters.
   4833 	 *
   4834 	 * XXX Values could probably stand some tuning.
   4835 	 */
   4836 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4837 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4838 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4839 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4840 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4841 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4842 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4843 	}
   4844 
   4845 	sc->sc_fcrtl = FCRTL_DFLT;
   4846 	if (sc->sc_type < WM_T_82543) {
   4847 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4848 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4849 	} else {
   4850 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4851 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4852 	}
   4853 
   4854 	if (sc->sc_type == WM_T_80003)
   4855 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4856 	else
   4857 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4858 
   4859 	/* Writes the control register. */
   4860 	wm_set_vlan(sc);
   4861 
   4862 	if (sc->sc_flags & WM_F_HAS_MII) {
   4863 		int val;
   4864 
   4865 		switch (sc->sc_type) {
   4866 		case WM_T_80003:
   4867 		case WM_T_ICH8:
   4868 		case WM_T_ICH9:
   4869 		case WM_T_ICH10:
   4870 		case WM_T_PCH:
   4871 		case WM_T_PCH2:
   4872 		case WM_T_PCH_LPT:
   4873 		case WM_T_PCH_SPT:
   4874 			/*
   4875 			 * Set the mac to wait the maximum time between each
   4876 			 * iteration and increase the max iterations when
   4877 			 * polling the phy; this fixes erroneous timeouts at
   4878 			 * 10Mbps.
   4879 			 */
   4880 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4881 			    0xFFFF);
   4882 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4883 			val |= 0x3F;
   4884 			wm_kmrn_writereg(sc,
   4885 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4886 			break;
   4887 		default:
   4888 			break;
   4889 		}
   4890 
   4891 		if (sc->sc_type == WM_T_80003) {
   4892 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4893 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4894 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4895 
   4896 			/* Bypass RX and TX FIFO's */
   4897 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4898 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4899 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4900 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4901 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4902 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4903 		}
   4904 	}
   4905 #if 0
   4906 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4907 #endif
   4908 
   4909 	/* Set up checksum offload parameters. */
   4910 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4911 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4912 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4913 		reg |= RXCSUM_IPOFL;
   4914 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4915 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4916 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4917 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4918 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4919 
   4920 	/* Set up MSI-X */
   4921 	if (sc->sc_nintrs > 1) {
   4922 		uint32_t ivar;
   4923 		struct wm_queue *wmq;
   4924 		int qid, qintr_idx;
   4925 
   4926 		if (sc->sc_type == WM_T_82575) {
   4927 			/* Interrupt control */
   4928 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4929 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4930 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4931 
   4932 			/* TX and RX */
   4933 			for (i = 0; i < sc->sc_nqueues; i++) {
   4934 				wmq = &sc->sc_queue[i];
   4935 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4936 				    EITR_TX_QUEUE(wmq->wmq_id)
   4937 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4938 			}
   4939 			/* Link status */
   4940 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4941 			    EITR_OTHER);
   4942 		} else if (sc->sc_type == WM_T_82574) {
   4943 			/* Interrupt control */
   4944 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4945 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4946 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4947 
   4948 			ivar = 0;
   4949 			/* TX and RX */
   4950 			for (i = 0; i < sc->sc_nqueues; i++) {
   4951 				wmq = &sc->sc_queue[i];
   4952 				qid = wmq->wmq_id;
   4953 				qintr_idx = wmq->wmq_intr_idx;
   4954 
   4955 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4956 				    IVAR_TX_MASK_Q_82574(qid));
   4957 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4958 				    IVAR_RX_MASK_Q_82574(qid));
   4959 			}
   4960 			/* Link status */
   4961 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4962 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4963 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4964 		} else {
   4965 			/* Interrupt control */
   4966 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4967 			    | GPIE_EIAME | GPIE_PBA);
   4968 
   4969 			switch (sc->sc_type) {
   4970 			case WM_T_82580:
   4971 			case WM_T_I350:
   4972 			case WM_T_I354:
   4973 			case WM_T_I210:
   4974 			case WM_T_I211:
   4975 				/* TX and RX */
   4976 				for (i = 0; i < sc->sc_nqueues; i++) {
   4977 					wmq = &sc->sc_queue[i];
   4978 					qid = wmq->wmq_id;
   4979 					qintr_idx = wmq->wmq_intr_idx;
   4980 
   4981 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4982 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4983 					ivar |= __SHIFTIN((qintr_idx
   4984 						| IVAR_VALID),
   4985 					    IVAR_TX_MASK_Q(qid));
   4986 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4987 					ivar |= __SHIFTIN((qintr_idx
   4988 						| IVAR_VALID),
   4989 					    IVAR_RX_MASK_Q(qid));
   4990 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4991 				}
   4992 				break;
   4993 			case WM_T_82576:
   4994 				/* TX and RX */
   4995 				for (i = 0; i < sc->sc_nqueues; i++) {
   4996 					wmq = &sc->sc_queue[i];
   4997 					qid = wmq->wmq_id;
   4998 					qintr_idx = wmq->wmq_intr_idx;
   4999 
   5000 					ivar = CSR_READ(sc,
   5001 					    WMREG_IVAR_Q_82576(qid));
   5002 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5003 					ivar |= __SHIFTIN((qintr_idx
   5004 						| IVAR_VALID),
   5005 					    IVAR_TX_MASK_Q_82576(qid));
   5006 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5007 					ivar |= __SHIFTIN((qintr_idx
   5008 						| IVAR_VALID),
   5009 					    IVAR_RX_MASK_Q_82576(qid));
   5010 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5011 					    ivar);
   5012 				}
   5013 				break;
   5014 			default:
   5015 				break;
   5016 			}
   5017 
   5018 			/* Link status */
   5019 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5020 			    IVAR_MISC_OTHER);
   5021 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5022 		}
   5023 
   5024 		if (sc->sc_nqueues > 1) {
   5025 			wm_init_rss(sc);
   5026 
   5027 			/*
   5028 			** NOTE: Receive Full-Packet Checksum Offload
   5029 			** is mutually exclusive with Multiqueue. However
   5030 			** this is not the same as TCP/IP checksums which
   5031 			** still work.
   5032 			*/
   5033 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5034 			reg |= RXCSUM_PCSD;
   5035 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5036 		}
   5037 	}
   5038 
   5039 	/* Set up the interrupt registers. */
   5040 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5041 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5042 	    ICR_RXO | ICR_RXT0;
   5043 	if (sc->sc_nintrs > 1) {
   5044 		uint32_t mask;
   5045 		struct wm_queue *wmq;
   5046 
   5047 		switch (sc->sc_type) {
   5048 		case WM_T_82574:
   5049 			CSR_WRITE(sc, WMREG_EIAC_82574,
   5050 			    WMREG_EIAC_82574_MSIX_MASK);
   5051 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   5052 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5053 			break;
   5054 		default:
   5055 			if (sc->sc_type == WM_T_82575) {
   5056 				mask = 0;
   5057 				for (i = 0; i < sc->sc_nqueues; i++) {
   5058 					wmq = &sc->sc_queue[i];
   5059 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5060 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5061 				}
   5062 				mask |= EITR_OTHER;
   5063 			} else {
   5064 				mask = 0;
   5065 				for (i = 0; i < sc->sc_nqueues; i++) {
   5066 					wmq = &sc->sc_queue[i];
   5067 					mask |= 1 << wmq->wmq_intr_idx;
   5068 				}
   5069 				mask |= 1 << sc->sc_link_intr_idx;
   5070 			}
   5071 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5072 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5073 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5074 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5075 			break;
   5076 		}
   5077 	} else
   5078 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5079 
   5080 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5081 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5082 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5083 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5084 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5085 		reg |= KABGTXD_BGSQLBIAS;
   5086 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5087 	}
   5088 
   5089 	/* Set up the inter-packet gap. */
   5090 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5091 
   5092 	if (sc->sc_type >= WM_T_82543) {
   5093 		/*
   5094 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
   5095 		 * the multi queue function with MSI-X.
   5096 		 */
   5097 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5098 			int qidx;
   5099 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5100 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5101 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5102 				    sc->sc_itr);
   5103 			}
   5104 			/*
   5105 			 * Link interrupts occur much less than TX
   5106 			 * interrupts and RX interrupts. So, we don't
   5107 			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5108 			 * FreeBSD's if_igb.
   5109 			 */
   5110 		} else
   5111 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5112 	}
   5113 
   5114 	/* Set the VLAN ethernetype. */
   5115 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5116 
   5117 	/*
   5118 	 * Set up the transmit control register; we start out with
   5119 	 * a collision distance suitable for FDX, but update it whe
   5120 	 * we resolve the media type.
   5121 	 */
   5122 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5123 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5124 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5125 	if (sc->sc_type >= WM_T_82571)
   5126 		sc->sc_tctl |= TCTL_MULR;
   5127 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5128 
   5129 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5130 		/* Write TDT after TCTL.EN is set. See the document. */
   5131 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5132 	}
   5133 
   5134 	if (sc->sc_type == WM_T_80003) {
   5135 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5136 		reg &= ~TCTL_EXT_GCEX_MASK;
   5137 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5138 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5139 	}
   5140 
   5141 	/* Set the media. */
   5142 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5143 		goto out;
   5144 
   5145 	/* Configure for OS presence */
   5146 	wm_init_manageability(sc);
   5147 
   5148 	/*
   5149 	 * Set up the receive control register; we actually program
   5150 	 * the register when we set the receive filter.  Use multicast
   5151 	 * address offset type 0.
   5152 	 *
   5153 	 * Only the i82544 has the ability to strip the incoming
   5154 	 * CRC, so we don't enable that feature.
   5155 	 */
   5156 	sc->sc_mchash_type = 0;
   5157 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5158 	    | RCTL_MO(sc->sc_mchash_type);
   5159 
   5160 	/*
   5161 	 * The I350 has a bug where it always strips the CRC whether
   5162 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   5163 	 */
   5164 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5165 	    || (sc->sc_type == WM_T_I210))
   5166 		sc->sc_rctl |= RCTL_SECRC;
   5167 
   5168 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5169 	    && (ifp->if_mtu > ETHERMTU)) {
   5170 		sc->sc_rctl |= RCTL_LPE;
   5171 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5172 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5173 	}
   5174 
   5175 	if (MCLBYTES == 2048) {
   5176 		sc->sc_rctl |= RCTL_2k;
   5177 	} else {
   5178 		if (sc->sc_type >= WM_T_82543) {
   5179 			switch (MCLBYTES) {
   5180 			case 4096:
   5181 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5182 				break;
   5183 			case 8192:
   5184 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5185 				break;
   5186 			case 16384:
   5187 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5188 				break;
   5189 			default:
   5190 				panic("wm_init: MCLBYTES %d unsupported",
   5191 				    MCLBYTES);
   5192 				break;
   5193 			}
   5194 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5195 	}
   5196 
   5197 	/* Set the receive filter. */
   5198 	wm_set_filter(sc);
   5199 
   5200 	/* Enable ECC */
   5201 	switch (sc->sc_type) {
   5202 	case WM_T_82571:
   5203 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5204 		reg |= PBA_ECC_CORR_EN;
   5205 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5206 		break;
   5207 	case WM_T_PCH_LPT:
   5208 	case WM_T_PCH_SPT:
   5209 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5210 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5211 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5212 
   5213 		sc->sc_ctrl |= CTRL_MEHE;
   5214 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5215 		break;
   5216 	default:
   5217 		break;
   5218 	}
   5219 
   5220 	/* On 575 and later set RDT only if RX enabled */
   5221 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5222 		int qidx;
   5223 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5224 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5225 			for (i = 0; i < WM_NRXDESC; i++) {
   5226 				mutex_enter(rxq->rxq_lock);
   5227 				wm_init_rxdesc(rxq, i);
   5228 				mutex_exit(rxq->rxq_lock);
   5229 
   5230 			}
   5231 		}
   5232 	}
   5233 
   5234 	wm_turnon(sc);
   5235 
   5236 	/* Start the one second link check clock. */
   5237 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5238 
   5239 	/* ...all done! */
   5240 	ifp->if_flags |= IFF_RUNNING;
   5241 	ifp->if_flags &= ~IFF_OACTIVE;
   5242 
   5243  out:
   5244 	sc->sc_if_flags = ifp->if_flags;
   5245 	if (error)
   5246 		log(LOG_ERR, "%s: interface not running\n",
   5247 		    device_xname(sc->sc_dev));
   5248 	return error;
   5249 }
   5250 
   5251 /*
   5252  * wm_stop:		[ifnet interface function]
   5253  *
   5254  *	Stop transmission on the interface.
   5255  */
   5256 static void
   5257 wm_stop(struct ifnet *ifp, int disable)
   5258 {
   5259 	struct wm_softc *sc = ifp->if_softc;
   5260 
   5261 	WM_CORE_LOCK(sc);
   5262 	wm_stop_locked(ifp, disable);
   5263 	WM_CORE_UNLOCK(sc);
   5264 }
   5265 
   5266 static void
   5267 wm_stop_locked(struct ifnet *ifp, int disable)
   5268 {
   5269 	struct wm_softc *sc = ifp->if_softc;
   5270 	struct wm_txsoft *txs;
   5271 	int i, qidx;
   5272 
   5273 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5274 		device_xname(sc->sc_dev), __func__));
   5275 	KASSERT(WM_CORE_LOCKED(sc));
   5276 
   5277 	wm_turnoff(sc);
   5278 
   5279 	/* Stop the one second clock. */
   5280 	callout_stop(&sc->sc_tick_ch);
   5281 
   5282 	/* Stop the 82547 Tx FIFO stall check timer. */
   5283 	if (sc->sc_type == WM_T_82547)
   5284 		callout_stop(&sc->sc_txfifo_ch);
   5285 
   5286 	if (sc->sc_flags & WM_F_HAS_MII) {
   5287 		/* Down the MII. */
   5288 		mii_down(&sc->sc_mii);
   5289 	} else {
   5290 #if 0
   5291 		/* Should we clear PHY's status properly? */
   5292 		wm_reset(sc);
   5293 #endif
   5294 	}
   5295 
   5296 	/* Stop the transmit and receive processes. */
   5297 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5298 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5299 	sc->sc_rctl &= ~RCTL_EN;
   5300 
   5301 	/*
   5302 	 * Clear the interrupt mask to ensure the device cannot assert its
   5303 	 * interrupt line.
   5304 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5305 	 * service any currently pending or shared interrupt.
   5306 	 */
   5307 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5308 	sc->sc_icr = 0;
   5309 	if (sc->sc_nintrs > 1) {
   5310 		if (sc->sc_type != WM_T_82574) {
   5311 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5312 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5313 		} else
   5314 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5315 	}
   5316 
   5317 	/* Release any queued transmit buffers. */
   5318 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5319 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5320 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5321 		mutex_enter(txq->txq_lock);
   5322 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5323 			txs = &txq->txq_soft[i];
   5324 			if (txs->txs_mbuf != NULL) {
   5325 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5326 				m_freem(txs->txs_mbuf);
   5327 				txs->txs_mbuf = NULL;
   5328 			}
   5329 		}
   5330 		mutex_exit(txq->txq_lock);
   5331 	}
   5332 
   5333 	/* Mark the interface as down and cancel the watchdog timer. */
   5334 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5335 	ifp->if_timer = 0;
   5336 
   5337 	if (disable) {
   5338 		for (i = 0; i < sc->sc_nqueues; i++) {
   5339 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5340 			mutex_enter(rxq->rxq_lock);
   5341 			wm_rxdrain(rxq);
   5342 			mutex_exit(rxq->rxq_lock);
   5343 		}
   5344 	}
   5345 
   5346 #if 0 /* notyet */
   5347 	if (sc->sc_type >= WM_T_82544)
   5348 		CSR_WRITE(sc, WMREG_WUC, 0);
   5349 #endif
   5350 }
   5351 
   5352 static void
   5353 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5354 {
   5355 	struct mbuf *m;
   5356 	int i;
   5357 
   5358 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5359 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5360 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5361 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5362 		    m->m_data, m->m_len, m->m_flags);
   5363 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5364 	    i, i == 1 ? "" : "s");
   5365 }
   5366 
   5367 /*
   5368  * wm_82547_txfifo_stall:
   5369  *
   5370  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5371  *	reset the FIFO pointers, and restart packet transmission.
   5372  */
   5373 static void
   5374 wm_82547_txfifo_stall(void *arg)
   5375 {
   5376 	struct wm_softc *sc = arg;
   5377 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5378 
   5379 	mutex_enter(txq->txq_lock);
   5380 
   5381 	if (txq->txq_stopping)
   5382 		goto out;
   5383 
   5384 	if (txq->txq_fifo_stall) {
   5385 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5386 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5387 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5388 			/*
   5389 			 * Packets have drained.  Stop transmitter, reset
   5390 			 * FIFO pointers, restart transmitter, and kick
   5391 			 * the packet queue.
   5392 			 */
   5393 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5394 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5395 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5396 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5397 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5398 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5399 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5400 			CSR_WRITE_FLUSH(sc);
   5401 
   5402 			txq->txq_fifo_head = 0;
   5403 			txq->txq_fifo_stall = 0;
   5404 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5405 		} else {
   5406 			/*
   5407 			 * Still waiting for packets to drain; try again in
   5408 			 * another tick.
   5409 			 */
   5410 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5411 		}
   5412 	}
   5413 
   5414 out:
   5415 	mutex_exit(txq->txq_lock);
   5416 }
   5417 
   5418 /*
   5419  * wm_82547_txfifo_bugchk:
   5420  *
   5421  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5422  *	prevent enqueueing a packet that would wrap around the end
   5423  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   5424  *
   5425  *	We do this by checking the amount of space before the end
   5426  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5427  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5428  *	the internal FIFO pointers to the beginning, and restart
   5429  *	transmission on the interface.
   5430  */
   5431 #define	WM_FIFO_HDR		0x10
   5432 #define	WM_82547_PAD_LEN	0x3e0
   5433 static int
   5434 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5435 {
   5436 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5437 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5438 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5439 
   5440 	/* Just return if already stalled. */
   5441 	if (txq->txq_fifo_stall)
   5442 		return 1;
   5443 
   5444 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5445 		/* Stall only occurs in half-duplex mode. */
   5446 		goto send_packet;
   5447 	}
   5448 
   5449 	if (len >= WM_82547_PAD_LEN + space) {
   5450 		txq->txq_fifo_stall = 1;
   5451 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5452 		return 1;
   5453 	}
   5454 
   5455  send_packet:
   5456 	txq->txq_fifo_head += len;
   5457 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5458 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5459 
   5460 	return 0;
   5461 }
   5462 
   5463 static int
   5464 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5465 {
   5466 	int error;
   5467 
   5468 	/*
   5469 	 * Allocate the control data structures, and create and load the
   5470 	 * DMA map for it.
   5471 	 *
   5472 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5473 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5474 	 * both sets within the same 4G segment.
   5475 	 */
   5476 	if (sc->sc_type < WM_T_82544)
   5477 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5478 	else
   5479 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5480 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5481 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5482 	else
   5483 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5484 
   5485 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5486 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5487 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5488 		aprint_error_dev(sc->sc_dev,
   5489 		    "unable to allocate TX control data, error = %d\n",
   5490 		    error);
   5491 		goto fail_0;
   5492 	}
   5493 
   5494 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5495 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5496 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5497 		aprint_error_dev(sc->sc_dev,
   5498 		    "unable to map TX control data, error = %d\n", error);
   5499 		goto fail_1;
   5500 	}
   5501 
   5502 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5503 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5504 		aprint_error_dev(sc->sc_dev,
   5505 		    "unable to create TX control data DMA map, error = %d\n",
   5506 		    error);
   5507 		goto fail_2;
   5508 	}
   5509 
   5510 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5511 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5512 		aprint_error_dev(sc->sc_dev,
   5513 		    "unable to load TX control data DMA map, error = %d\n",
   5514 		    error);
   5515 		goto fail_3;
   5516 	}
   5517 
   5518 	return 0;
   5519 
   5520  fail_3:
   5521 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5522  fail_2:
   5523 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5524 	    WM_TXDESCS_SIZE(txq));
   5525  fail_1:
   5526 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5527  fail_0:
   5528 	return error;
   5529 }
   5530 
   5531 static void
   5532 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5533 {
   5534 
   5535 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5536 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5537 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5538 	    WM_TXDESCS_SIZE(txq));
   5539 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5540 }
   5541 
   5542 static int
   5543 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5544 {
   5545 	int error;
   5546 
   5547 	/*
   5548 	 * Allocate the control data structures, and create and load the
   5549 	 * DMA map for it.
   5550 	 *
   5551 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5552 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5553 	 * both sets within the same 4G segment.
   5554 	 */
   5555 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5556 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5557 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5558 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5559 		aprint_error_dev(sc->sc_dev,
   5560 		    "unable to allocate RX control data, error = %d\n",
   5561 		    error);
   5562 		goto fail_0;
   5563 	}
   5564 
   5565 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5566 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5567 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5568 		aprint_error_dev(sc->sc_dev,
   5569 		    "unable to map RX control data, error = %d\n", error);
   5570 		goto fail_1;
   5571 	}
   5572 
   5573 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5574 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5575 		aprint_error_dev(sc->sc_dev,
   5576 		    "unable to create RX control data DMA map, error = %d\n",
   5577 		    error);
   5578 		goto fail_2;
   5579 	}
   5580 
   5581 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5582 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5583 		aprint_error_dev(sc->sc_dev,
   5584 		    "unable to load RX control data DMA map, error = %d\n",
   5585 		    error);
   5586 		goto fail_3;
   5587 	}
   5588 
   5589 	return 0;
   5590 
   5591  fail_3:
   5592 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5593  fail_2:
   5594 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5595 	    rxq->rxq_desc_size);
   5596  fail_1:
   5597 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5598  fail_0:
   5599 	return error;
   5600 }
   5601 
   5602 static void
   5603 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5604 {
   5605 
   5606 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5607 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5608 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5609 	    rxq->rxq_desc_size);
   5610 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5611 }
   5612 
   5613 
   5614 static int
   5615 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5616 {
   5617 	int i, error;
   5618 
   5619 	/* Create the transmit buffer DMA maps. */
   5620 	WM_TXQUEUELEN(txq) =
   5621 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5622 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5623 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5624 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5625 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5626 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5627 			aprint_error_dev(sc->sc_dev,
   5628 			    "unable to create Tx DMA map %d, error = %d\n",
   5629 			    i, error);
   5630 			goto fail;
   5631 		}
   5632 	}
   5633 
   5634 	return 0;
   5635 
   5636  fail:
   5637 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5638 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5639 			bus_dmamap_destroy(sc->sc_dmat,
   5640 			    txq->txq_soft[i].txs_dmamap);
   5641 	}
   5642 	return error;
   5643 }
   5644 
   5645 static void
   5646 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5647 {
   5648 	int i;
   5649 
   5650 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5651 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5652 			bus_dmamap_destroy(sc->sc_dmat,
   5653 			    txq->txq_soft[i].txs_dmamap);
   5654 	}
   5655 }
   5656 
   5657 static int
   5658 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5659 {
   5660 	int i, error;
   5661 
   5662 	/* Create the receive buffer DMA maps. */
   5663 	for (i = 0; i < WM_NRXDESC; i++) {
   5664 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5665 			    MCLBYTES, 0, 0,
   5666 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5667 			aprint_error_dev(sc->sc_dev,
   5668 			    "unable to create Rx DMA map %d error = %d\n",
   5669 			    i, error);
   5670 			goto fail;
   5671 		}
   5672 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5673 	}
   5674 
   5675 	return 0;
   5676 
   5677  fail:
   5678 	for (i = 0; i < WM_NRXDESC; i++) {
   5679 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5680 			bus_dmamap_destroy(sc->sc_dmat,
   5681 			    rxq->rxq_soft[i].rxs_dmamap);
   5682 	}
   5683 	return error;
   5684 }
   5685 
   5686 static void
   5687 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5688 {
   5689 	int i;
   5690 
   5691 	for (i = 0; i < WM_NRXDESC; i++) {
   5692 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5693 			bus_dmamap_destroy(sc->sc_dmat,
   5694 			    rxq->rxq_soft[i].rxs_dmamap);
   5695 	}
   5696 }
   5697 
   5698 /*
   5699  * wm_alloc_quques:
   5700  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5701  */
   5702 static int
   5703 wm_alloc_txrx_queues(struct wm_softc *sc)
   5704 {
   5705 	int i, error, tx_done, rx_done;
   5706 
   5707 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5708 	    KM_SLEEP);
   5709 	if (sc->sc_queue == NULL) {
   5710 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   5711 		error = ENOMEM;
   5712 		goto fail_0;
   5713 	}
   5714 
   5715 	/*
   5716 	 * For transmission
   5717 	 */
   5718 	error = 0;
   5719 	tx_done = 0;
   5720 	for (i = 0; i < sc->sc_nqueues; i++) {
   5721 #ifdef WM_EVENT_COUNTERS
   5722 		int j;
   5723 		const char *xname;
   5724 #endif
   5725 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5726 		txq->txq_sc = sc;
   5727 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5728 
   5729 		error = wm_alloc_tx_descs(sc, txq);
   5730 		if (error)
   5731 			break;
   5732 		error = wm_alloc_tx_buffer(sc, txq);
   5733 		if (error) {
   5734 			wm_free_tx_descs(sc, txq);
   5735 			break;
   5736 		}
   5737 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5738 		if (txq->txq_interq == NULL) {
   5739 			wm_free_tx_descs(sc, txq);
   5740 			wm_free_tx_buffer(sc, txq);
   5741 			error = ENOMEM;
   5742 			break;
   5743 		}
   5744 
   5745 #ifdef WM_EVENT_COUNTERS
   5746 		xname = device_xname(sc->sc_dev);
   5747 
   5748 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5749 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5750 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5751 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5752 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5753 
   5754 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5755 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5756 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5757 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5758 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5759 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5760 
   5761 		for (j = 0; j < WM_NTXSEGS; j++) {
   5762 			snprintf(txq->txq_txseg_evcnt_names[j],
   5763 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   5764 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   5765 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   5766 		}
   5767 
   5768 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5769 
   5770 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5771 #endif /* WM_EVENT_COUNTERS */
   5772 
   5773 		tx_done++;
   5774 	}
   5775 	if (error)
   5776 		goto fail_1;
   5777 
   5778 	/*
   5779 	 * For recieve
   5780 	 */
   5781 	error = 0;
   5782 	rx_done = 0;
   5783 	for (i = 0; i < sc->sc_nqueues; i++) {
   5784 #ifdef WM_EVENT_COUNTERS
   5785 		const char *xname;
   5786 #endif
   5787 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5788 		rxq->rxq_sc = sc;
   5789 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5790 
   5791 		error = wm_alloc_rx_descs(sc, rxq);
   5792 		if (error)
   5793 			break;
   5794 
   5795 		error = wm_alloc_rx_buffer(sc, rxq);
   5796 		if (error) {
   5797 			wm_free_rx_descs(sc, rxq);
   5798 			break;
   5799 		}
   5800 
   5801 #ifdef WM_EVENT_COUNTERS
   5802 		xname = device_xname(sc->sc_dev);
   5803 
   5804 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5805 
   5806 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5807 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5808 #endif /* WM_EVENT_COUNTERS */
   5809 
   5810 		rx_done++;
   5811 	}
   5812 	if (error)
   5813 		goto fail_2;
   5814 
   5815 	return 0;
   5816 
   5817  fail_2:
   5818 	for (i = 0; i < rx_done; i++) {
   5819 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5820 		wm_free_rx_buffer(sc, rxq);
   5821 		wm_free_rx_descs(sc, rxq);
   5822 		if (rxq->rxq_lock)
   5823 			mutex_obj_free(rxq->rxq_lock);
   5824 	}
   5825  fail_1:
   5826 	for (i = 0; i < tx_done; i++) {
   5827 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5828 		pcq_destroy(txq->txq_interq);
   5829 		wm_free_tx_buffer(sc, txq);
   5830 		wm_free_tx_descs(sc, txq);
   5831 		if (txq->txq_lock)
   5832 			mutex_obj_free(txq->txq_lock);
   5833 	}
   5834 
   5835 	kmem_free(sc->sc_queue,
   5836 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5837  fail_0:
   5838 	return error;
   5839 }
   5840 
   5841 /*
   5842  * wm_free_quques:
   5843  *	Free {tx,rx}descs and {tx,rx} buffers
   5844  */
   5845 static void
   5846 wm_free_txrx_queues(struct wm_softc *sc)
   5847 {
   5848 	int i;
   5849 
   5850 	for (i = 0; i < sc->sc_nqueues; i++) {
   5851 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5852 		wm_free_rx_buffer(sc, rxq);
   5853 		wm_free_rx_descs(sc, rxq);
   5854 		if (rxq->rxq_lock)
   5855 			mutex_obj_free(rxq->rxq_lock);
   5856 	}
   5857 
   5858 	for (i = 0; i < sc->sc_nqueues; i++) {
   5859 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5860 		wm_free_tx_buffer(sc, txq);
   5861 		wm_free_tx_descs(sc, txq);
   5862 		if (txq->txq_lock)
   5863 			mutex_obj_free(txq->txq_lock);
   5864 	}
   5865 
   5866 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5867 }
   5868 
   5869 static void
   5870 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5871 {
   5872 
   5873 	KASSERT(mutex_owned(txq->txq_lock));
   5874 
   5875 	/* Initialize the transmit descriptor ring. */
   5876 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5877 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5878 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5879 	txq->txq_free = WM_NTXDESC(txq);
   5880 	txq->txq_next = 0;
   5881 }
   5882 
   5883 static void
   5884 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5885     struct wm_txqueue *txq)
   5886 {
   5887 
   5888 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5889 		device_xname(sc->sc_dev), __func__));
   5890 	KASSERT(mutex_owned(txq->txq_lock));
   5891 
   5892 	if (sc->sc_type < WM_T_82543) {
   5893 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5894 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5895 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5896 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5897 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5898 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5899 	} else {
   5900 		int qid = wmq->wmq_id;
   5901 
   5902 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5903 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5904 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5905 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5906 
   5907 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5908 			/*
   5909 			 * Don't write TDT before TCTL.EN is set.
   5910 			 * See the document.
   5911 			 */
   5912 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5913 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5914 			    | TXDCTL_WTHRESH(0));
   5915 		else {
   5916 			/* ITR / 4 */
   5917 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5918 			if (sc->sc_type >= WM_T_82540) {
   5919 				/* should be same */
   5920 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5921 			}
   5922 
   5923 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5924 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5925 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5926 		}
   5927 	}
   5928 }
   5929 
   5930 static void
   5931 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5932 {
   5933 	int i;
   5934 
   5935 	KASSERT(mutex_owned(txq->txq_lock));
   5936 
   5937 	/* Initialize the transmit job descriptors. */
   5938 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5939 		txq->txq_soft[i].txs_mbuf = NULL;
   5940 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5941 	txq->txq_snext = 0;
   5942 	txq->txq_sdirty = 0;
   5943 }
   5944 
   5945 static void
   5946 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5947     struct wm_txqueue *txq)
   5948 {
   5949 
   5950 	KASSERT(mutex_owned(txq->txq_lock));
   5951 
   5952 	/*
   5953 	 * Set up some register offsets that are different between
   5954 	 * the i82542 and the i82543 and later chips.
   5955 	 */
   5956 	if (sc->sc_type < WM_T_82543)
   5957 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5958 	else
   5959 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5960 
   5961 	wm_init_tx_descs(sc, txq);
   5962 	wm_init_tx_regs(sc, wmq, txq);
   5963 	wm_init_tx_buffer(sc, txq);
   5964 }
   5965 
   5966 static void
   5967 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5968     struct wm_rxqueue *rxq)
   5969 {
   5970 
   5971 	KASSERT(mutex_owned(rxq->rxq_lock));
   5972 
   5973 	/*
   5974 	 * Initialize the receive descriptor and receive job
   5975 	 * descriptor rings.
   5976 	 */
   5977 	if (sc->sc_type < WM_T_82543) {
   5978 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5979 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5980 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5981 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5982 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5983 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5984 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5985 
   5986 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5987 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5988 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5989 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5990 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5991 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5992 	} else {
   5993 		int qid = wmq->wmq_id;
   5994 
   5995 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5996 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5997 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5998 
   5999 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6000 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6001 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
   6002 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   6003 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6004 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6005 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6006 			    | RXDCTL_WTHRESH(1));
   6007 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6008 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6009 		} else {
   6010 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6011 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6012 			/* ITR / 4 */
   6013 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   6014 			/* MUST be same */
   6015 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   6016 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6017 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6018 		}
   6019 	}
   6020 }
   6021 
   6022 static int
   6023 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6024 {
   6025 	struct wm_rxsoft *rxs;
   6026 	int error, i;
   6027 
   6028 	KASSERT(mutex_owned(rxq->rxq_lock));
   6029 
   6030 	for (i = 0; i < WM_NRXDESC; i++) {
   6031 		rxs = &rxq->rxq_soft[i];
   6032 		if (rxs->rxs_mbuf == NULL) {
   6033 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6034 				log(LOG_ERR, "%s: unable to allocate or map "
   6035 				    "rx buffer %d, error = %d\n",
   6036 				    device_xname(sc->sc_dev), i, error);
   6037 				/*
   6038 				 * XXX Should attempt to run with fewer receive
   6039 				 * XXX buffers instead of just failing.
   6040 				 */
   6041 				wm_rxdrain(rxq);
   6042 				return ENOMEM;
   6043 			}
   6044 		} else {
   6045 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6046 				wm_init_rxdesc(rxq, i);
   6047 			/*
   6048 			 * For 82575 and newer device, the RX descriptors
   6049 			 * must be initialized after the setting of RCTL.EN in
   6050 			 * wm_set_filter()
   6051 			 */
   6052 		}
   6053 	}
   6054 	rxq->rxq_ptr = 0;
   6055 	rxq->rxq_discard = 0;
   6056 	WM_RXCHAIN_RESET(rxq);
   6057 
   6058 	return 0;
   6059 }
   6060 
   6061 static int
   6062 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6063     struct wm_rxqueue *rxq)
   6064 {
   6065 
   6066 	KASSERT(mutex_owned(rxq->rxq_lock));
   6067 
   6068 	/*
   6069 	 * Set up some register offsets that are different between
   6070 	 * the i82542 and the i82543 and later chips.
   6071 	 */
   6072 	if (sc->sc_type < WM_T_82543)
   6073 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6074 	else
   6075 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6076 
   6077 	wm_init_rx_regs(sc, wmq, rxq);
   6078 	return wm_init_rx_buffer(sc, rxq);
   6079 }
   6080 
   6081 /*
   6082  * wm_init_quques:
   6083  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6084  */
   6085 static int
   6086 wm_init_txrx_queues(struct wm_softc *sc)
   6087 {
   6088 	int i, error = 0;
   6089 
   6090 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6091 		device_xname(sc->sc_dev), __func__));
   6092 
   6093 	for (i = 0; i < sc->sc_nqueues; i++) {
   6094 		struct wm_queue *wmq = &sc->sc_queue[i];
   6095 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6096 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6097 
   6098 		mutex_enter(txq->txq_lock);
   6099 		wm_init_tx_queue(sc, wmq, txq);
   6100 		mutex_exit(txq->txq_lock);
   6101 
   6102 		mutex_enter(rxq->rxq_lock);
   6103 		error = wm_init_rx_queue(sc, wmq, rxq);
   6104 		mutex_exit(rxq->rxq_lock);
   6105 		if (error)
   6106 			break;
   6107 	}
   6108 
   6109 	return error;
   6110 }
   6111 
   6112 /*
   6113  * wm_tx_offload:
   6114  *
   6115  *	Set up TCP/IP checksumming parameters for the
   6116  *	specified packet.
   6117  */
   6118 static int
   6119 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6120     uint8_t *fieldsp)
   6121 {
   6122 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6123 	struct mbuf *m0 = txs->txs_mbuf;
   6124 	struct livengood_tcpip_ctxdesc *t;
   6125 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6126 	uint32_t ipcse;
   6127 	struct ether_header *eh;
   6128 	int offset, iphl;
   6129 	uint8_t fields;
   6130 
   6131 	/*
   6132 	 * XXX It would be nice if the mbuf pkthdr had offset
   6133 	 * fields for the protocol headers.
   6134 	 */
   6135 
   6136 	eh = mtod(m0, struct ether_header *);
   6137 	switch (htons(eh->ether_type)) {
   6138 	case ETHERTYPE_IP:
   6139 	case ETHERTYPE_IPV6:
   6140 		offset = ETHER_HDR_LEN;
   6141 		break;
   6142 
   6143 	case ETHERTYPE_VLAN:
   6144 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6145 		break;
   6146 
   6147 	default:
   6148 		/*
   6149 		 * Don't support this protocol or encapsulation.
   6150 		 */
   6151 		*fieldsp = 0;
   6152 		*cmdp = 0;
   6153 		return 0;
   6154 	}
   6155 
   6156 	if ((m0->m_pkthdr.csum_flags &
   6157 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6158 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6159 	} else {
   6160 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6161 	}
   6162 	ipcse = offset + iphl - 1;
   6163 
   6164 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6165 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6166 	seg = 0;
   6167 	fields = 0;
   6168 
   6169 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6170 		int hlen = offset + iphl;
   6171 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6172 
   6173 		if (__predict_false(m0->m_len <
   6174 				    (hlen + sizeof(struct tcphdr)))) {
   6175 			/*
   6176 			 * TCP/IP headers are not in the first mbuf; we need
   6177 			 * to do this the slow and painful way.  Let's just
   6178 			 * hope this doesn't happen very often.
   6179 			 */
   6180 			struct tcphdr th;
   6181 
   6182 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6183 
   6184 			m_copydata(m0, hlen, sizeof(th), &th);
   6185 			if (v4) {
   6186 				struct ip ip;
   6187 
   6188 				m_copydata(m0, offset, sizeof(ip), &ip);
   6189 				ip.ip_len = 0;
   6190 				m_copyback(m0,
   6191 				    offset + offsetof(struct ip, ip_len),
   6192 				    sizeof(ip.ip_len), &ip.ip_len);
   6193 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6194 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6195 			} else {
   6196 				struct ip6_hdr ip6;
   6197 
   6198 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6199 				ip6.ip6_plen = 0;
   6200 				m_copyback(m0,
   6201 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6202 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6203 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6204 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6205 			}
   6206 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6207 			    sizeof(th.th_sum), &th.th_sum);
   6208 
   6209 			hlen += th.th_off << 2;
   6210 		} else {
   6211 			/*
   6212 			 * TCP/IP headers are in the first mbuf; we can do
   6213 			 * this the easy way.
   6214 			 */
   6215 			struct tcphdr *th;
   6216 
   6217 			if (v4) {
   6218 				struct ip *ip =
   6219 				    (void *)(mtod(m0, char *) + offset);
   6220 				th = (void *)(mtod(m0, char *) + hlen);
   6221 
   6222 				ip->ip_len = 0;
   6223 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6224 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6225 			} else {
   6226 				struct ip6_hdr *ip6 =
   6227 				    (void *)(mtod(m0, char *) + offset);
   6228 				th = (void *)(mtod(m0, char *) + hlen);
   6229 
   6230 				ip6->ip6_plen = 0;
   6231 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6232 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6233 			}
   6234 			hlen += th->th_off << 2;
   6235 		}
   6236 
   6237 		if (v4) {
   6238 			WM_Q_EVCNT_INCR(txq, txtso);
   6239 			cmdlen |= WTX_TCPIP_CMD_IP;
   6240 		} else {
   6241 			WM_Q_EVCNT_INCR(txq, txtso6);
   6242 			ipcse = 0;
   6243 		}
   6244 		cmd |= WTX_TCPIP_CMD_TSE;
   6245 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6246 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6247 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6248 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6249 	}
   6250 
   6251 	/*
   6252 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6253 	 * offload feature, if we load the context descriptor, we
   6254 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6255 	 */
   6256 
   6257 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6258 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6259 	    WTX_TCPIP_IPCSE(ipcse);
   6260 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6261 		WM_Q_EVCNT_INCR(txq, txipsum);
   6262 		fields |= WTX_IXSM;
   6263 	}
   6264 
   6265 	offset += iphl;
   6266 
   6267 	if (m0->m_pkthdr.csum_flags &
   6268 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6269 		WM_Q_EVCNT_INCR(txq, txtusum);
   6270 		fields |= WTX_TXSM;
   6271 		tucs = WTX_TCPIP_TUCSS(offset) |
   6272 		    WTX_TCPIP_TUCSO(offset +
   6273 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6274 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6275 	} else if ((m0->m_pkthdr.csum_flags &
   6276 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6277 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6278 		fields |= WTX_TXSM;
   6279 		tucs = WTX_TCPIP_TUCSS(offset) |
   6280 		    WTX_TCPIP_TUCSO(offset +
   6281 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6282 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6283 	} else {
   6284 		/* Just initialize it to a valid TCP context. */
   6285 		tucs = WTX_TCPIP_TUCSS(offset) |
   6286 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6287 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6288 	}
   6289 
   6290 	/* Fill in the context descriptor. */
   6291 	t = (struct livengood_tcpip_ctxdesc *)
   6292 	    &txq->txq_descs[txq->txq_next];
   6293 	t->tcpip_ipcs = htole32(ipcs);
   6294 	t->tcpip_tucs = htole32(tucs);
   6295 	t->tcpip_cmdlen = htole32(cmdlen);
   6296 	t->tcpip_seg = htole32(seg);
   6297 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6298 
   6299 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6300 	txs->txs_ndesc++;
   6301 
   6302 	*cmdp = cmd;
   6303 	*fieldsp = fields;
   6304 
   6305 	return 0;
   6306 }
   6307 
   6308 static inline int
   6309 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6310 {
   6311 	struct wm_softc *sc = ifp->if_softc;
   6312 	u_int cpuid = cpu_index(curcpu());
   6313 
   6314 	/*
   6315 	 * Currently, simple distribute strategy.
   6316 	 * TODO:
   6317 	 * destribute by flowid(RSS has value).
   6318 	 */
   6319 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6320 }
   6321 
   6322 /*
   6323  * wm_start:		[ifnet interface function]
   6324  *
   6325  *	Start packet transmission on the interface.
   6326  */
   6327 static void
   6328 wm_start(struct ifnet *ifp)
   6329 {
   6330 	struct wm_softc *sc = ifp->if_softc;
   6331 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6332 
   6333 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6334 
   6335 	/*
   6336 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6337 	 */
   6338 
   6339 	mutex_enter(txq->txq_lock);
   6340 	if (!txq->txq_stopping)
   6341 		wm_start_locked(ifp);
   6342 	mutex_exit(txq->txq_lock);
   6343 }
   6344 
   6345 static void
   6346 wm_start_locked(struct ifnet *ifp)
   6347 {
   6348 	struct wm_softc *sc = ifp->if_softc;
   6349 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6350 
   6351 	wm_send_common_locked(ifp, txq, false);
   6352 }
   6353 
   6354 static int
   6355 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6356 {
   6357 	int qid;
   6358 	struct wm_softc *sc = ifp->if_softc;
   6359 	struct wm_txqueue *txq;
   6360 
   6361 	qid = wm_select_txqueue(ifp, m);
   6362 	txq = &sc->sc_queue[qid].wmq_txq;
   6363 
   6364 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6365 		m_freem(m);
   6366 		WM_Q_EVCNT_INCR(txq, txdrop);
   6367 		return ENOBUFS;
   6368 	}
   6369 
   6370 	/*
   6371 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6372 	 */
   6373 	ifp->if_obytes += m->m_pkthdr.len;
   6374 	if (m->m_flags & M_MCAST)
   6375 		ifp->if_omcasts++;
   6376 
   6377 	if (mutex_tryenter(txq->txq_lock)) {
   6378 		if (!txq->txq_stopping)
   6379 			wm_transmit_locked(ifp, txq);
   6380 		mutex_exit(txq->txq_lock);
   6381 	}
   6382 
   6383 	return 0;
   6384 }
   6385 
   6386 static void
   6387 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6388 {
   6389 
   6390 	wm_send_common_locked(ifp, txq, true);
   6391 }
   6392 
   6393 static void
   6394 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6395     bool is_transmit)
   6396 {
   6397 	struct wm_softc *sc = ifp->if_softc;
   6398 	struct mbuf *m0;
   6399 	struct m_tag *mtag;
   6400 	struct wm_txsoft *txs;
   6401 	bus_dmamap_t dmamap;
   6402 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6403 	bus_addr_t curaddr;
   6404 	bus_size_t seglen, curlen;
   6405 	uint32_t cksumcmd;
   6406 	uint8_t cksumfields;
   6407 
   6408 	KASSERT(mutex_owned(txq->txq_lock));
   6409 
   6410 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6411 		return;
   6412 
   6413 	/* Remember the previous number of free descriptors. */
   6414 	ofree = txq->txq_free;
   6415 
   6416 	/*
   6417 	 * Loop through the send queue, setting up transmit descriptors
   6418 	 * until we drain the queue, or use up all available transmit
   6419 	 * descriptors.
   6420 	 */
   6421 	for (;;) {
   6422 		m0 = NULL;
   6423 
   6424 		/* Get a work queue entry. */
   6425 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6426 			wm_txeof(sc, txq);
   6427 			if (txq->txq_sfree == 0) {
   6428 				DPRINTF(WM_DEBUG_TX,
   6429 				    ("%s: TX: no free job descriptors\n",
   6430 					device_xname(sc->sc_dev)));
   6431 				WM_Q_EVCNT_INCR(txq, txsstall);
   6432 				break;
   6433 			}
   6434 		}
   6435 
   6436 		/* Grab a packet off the queue. */
   6437 		if (is_transmit)
   6438 			m0 = pcq_get(txq->txq_interq);
   6439 		else
   6440 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6441 		if (m0 == NULL)
   6442 			break;
   6443 
   6444 		DPRINTF(WM_DEBUG_TX,
   6445 		    ("%s: TX: have packet to transmit: %p\n",
   6446 		    device_xname(sc->sc_dev), m0));
   6447 
   6448 		txs = &txq->txq_soft[txq->txq_snext];
   6449 		dmamap = txs->txs_dmamap;
   6450 
   6451 		use_tso = (m0->m_pkthdr.csum_flags &
   6452 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6453 
   6454 		/*
   6455 		 * So says the Linux driver:
   6456 		 * The controller does a simple calculation to make sure
   6457 		 * there is enough room in the FIFO before initiating the
   6458 		 * DMA for each buffer.  The calc is:
   6459 		 *	4 = ceil(buffer len / MSS)
   6460 		 * To make sure we don't overrun the FIFO, adjust the max
   6461 		 * buffer len if the MSS drops.
   6462 		 */
   6463 		dmamap->dm_maxsegsz =
   6464 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6465 		    ? m0->m_pkthdr.segsz << 2
   6466 		    : WTX_MAX_LEN;
   6467 
   6468 		/*
   6469 		 * Load the DMA map.  If this fails, the packet either
   6470 		 * didn't fit in the allotted number of segments, or we
   6471 		 * were short on resources.  For the too-many-segments
   6472 		 * case, we simply report an error and drop the packet,
   6473 		 * since we can't sanely copy a jumbo packet to a single
   6474 		 * buffer.
   6475 		 */
   6476 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6477 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6478 		if (error) {
   6479 			if (error == EFBIG) {
   6480 				WM_Q_EVCNT_INCR(txq, txdrop);
   6481 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6482 				    "DMA segments, dropping...\n",
   6483 				    device_xname(sc->sc_dev));
   6484 				wm_dump_mbuf_chain(sc, m0);
   6485 				m_freem(m0);
   6486 				continue;
   6487 			}
   6488 			/*  Short on resources, just stop for now. */
   6489 			DPRINTF(WM_DEBUG_TX,
   6490 			    ("%s: TX: dmamap load failed: %d\n",
   6491 			    device_xname(sc->sc_dev), error));
   6492 			break;
   6493 		}
   6494 
   6495 		segs_needed = dmamap->dm_nsegs;
   6496 		if (use_tso) {
   6497 			/* For sentinel descriptor; see below. */
   6498 			segs_needed++;
   6499 		}
   6500 
   6501 		/*
   6502 		 * Ensure we have enough descriptors free to describe
   6503 		 * the packet.  Note, we always reserve one descriptor
   6504 		 * at the end of the ring due to the semantics of the
   6505 		 * TDT register, plus one more in the event we need
   6506 		 * to load offload context.
   6507 		 */
   6508 		if (segs_needed > txq->txq_free - 2) {
   6509 			/*
   6510 			 * Not enough free descriptors to transmit this
   6511 			 * packet.  We haven't committed anything yet,
   6512 			 * so just unload the DMA map, put the packet
   6513 			 * pack on the queue, and punt.  Notify the upper
   6514 			 * layer that there are no more slots left.
   6515 			 */
   6516 			DPRINTF(WM_DEBUG_TX,
   6517 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6518 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6519 			    segs_needed, txq->txq_free - 1));
   6520 			ifp->if_flags |= IFF_OACTIVE;
   6521 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6522 			WM_Q_EVCNT_INCR(txq, txdstall);
   6523 			break;
   6524 		}
   6525 
   6526 		/*
   6527 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6528 		 * once we know we can transmit the packet, since we
   6529 		 * do some internal FIFO space accounting here.
   6530 		 */
   6531 		if (sc->sc_type == WM_T_82547 &&
   6532 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6533 			DPRINTF(WM_DEBUG_TX,
   6534 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6535 			    device_xname(sc->sc_dev)));
   6536 			ifp->if_flags |= IFF_OACTIVE;
   6537 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6538 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6539 			break;
   6540 		}
   6541 
   6542 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6543 
   6544 		DPRINTF(WM_DEBUG_TX,
   6545 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6546 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6547 
   6548 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6549 
   6550 		/*
   6551 		 * Store a pointer to the packet so that we can free it
   6552 		 * later.
   6553 		 *
   6554 		 * Initially, we consider the number of descriptors the
   6555 		 * packet uses the number of DMA segments.  This may be
   6556 		 * incremented by 1 if we do checksum offload (a descriptor
   6557 		 * is used to set the checksum context).
   6558 		 */
   6559 		txs->txs_mbuf = m0;
   6560 		txs->txs_firstdesc = txq->txq_next;
   6561 		txs->txs_ndesc = segs_needed;
   6562 
   6563 		/* Set up offload parameters for this packet. */
   6564 		if (m0->m_pkthdr.csum_flags &
   6565 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6566 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6567 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6568 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6569 					  &cksumfields) != 0) {
   6570 				/* Error message already displayed. */
   6571 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6572 				continue;
   6573 			}
   6574 		} else {
   6575 			cksumcmd = 0;
   6576 			cksumfields = 0;
   6577 		}
   6578 
   6579 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6580 
   6581 		/* Sync the DMA map. */
   6582 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6583 		    BUS_DMASYNC_PREWRITE);
   6584 
   6585 		/* Initialize the transmit descriptor. */
   6586 		for (nexttx = txq->txq_next, seg = 0;
   6587 		     seg < dmamap->dm_nsegs; seg++) {
   6588 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6589 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6590 			     seglen != 0;
   6591 			     curaddr += curlen, seglen -= curlen,
   6592 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6593 				curlen = seglen;
   6594 
   6595 				/*
   6596 				 * So says the Linux driver:
   6597 				 * Work around for premature descriptor
   6598 				 * write-backs in TSO mode.  Append a
   6599 				 * 4-byte sentinel descriptor.
   6600 				 */
   6601 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6602 				    curlen > 8)
   6603 					curlen -= 4;
   6604 
   6605 				wm_set_dma_addr(
   6606 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6607 				txq->txq_descs[nexttx].wtx_cmdlen
   6608 				    = htole32(cksumcmd | curlen);
   6609 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6610 				    = 0;
   6611 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6612 				    = cksumfields;
   6613 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6614 				lasttx = nexttx;
   6615 
   6616 				DPRINTF(WM_DEBUG_TX,
   6617 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6618 				     "len %#04zx\n",
   6619 				    device_xname(sc->sc_dev), nexttx,
   6620 				    (uint64_t)curaddr, curlen));
   6621 			}
   6622 		}
   6623 
   6624 		KASSERT(lasttx != -1);
   6625 
   6626 		/*
   6627 		 * Set up the command byte on the last descriptor of
   6628 		 * the packet.  If we're in the interrupt delay window,
   6629 		 * delay the interrupt.
   6630 		 */
   6631 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6632 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6633 
   6634 		/*
   6635 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6636 		 * up the descriptor to encapsulate the packet for us.
   6637 		 *
   6638 		 * This is only valid on the last descriptor of the packet.
   6639 		 */
   6640 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6641 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6642 			    htole32(WTX_CMD_VLE);
   6643 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6644 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6645 		}
   6646 
   6647 		txs->txs_lastdesc = lasttx;
   6648 
   6649 		DPRINTF(WM_DEBUG_TX,
   6650 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6651 		    device_xname(sc->sc_dev),
   6652 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6653 
   6654 		/* Sync the descriptors we're using. */
   6655 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6656 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6657 
   6658 		/* Give the packet to the chip. */
   6659 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6660 
   6661 		DPRINTF(WM_DEBUG_TX,
   6662 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6663 
   6664 		DPRINTF(WM_DEBUG_TX,
   6665 		    ("%s: TX: finished transmitting packet, job %d\n",
   6666 		    device_xname(sc->sc_dev), txq->txq_snext));
   6667 
   6668 		/* Advance the tx pointer. */
   6669 		txq->txq_free -= txs->txs_ndesc;
   6670 		txq->txq_next = nexttx;
   6671 
   6672 		txq->txq_sfree--;
   6673 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6674 
   6675 		/* Pass the packet to any BPF listeners. */
   6676 		bpf_mtap(ifp, m0);
   6677 	}
   6678 
   6679 	if (m0 != NULL) {
   6680 		ifp->if_flags |= IFF_OACTIVE;
   6681 		WM_Q_EVCNT_INCR(txq, txdrop);
   6682 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6683 			__func__));
   6684 		m_freem(m0);
   6685 	}
   6686 
   6687 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6688 		/* No more slots; notify upper layer. */
   6689 		ifp->if_flags |= IFF_OACTIVE;
   6690 	}
   6691 
   6692 	if (txq->txq_free != ofree) {
   6693 		/* Set a watchdog timer in case the chip flakes out. */
   6694 		ifp->if_timer = 5;
   6695 	}
   6696 }
   6697 
   6698 /*
   6699  * wm_nq_tx_offload:
   6700  *
   6701  *	Set up TCP/IP checksumming parameters for the
   6702  *	specified packet, for NEWQUEUE devices
   6703  */
   6704 static int
   6705 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6706     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6707 {
   6708 	struct mbuf *m0 = txs->txs_mbuf;
   6709 	struct m_tag *mtag;
   6710 	uint32_t vl_len, mssidx, cmdc;
   6711 	struct ether_header *eh;
   6712 	int offset, iphl;
   6713 
   6714 	/*
   6715 	 * XXX It would be nice if the mbuf pkthdr had offset
   6716 	 * fields for the protocol headers.
   6717 	 */
   6718 	*cmdlenp = 0;
   6719 	*fieldsp = 0;
   6720 
   6721 	eh = mtod(m0, struct ether_header *);
   6722 	switch (htons(eh->ether_type)) {
   6723 	case ETHERTYPE_IP:
   6724 	case ETHERTYPE_IPV6:
   6725 		offset = ETHER_HDR_LEN;
   6726 		break;
   6727 
   6728 	case ETHERTYPE_VLAN:
   6729 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6730 		break;
   6731 
   6732 	default:
   6733 		/* Don't support this protocol or encapsulation. */
   6734 		*do_csum = false;
   6735 		return 0;
   6736 	}
   6737 	*do_csum = true;
   6738 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6739 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6740 
   6741 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6742 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6743 
   6744 	if ((m0->m_pkthdr.csum_flags &
   6745 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6746 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6747 	} else {
   6748 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6749 	}
   6750 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6751 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6752 
   6753 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6754 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6755 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6756 		*cmdlenp |= NQTX_CMD_VLE;
   6757 	}
   6758 
   6759 	mssidx = 0;
   6760 
   6761 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6762 		int hlen = offset + iphl;
   6763 		int tcp_hlen;
   6764 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6765 
   6766 		if (__predict_false(m0->m_len <
   6767 				    (hlen + sizeof(struct tcphdr)))) {
   6768 			/*
   6769 			 * TCP/IP headers are not in the first mbuf; we need
   6770 			 * to do this the slow and painful way.  Let's just
   6771 			 * hope this doesn't happen very often.
   6772 			 */
   6773 			struct tcphdr th;
   6774 
   6775 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6776 
   6777 			m_copydata(m0, hlen, sizeof(th), &th);
   6778 			if (v4) {
   6779 				struct ip ip;
   6780 
   6781 				m_copydata(m0, offset, sizeof(ip), &ip);
   6782 				ip.ip_len = 0;
   6783 				m_copyback(m0,
   6784 				    offset + offsetof(struct ip, ip_len),
   6785 				    sizeof(ip.ip_len), &ip.ip_len);
   6786 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6787 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6788 			} else {
   6789 				struct ip6_hdr ip6;
   6790 
   6791 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6792 				ip6.ip6_plen = 0;
   6793 				m_copyback(m0,
   6794 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6795 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6796 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6797 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6798 			}
   6799 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6800 			    sizeof(th.th_sum), &th.th_sum);
   6801 
   6802 			tcp_hlen = th.th_off << 2;
   6803 		} else {
   6804 			/*
   6805 			 * TCP/IP headers are in the first mbuf; we can do
   6806 			 * this the easy way.
   6807 			 */
   6808 			struct tcphdr *th;
   6809 
   6810 			if (v4) {
   6811 				struct ip *ip =
   6812 				    (void *)(mtod(m0, char *) + offset);
   6813 				th = (void *)(mtod(m0, char *) + hlen);
   6814 
   6815 				ip->ip_len = 0;
   6816 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6817 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6818 			} else {
   6819 				struct ip6_hdr *ip6 =
   6820 				    (void *)(mtod(m0, char *) + offset);
   6821 				th = (void *)(mtod(m0, char *) + hlen);
   6822 
   6823 				ip6->ip6_plen = 0;
   6824 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6825 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6826 			}
   6827 			tcp_hlen = th->th_off << 2;
   6828 		}
   6829 		hlen += tcp_hlen;
   6830 		*cmdlenp |= NQTX_CMD_TSE;
   6831 
   6832 		if (v4) {
   6833 			WM_Q_EVCNT_INCR(txq, txtso);
   6834 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6835 		} else {
   6836 			WM_Q_EVCNT_INCR(txq, txtso6);
   6837 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6838 		}
   6839 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6840 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6841 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6842 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6843 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6844 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6845 	} else {
   6846 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6847 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6848 	}
   6849 
   6850 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6851 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6852 		cmdc |= NQTXC_CMD_IP4;
   6853 	}
   6854 
   6855 	if (m0->m_pkthdr.csum_flags &
   6856 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6857 		WM_Q_EVCNT_INCR(txq, txtusum);
   6858 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6859 			cmdc |= NQTXC_CMD_TCP;
   6860 		} else {
   6861 			cmdc |= NQTXC_CMD_UDP;
   6862 		}
   6863 		cmdc |= NQTXC_CMD_IP4;
   6864 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6865 	}
   6866 	if (m0->m_pkthdr.csum_flags &
   6867 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6868 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6869 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6870 			cmdc |= NQTXC_CMD_TCP;
   6871 		} else {
   6872 			cmdc |= NQTXC_CMD_UDP;
   6873 		}
   6874 		cmdc |= NQTXC_CMD_IP6;
   6875 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6876 	}
   6877 
   6878 	/* Fill in the context descriptor. */
   6879 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6880 	    htole32(vl_len);
   6881 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6882 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6883 	    htole32(cmdc);
   6884 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6885 	    htole32(mssidx);
   6886 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6887 	DPRINTF(WM_DEBUG_TX,
   6888 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6889 	    txq->txq_next, 0, vl_len));
   6890 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6891 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6892 	txs->txs_ndesc++;
   6893 	return 0;
   6894 }
   6895 
   6896 /*
   6897  * wm_nq_start:		[ifnet interface function]
   6898  *
   6899  *	Start packet transmission on the interface for NEWQUEUE devices
   6900  */
   6901 static void
   6902 wm_nq_start(struct ifnet *ifp)
   6903 {
   6904 	struct wm_softc *sc = ifp->if_softc;
   6905 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6906 
   6907 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6908 
   6909 	/*
   6910 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6911 	 */
   6912 
   6913 	mutex_enter(txq->txq_lock);
   6914 	if (!txq->txq_stopping)
   6915 		wm_nq_start_locked(ifp);
   6916 	mutex_exit(txq->txq_lock);
   6917 }
   6918 
   6919 static void
   6920 wm_nq_start_locked(struct ifnet *ifp)
   6921 {
   6922 	struct wm_softc *sc = ifp->if_softc;
   6923 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6924 
   6925 	wm_nq_send_common_locked(ifp, txq, false);
   6926 }
   6927 
   6928 static int
   6929 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6930 {
   6931 	int qid;
   6932 	struct wm_softc *sc = ifp->if_softc;
   6933 	struct wm_txqueue *txq;
   6934 
   6935 	qid = wm_select_txqueue(ifp, m);
   6936 	txq = &sc->sc_queue[qid].wmq_txq;
   6937 
   6938 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6939 		m_freem(m);
   6940 		WM_Q_EVCNT_INCR(txq, txdrop);
   6941 		return ENOBUFS;
   6942 	}
   6943 
   6944 	/*
   6945 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6946 	 */
   6947 	ifp->if_obytes += m->m_pkthdr.len;
   6948 	if (m->m_flags & M_MCAST)
   6949 		ifp->if_omcasts++;
   6950 
   6951 	if (mutex_tryenter(txq->txq_lock)) {
   6952 		if (!txq->txq_stopping)
   6953 			wm_nq_transmit_locked(ifp, txq);
   6954 		mutex_exit(txq->txq_lock);
   6955 	}
   6956 
   6957 	return 0;
   6958 }
   6959 
   6960 static void
   6961 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6962 {
   6963 
   6964 	wm_nq_send_common_locked(ifp, txq, true);
   6965 }
   6966 
   6967 static void
   6968 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6969     bool is_transmit)
   6970 {
   6971 	struct wm_softc *sc = ifp->if_softc;
   6972 	struct mbuf *m0;
   6973 	struct m_tag *mtag;
   6974 	struct wm_txsoft *txs;
   6975 	bus_dmamap_t dmamap;
   6976 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6977 	bool do_csum, sent;
   6978 
   6979 	KASSERT(mutex_owned(txq->txq_lock));
   6980 
   6981 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6982 		return;
   6983 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6984 		return;
   6985 
   6986 	sent = false;
   6987 
   6988 	/*
   6989 	 * Loop through the send queue, setting up transmit descriptors
   6990 	 * until we drain the queue, or use up all available transmit
   6991 	 * descriptors.
   6992 	 */
   6993 	for (;;) {
   6994 		m0 = NULL;
   6995 
   6996 		/* Get a work queue entry. */
   6997 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6998 			wm_txeof(sc, txq);
   6999 			if (txq->txq_sfree == 0) {
   7000 				DPRINTF(WM_DEBUG_TX,
   7001 				    ("%s: TX: no free job descriptors\n",
   7002 					device_xname(sc->sc_dev)));
   7003 				WM_Q_EVCNT_INCR(txq, txsstall);
   7004 				break;
   7005 			}
   7006 		}
   7007 
   7008 		/* Grab a packet off the queue. */
   7009 		if (is_transmit)
   7010 			m0 = pcq_get(txq->txq_interq);
   7011 		else
   7012 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7013 		if (m0 == NULL)
   7014 			break;
   7015 
   7016 		DPRINTF(WM_DEBUG_TX,
   7017 		    ("%s: TX: have packet to transmit: %p\n",
   7018 		    device_xname(sc->sc_dev), m0));
   7019 
   7020 		txs = &txq->txq_soft[txq->txq_snext];
   7021 		dmamap = txs->txs_dmamap;
   7022 
   7023 		/*
   7024 		 * Load the DMA map.  If this fails, the packet either
   7025 		 * didn't fit in the allotted number of segments, or we
   7026 		 * were short on resources.  For the too-many-segments
   7027 		 * case, we simply report an error and drop the packet,
   7028 		 * since we can't sanely copy a jumbo packet to a single
   7029 		 * buffer.
   7030 		 */
   7031 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7032 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7033 		if (error) {
   7034 			if (error == EFBIG) {
   7035 				WM_Q_EVCNT_INCR(txq, txdrop);
   7036 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7037 				    "DMA segments, dropping...\n",
   7038 				    device_xname(sc->sc_dev));
   7039 				wm_dump_mbuf_chain(sc, m0);
   7040 				m_freem(m0);
   7041 				continue;
   7042 			}
   7043 			/* Short on resources, just stop for now. */
   7044 			DPRINTF(WM_DEBUG_TX,
   7045 			    ("%s: TX: dmamap load failed: %d\n",
   7046 			    device_xname(sc->sc_dev), error));
   7047 			break;
   7048 		}
   7049 
   7050 		segs_needed = dmamap->dm_nsegs;
   7051 
   7052 		/*
   7053 		 * Ensure we have enough descriptors free to describe
   7054 		 * the packet.  Note, we always reserve one descriptor
   7055 		 * at the end of the ring due to the semantics of the
   7056 		 * TDT register, plus one more in the event we need
   7057 		 * to load offload context.
   7058 		 */
   7059 		if (segs_needed > txq->txq_free - 2) {
   7060 			/*
   7061 			 * Not enough free descriptors to transmit this
   7062 			 * packet.  We haven't committed anything yet,
   7063 			 * so just unload the DMA map, put the packet
   7064 			 * pack on the queue, and punt.  Notify the upper
   7065 			 * layer that there are no more slots left.
   7066 			 */
   7067 			DPRINTF(WM_DEBUG_TX,
   7068 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7069 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7070 			    segs_needed, txq->txq_free - 1));
   7071 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7072 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7073 			WM_Q_EVCNT_INCR(txq, txdstall);
   7074 			break;
   7075 		}
   7076 
   7077 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7078 
   7079 		DPRINTF(WM_DEBUG_TX,
   7080 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7081 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7082 
   7083 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7084 
   7085 		/*
   7086 		 * Store a pointer to the packet so that we can free it
   7087 		 * later.
   7088 		 *
   7089 		 * Initially, we consider the number of descriptors the
   7090 		 * packet uses the number of DMA segments.  This may be
   7091 		 * incremented by 1 if we do checksum offload (a descriptor
   7092 		 * is used to set the checksum context).
   7093 		 */
   7094 		txs->txs_mbuf = m0;
   7095 		txs->txs_firstdesc = txq->txq_next;
   7096 		txs->txs_ndesc = segs_needed;
   7097 
   7098 		/* Set up offload parameters for this packet. */
   7099 		uint32_t cmdlen, fields, dcmdlen;
   7100 		if (m0->m_pkthdr.csum_flags &
   7101 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7102 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7103 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7104 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7105 			    &do_csum) != 0) {
   7106 				/* Error message already displayed. */
   7107 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7108 				continue;
   7109 			}
   7110 		} else {
   7111 			do_csum = false;
   7112 			cmdlen = 0;
   7113 			fields = 0;
   7114 		}
   7115 
   7116 		/* Sync the DMA map. */
   7117 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7118 		    BUS_DMASYNC_PREWRITE);
   7119 
   7120 		/* Initialize the first transmit descriptor. */
   7121 		nexttx = txq->txq_next;
   7122 		if (!do_csum) {
   7123 			/* setup a legacy descriptor */
   7124 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7125 			    dmamap->dm_segs[0].ds_addr);
   7126 			txq->txq_descs[nexttx].wtx_cmdlen =
   7127 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7128 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7129 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7130 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7131 			    NULL) {
   7132 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7133 				    htole32(WTX_CMD_VLE);
   7134 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7135 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7136 			} else {
   7137 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7138 			}
   7139 			dcmdlen = 0;
   7140 		} else {
   7141 			/* setup an advanced data descriptor */
   7142 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7143 			    htole64(dmamap->dm_segs[0].ds_addr);
   7144 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7145 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7146 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   7147 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7148 			    htole32(fields);
   7149 			DPRINTF(WM_DEBUG_TX,
   7150 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7151 			    device_xname(sc->sc_dev), nexttx,
   7152 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7153 			DPRINTF(WM_DEBUG_TX,
   7154 			    ("\t 0x%08x%08x\n", fields,
   7155 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7156 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7157 		}
   7158 
   7159 		lasttx = nexttx;
   7160 		nexttx = WM_NEXTTX(txq, nexttx);
   7161 		/*
   7162 		 * fill in the next descriptors. legacy or adcanced format
   7163 		 * is the same here
   7164 		 */
   7165 		for (seg = 1; seg < dmamap->dm_nsegs;
   7166 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7167 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7168 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7169 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7170 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7171 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7172 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7173 			lasttx = nexttx;
   7174 
   7175 			DPRINTF(WM_DEBUG_TX,
   7176 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7177 			     "len %#04zx\n",
   7178 			    device_xname(sc->sc_dev), nexttx,
   7179 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7180 			    dmamap->dm_segs[seg].ds_len));
   7181 		}
   7182 
   7183 		KASSERT(lasttx != -1);
   7184 
   7185 		/*
   7186 		 * Set up the command byte on the last descriptor of
   7187 		 * the packet.  If we're in the interrupt delay window,
   7188 		 * delay the interrupt.
   7189 		 */
   7190 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7191 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7192 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7193 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7194 
   7195 		txs->txs_lastdesc = lasttx;
   7196 
   7197 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7198 		    device_xname(sc->sc_dev),
   7199 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7200 
   7201 		/* Sync the descriptors we're using. */
   7202 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7203 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7204 
   7205 		/* Give the packet to the chip. */
   7206 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7207 		sent = true;
   7208 
   7209 		DPRINTF(WM_DEBUG_TX,
   7210 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7211 
   7212 		DPRINTF(WM_DEBUG_TX,
   7213 		    ("%s: TX: finished transmitting packet, job %d\n",
   7214 		    device_xname(sc->sc_dev), txq->txq_snext));
   7215 
   7216 		/* Advance the tx pointer. */
   7217 		txq->txq_free -= txs->txs_ndesc;
   7218 		txq->txq_next = nexttx;
   7219 
   7220 		txq->txq_sfree--;
   7221 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7222 
   7223 		/* Pass the packet to any BPF listeners. */
   7224 		bpf_mtap(ifp, m0);
   7225 	}
   7226 
   7227 	if (m0 != NULL) {
   7228 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7229 		WM_Q_EVCNT_INCR(txq, txdrop);
   7230 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7231 			__func__));
   7232 		m_freem(m0);
   7233 	}
   7234 
   7235 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7236 		/* No more slots; notify upper layer. */
   7237 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7238 	}
   7239 
   7240 	if (sent) {
   7241 		/* Set a watchdog timer in case the chip flakes out. */
   7242 		ifp->if_timer = 5;
   7243 	}
   7244 }
   7245 
   7246 static void
   7247 wm_deferred_start(struct ifnet *ifp)
   7248 {
   7249 	struct wm_softc *sc = ifp->if_softc;
   7250 	int qid = 0;
   7251 
   7252 	/*
   7253 	 * Try to transmit on all Tx queues. Passing a txq somehow and
   7254 	 * transmitting only on the txq may be better.
   7255 	 */
   7256 restart:
   7257 	WM_CORE_LOCK(sc);
   7258 	if (sc->sc_core_stopping)
   7259 		goto out;
   7260 
   7261 	for (; qid < sc->sc_nqueues; qid++) {
   7262 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   7263 
   7264 		if (!mutex_tryenter(txq->txq_lock))
   7265 			continue;
   7266 
   7267 		if (txq->txq_stopping) {
   7268 			mutex_exit(txq->txq_lock);
   7269 			continue;
   7270 		}
   7271 		WM_CORE_UNLOCK(sc);
   7272 
   7273 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7274 			/* XXX need for ALTQ */
   7275 			if (qid == 0)
   7276 				wm_nq_start_locked(ifp);
   7277 			wm_nq_transmit_locked(ifp, txq);
   7278 		} else {
   7279 			/* XXX need for ALTQ */
   7280 			if (qid == 0)
   7281 				wm_start_locked(ifp);
   7282 			wm_transmit_locked(ifp, txq);
   7283 		}
   7284 		mutex_exit(txq->txq_lock);
   7285 
   7286 		qid++;
   7287 		goto restart;
   7288 	}
   7289 out:
   7290 	WM_CORE_UNLOCK(sc);
   7291 }
   7292 
   7293 /* Interrupt */
   7294 
   7295 /*
   7296  * wm_txeof:
   7297  *
   7298  *	Helper; handle transmit interrupts.
   7299  */
   7300 static int
   7301 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7302 {
   7303 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7304 	struct wm_txsoft *txs;
   7305 	bool processed = false;
   7306 	int count = 0;
   7307 	int i;
   7308 	uint8_t status;
   7309 
   7310 	KASSERT(mutex_owned(txq->txq_lock));
   7311 
   7312 	if (txq->txq_stopping)
   7313 		return 0;
   7314 
   7315 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7316 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7317 	else
   7318 		ifp->if_flags &= ~IFF_OACTIVE;
   7319 
   7320 	/*
   7321 	 * Go through the Tx list and free mbufs for those
   7322 	 * frames which have been transmitted.
   7323 	 */
   7324 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7325 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7326 		txs = &txq->txq_soft[i];
   7327 
   7328 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7329 			device_xname(sc->sc_dev), i));
   7330 
   7331 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7332 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7333 
   7334 		status =
   7335 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7336 		if ((status & WTX_ST_DD) == 0) {
   7337 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7338 			    BUS_DMASYNC_PREREAD);
   7339 			break;
   7340 		}
   7341 
   7342 		processed = true;
   7343 		count++;
   7344 		DPRINTF(WM_DEBUG_TX,
   7345 		    ("%s: TX: job %d done: descs %d..%d\n",
   7346 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7347 		    txs->txs_lastdesc));
   7348 
   7349 		/*
   7350 		 * XXX We should probably be using the statistics
   7351 		 * XXX registers, but I don't know if they exist
   7352 		 * XXX on chips before the i82544.
   7353 		 */
   7354 
   7355 #ifdef WM_EVENT_COUNTERS
   7356 		if (status & WTX_ST_TU)
   7357 			WM_Q_EVCNT_INCR(txq, tu);
   7358 #endif /* WM_EVENT_COUNTERS */
   7359 
   7360 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7361 			ifp->if_oerrors++;
   7362 			if (status & WTX_ST_LC)
   7363 				log(LOG_WARNING, "%s: late collision\n",
   7364 				    device_xname(sc->sc_dev));
   7365 			else if (status & WTX_ST_EC) {
   7366 				ifp->if_collisions += 16;
   7367 				log(LOG_WARNING, "%s: excessive collisions\n",
   7368 				    device_xname(sc->sc_dev));
   7369 			}
   7370 		} else
   7371 			ifp->if_opackets++;
   7372 
   7373 		txq->txq_free += txs->txs_ndesc;
   7374 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7375 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7376 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7377 		m_freem(txs->txs_mbuf);
   7378 		txs->txs_mbuf = NULL;
   7379 	}
   7380 
   7381 	/* Update the dirty transmit buffer pointer. */
   7382 	txq->txq_sdirty = i;
   7383 	DPRINTF(WM_DEBUG_TX,
   7384 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7385 
   7386 	if (count != 0)
   7387 		rnd_add_uint32(&sc->rnd_source, count);
   7388 
   7389 	/*
   7390 	 * If there are no more pending transmissions, cancel the watchdog
   7391 	 * timer.
   7392 	 */
   7393 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7394 		ifp->if_timer = 0;
   7395 
   7396 	return processed;
   7397 }
   7398 
   7399 /*
   7400  * wm_rxeof:
   7401  *
   7402  *	Helper; handle receive interrupts.
   7403  */
   7404 static void
   7405 wm_rxeof(struct wm_rxqueue *rxq)
   7406 {
   7407 	struct wm_softc *sc = rxq->rxq_sc;
   7408 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7409 	struct wm_rxsoft *rxs;
   7410 	struct mbuf *m;
   7411 	int i, len;
   7412 	int count = 0;
   7413 	uint8_t status, errors;
   7414 	uint16_t vlantag;
   7415 
   7416 	KASSERT(mutex_owned(rxq->rxq_lock));
   7417 
   7418 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7419 		rxs = &rxq->rxq_soft[i];
   7420 
   7421 		DPRINTF(WM_DEBUG_RX,
   7422 		    ("%s: RX: checking descriptor %d\n",
   7423 		    device_xname(sc->sc_dev), i));
   7424 
   7425 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7426 
   7427 		status = rxq->rxq_descs[i].wrx_status;
   7428 		errors = rxq->rxq_descs[i].wrx_errors;
   7429 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7430 		vlantag = rxq->rxq_descs[i].wrx_special;
   7431 
   7432 		if ((status & WRX_ST_DD) == 0) {
   7433 			/* We have processed all of the receive descriptors. */
   7434 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7435 			break;
   7436 		}
   7437 
   7438 		count++;
   7439 		if (__predict_false(rxq->rxq_discard)) {
   7440 			DPRINTF(WM_DEBUG_RX,
   7441 			    ("%s: RX: discarding contents of descriptor %d\n",
   7442 			    device_xname(sc->sc_dev), i));
   7443 			wm_init_rxdesc(rxq, i);
   7444 			if (status & WRX_ST_EOP) {
   7445 				/* Reset our state. */
   7446 				DPRINTF(WM_DEBUG_RX,
   7447 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7448 				    device_xname(sc->sc_dev)));
   7449 				rxq->rxq_discard = 0;
   7450 			}
   7451 			continue;
   7452 		}
   7453 
   7454 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7455 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7456 
   7457 		m = rxs->rxs_mbuf;
   7458 
   7459 		/*
   7460 		 * Add a new receive buffer to the ring, unless of
   7461 		 * course the length is zero. Treat the latter as a
   7462 		 * failed mapping.
   7463 		 */
   7464 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7465 			/*
   7466 			 * Failed, throw away what we've done so
   7467 			 * far, and discard the rest of the packet.
   7468 			 */
   7469 			ifp->if_ierrors++;
   7470 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7471 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7472 			wm_init_rxdesc(rxq, i);
   7473 			if ((status & WRX_ST_EOP) == 0)
   7474 				rxq->rxq_discard = 1;
   7475 			if (rxq->rxq_head != NULL)
   7476 				m_freem(rxq->rxq_head);
   7477 			WM_RXCHAIN_RESET(rxq);
   7478 			DPRINTF(WM_DEBUG_RX,
   7479 			    ("%s: RX: Rx buffer allocation failed, "
   7480 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7481 			    rxq->rxq_discard ? " (discard)" : ""));
   7482 			continue;
   7483 		}
   7484 
   7485 		m->m_len = len;
   7486 		rxq->rxq_len += len;
   7487 		DPRINTF(WM_DEBUG_RX,
   7488 		    ("%s: RX: buffer at %p len %d\n",
   7489 		    device_xname(sc->sc_dev), m->m_data, len));
   7490 
   7491 		/* If this is not the end of the packet, keep looking. */
   7492 		if ((status & WRX_ST_EOP) == 0) {
   7493 			WM_RXCHAIN_LINK(rxq, m);
   7494 			DPRINTF(WM_DEBUG_RX,
   7495 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7496 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7497 			continue;
   7498 		}
   7499 
   7500 		/*
   7501 		 * Okay, we have the entire packet now.  The chip is
   7502 		 * configured to include the FCS except I350 and I21[01]
   7503 		 * (not all chips can be configured to strip it),
   7504 		 * so we need to trim it.
   7505 		 * May need to adjust length of previous mbuf in the
   7506 		 * chain if the current mbuf is too short.
   7507 		 * For an eratta, the RCTL_SECRC bit in RCTL register
   7508 		 * is always set in I350, so we don't trim it.
   7509 		 */
   7510 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7511 		    && (sc->sc_type != WM_T_I210)
   7512 		    && (sc->sc_type != WM_T_I211)) {
   7513 			if (m->m_len < ETHER_CRC_LEN) {
   7514 				rxq->rxq_tail->m_len
   7515 				    -= (ETHER_CRC_LEN - m->m_len);
   7516 				m->m_len = 0;
   7517 			} else
   7518 				m->m_len -= ETHER_CRC_LEN;
   7519 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7520 		} else
   7521 			len = rxq->rxq_len;
   7522 
   7523 		WM_RXCHAIN_LINK(rxq, m);
   7524 
   7525 		*rxq->rxq_tailp = NULL;
   7526 		m = rxq->rxq_head;
   7527 
   7528 		WM_RXCHAIN_RESET(rxq);
   7529 
   7530 		DPRINTF(WM_DEBUG_RX,
   7531 		    ("%s: RX: have entire packet, len -> %d\n",
   7532 		    device_xname(sc->sc_dev), len));
   7533 
   7534 		/* If an error occurred, update stats and drop the packet. */
   7535 		if (errors &
   7536 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7537 			if (errors & WRX_ER_SE)
   7538 				log(LOG_WARNING, "%s: symbol error\n",
   7539 				    device_xname(sc->sc_dev));
   7540 			else if (errors & WRX_ER_SEQ)
   7541 				log(LOG_WARNING, "%s: receive sequence error\n",
   7542 				    device_xname(sc->sc_dev));
   7543 			else if (errors & WRX_ER_CE)
   7544 				log(LOG_WARNING, "%s: CRC error\n",
   7545 				    device_xname(sc->sc_dev));
   7546 			m_freem(m);
   7547 			continue;
   7548 		}
   7549 
   7550 		/* No errors.  Receive the packet. */
   7551 		m_set_rcvif(m, ifp);
   7552 		m->m_pkthdr.len = len;
   7553 
   7554 		/*
   7555 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7556 		 * for us.  Associate the tag with the packet.
   7557 		 */
   7558 		/* XXXX should check for i350 and i354 */
   7559 		if ((status & WRX_ST_VP) != 0) {
   7560 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7561 		}
   7562 
   7563 		/* Set up checksum info for this packet. */
   7564 		if ((status & WRX_ST_IXSM) == 0) {
   7565 			if (status & WRX_ST_IPCS) {
   7566 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7567 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7568 				if (errors & WRX_ER_IPE)
   7569 					m->m_pkthdr.csum_flags |=
   7570 					    M_CSUM_IPv4_BAD;
   7571 			}
   7572 			if (status & WRX_ST_TCPCS) {
   7573 				/*
   7574 				 * Note: we don't know if this was TCP or UDP,
   7575 				 * so we just set both bits, and expect the
   7576 				 * upper layers to deal.
   7577 				 */
   7578 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7579 				m->m_pkthdr.csum_flags |=
   7580 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7581 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7582 				if (errors & WRX_ER_TCPE)
   7583 					m->m_pkthdr.csum_flags |=
   7584 					    M_CSUM_TCP_UDP_BAD;
   7585 			}
   7586 		}
   7587 
   7588 		mutex_exit(rxq->rxq_lock);
   7589 
   7590 		/* Pass it on. */
   7591 		if_percpuq_enqueue(sc->sc_ipq, m);
   7592 
   7593 		mutex_enter(rxq->rxq_lock);
   7594 
   7595 		if (rxq->rxq_stopping)
   7596 			break;
   7597 	}
   7598 
   7599 	/* Update the receive pointer. */
   7600 	rxq->rxq_ptr = i;
   7601 	if (count != 0)
   7602 		rnd_add_uint32(&sc->rnd_source, count);
   7603 
   7604 	DPRINTF(WM_DEBUG_RX,
   7605 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7606 }
   7607 
   7608 /*
   7609  * wm_linkintr_gmii:
   7610  *
   7611  *	Helper; handle link interrupts for GMII.
   7612  */
   7613 static void
   7614 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7615 {
   7616 
   7617 	KASSERT(WM_CORE_LOCKED(sc));
   7618 
   7619 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7620 		__func__));
   7621 
   7622 	if (icr & ICR_LSC) {
   7623 		uint32_t reg;
   7624 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7625 
   7626 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7627 			wm_gig_downshift_workaround_ich8lan(sc);
   7628 
   7629 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7630 			device_xname(sc->sc_dev)));
   7631 		mii_pollstat(&sc->sc_mii);
   7632 		if (sc->sc_type == WM_T_82543) {
   7633 			int miistatus, active;
   7634 
   7635 			/*
   7636 			 * With 82543, we need to force speed and
   7637 			 * duplex on the MAC equal to what the PHY
   7638 			 * speed and duplex configuration is.
   7639 			 */
   7640 			miistatus = sc->sc_mii.mii_media_status;
   7641 
   7642 			if (miistatus & IFM_ACTIVE) {
   7643 				active = sc->sc_mii.mii_media_active;
   7644 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7645 				switch (IFM_SUBTYPE(active)) {
   7646 				case IFM_10_T:
   7647 					sc->sc_ctrl |= CTRL_SPEED_10;
   7648 					break;
   7649 				case IFM_100_TX:
   7650 					sc->sc_ctrl |= CTRL_SPEED_100;
   7651 					break;
   7652 				case IFM_1000_T:
   7653 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7654 					break;
   7655 				default:
   7656 					/*
   7657 					 * fiber?
   7658 					 * Shoud not enter here.
   7659 					 */
   7660 					printf("unknown media (%x)\n", active);
   7661 					break;
   7662 				}
   7663 				if (active & IFM_FDX)
   7664 					sc->sc_ctrl |= CTRL_FD;
   7665 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7666 			}
   7667 		} else if ((sc->sc_type == WM_T_ICH8)
   7668 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7669 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7670 		} else if (sc->sc_type == WM_T_PCH) {
   7671 			wm_k1_gig_workaround_hv(sc,
   7672 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7673 		}
   7674 
   7675 		if ((sc->sc_phytype == WMPHY_82578)
   7676 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7677 			== IFM_1000_T)) {
   7678 
   7679 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7680 				delay(200*1000); /* XXX too big */
   7681 
   7682 				/* Link stall fix for link up */
   7683 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7684 				    HV_MUX_DATA_CTRL,
   7685 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7686 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7687 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7688 				    HV_MUX_DATA_CTRL,
   7689 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7690 			}
   7691 		}
   7692 		/*
   7693 		 * I217 Packet Loss issue:
   7694 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   7695 		 * on power up.
   7696 		 * Set the Beacon Duration for I217 to 8 usec
   7697 		 */
   7698 		if ((sc->sc_type == WM_T_PCH_LPT)
   7699 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7700 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   7701 			reg &= ~FEXTNVM4_BEACON_DURATION;
   7702 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   7703 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   7704 		}
   7705 
   7706 		/* XXX Work-around I218 hang issue */
   7707 		/* e1000_k1_workaround_lpt_lp() */
   7708 
   7709 		if ((sc->sc_type == WM_T_PCH_LPT)
   7710 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   7711 			/*
   7712 			 * Set platform power management values for Latency
   7713 			 * Tolerance Reporting (LTR)
   7714 			 */
   7715 			wm_platform_pm_pch_lpt(sc,
   7716 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   7717 				    != 0));
   7718 		}
   7719 
   7720 		/* FEXTNVM6 K1-off workaround */
   7721 		if (sc->sc_type == WM_T_PCH_SPT) {
   7722 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   7723 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   7724 			    & FEXTNVM6_K1_OFF_ENABLE)
   7725 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   7726 			else
   7727 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   7728 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   7729 		}
   7730 	} else if (icr & ICR_RXSEQ) {
   7731 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7732 			device_xname(sc->sc_dev)));
   7733 	}
   7734 }
   7735 
   7736 /*
   7737  * wm_linkintr_tbi:
   7738  *
   7739  *	Helper; handle link interrupts for TBI mode.
   7740  */
   7741 static void
   7742 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7743 {
   7744 	uint32_t status;
   7745 
   7746 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7747 		__func__));
   7748 
   7749 	status = CSR_READ(sc, WMREG_STATUS);
   7750 	if (icr & ICR_LSC) {
   7751 		if (status & STATUS_LU) {
   7752 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7753 			    device_xname(sc->sc_dev),
   7754 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7755 			/*
   7756 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7757 			 * so we should update sc->sc_ctrl
   7758 			 */
   7759 
   7760 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7761 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7762 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7763 			if (status & STATUS_FD)
   7764 				sc->sc_tctl |=
   7765 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7766 			else
   7767 				sc->sc_tctl |=
   7768 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7769 			if (sc->sc_ctrl & CTRL_TFCE)
   7770 				sc->sc_fcrtl |= FCRTL_XONE;
   7771 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7772 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7773 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7774 				      sc->sc_fcrtl);
   7775 			sc->sc_tbi_linkup = 1;
   7776 		} else {
   7777 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7778 			    device_xname(sc->sc_dev)));
   7779 			sc->sc_tbi_linkup = 0;
   7780 		}
   7781 		/* Update LED */
   7782 		wm_tbi_serdes_set_linkled(sc);
   7783 	} else if (icr & ICR_RXSEQ) {
   7784 		DPRINTF(WM_DEBUG_LINK,
   7785 		    ("%s: LINK: Receive sequence error\n",
   7786 		    device_xname(sc->sc_dev)));
   7787 	}
   7788 }
   7789 
   7790 /*
   7791  * wm_linkintr_serdes:
   7792  *
   7793  *	Helper; handle link interrupts for TBI mode.
   7794  */
   7795 static void
   7796 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7797 {
   7798 	struct mii_data *mii = &sc->sc_mii;
   7799 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7800 	uint32_t pcs_adv, pcs_lpab, reg;
   7801 
   7802 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7803 		__func__));
   7804 
   7805 	if (icr & ICR_LSC) {
   7806 		/* Check PCS */
   7807 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7808 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7809 			mii->mii_media_status |= IFM_ACTIVE;
   7810 			sc->sc_tbi_linkup = 1;
   7811 		} else {
   7812 			mii->mii_media_status |= IFM_NONE;
   7813 			sc->sc_tbi_linkup = 0;
   7814 			wm_tbi_serdes_set_linkled(sc);
   7815 			return;
   7816 		}
   7817 		mii->mii_media_active |= IFM_1000_SX;
   7818 		if ((reg & PCS_LSTS_FDX) != 0)
   7819 			mii->mii_media_active |= IFM_FDX;
   7820 		else
   7821 			mii->mii_media_active |= IFM_HDX;
   7822 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7823 			/* Check flow */
   7824 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7825 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7826 				DPRINTF(WM_DEBUG_LINK,
   7827 				    ("XXX LINKOK but not ACOMP\n"));
   7828 				return;
   7829 			}
   7830 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7831 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7832 			DPRINTF(WM_DEBUG_LINK,
   7833 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7834 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7835 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7836 				mii->mii_media_active |= IFM_FLOW
   7837 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7838 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7839 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7840 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7841 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7842 				mii->mii_media_active |= IFM_FLOW
   7843 				    | IFM_ETH_TXPAUSE;
   7844 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7845 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7846 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7847 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7848 				mii->mii_media_active |= IFM_FLOW
   7849 				    | IFM_ETH_RXPAUSE;
   7850 		}
   7851 		/* Update LED */
   7852 		wm_tbi_serdes_set_linkled(sc);
   7853 	} else {
   7854 		DPRINTF(WM_DEBUG_LINK,
   7855 		    ("%s: LINK: Receive sequence error\n",
   7856 		    device_xname(sc->sc_dev)));
   7857 	}
   7858 }
   7859 
   7860 /*
   7861  * wm_linkintr:
   7862  *
   7863  *	Helper; handle link interrupts.
   7864  */
   7865 static void
   7866 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7867 {
   7868 
   7869 	KASSERT(WM_CORE_LOCKED(sc));
   7870 
   7871 	if (sc->sc_flags & WM_F_HAS_MII)
   7872 		wm_linkintr_gmii(sc, icr);
   7873 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7874 	    && (sc->sc_type >= WM_T_82575))
   7875 		wm_linkintr_serdes(sc, icr);
   7876 	else
   7877 		wm_linkintr_tbi(sc, icr);
   7878 }
   7879 
   7880 /*
   7881  * wm_intr_legacy:
   7882  *
   7883  *	Interrupt service routine for INTx and MSI.
   7884  */
   7885 static int
   7886 wm_intr_legacy(void *arg)
   7887 {
   7888 	struct wm_softc *sc = arg;
   7889 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7890 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7891 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7892 	uint32_t icr, rndval = 0;
   7893 	int handled = 0;
   7894 
   7895 	DPRINTF(WM_DEBUG_TX,
   7896 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7897 	while (1 /* CONSTCOND */) {
   7898 		icr = CSR_READ(sc, WMREG_ICR);
   7899 		if ((icr & sc->sc_icr) == 0)
   7900 			break;
   7901 		if (rndval == 0)
   7902 			rndval = icr;
   7903 
   7904 		mutex_enter(rxq->rxq_lock);
   7905 
   7906 		if (rxq->rxq_stopping) {
   7907 			mutex_exit(rxq->rxq_lock);
   7908 			break;
   7909 		}
   7910 
   7911 		handled = 1;
   7912 
   7913 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7914 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7915 			DPRINTF(WM_DEBUG_RX,
   7916 			    ("%s: RX: got Rx intr 0x%08x\n",
   7917 			    device_xname(sc->sc_dev),
   7918 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7919 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7920 		}
   7921 #endif
   7922 		wm_rxeof(rxq);
   7923 
   7924 		mutex_exit(rxq->rxq_lock);
   7925 		mutex_enter(txq->txq_lock);
   7926 
   7927 		if (txq->txq_stopping) {
   7928 			mutex_exit(txq->txq_lock);
   7929 			break;
   7930 		}
   7931 
   7932 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7933 		if (icr & ICR_TXDW) {
   7934 			DPRINTF(WM_DEBUG_TX,
   7935 			    ("%s: TX: got TXDW interrupt\n",
   7936 			    device_xname(sc->sc_dev)));
   7937 			WM_Q_EVCNT_INCR(txq, txdw);
   7938 		}
   7939 #endif
   7940 		wm_txeof(sc, txq);
   7941 
   7942 		mutex_exit(txq->txq_lock);
   7943 		WM_CORE_LOCK(sc);
   7944 
   7945 		if (sc->sc_core_stopping) {
   7946 			WM_CORE_UNLOCK(sc);
   7947 			break;
   7948 		}
   7949 
   7950 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7951 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7952 			wm_linkintr(sc, icr);
   7953 		}
   7954 
   7955 		WM_CORE_UNLOCK(sc);
   7956 
   7957 		if (icr & ICR_RXO) {
   7958 #if defined(WM_DEBUG)
   7959 			log(LOG_WARNING, "%s: Receive overrun\n",
   7960 			    device_xname(sc->sc_dev));
   7961 #endif /* defined(WM_DEBUG) */
   7962 		}
   7963 	}
   7964 
   7965 	rnd_add_uint32(&sc->rnd_source, rndval);
   7966 
   7967 	if (handled) {
   7968 		/* Try to get more packets going. */
   7969 		if_schedule_deferred_start(ifp);
   7970 	}
   7971 
   7972 	return handled;
   7973 }
   7974 
   7975 static int
   7976 wm_txrxintr_msix(void *arg)
   7977 {
   7978 	struct wm_queue *wmq = arg;
   7979 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7980 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7981 	struct wm_softc *sc = txq->txq_sc;
   7982 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7983 
   7984 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7985 
   7986 	DPRINTF(WM_DEBUG_TX,
   7987 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7988 
   7989 	if (sc->sc_type == WM_T_82574)
   7990 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7991 	else if (sc->sc_type == WM_T_82575)
   7992 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7993 	else
   7994 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   7995 
   7996 	mutex_enter(txq->txq_lock);
   7997 
   7998 	if (txq->txq_stopping) {
   7999 		mutex_exit(txq->txq_lock);
   8000 		return 0;
   8001 	}
   8002 
   8003 	WM_Q_EVCNT_INCR(txq, txdw);
   8004 	wm_txeof(sc, txq);
   8005 
   8006 	/* Try to get more packets going. */
   8007 	if (pcq_peek(txq->txq_interq) != NULL)
   8008 		if_schedule_deferred_start(ifp);
   8009 	/*
   8010 	 * There are still some upper layer processing which call
   8011 	 * ifp->if_start(). e.g. ALTQ
   8012 	 */
   8013 	if (wmq->wmq_id == 0)
   8014 		if_schedule_deferred_start(ifp);
   8015 
   8016 	mutex_exit(txq->txq_lock);
   8017 
   8018 	DPRINTF(WM_DEBUG_RX,
   8019 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8020 	mutex_enter(rxq->rxq_lock);
   8021 
   8022 	if (rxq->rxq_stopping) {
   8023 		mutex_exit(rxq->rxq_lock);
   8024 		return 0;
   8025 	}
   8026 
   8027 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8028 	wm_rxeof(rxq);
   8029 	mutex_exit(rxq->rxq_lock);
   8030 
   8031 	if (sc->sc_type == WM_T_82574)
   8032 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8033 	else if (sc->sc_type == WM_T_82575)
   8034 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8035 	else
   8036 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8037 
   8038 	return 1;
   8039 }
   8040 
   8041 /*
   8042  * wm_linkintr_msix:
   8043  *
   8044  *	Interrupt service routine for link status change for MSI-X.
   8045  */
   8046 static int
   8047 wm_linkintr_msix(void *arg)
   8048 {
   8049 	struct wm_softc *sc = arg;
   8050 	uint32_t reg;
   8051 
   8052 	DPRINTF(WM_DEBUG_LINK,
   8053 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8054 
   8055 	reg = CSR_READ(sc, WMREG_ICR);
   8056 	WM_CORE_LOCK(sc);
   8057 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8058 		goto out;
   8059 
   8060 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8061 	wm_linkintr(sc, ICR_LSC);
   8062 
   8063 out:
   8064 	WM_CORE_UNLOCK(sc);
   8065 
   8066 	if (sc->sc_type == WM_T_82574)
   8067 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8068 	else if (sc->sc_type == WM_T_82575)
   8069 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8070 	else
   8071 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8072 
   8073 	return 1;
   8074 }
   8075 
   8076 /*
   8077  * Media related.
   8078  * GMII, SGMII, TBI (and SERDES)
   8079  */
   8080 
   8081 /* Common */
   8082 
   8083 /*
   8084  * wm_tbi_serdes_set_linkled:
   8085  *
   8086  *	Update the link LED on TBI and SERDES devices.
   8087  */
   8088 static void
   8089 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8090 {
   8091 
   8092 	if (sc->sc_tbi_linkup)
   8093 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8094 	else
   8095 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8096 
   8097 	/* 82540 or newer devices are active low */
   8098 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8099 
   8100 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8101 }
   8102 
   8103 /* GMII related */
   8104 
   8105 /*
   8106  * wm_gmii_reset:
   8107  *
   8108  *	Reset the PHY.
   8109  */
   8110 static void
   8111 wm_gmii_reset(struct wm_softc *sc)
   8112 {
   8113 	uint32_t reg;
   8114 	int rv;
   8115 
   8116 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8117 		device_xname(sc->sc_dev), __func__));
   8118 
   8119 	rv = sc->phy.acquire(sc);
   8120 	if (rv != 0) {
   8121 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8122 		    __func__);
   8123 		return;
   8124 	}
   8125 
   8126 	switch (sc->sc_type) {
   8127 	case WM_T_82542_2_0:
   8128 	case WM_T_82542_2_1:
   8129 		/* null */
   8130 		break;
   8131 	case WM_T_82543:
   8132 		/*
   8133 		 * With 82543, we need to force speed and duplex on the MAC
   8134 		 * equal to what the PHY speed and duplex configuration is.
   8135 		 * In addition, we need to perform a hardware reset on the PHY
   8136 		 * to take it out of reset.
   8137 		 */
   8138 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8139 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8140 
   8141 		/* The PHY reset pin is active-low. */
   8142 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8143 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8144 		    CTRL_EXT_SWDPIN(4));
   8145 		reg |= CTRL_EXT_SWDPIO(4);
   8146 
   8147 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8148 		CSR_WRITE_FLUSH(sc);
   8149 		delay(10*1000);
   8150 
   8151 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8152 		CSR_WRITE_FLUSH(sc);
   8153 		delay(150);
   8154 #if 0
   8155 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8156 #endif
   8157 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8158 		break;
   8159 	case WM_T_82544:	/* reset 10000us */
   8160 	case WM_T_82540:
   8161 	case WM_T_82545:
   8162 	case WM_T_82545_3:
   8163 	case WM_T_82546:
   8164 	case WM_T_82546_3:
   8165 	case WM_T_82541:
   8166 	case WM_T_82541_2:
   8167 	case WM_T_82547:
   8168 	case WM_T_82547_2:
   8169 	case WM_T_82571:	/* reset 100us */
   8170 	case WM_T_82572:
   8171 	case WM_T_82573:
   8172 	case WM_T_82574:
   8173 	case WM_T_82575:
   8174 	case WM_T_82576:
   8175 	case WM_T_82580:
   8176 	case WM_T_I350:
   8177 	case WM_T_I354:
   8178 	case WM_T_I210:
   8179 	case WM_T_I211:
   8180 	case WM_T_82583:
   8181 	case WM_T_80003:
   8182 		/* generic reset */
   8183 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8184 		CSR_WRITE_FLUSH(sc);
   8185 		delay(20000);
   8186 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8187 		CSR_WRITE_FLUSH(sc);
   8188 		delay(20000);
   8189 
   8190 		if ((sc->sc_type == WM_T_82541)
   8191 		    || (sc->sc_type == WM_T_82541_2)
   8192 		    || (sc->sc_type == WM_T_82547)
   8193 		    || (sc->sc_type == WM_T_82547_2)) {
   8194 			/* workaround for igp are done in igp_reset() */
   8195 			/* XXX add code to set LED after phy reset */
   8196 		}
   8197 		break;
   8198 	case WM_T_ICH8:
   8199 	case WM_T_ICH9:
   8200 	case WM_T_ICH10:
   8201 	case WM_T_PCH:
   8202 	case WM_T_PCH2:
   8203 	case WM_T_PCH_LPT:
   8204 	case WM_T_PCH_SPT:
   8205 		/* generic reset */
   8206 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8207 		CSR_WRITE_FLUSH(sc);
   8208 		delay(100);
   8209 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8210 		CSR_WRITE_FLUSH(sc);
   8211 		delay(150);
   8212 		break;
   8213 	default:
   8214 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8215 		    __func__);
   8216 		break;
   8217 	}
   8218 
   8219 	sc->phy.release(sc);
   8220 
   8221 	/* get_cfg_done */
   8222 	wm_get_cfg_done(sc);
   8223 
   8224 	/* extra setup */
   8225 	switch (sc->sc_type) {
   8226 	case WM_T_82542_2_0:
   8227 	case WM_T_82542_2_1:
   8228 	case WM_T_82543:
   8229 	case WM_T_82544:
   8230 	case WM_T_82540:
   8231 	case WM_T_82545:
   8232 	case WM_T_82545_3:
   8233 	case WM_T_82546:
   8234 	case WM_T_82546_3:
   8235 	case WM_T_82541_2:
   8236 	case WM_T_82547_2:
   8237 	case WM_T_82571:
   8238 	case WM_T_82572:
   8239 	case WM_T_82573:
   8240 	case WM_T_82575:
   8241 	case WM_T_82576:
   8242 	case WM_T_82580:
   8243 	case WM_T_I350:
   8244 	case WM_T_I354:
   8245 	case WM_T_I210:
   8246 	case WM_T_I211:
   8247 	case WM_T_80003:
   8248 		/* null */
   8249 		break;
   8250 	case WM_T_82574:
   8251 	case WM_T_82583:
   8252 		wm_lplu_d0_disable(sc);
   8253 		break;
   8254 	case WM_T_82541:
   8255 	case WM_T_82547:
   8256 		/* XXX Configure actively LED after PHY reset */
   8257 		break;
   8258 	case WM_T_ICH8:
   8259 	case WM_T_ICH9:
   8260 	case WM_T_ICH10:
   8261 	case WM_T_PCH:
   8262 	case WM_T_PCH2:
   8263 	case WM_T_PCH_LPT:
   8264 	case WM_T_PCH_SPT:
   8265 		/* Allow time for h/w to get to a quiescent state afer reset */
   8266 		delay(10*1000);
   8267 
   8268 		if (sc->sc_type == WM_T_PCH)
   8269 			wm_hv_phy_workaround_ich8lan(sc);
   8270 
   8271 		if (sc->sc_type == WM_T_PCH2)
   8272 			wm_lv_phy_workaround_ich8lan(sc);
   8273 
   8274 		/* Clear the host wakeup bit after lcd reset */
   8275 		if (sc->sc_type >= WM_T_PCH) {
   8276 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8277 			    BM_PORT_GEN_CFG);
   8278 			reg &= ~BM_WUC_HOST_WU_BIT;
   8279 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8280 			    BM_PORT_GEN_CFG, reg);
   8281 		}
   8282 
   8283 		/*
   8284 		 * XXX Configure the LCD with th extended configuration region
   8285 		 * in NVM
   8286 		 */
   8287 
   8288 		/* Disable D0 LPLU. */
   8289 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8290 			wm_lplu_d0_disable_pch(sc);
   8291 		else
   8292 			wm_lplu_d0_disable(sc);	/* ICH* */
   8293 		break;
   8294 	default:
   8295 		panic("%s: unknown type\n", __func__);
   8296 		break;
   8297 	}
   8298 }
   8299 
   8300 /*
   8301  * wm_get_phy_id_82575:
   8302  *
   8303  * Return PHY ID. Return -1 if it failed.
   8304  */
   8305 static int
   8306 wm_get_phy_id_82575(struct wm_softc *sc)
   8307 {
   8308 	uint32_t reg;
   8309 	int phyid = -1;
   8310 
   8311 	/* XXX */
   8312 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8313 		return -1;
   8314 
   8315 	if (wm_sgmii_uses_mdio(sc)) {
   8316 		switch (sc->sc_type) {
   8317 		case WM_T_82575:
   8318 		case WM_T_82576:
   8319 			reg = CSR_READ(sc, WMREG_MDIC);
   8320 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8321 			break;
   8322 		case WM_T_82580:
   8323 		case WM_T_I350:
   8324 		case WM_T_I354:
   8325 		case WM_T_I210:
   8326 		case WM_T_I211:
   8327 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8328 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8329 			break;
   8330 		default:
   8331 			return -1;
   8332 		}
   8333 	}
   8334 
   8335 	return phyid;
   8336 }
   8337 
   8338 
   8339 /*
   8340  * wm_gmii_mediainit:
   8341  *
   8342  *	Initialize media for use on 1000BASE-T devices.
   8343  */
   8344 static void
   8345 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8346 {
   8347 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8348 	struct mii_data *mii = &sc->sc_mii;
   8349 	uint32_t reg;
   8350 
   8351 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8352 		device_xname(sc->sc_dev), __func__));
   8353 
   8354 	/* We have GMII. */
   8355 	sc->sc_flags |= WM_F_HAS_MII;
   8356 
   8357 	if (sc->sc_type == WM_T_80003)
   8358 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8359 	else
   8360 		sc->sc_tipg = TIPG_1000T_DFLT;
   8361 
   8362 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8363 	if ((sc->sc_type == WM_T_82580)
   8364 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8365 	    || (sc->sc_type == WM_T_I211)) {
   8366 		reg = CSR_READ(sc, WMREG_PHPM);
   8367 		reg &= ~PHPM_GO_LINK_D;
   8368 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8369 	}
   8370 
   8371 	/*
   8372 	 * Let the chip set speed/duplex on its own based on
   8373 	 * signals from the PHY.
   8374 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8375 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8376 	 */
   8377 	sc->sc_ctrl |= CTRL_SLU;
   8378 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8379 
   8380 	/* Initialize our media structures and probe the GMII. */
   8381 	mii->mii_ifp = ifp;
   8382 
   8383 	/*
   8384 	 * Determine the PHY access method.
   8385 	 *
   8386 	 *  For SGMII, use SGMII specific method.
   8387 	 *
   8388 	 *  For some devices, we can determine the PHY access method
   8389 	 * from sc_type.
   8390 	 *
   8391 	 *  For ICH and PCH variants, it's difficult to determine the PHY
   8392 	 * access  method by sc_type, so use the PCI product ID for some
   8393 	 * devices.
   8394 	 * For other ICH8 variants, try to use igp's method. If the PHY
   8395 	 * can't detect, then use bm's method.
   8396 	 */
   8397 	switch (prodid) {
   8398 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8399 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8400 		/* 82577 */
   8401 		sc->sc_phytype = WMPHY_82577;
   8402 		break;
   8403 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8404 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8405 		/* 82578 */
   8406 		sc->sc_phytype = WMPHY_82578;
   8407 		break;
   8408 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8409 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8410 		/* 82579 */
   8411 		sc->sc_phytype = WMPHY_82579;
   8412 		break;
   8413 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8414 	case PCI_PRODUCT_INTEL_82801I_BM:
   8415 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8416 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8417 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8418 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8419 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8420 		/* ICH8, 9, 10 with 82567 */
   8421 		sc->sc_phytype = WMPHY_BM;
   8422 		mii->mii_readreg = wm_gmii_bm_readreg;
   8423 		mii->mii_writereg = wm_gmii_bm_writereg;
   8424 		break;
   8425 	default:
   8426 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8427 		    && !wm_sgmii_uses_mdio(sc)){
   8428 			/* SGMII */
   8429 			mii->mii_readreg = wm_sgmii_readreg;
   8430 			mii->mii_writereg = wm_sgmii_writereg;
   8431 		} else if ((sc->sc_type == WM_T_82574)
   8432 		    || (sc->sc_type == WM_T_82583)) {
   8433 			/* BM2 (phyaddr == 1) */
   8434 			sc->sc_phytype = WMPHY_BM;
   8435 			mii->mii_readreg = wm_gmii_bm_readreg;
   8436 			mii->mii_writereg = wm_gmii_bm_writereg;
   8437 		} else if (sc->sc_type >= WM_T_ICH8) {
   8438 			/* non-82567 ICH8, 9 and 10 */
   8439 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8440 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8441 		} else if (sc->sc_type >= WM_T_80003) {
   8442 			/* 80003 */
   8443 			sc->sc_phytype = WMPHY_GG82563;
   8444 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8445 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8446 		} else if (sc->sc_type >= WM_T_I210) {
   8447 			/* I210 and I211 */
   8448 			sc->sc_phytype = WMPHY_210;
   8449 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8450 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8451 		} else if (sc->sc_type >= WM_T_82580) {
   8452 			/* 82580, I350 and I354 */
   8453 			sc->sc_phytype = WMPHY_82580;
   8454 			mii->mii_readreg = wm_gmii_82580_readreg;
   8455 			mii->mii_writereg = wm_gmii_82580_writereg;
   8456 		} else if (sc->sc_type >= WM_T_82544) {
   8457 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   8458 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8459 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8460 		} else {
   8461 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8462 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8463 		}
   8464 		break;
   8465 	}
   8466 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8467 		/* All PCH* use _hv_ */
   8468 		mii->mii_readreg = wm_gmii_hv_readreg;
   8469 		mii->mii_writereg = wm_gmii_hv_writereg;
   8470 	}
   8471 	mii->mii_statchg = wm_gmii_statchg;
   8472 
   8473 	/* get PHY control from SMBus to PCIe */
   8474 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   8475 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   8476 		wm_smbustopci(sc);
   8477 
   8478 	wm_gmii_reset(sc);
   8479 
   8480 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8481 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8482 	    wm_gmii_mediastatus);
   8483 
   8484 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8485 	    || (sc->sc_type == WM_T_82580)
   8486 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8487 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8488 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8489 			/* Attach only one port */
   8490 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8491 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8492 		} else {
   8493 			int i, id;
   8494 			uint32_t ctrl_ext;
   8495 
   8496 			id = wm_get_phy_id_82575(sc);
   8497 			if (id != -1) {
   8498 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8499 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8500 			}
   8501 			if ((id == -1)
   8502 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8503 				/* Power on sgmii phy if it is disabled */
   8504 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8505 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8506 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8507 				CSR_WRITE_FLUSH(sc);
   8508 				delay(300*1000); /* XXX too long */
   8509 
   8510 				/* from 1 to 8 */
   8511 				for (i = 1; i < 8; i++)
   8512 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8513 					    0xffffffff, i, MII_OFFSET_ANY,
   8514 					    MIIF_DOPAUSE);
   8515 
   8516 				/* restore previous sfp cage power state */
   8517 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8518 			}
   8519 		}
   8520 	} else {
   8521 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8522 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8523 	}
   8524 
   8525 	/*
   8526 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8527 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8528 	 */
   8529 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8530 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8531 		wm_set_mdio_slow_mode_hv(sc);
   8532 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8533 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8534 	}
   8535 
   8536 	/*
   8537 	 * (For ICH8 variants)
   8538 	 * If PHY detection failed, use BM's r/w function and retry.
   8539 	 */
   8540 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8541 		/* if failed, retry with *_bm_* */
   8542 		mii->mii_readreg = wm_gmii_bm_readreg;
   8543 		mii->mii_writereg = wm_gmii_bm_writereg;
   8544 
   8545 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8546 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8547 	}
   8548 
   8549 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8550 		/* Any PHY wasn't find */
   8551 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8552 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8553 		sc->sc_phytype = WMPHY_NONE;
   8554 	} else {
   8555 		/*
   8556 		 * PHY Found!
   8557 		 * Check PHY type.
   8558 		 */
   8559 		uint32_t model;
   8560 		struct mii_softc *child;
   8561 
   8562 		child = LIST_FIRST(&mii->mii_phys);
   8563 		model = child->mii_mpd_model;
   8564 		if (model == MII_MODEL_yyINTEL_I82566)
   8565 			sc->sc_phytype = WMPHY_IGP_3;
   8566 
   8567 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8568 	}
   8569 }
   8570 
   8571 /*
   8572  * wm_gmii_mediachange:	[ifmedia interface function]
   8573  *
   8574  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8575  */
   8576 static int
   8577 wm_gmii_mediachange(struct ifnet *ifp)
   8578 {
   8579 	struct wm_softc *sc = ifp->if_softc;
   8580 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8581 	int rc;
   8582 
   8583 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   8584 		device_xname(sc->sc_dev), __func__));
   8585 	if ((ifp->if_flags & IFF_UP) == 0)
   8586 		return 0;
   8587 
   8588 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8589 	sc->sc_ctrl |= CTRL_SLU;
   8590 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8591 	    || (sc->sc_type > WM_T_82543)) {
   8592 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8593 	} else {
   8594 		sc->sc_ctrl &= ~CTRL_ASDE;
   8595 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8596 		if (ife->ifm_media & IFM_FDX)
   8597 			sc->sc_ctrl |= CTRL_FD;
   8598 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8599 		case IFM_10_T:
   8600 			sc->sc_ctrl |= CTRL_SPEED_10;
   8601 			break;
   8602 		case IFM_100_TX:
   8603 			sc->sc_ctrl |= CTRL_SPEED_100;
   8604 			break;
   8605 		case IFM_1000_T:
   8606 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8607 			break;
   8608 		default:
   8609 			panic("wm_gmii_mediachange: bad media 0x%x",
   8610 			    ife->ifm_media);
   8611 		}
   8612 	}
   8613 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8614 	if (sc->sc_type <= WM_T_82543)
   8615 		wm_gmii_reset(sc);
   8616 
   8617 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8618 		return 0;
   8619 	return rc;
   8620 }
   8621 
   8622 /*
   8623  * wm_gmii_mediastatus:	[ifmedia interface function]
   8624  *
   8625  *	Get the current interface media status on a 1000BASE-T device.
   8626  */
   8627 static void
   8628 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8629 {
   8630 	struct wm_softc *sc = ifp->if_softc;
   8631 
   8632 	ether_mediastatus(ifp, ifmr);
   8633 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8634 	    | sc->sc_flowflags;
   8635 }
   8636 
   8637 #define	MDI_IO		CTRL_SWDPIN(2)
   8638 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8639 #define	MDI_CLK		CTRL_SWDPIN(3)
   8640 
   8641 static void
   8642 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8643 {
   8644 	uint32_t i, v;
   8645 
   8646 	v = CSR_READ(sc, WMREG_CTRL);
   8647 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8648 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8649 
   8650 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8651 		if (data & i)
   8652 			v |= MDI_IO;
   8653 		else
   8654 			v &= ~MDI_IO;
   8655 		CSR_WRITE(sc, WMREG_CTRL, v);
   8656 		CSR_WRITE_FLUSH(sc);
   8657 		delay(10);
   8658 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8659 		CSR_WRITE_FLUSH(sc);
   8660 		delay(10);
   8661 		CSR_WRITE(sc, WMREG_CTRL, v);
   8662 		CSR_WRITE_FLUSH(sc);
   8663 		delay(10);
   8664 	}
   8665 }
   8666 
   8667 static uint32_t
   8668 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8669 {
   8670 	uint32_t v, i, data = 0;
   8671 
   8672 	v = CSR_READ(sc, WMREG_CTRL);
   8673 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8674 	v |= CTRL_SWDPIO(3);
   8675 
   8676 	CSR_WRITE(sc, WMREG_CTRL, v);
   8677 	CSR_WRITE_FLUSH(sc);
   8678 	delay(10);
   8679 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8680 	CSR_WRITE_FLUSH(sc);
   8681 	delay(10);
   8682 	CSR_WRITE(sc, WMREG_CTRL, v);
   8683 	CSR_WRITE_FLUSH(sc);
   8684 	delay(10);
   8685 
   8686 	for (i = 0; i < 16; i++) {
   8687 		data <<= 1;
   8688 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8689 		CSR_WRITE_FLUSH(sc);
   8690 		delay(10);
   8691 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8692 			data |= 1;
   8693 		CSR_WRITE(sc, WMREG_CTRL, v);
   8694 		CSR_WRITE_FLUSH(sc);
   8695 		delay(10);
   8696 	}
   8697 
   8698 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8699 	CSR_WRITE_FLUSH(sc);
   8700 	delay(10);
   8701 	CSR_WRITE(sc, WMREG_CTRL, v);
   8702 	CSR_WRITE_FLUSH(sc);
   8703 	delay(10);
   8704 
   8705 	return data;
   8706 }
   8707 
   8708 #undef MDI_IO
   8709 #undef MDI_DIR
   8710 #undef MDI_CLK
   8711 
   8712 /*
   8713  * wm_gmii_i82543_readreg:	[mii interface function]
   8714  *
   8715  *	Read a PHY register on the GMII (i82543 version).
   8716  */
   8717 static int
   8718 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8719 {
   8720 	struct wm_softc *sc = device_private(self);
   8721 	int rv;
   8722 
   8723 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8724 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8725 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8726 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8727 
   8728 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8729 	    device_xname(sc->sc_dev), phy, reg, rv));
   8730 
   8731 	return rv;
   8732 }
   8733 
   8734 /*
   8735  * wm_gmii_i82543_writereg:	[mii interface function]
   8736  *
   8737  *	Write a PHY register on the GMII (i82543 version).
   8738  */
   8739 static void
   8740 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8741 {
   8742 	struct wm_softc *sc = device_private(self);
   8743 
   8744 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8745 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8746 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8747 	    (MII_COMMAND_START << 30), 32);
   8748 }
   8749 
   8750 /*
   8751  * wm_gmii_mdic_readreg:	[mii interface function]
   8752  *
   8753  *	Read a PHY register on the GMII.
   8754  */
   8755 static int
   8756 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8757 {
   8758 	struct wm_softc *sc = device_private(self);
   8759 	uint32_t mdic = 0;
   8760 	int i, rv;
   8761 
   8762 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8763 	    MDIC_REGADD(reg));
   8764 
   8765 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8766 		mdic = CSR_READ(sc, WMREG_MDIC);
   8767 		if (mdic & MDIC_READY)
   8768 			break;
   8769 		delay(50);
   8770 	}
   8771 
   8772 	if ((mdic & MDIC_READY) == 0) {
   8773 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8774 		    device_xname(sc->sc_dev), phy, reg);
   8775 		rv = 0;
   8776 	} else if (mdic & MDIC_E) {
   8777 #if 0 /* This is normal if no PHY is present. */
   8778 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8779 		    device_xname(sc->sc_dev), phy, reg);
   8780 #endif
   8781 		rv = 0;
   8782 	} else {
   8783 		rv = MDIC_DATA(mdic);
   8784 		if (rv == 0xffff)
   8785 			rv = 0;
   8786 	}
   8787 
   8788 	return rv;
   8789 }
   8790 
   8791 /*
   8792  * wm_gmii_mdic_writereg:	[mii interface function]
   8793  *
   8794  *	Write a PHY register on the GMII.
   8795  */
   8796 static void
   8797 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8798 {
   8799 	struct wm_softc *sc = device_private(self);
   8800 	uint32_t mdic = 0;
   8801 	int i;
   8802 
   8803 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8804 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8805 
   8806 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8807 		mdic = CSR_READ(sc, WMREG_MDIC);
   8808 		if (mdic & MDIC_READY)
   8809 			break;
   8810 		delay(50);
   8811 	}
   8812 
   8813 	if ((mdic & MDIC_READY) == 0)
   8814 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8815 		    device_xname(sc->sc_dev), phy, reg);
   8816 	else if (mdic & MDIC_E)
   8817 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8818 		    device_xname(sc->sc_dev), phy, reg);
   8819 }
   8820 
   8821 /*
   8822  * wm_gmii_i82544_readreg:	[mii interface function]
   8823  *
   8824  *	Read a PHY register on the GMII.
   8825  */
   8826 static int
   8827 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8828 {
   8829 	struct wm_softc *sc = device_private(self);
   8830 	int rv;
   8831 
   8832 	if (sc->phy.acquire(sc)) {
   8833 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8834 		    __func__);
   8835 		return 0;
   8836 	}
   8837 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8838 	sc->phy.release(sc);
   8839 
   8840 	return rv;
   8841 }
   8842 
   8843 /*
   8844  * wm_gmii_i82544_writereg:	[mii interface function]
   8845  *
   8846  *	Write a PHY register on the GMII.
   8847  */
   8848 static void
   8849 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8850 {
   8851 	struct wm_softc *sc = device_private(self);
   8852 
   8853 	if (sc->phy.acquire(sc)) {
   8854 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8855 		    __func__);
   8856 	}
   8857 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8858 	sc->phy.release(sc);
   8859 }
   8860 
   8861 /*
   8862  * wm_gmii_i80003_readreg:	[mii interface function]
   8863  *
   8864  *	Read a PHY register on the kumeran
   8865  * This could be handled by the PHY layer if we didn't have to lock the
   8866  * ressource ...
   8867  */
   8868 static int
   8869 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8870 {
   8871 	struct wm_softc *sc = device_private(self);
   8872 	int rv;
   8873 
   8874 	if (phy != 1) /* only one PHY on kumeran bus */
   8875 		return 0;
   8876 
   8877 	if (sc->phy.acquire(sc)) {
   8878 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8879 		    __func__);
   8880 		return 0;
   8881 	}
   8882 
   8883 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8884 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8885 		    reg >> GG82563_PAGE_SHIFT);
   8886 	} else {
   8887 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8888 		    reg >> GG82563_PAGE_SHIFT);
   8889 	}
   8890 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   8891 	delay(200);
   8892 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8893 	delay(200);
   8894 	sc->phy.release(sc);
   8895 
   8896 	return rv;
   8897 }
   8898 
   8899 /*
   8900  * wm_gmii_i80003_writereg:	[mii interface function]
   8901  *
   8902  *	Write a PHY register on the kumeran.
   8903  * This could be handled by the PHY layer if we didn't have to lock the
   8904  * ressource ...
   8905  */
   8906 static void
   8907 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8908 {
   8909 	struct wm_softc *sc = device_private(self);
   8910 
   8911 	if (phy != 1) /* only one PHY on kumeran bus */
   8912 		return;
   8913 
   8914 	if (sc->phy.acquire(sc)) {
   8915 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8916 		    __func__);
   8917 		return;
   8918 	}
   8919 
   8920 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   8921 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8922 		    reg >> GG82563_PAGE_SHIFT);
   8923 	} else {
   8924 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8925 		    reg >> GG82563_PAGE_SHIFT);
   8926 	}
   8927 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
   8928 	delay(200);
   8929 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   8930 	delay(200);
   8931 
   8932 	sc->phy.release(sc);
   8933 }
   8934 
   8935 /*
   8936  * wm_gmii_bm_readreg:	[mii interface function]
   8937  *
   8938  *	Read a PHY register on the kumeran
   8939  * This could be handled by the PHY layer if we didn't have to lock the
   8940  * ressource ...
   8941  */
   8942 static int
   8943 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8944 {
   8945 	struct wm_softc *sc = device_private(self);
   8946 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8947 	uint16_t val;
   8948 	int rv;
   8949 
   8950 	if (sc->phy.acquire(sc)) {
   8951 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8952 		    __func__);
   8953 		return 0;
   8954 	}
   8955 
   8956 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   8957 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   8958 		    || (reg == 31)) ? 1 : phy;
   8959 	/* Page 800 works differently than the rest so it has its own func */
   8960 	if (page == BM_WUC_PAGE) {
   8961 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8962 		rv = val;
   8963 		goto release;
   8964 	}
   8965 
   8966 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8967 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   8968 		    && (sc->sc_type != WM_T_82583))
   8969 			wm_gmii_mdic_writereg(self, phy,
   8970 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   8971 		else
   8972 			wm_gmii_mdic_writereg(self, phy,
   8973 			    BME1000_PHY_PAGE_SELECT, page);
   8974 	}
   8975 
   8976 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   8977 
   8978 release:
   8979 	sc->phy.release(sc);
   8980 	return rv;
   8981 }
   8982 
   8983 /*
   8984  * wm_gmii_bm_writereg:	[mii interface function]
   8985  *
   8986  *	Write a PHY register on the kumeran.
   8987  * This could be handled by the PHY layer if we didn't have to lock the
   8988  * ressource ...
   8989  */
   8990 static void
   8991 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8992 {
   8993 	struct wm_softc *sc = device_private(self);
   8994 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   8995 
   8996 	if (sc->phy.acquire(sc)) {
   8997 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8998 		    __func__);
   8999 		return;
   9000 	}
   9001 
   9002 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9003 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9004 		    || (reg == 31)) ? 1 : phy;
   9005 	/* Page 800 works differently than the rest so it has its own func */
   9006 	if (page == BM_WUC_PAGE) {
   9007 		uint16_t tmp;
   9008 
   9009 		tmp = val;
   9010 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9011 		goto release;
   9012 	}
   9013 
   9014 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9015 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9016 		    && (sc->sc_type != WM_T_82583))
   9017 			wm_gmii_mdic_writereg(self, phy,
   9018 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9019 		else
   9020 			wm_gmii_mdic_writereg(self, phy,
   9021 			    BME1000_PHY_PAGE_SELECT, page);
   9022 	}
   9023 
   9024 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9025 
   9026 release:
   9027 	sc->phy.release(sc);
   9028 }
   9029 
   9030 static void
   9031 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9032 {
   9033 	struct wm_softc *sc = device_private(self);
   9034 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9035 	uint16_t wuce, reg;
   9036 
   9037 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9038 		device_xname(sc->sc_dev), __func__));
   9039 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9040 	if (sc->sc_type == WM_T_PCH) {
   9041 		/* XXX e1000 driver do nothing... why? */
   9042 	}
   9043 
   9044 	/*
   9045 	 * 1) Enable PHY wakeup register first.
   9046 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9047 	 */
   9048 
   9049 	/* Set page 769 */
   9050 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9051 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9052 
   9053 	/* Read WUCE and save it */
   9054 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9055 
   9056 	reg = wuce | BM_WUC_ENABLE_BIT;
   9057 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9058 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9059 
   9060 	/* Select page 800 */
   9061 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9062 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9063 
   9064 	/*
   9065 	 * 2) Access PHY wakeup register.
   9066 	 * See e1000_access_phy_wakeup_reg_bm.
   9067 	 */
   9068 
   9069 	/* Write page 800 */
   9070 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9071 
   9072 	if (rd)
   9073 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9074 	else
   9075 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9076 
   9077 	/*
   9078 	 * 3) Disable PHY wakeup register.
   9079 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9080 	 */
   9081 	/* Set page 769 */
   9082 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9083 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9084 
   9085 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9086 }
   9087 
   9088 /*
   9089  * wm_gmii_hv_readreg:	[mii interface function]
   9090  *
   9091  *	Read a PHY register on the kumeran
   9092  * This could be handled by the PHY layer if we didn't have to lock the
   9093  * ressource ...
   9094  */
   9095 static int
   9096 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9097 {
   9098 	struct wm_softc *sc = device_private(self);
   9099 	int rv;
   9100 
   9101 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9102 		device_xname(sc->sc_dev), __func__));
   9103 	if (sc->phy.acquire(sc)) {
   9104 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9105 		    __func__);
   9106 		return 0;
   9107 	}
   9108 
   9109 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9110 	sc->phy.release(sc);
   9111 	return rv;
   9112 }
   9113 
   9114 static int
   9115 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9116 {
   9117 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9118 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9119 	uint16_t val;
   9120 	int rv;
   9121 
   9122 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9123 
   9124 	/* Page 800 works differently than the rest so it has its own func */
   9125 	if (page == BM_WUC_PAGE) {
   9126 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9127 		return val;
   9128 	}
   9129 
   9130 	/*
   9131 	 * Lower than page 768 works differently than the rest so it has its
   9132 	 * own func
   9133 	 */
   9134 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9135 		printf("gmii_hv_readreg!!!\n");
   9136 		return 0;
   9137 	}
   9138 
   9139 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9140 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9141 		    page << BME1000_PAGE_SHIFT);
   9142 	}
   9143 
   9144 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9145 	return rv;
   9146 }
   9147 
   9148 /*
   9149  * wm_gmii_hv_writereg:	[mii interface function]
   9150  *
   9151  *	Write a PHY register on the kumeran.
   9152  * This could be handled by the PHY layer if we didn't have to lock the
   9153  * ressource ...
   9154  */
   9155 static void
   9156 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9157 {
   9158 	struct wm_softc *sc = device_private(self);
   9159 
   9160 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9161 		device_xname(sc->sc_dev), __func__));
   9162 
   9163 	if (sc->phy.acquire(sc)) {
   9164 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9165 		    __func__);
   9166 		return;
   9167 	}
   9168 
   9169 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9170 	sc->phy.release(sc);
   9171 }
   9172 
   9173 static void
   9174 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9175 {
   9176 	struct wm_softc *sc = device_private(self);
   9177 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9178 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9179 
   9180 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9181 
   9182 	/* Page 800 works differently than the rest so it has its own func */
   9183 	if (page == BM_WUC_PAGE) {
   9184 		uint16_t tmp;
   9185 
   9186 		tmp = val;
   9187 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9188 		return;
   9189 	}
   9190 
   9191 	/*
   9192 	 * Lower than page 768 works differently than the rest so it has its
   9193 	 * own func
   9194 	 */
   9195 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9196 		printf("gmii_hv_writereg!!!\n");
   9197 		return;
   9198 	}
   9199 
   9200 	{
   9201 		/*
   9202 		 * XXX Workaround MDIO accesses being disabled after entering
   9203 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9204 		 * register is set)
   9205 		 */
   9206 		if (sc->sc_phytype == WMPHY_82578) {
   9207 			struct mii_softc *child;
   9208 
   9209 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9210 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9211 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9212 			    && ((val & (1 << 11)) != 0)) {
   9213 				printf("XXX need workaround\n");
   9214 			}
   9215 		}
   9216 
   9217 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9218 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9219 			    page << BME1000_PAGE_SHIFT);
   9220 		}
   9221 	}
   9222 
   9223 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   9224 }
   9225 
   9226 /*
   9227  * wm_gmii_82580_readreg:	[mii interface function]
   9228  *
   9229  *	Read a PHY register on the 82580 and I350.
   9230  * This could be handled by the PHY layer if we didn't have to lock the
   9231  * ressource ...
   9232  */
   9233 static int
   9234 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   9235 {
   9236 	struct wm_softc *sc = device_private(self);
   9237 	int rv;
   9238 
   9239 	if (sc->phy.acquire(sc) != 0) {
   9240 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9241 		    __func__);
   9242 		return 0;
   9243 	}
   9244 
   9245 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9246 
   9247 	sc->phy.release(sc);
   9248 	return rv;
   9249 }
   9250 
   9251 /*
   9252  * wm_gmii_82580_writereg:	[mii interface function]
   9253  *
   9254  *	Write a PHY register on the 82580 and I350.
   9255  * This could be handled by the PHY layer if we didn't have to lock the
   9256  * ressource ...
   9257  */
   9258 static void
   9259 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9260 {
   9261 	struct wm_softc *sc = device_private(self);
   9262 
   9263 	if (sc->phy.acquire(sc) != 0) {
   9264 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9265 		    __func__);
   9266 		return;
   9267 	}
   9268 
   9269 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9270 
   9271 	sc->phy.release(sc);
   9272 }
   9273 
   9274 /*
   9275  * wm_gmii_gs40g_readreg:	[mii interface function]
   9276  *
   9277  *	Read a PHY register on the I2100 and I211.
   9278  * This could be handled by the PHY layer if we didn't have to lock the
   9279  * ressource ...
   9280  */
   9281 static int
   9282 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9283 {
   9284 	struct wm_softc *sc = device_private(self);
   9285 	int page, offset;
   9286 	int rv;
   9287 
   9288 	/* Acquire semaphore */
   9289 	if (sc->phy.acquire(sc)) {
   9290 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9291 		    __func__);
   9292 		return 0;
   9293 	}
   9294 
   9295 	/* Page select */
   9296 	page = reg >> GS40G_PAGE_SHIFT;
   9297 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9298 
   9299 	/* Read reg */
   9300 	offset = reg & GS40G_OFFSET_MASK;
   9301 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9302 
   9303 	sc->phy.release(sc);
   9304 	return rv;
   9305 }
   9306 
   9307 /*
   9308  * wm_gmii_gs40g_writereg:	[mii interface function]
   9309  *
   9310  *	Write a PHY register on the I210 and I211.
   9311  * This could be handled by the PHY layer if we didn't have to lock the
   9312  * ressource ...
   9313  */
   9314 static void
   9315 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9316 {
   9317 	struct wm_softc *sc = device_private(self);
   9318 	int page, offset;
   9319 
   9320 	/* Acquire semaphore */
   9321 	if (sc->phy.acquire(sc)) {
   9322 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9323 		    __func__);
   9324 		return;
   9325 	}
   9326 
   9327 	/* Page select */
   9328 	page = reg >> GS40G_PAGE_SHIFT;
   9329 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9330 
   9331 	/* Write reg */
   9332 	offset = reg & GS40G_OFFSET_MASK;
   9333 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9334 
   9335 	/* Release semaphore */
   9336 	sc->phy.release(sc);
   9337 }
   9338 
   9339 /*
   9340  * wm_gmii_statchg:	[mii interface function]
   9341  *
   9342  *	Callback from MII layer when media changes.
   9343  */
   9344 static void
   9345 wm_gmii_statchg(struct ifnet *ifp)
   9346 {
   9347 	struct wm_softc *sc = ifp->if_softc;
   9348 	struct mii_data *mii = &sc->sc_mii;
   9349 
   9350 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9351 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9352 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9353 
   9354 	/*
   9355 	 * Get flow control negotiation result.
   9356 	 */
   9357 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9358 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9359 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9360 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9361 	}
   9362 
   9363 	if (sc->sc_flowflags & IFM_FLOW) {
   9364 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9365 			sc->sc_ctrl |= CTRL_TFCE;
   9366 			sc->sc_fcrtl |= FCRTL_XONE;
   9367 		}
   9368 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9369 			sc->sc_ctrl |= CTRL_RFCE;
   9370 	}
   9371 
   9372 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9373 		DPRINTF(WM_DEBUG_LINK,
   9374 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9375 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9376 	} else {
   9377 		DPRINTF(WM_DEBUG_LINK,
   9378 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9379 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9380 	}
   9381 
   9382 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9383 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9384 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9385 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9386 	if (sc->sc_type == WM_T_80003) {
   9387 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9388 		case IFM_1000_T:
   9389 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9390 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9391 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9392 			break;
   9393 		default:
   9394 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9395 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9396 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9397 			break;
   9398 		}
   9399 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9400 	}
   9401 }
   9402 
   9403 /* kumeran related (80003, ICH* and PCH*) */
   9404 
   9405 /*
   9406  * wm_kmrn_readreg:
   9407  *
   9408  *	Read a kumeran register
   9409  */
   9410 static int
   9411 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9412 {
   9413 	int rv;
   9414 
   9415 	if (sc->sc_type == WM_T_80003)
   9416 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9417 	else
   9418 		rv = sc->phy.acquire(sc);
   9419 	if (rv != 0) {
   9420 		aprint_error_dev(sc->sc_dev,
   9421 		    "%s: failed to get semaphore\n", __func__);
   9422 		return 0;
   9423 	}
   9424 
   9425 	rv = wm_kmrn_readreg_locked(sc, reg);
   9426 
   9427 	if (sc->sc_type == WM_T_80003)
   9428 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9429 	else
   9430 		sc->phy.release(sc);
   9431 
   9432 	return rv;
   9433 }
   9434 
   9435 static int
   9436 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9437 {
   9438 	int rv;
   9439 
   9440 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9441 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9442 	    KUMCTRLSTA_REN);
   9443 	CSR_WRITE_FLUSH(sc);
   9444 	delay(2);
   9445 
   9446 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9447 
   9448 	return rv;
   9449 }
   9450 
   9451 /*
   9452  * wm_kmrn_writereg:
   9453  *
   9454  *	Write a kumeran register
   9455  */
   9456 static void
   9457 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9458 {
   9459 	int rv;
   9460 
   9461 	if (sc->sc_type == WM_T_80003)
   9462 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9463 	else
   9464 		rv = sc->phy.acquire(sc);
   9465 	if (rv != 0) {
   9466 		aprint_error_dev(sc->sc_dev,
   9467 		    "%s: failed to get semaphore\n", __func__);
   9468 		return;
   9469 	}
   9470 
   9471 	wm_kmrn_writereg_locked(sc, reg, val);
   9472 
   9473 	if (sc->sc_type == WM_T_80003)
   9474 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9475 	else
   9476 		sc->phy.release(sc);
   9477 }
   9478 
   9479 static void
   9480 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9481 {
   9482 
   9483 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9484 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9485 	    (val & KUMCTRLSTA_MASK));
   9486 }
   9487 
   9488 /* SGMII related */
   9489 
   9490 /*
   9491  * wm_sgmii_uses_mdio
   9492  *
   9493  * Check whether the transaction is to the internal PHY or the external
   9494  * MDIO interface. Return true if it's MDIO.
   9495  */
   9496 static bool
   9497 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9498 {
   9499 	uint32_t reg;
   9500 	bool ismdio = false;
   9501 
   9502 	switch (sc->sc_type) {
   9503 	case WM_T_82575:
   9504 	case WM_T_82576:
   9505 		reg = CSR_READ(sc, WMREG_MDIC);
   9506 		ismdio = ((reg & MDIC_DEST) != 0);
   9507 		break;
   9508 	case WM_T_82580:
   9509 	case WM_T_I350:
   9510 	case WM_T_I354:
   9511 	case WM_T_I210:
   9512 	case WM_T_I211:
   9513 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9514 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9515 		break;
   9516 	default:
   9517 		break;
   9518 	}
   9519 
   9520 	return ismdio;
   9521 }
   9522 
   9523 /*
   9524  * wm_sgmii_readreg:	[mii interface function]
   9525  *
   9526  *	Read a PHY register on the SGMII
   9527  * This could be handled by the PHY layer if we didn't have to lock the
   9528  * ressource ...
   9529  */
   9530 static int
   9531 wm_sgmii_readreg(device_t self, int phy, int reg)
   9532 {
   9533 	struct wm_softc *sc = device_private(self);
   9534 	uint32_t i2ccmd;
   9535 	int i, rv;
   9536 
   9537 	if (sc->phy.acquire(sc)) {
   9538 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9539 		    __func__);
   9540 		return 0;
   9541 	}
   9542 
   9543 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9544 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9545 	    | I2CCMD_OPCODE_READ;
   9546 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9547 
   9548 	/* Poll the ready bit */
   9549 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9550 		delay(50);
   9551 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9552 		if (i2ccmd & I2CCMD_READY)
   9553 			break;
   9554 	}
   9555 	if ((i2ccmd & I2CCMD_READY) == 0)
   9556 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9557 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9558 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9559 
   9560 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9561 
   9562 	sc->phy.release(sc);
   9563 	return rv;
   9564 }
   9565 
   9566 /*
   9567  * wm_sgmii_writereg:	[mii interface function]
   9568  *
   9569  *	Write a PHY register on the SGMII.
   9570  * This could be handled by the PHY layer if we didn't have to lock the
   9571  * ressource ...
   9572  */
   9573 static void
   9574 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9575 {
   9576 	struct wm_softc *sc = device_private(self);
   9577 	uint32_t i2ccmd;
   9578 	int i;
   9579 	int val_swapped;
   9580 
   9581 	if (sc->phy.acquire(sc) != 0) {
   9582 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9583 		    __func__);
   9584 		return;
   9585 	}
   9586 	/* Swap the data bytes for the I2C interface */
   9587 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9588 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9589 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9590 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9591 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9592 
   9593 	/* Poll the ready bit */
   9594 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9595 		delay(50);
   9596 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9597 		if (i2ccmd & I2CCMD_READY)
   9598 			break;
   9599 	}
   9600 	if ((i2ccmd & I2CCMD_READY) == 0)
   9601 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9602 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9603 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9604 
   9605 	sc->phy.release(sc);
   9606 }
   9607 
   9608 /* TBI related */
   9609 
   9610 /*
   9611  * wm_tbi_mediainit:
   9612  *
   9613  *	Initialize media for use on 1000BASE-X devices.
   9614  */
   9615 static void
   9616 wm_tbi_mediainit(struct wm_softc *sc)
   9617 {
   9618 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9619 	const char *sep = "";
   9620 
   9621 	if (sc->sc_type < WM_T_82543)
   9622 		sc->sc_tipg = TIPG_WM_DFLT;
   9623 	else
   9624 		sc->sc_tipg = TIPG_LG_DFLT;
   9625 
   9626 	sc->sc_tbi_serdes_anegticks = 5;
   9627 
   9628 	/* Initialize our media structures */
   9629 	sc->sc_mii.mii_ifp = ifp;
   9630 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9631 
   9632 	if ((sc->sc_type >= WM_T_82575)
   9633 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9634 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9635 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9636 	else
   9637 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9638 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9639 
   9640 	/*
   9641 	 * SWD Pins:
   9642 	 *
   9643 	 *	0 = Link LED (output)
   9644 	 *	1 = Loss Of Signal (input)
   9645 	 */
   9646 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9647 
   9648 	/* XXX Perhaps this is only for TBI */
   9649 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9650 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9651 
   9652 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9653 		sc->sc_ctrl &= ~CTRL_LRST;
   9654 
   9655 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9656 
   9657 #define	ADD(ss, mm, dd)							\
   9658 do {									\
   9659 	aprint_normal("%s%s", sep, ss);					\
   9660 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9661 	sep = ", ";							\
   9662 } while (/*CONSTCOND*/0)
   9663 
   9664 	aprint_normal_dev(sc->sc_dev, "");
   9665 
   9666 	if (sc->sc_type == WM_T_I354) {
   9667 		uint32_t status;
   9668 
   9669 		status = CSR_READ(sc, WMREG_STATUS);
   9670 		if (((status & STATUS_2P5_SKU) != 0)
   9671 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   9672 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
   9673 		} else
   9674 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
   9675 	} else if (sc->sc_type == WM_T_82545) {
   9676 		/* Only 82545 is LX (XXX except SFP) */
   9677 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9678 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9679 	} else {
   9680 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9681 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9682 	}
   9683 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9684 	aprint_normal("\n");
   9685 
   9686 #undef ADD
   9687 
   9688 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9689 }
   9690 
   9691 /*
   9692  * wm_tbi_mediachange:	[ifmedia interface function]
   9693  *
   9694  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9695  */
   9696 static int
   9697 wm_tbi_mediachange(struct ifnet *ifp)
   9698 {
   9699 	struct wm_softc *sc = ifp->if_softc;
   9700 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9701 	uint32_t status;
   9702 	int i;
   9703 
   9704 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9705 		/* XXX need some work for >= 82571 and < 82575 */
   9706 		if (sc->sc_type < WM_T_82575)
   9707 			return 0;
   9708 	}
   9709 
   9710 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9711 	    || (sc->sc_type >= WM_T_82575))
   9712 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9713 
   9714 	sc->sc_ctrl &= ~CTRL_LRST;
   9715 	sc->sc_txcw = TXCW_ANE;
   9716 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9717 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9718 	else if (ife->ifm_media & IFM_FDX)
   9719 		sc->sc_txcw |= TXCW_FD;
   9720 	else
   9721 		sc->sc_txcw |= TXCW_HD;
   9722 
   9723 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9724 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9725 
   9726 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9727 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9728 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9729 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9730 	CSR_WRITE_FLUSH(sc);
   9731 	delay(1000);
   9732 
   9733 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9734 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9735 
   9736 	/*
   9737 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9738 	 * optics detect a signal, 0 if they don't.
   9739 	 */
   9740 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9741 		/* Have signal; wait for the link to come up. */
   9742 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9743 			delay(10000);
   9744 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9745 				break;
   9746 		}
   9747 
   9748 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9749 			    device_xname(sc->sc_dev),i));
   9750 
   9751 		status = CSR_READ(sc, WMREG_STATUS);
   9752 		DPRINTF(WM_DEBUG_LINK,
   9753 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9754 			device_xname(sc->sc_dev),status, STATUS_LU));
   9755 		if (status & STATUS_LU) {
   9756 			/* Link is up. */
   9757 			DPRINTF(WM_DEBUG_LINK,
   9758 			    ("%s: LINK: set media -> link up %s\n",
   9759 			    device_xname(sc->sc_dev),
   9760 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9761 
   9762 			/*
   9763 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9764 			 * so we should update sc->sc_ctrl
   9765 			 */
   9766 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9767 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9768 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9769 			if (status & STATUS_FD)
   9770 				sc->sc_tctl |=
   9771 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9772 			else
   9773 				sc->sc_tctl |=
   9774 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9775 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9776 				sc->sc_fcrtl |= FCRTL_XONE;
   9777 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9778 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9779 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9780 				      sc->sc_fcrtl);
   9781 			sc->sc_tbi_linkup = 1;
   9782 		} else {
   9783 			if (i == WM_LINKUP_TIMEOUT)
   9784 				wm_check_for_link(sc);
   9785 			/* Link is down. */
   9786 			DPRINTF(WM_DEBUG_LINK,
   9787 			    ("%s: LINK: set media -> link down\n",
   9788 			    device_xname(sc->sc_dev)));
   9789 			sc->sc_tbi_linkup = 0;
   9790 		}
   9791 	} else {
   9792 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9793 		    device_xname(sc->sc_dev)));
   9794 		sc->sc_tbi_linkup = 0;
   9795 	}
   9796 
   9797 	wm_tbi_serdes_set_linkled(sc);
   9798 
   9799 	return 0;
   9800 }
   9801 
   9802 /*
   9803  * wm_tbi_mediastatus:	[ifmedia interface function]
   9804  *
   9805  *	Get the current interface media status on a 1000BASE-X device.
   9806  */
   9807 static void
   9808 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9809 {
   9810 	struct wm_softc *sc = ifp->if_softc;
   9811 	uint32_t ctrl, status;
   9812 
   9813 	ifmr->ifm_status = IFM_AVALID;
   9814 	ifmr->ifm_active = IFM_ETHER;
   9815 
   9816 	status = CSR_READ(sc, WMREG_STATUS);
   9817 	if ((status & STATUS_LU) == 0) {
   9818 		ifmr->ifm_active |= IFM_NONE;
   9819 		return;
   9820 	}
   9821 
   9822 	ifmr->ifm_status |= IFM_ACTIVE;
   9823 	/* Only 82545 is LX */
   9824 	if (sc->sc_type == WM_T_82545)
   9825 		ifmr->ifm_active |= IFM_1000_LX;
   9826 	else
   9827 		ifmr->ifm_active |= IFM_1000_SX;
   9828 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9829 		ifmr->ifm_active |= IFM_FDX;
   9830 	else
   9831 		ifmr->ifm_active |= IFM_HDX;
   9832 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9833 	if (ctrl & CTRL_RFCE)
   9834 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9835 	if (ctrl & CTRL_TFCE)
   9836 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9837 }
   9838 
   9839 /* XXX TBI only */
   9840 static int
   9841 wm_check_for_link(struct wm_softc *sc)
   9842 {
   9843 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9844 	uint32_t rxcw;
   9845 	uint32_t ctrl;
   9846 	uint32_t status;
   9847 	uint32_t sig;
   9848 
   9849 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9850 		/* XXX need some work for >= 82571 */
   9851 		if (sc->sc_type >= WM_T_82571) {
   9852 			sc->sc_tbi_linkup = 1;
   9853 			return 0;
   9854 		}
   9855 	}
   9856 
   9857 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9858 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9859 	status = CSR_READ(sc, WMREG_STATUS);
   9860 
   9861 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9862 
   9863 	DPRINTF(WM_DEBUG_LINK,
   9864 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9865 		device_xname(sc->sc_dev), __func__,
   9866 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9867 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9868 
   9869 	/*
   9870 	 * SWDPIN   LU RXCW
   9871 	 *      0    0    0
   9872 	 *      0    0    1	(should not happen)
   9873 	 *      0    1    0	(should not happen)
   9874 	 *      0    1    1	(should not happen)
   9875 	 *      1    0    0	Disable autonego and force linkup
   9876 	 *      1    0    1	got /C/ but not linkup yet
   9877 	 *      1    1    0	(linkup)
   9878 	 *      1    1    1	If IFM_AUTO, back to autonego
   9879 	 *
   9880 	 */
   9881 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9882 	    && ((status & STATUS_LU) == 0)
   9883 	    && ((rxcw & RXCW_C) == 0)) {
   9884 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9885 			__func__));
   9886 		sc->sc_tbi_linkup = 0;
   9887 		/* Disable auto-negotiation in the TXCW register */
   9888 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9889 
   9890 		/*
   9891 		 * Force link-up and also force full-duplex.
   9892 		 *
   9893 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   9894 		 * so we should update sc->sc_ctrl
   9895 		 */
   9896 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9897 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9898 	} else if (((status & STATUS_LU) != 0)
   9899 	    && ((rxcw & RXCW_C) != 0)
   9900 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9901 		sc->sc_tbi_linkup = 1;
   9902 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9903 			__func__));
   9904 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9905 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9906 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9907 	    && ((rxcw & RXCW_C) != 0)) {
   9908 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9909 	} else {
   9910 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9911 			status));
   9912 	}
   9913 
   9914 	return 0;
   9915 }
   9916 
   9917 /*
   9918  * wm_tbi_tick:
   9919  *
   9920  *	Check the link on TBI devices.
   9921  *	This function acts as mii_tick().
   9922  */
   9923 static void
   9924 wm_tbi_tick(struct wm_softc *sc)
   9925 {
   9926 	struct mii_data *mii = &sc->sc_mii;
   9927 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9928 	uint32_t status;
   9929 
   9930 	KASSERT(WM_CORE_LOCKED(sc));
   9931 
   9932 	status = CSR_READ(sc, WMREG_STATUS);
   9933 
   9934 	/* XXX is this needed? */
   9935 	(void)CSR_READ(sc, WMREG_RXCW);
   9936 	(void)CSR_READ(sc, WMREG_CTRL);
   9937 
   9938 	/* set link status */
   9939 	if ((status & STATUS_LU) == 0) {
   9940 		DPRINTF(WM_DEBUG_LINK,
   9941 		    ("%s: LINK: checklink -> down\n",
   9942 			device_xname(sc->sc_dev)));
   9943 		sc->sc_tbi_linkup = 0;
   9944 	} else if (sc->sc_tbi_linkup == 0) {
   9945 		DPRINTF(WM_DEBUG_LINK,
   9946 		    ("%s: LINK: checklink -> up %s\n",
   9947 			device_xname(sc->sc_dev),
   9948 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9949 		sc->sc_tbi_linkup = 1;
   9950 		sc->sc_tbi_serdes_ticks = 0;
   9951 	}
   9952 
   9953 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9954 		goto setled;
   9955 
   9956 	if ((status & STATUS_LU) == 0) {
   9957 		sc->sc_tbi_linkup = 0;
   9958 		/* If the timer expired, retry autonegotiation */
   9959 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9960 		    && (++sc->sc_tbi_serdes_ticks
   9961 			>= sc->sc_tbi_serdes_anegticks)) {
   9962 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9963 			sc->sc_tbi_serdes_ticks = 0;
   9964 			/*
   9965 			 * Reset the link, and let autonegotiation do
   9966 			 * its thing
   9967 			 */
   9968 			sc->sc_ctrl |= CTRL_LRST;
   9969 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9970 			CSR_WRITE_FLUSH(sc);
   9971 			delay(1000);
   9972 			sc->sc_ctrl &= ~CTRL_LRST;
   9973 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9974 			CSR_WRITE_FLUSH(sc);
   9975 			delay(1000);
   9976 			CSR_WRITE(sc, WMREG_TXCW,
   9977 			    sc->sc_txcw & ~TXCW_ANE);
   9978 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9979 		}
   9980 	}
   9981 
   9982 setled:
   9983 	wm_tbi_serdes_set_linkled(sc);
   9984 }
   9985 
   9986 /* SERDES related */
   9987 static void
   9988 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9989 {
   9990 	uint32_t reg;
   9991 
   9992 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9993 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9994 		return;
   9995 
   9996 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9997 	reg |= PCS_CFG_PCS_EN;
   9998 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9999 
   10000 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10001 	reg &= ~CTRL_EXT_SWDPIN(3);
   10002 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10003 	CSR_WRITE_FLUSH(sc);
   10004 }
   10005 
   10006 static int
   10007 wm_serdes_mediachange(struct ifnet *ifp)
   10008 {
   10009 	struct wm_softc *sc = ifp->if_softc;
   10010 	bool pcs_autoneg = true; /* XXX */
   10011 	uint32_t ctrl_ext, pcs_lctl, reg;
   10012 
   10013 	/* XXX Currently, this function is not called on 8257[12] */
   10014 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10015 	    || (sc->sc_type >= WM_T_82575))
   10016 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10017 
   10018 	wm_serdes_power_up_link_82575(sc);
   10019 
   10020 	sc->sc_ctrl |= CTRL_SLU;
   10021 
   10022 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10023 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10024 
   10025 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10026 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10027 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10028 	case CTRL_EXT_LINK_MODE_SGMII:
   10029 		pcs_autoneg = true;
   10030 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10031 		break;
   10032 	case CTRL_EXT_LINK_MODE_1000KX:
   10033 		pcs_autoneg = false;
   10034 		/* FALLTHROUGH */
   10035 	default:
   10036 		if ((sc->sc_type == WM_T_82575)
   10037 		    || (sc->sc_type == WM_T_82576)) {
   10038 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10039 				pcs_autoneg = false;
   10040 		}
   10041 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10042 		    | CTRL_FRCFDX;
   10043 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10044 	}
   10045 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10046 
   10047 	if (pcs_autoneg) {
   10048 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10049 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10050 
   10051 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10052 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10053 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10054 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10055 	} else
   10056 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10057 
   10058 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10059 
   10060 
   10061 	return 0;
   10062 }
   10063 
   10064 static void
   10065 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10066 {
   10067 	struct wm_softc *sc = ifp->if_softc;
   10068 	struct mii_data *mii = &sc->sc_mii;
   10069 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10070 	uint32_t pcs_adv, pcs_lpab, reg;
   10071 
   10072 	ifmr->ifm_status = IFM_AVALID;
   10073 	ifmr->ifm_active = IFM_ETHER;
   10074 
   10075 	/* Check PCS */
   10076 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10077 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10078 		ifmr->ifm_active |= IFM_NONE;
   10079 		sc->sc_tbi_linkup = 0;
   10080 		goto setled;
   10081 	}
   10082 
   10083 	sc->sc_tbi_linkup = 1;
   10084 	ifmr->ifm_status |= IFM_ACTIVE;
   10085 	if (sc->sc_type == WM_T_I354) {
   10086 		uint32_t status;
   10087 
   10088 		status = CSR_READ(sc, WMREG_STATUS);
   10089 		if (((status & STATUS_2P5_SKU) != 0)
   10090 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10091 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10092 		} else
   10093 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10094 	} else {
   10095 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10096 		case PCS_LSTS_SPEED_10:
   10097 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10098 			break;
   10099 		case PCS_LSTS_SPEED_100:
   10100 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10101 			break;
   10102 		case PCS_LSTS_SPEED_1000:
   10103 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10104 			break;
   10105 		default:
   10106 			device_printf(sc->sc_dev, "Unknown speed\n");
   10107 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10108 			break;
   10109 		}
   10110 	}
   10111 	if ((reg & PCS_LSTS_FDX) != 0)
   10112 		ifmr->ifm_active |= IFM_FDX;
   10113 	else
   10114 		ifmr->ifm_active |= IFM_HDX;
   10115 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10116 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10117 		/* Check flow */
   10118 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10119 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10120 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10121 			goto setled;
   10122 		}
   10123 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10124 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10125 		DPRINTF(WM_DEBUG_LINK,
   10126 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10127 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10128 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10129 			mii->mii_media_active |= IFM_FLOW
   10130 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10131 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10132 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10133 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10134 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10135 			mii->mii_media_active |= IFM_FLOW
   10136 			    | IFM_ETH_TXPAUSE;
   10137 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10138 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10139 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10140 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10141 			mii->mii_media_active |= IFM_FLOW
   10142 			    | IFM_ETH_RXPAUSE;
   10143 		}
   10144 	}
   10145 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10146 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10147 setled:
   10148 	wm_tbi_serdes_set_linkled(sc);
   10149 }
   10150 
   10151 /*
   10152  * wm_serdes_tick:
   10153  *
   10154  *	Check the link on serdes devices.
   10155  */
   10156 static void
   10157 wm_serdes_tick(struct wm_softc *sc)
   10158 {
   10159 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10160 	struct mii_data *mii = &sc->sc_mii;
   10161 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10162 	uint32_t reg;
   10163 
   10164 	KASSERT(WM_CORE_LOCKED(sc));
   10165 
   10166 	mii->mii_media_status = IFM_AVALID;
   10167 	mii->mii_media_active = IFM_ETHER;
   10168 
   10169 	/* Check PCS */
   10170 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10171 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10172 		mii->mii_media_status |= IFM_ACTIVE;
   10173 		sc->sc_tbi_linkup = 1;
   10174 		sc->sc_tbi_serdes_ticks = 0;
   10175 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10176 		if ((reg & PCS_LSTS_FDX) != 0)
   10177 			mii->mii_media_active |= IFM_FDX;
   10178 		else
   10179 			mii->mii_media_active |= IFM_HDX;
   10180 	} else {
   10181 		mii->mii_media_status |= IFM_NONE;
   10182 		sc->sc_tbi_linkup = 0;
   10183 		/* If the timer expired, retry autonegotiation */
   10184 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10185 		    && (++sc->sc_tbi_serdes_ticks
   10186 			>= sc->sc_tbi_serdes_anegticks)) {
   10187 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10188 			sc->sc_tbi_serdes_ticks = 0;
   10189 			/* XXX */
   10190 			wm_serdes_mediachange(ifp);
   10191 		}
   10192 	}
   10193 
   10194 	wm_tbi_serdes_set_linkled(sc);
   10195 }
   10196 
   10197 /* SFP related */
   10198 
   10199 static int
   10200 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10201 {
   10202 	uint32_t i2ccmd;
   10203 	int i;
   10204 
   10205 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10206 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10207 
   10208 	/* Poll the ready bit */
   10209 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10210 		delay(50);
   10211 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10212 		if (i2ccmd & I2CCMD_READY)
   10213 			break;
   10214 	}
   10215 	if ((i2ccmd & I2CCMD_READY) == 0)
   10216 		return -1;
   10217 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10218 		return -1;
   10219 
   10220 	*data = i2ccmd & 0x00ff;
   10221 
   10222 	return 0;
   10223 }
   10224 
   10225 static uint32_t
   10226 wm_sfp_get_media_type(struct wm_softc *sc)
   10227 {
   10228 	uint32_t ctrl_ext;
   10229 	uint8_t val = 0;
   10230 	int timeout = 3;
   10231 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   10232 	int rv = -1;
   10233 
   10234 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10235 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   10236 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   10237 	CSR_WRITE_FLUSH(sc);
   10238 
   10239 	/* Read SFP module data */
   10240 	while (timeout) {
   10241 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   10242 		if (rv == 0)
   10243 			break;
   10244 		delay(100*1000); /* XXX too big */
   10245 		timeout--;
   10246 	}
   10247 	if (rv != 0)
   10248 		goto out;
   10249 	switch (val) {
   10250 	case SFF_SFP_ID_SFF:
   10251 		aprint_normal_dev(sc->sc_dev,
   10252 		    "Module/Connector soldered to board\n");
   10253 		break;
   10254 	case SFF_SFP_ID_SFP:
   10255 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   10256 		break;
   10257 	case SFF_SFP_ID_UNKNOWN:
   10258 		goto out;
   10259 	default:
   10260 		break;
   10261 	}
   10262 
   10263 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   10264 	if (rv != 0) {
   10265 		goto out;
   10266 	}
   10267 
   10268 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   10269 		mediatype = WM_MEDIATYPE_SERDES;
   10270 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   10271 		sc->sc_flags |= WM_F_SGMII;
   10272 		mediatype = WM_MEDIATYPE_COPPER;
   10273 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   10274 		sc->sc_flags |= WM_F_SGMII;
   10275 		mediatype = WM_MEDIATYPE_SERDES;
   10276 	}
   10277 
   10278 out:
   10279 	/* Restore I2C interface setting */
   10280 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   10281 
   10282 	return mediatype;
   10283 }
   10284 
   10285 /*
   10286  * NVM related.
   10287  * Microwire, SPI (w/wo EERD) and Flash.
   10288  */
   10289 
   10290 /* Both spi and uwire */
   10291 
   10292 /*
   10293  * wm_eeprom_sendbits:
   10294  *
   10295  *	Send a series of bits to the EEPROM.
   10296  */
   10297 static void
   10298 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10299 {
   10300 	uint32_t reg;
   10301 	int x;
   10302 
   10303 	reg = CSR_READ(sc, WMREG_EECD);
   10304 
   10305 	for (x = nbits; x > 0; x--) {
   10306 		if (bits & (1U << (x - 1)))
   10307 			reg |= EECD_DI;
   10308 		else
   10309 			reg &= ~EECD_DI;
   10310 		CSR_WRITE(sc, WMREG_EECD, reg);
   10311 		CSR_WRITE_FLUSH(sc);
   10312 		delay(2);
   10313 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10314 		CSR_WRITE_FLUSH(sc);
   10315 		delay(2);
   10316 		CSR_WRITE(sc, WMREG_EECD, reg);
   10317 		CSR_WRITE_FLUSH(sc);
   10318 		delay(2);
   10319 	}
   10320 }
   10321 
   10322 /*
   10323  * wm_eeprom_recvbits:
   10324  *
   10325  *	Receive a series of bits from the EEPROM.
   10326  */
   10327 static void
   10328 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10329 {
   10330 	uint32_t reg, val;
   10331 	int x;
   10332 
   10333 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10334 
   10335 	val = 0;
   10336 	for (x = nbits; x > 0; x--) {
   10337 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10338 		CSR_WRITE_FLUSH(sc);
   10339 		delay(2);
   10340 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10341 			val |= (1U << (x - 1));
   10342 		CSR_WRITE(sc, WMREG_EECD, reg);
   10343 		CSR_WRITE_FLUSH(sc);
   10344 		delay(2);
   10345 	}
   10346 	*valp = val;
   10347 }
   10348 
   10349 /* Microwire */
   10350 
   10351 /*
   10352  * wm_nvm_read_uwire:
   10353  *
   10354  *	Read a word from the EEPROM using the MicroWire protocol.
   10355  */
   10356 static int
   10357 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10358 {
   10359 	uint32_t reg, val;
   10360 	int i;
   10361 
   10362 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10363 		device_xname(sc->sc_dev), __func__));
   10364 
   10365 	for (i = 0; i < wordcnt; i++) {
   10366 		/* Clear SK and DI. */
   10367 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10368 		CSR_WRITE(sc, WMREG_EECD, reg);
   10369 
   10370 		/*
   10371 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10372 		 * and Xen.
   10373 		 *
   10374 		 * We use this workaround only for 82540 because qemu's
   10375 		 * e1000 act as 82540.
   10376 		 */
   10377 		if (sc->sc_type == WM_T_82540) {
   10378 			reg |= EECD_SK;
   10379 			CSR_WRITE(sc, WMREG_EECD, reg);
   10380 			reg &= ~EECD_SK;
   10381 			CSR_WRITE(sc, WMREG_EECD, reg);
   10382 			CSR_WRITE_FLUSH(sc);
   10383 			delay(2);
   10384 		}
   10385 		/* XXX: end of workaround */
   10386 
   10387 		/* Set CHIP SELECT. */
   10388 		reg |= EECD_CS;
   10389 		CSR_WRITE(sc, WMREG_EECD, reg);
   10390 		CSR_WRITE_FLUSH(sc);
   10391 		delay(2);
   10392 
   10393 		/* Shift in the READ command. */
   10394 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10395 
   10396 		/* Shift in address. */
   10397 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10398 
   10399 		/* Shift out the data. */
   10400 		wm_eeprom_recvbits(sc, &val, 16);
   10401 		data[i] = val & 0xffff;
   10402 
   10403 		/* Clear CHIP SELECT. */
   10404 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10405 		CSR_WRITE(sc, WMREG_EECD, reg);
   10406 		CSR_WRITE_FLUSH(sc);
   10407 		delay(2);
   10408 	}
   10409 
   10410 	return 0;
   10411 }
   10412 
   10413 /* SPI */
   10414 
   10415 /*
   10416  * Set SPI and FLASH related information from the EECD register.
   10417  * For 82541 and 82547, the word size is taken from EEPROM.
   10418  */
   10419 static int
   10420 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10421 {
   10422 	int size;
   10423 	uint32_t reg;
   10424 	uint16_t data;
   10425 
   10426 	reg = CSR_READ(sc, WMREG_EECD);
   10427 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10428 
   10429 	/* Read the size of NVM from EECD by default */
   10430 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10431 	switch (sc->sc_type) {
   10432 	case WM_T_82541:
   10433 	case WM_T_82541_2:
   10434 	case WM_T_82547:
   10435 	case WM_T_82547_2:
   10436 		/* Set dummy value to access EEPROM */
   10437 		sc->sc_nvm_wordsize = 64;
   10438 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10439 		reg = data;
   10440 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10441 		if (size == 0)
   10442 			size = 6; /* 64 word size */
   10443 		else
   10444 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10445 		break;
   10446 	case WM_T_80003:
   10447 	case WM_T_82571:
   10448 	case WM_T_82572:
   10449 	case WM_T_82573: /* SPI case */
   10450 	case WM_T_82574: /* SPI case */
   10451 	case WM_T_82583: /* SPI case */
   10452 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10453 		if (size > 14)
   10454 			size = 14;
   10455 		break;
   10456 	case WM_T_82575:
   10457 	case WM_T_82576:
   10458 	case WM_T_82580:
   10459 	case WM_T_I350:
   10460 	case WM_T_I354:
   10461 	case WM_T_I210:
   10462 	case WM_T_I211:
   10463 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10464 		if (size > 15)
   10465 			size = 15;
   10466 		break;
   10467 	default:
   10468 		aprint_error_dev(sc->sc_dev,
   10469 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10470 		return -1;
   10471 		break;
   10472 	}
   10473 
   10474 	sc->sc_nvm_wordsize = 1 << size;
   10475 
   10476 	return 0;
   10477 }
   10478 
   10479 /*
   10480  * wm_nvm_ready_spi:
   10481  *
   10482  *	Wait for a SPI EEPROM to be ready for commands.
   10483  */
   10484 static int
   10485 wm_nvm_ready_spi(struct wm_softc *sc)
   10486 {
   10487 	uint32_t val;
   10488 	int usec;
   10489 
   10490 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10491 		device_xname(sc->sc_dev), __func__));
   10492 
   10493 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10494 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10495 		wm_eeprom_recvbits(sc, &val, 8);
   10496 		if ((val & SPI_SR_RDY) == 0)
   10497 			break;
   10498 	}
   10499 	if (usec >= SPI_MAX_RETRIES) {
   10500 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   10501 		return 1;
   10502 	}
   10503 	return 0;
   10504 }
   10505 
   10506 /*
   10507  * wm_nvm_read_spi:
   10508  *
   10509  *	Read a work from the EEPROM using the SPI protocol.
   10510  */
   10511 static int
   10512 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10513 {
   10514 	uint32_t reg, val;
   10515 	int i;
   10516 	uint8_t opc;
   10517 
   10518 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10519 		device_xname(sc->sc_dev), __func__));
   10520 
   10521 	/* Clear SK and CS. */
   10522 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10523 	CSR_WRITE(sc, WMREG_EECD, reg);
   10524 	CSR_WRITE_FLUSH(sc);
   10525 	delay(2);
   10526 
   10527 	if (wm_nvm_ready_spi(sc))
   10528 		return 1;
   10529 
   10530 	/* Toggle CS to flush commands. */
   10531 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10532 	CSR_WRITE_FLUSH(sc);
   10533 	delay(2);
   10534 	CSR_WRITE(sc, WMREG_EECD, reg);
   10535 	CSR_WRITE_FLUSH(sc);
   10536 	delay(2);
   10537 
   10538 	opc = SPI_OPC_READ;
   10539 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10540 		opc |= SPI_OPC_A8;
   10541 
   10542 	wm_eeprom_sendbits(sc, opc, 8);
   10543 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10544 
   10545 	for (i = 0; i < wordcnt; i++) {
   10546 		wm_eeprom_recvbits(sc, &val, 16);
   10547 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10548 	}
   10549 
   10550 	/* Raise CS and clear SK. */
   10551 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10552 	CSR_WRITE(sc, WMREG_EECD, reg);
   10553 	CSR_WRITE_FLUSH(sc);
   10554 	delay(2);
   10555 
   10556 	return 0;
   10557 }
   10558 
   10559 /* Using with EERD */
   10560 
   10561 static int
   10562 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10563 {
   10564 	uint32_t attempts = 100000;
   10565 	uint32_t i, reg = 0;
   10566 	int32_t done = -1;
   10567 
   10568 	for (i = 0; i < attempts; i++) {
   10569 		reg = CSR_READ(sc, rw);
   10570 
   10571 		if (reg & EERD_DONE) {
   10572 			done = 0;
   10573 			break;
   10574 		}
   10575 		delay(5);
   10576 	}
   10577 
   10578 	return done;
   10579 }
   10580 
   10581 static int
   10582 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10583     uint16_t *data)
   10584 {
   10585 	int i, eerd = 0;
   10586 	int error = 0;
   10587 
   10588 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10589 		device_xname(sc->sc_dev), __func__));
   10590 
   10591 	for (i = 0; i < wordcnt; i++) {
   10592 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10593 
   10594 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10595 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10596 		if (error != 0)
   10597 			break;
   10598 
   10599 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10600 	}
   10601 
   10602 	return error;
   10603 }
   10604 
   10605 /* Flash */
   10606 
   10607 static int
   10608 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10609 {
   10610 	uint32_t eecd;
   10611 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10612 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10613 	uint8_t sig_byte = 0;
   10614 
   10615 	switch (sc->sc_type) {
   10616 	case WM_T_PCH_SPT:
   10617 		/*
   10618 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10619 		 * sector valid bits from the NVM.
   10620 		 */
   10621 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   10622 		if ((*bank == 0) || (*bank == 1)) {
   10623 			aprint_error_dev(sc->sc_dev,
   10624 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10625 				*bank);
   10626 			return -1;
   10627 		} else {
   10628 			*bank = *bank - 2;
   10629 			return 0;
   10630 		}
   10631 	case WM_T_ICH8:
   10632 	case WM_T_ICH9:
   10633 		eecd = CSR_READ(sc, WMREG_EECD);
   10634 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10635 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10636 			return 0;
   10637 		}
   10638 		/* FALLTHROUGH */
   10639 	default:
   10640 		/* Default to 0 */
   10641 		*bank = 0;
   10642 
   10643 		/* Check bank 0 */
   10644 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10645 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10646 			*bank = 0;
   10647 			return 0;
   10648 		}
   10649 
   10650 		/* Check bank 1 */
   10651 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10652 		    &sig_byte);
   10653 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10654 			*bank = 1;
   10655 			return 0;
   10656 		}
   10657 	}
   10658 
   10659 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10660 		device_xname(sc->sc_dev)));
   10661 	return -1;
   10662 }
   10663 
   10664 /******************************************************************************
   10665  * This function does initial flash setup so that a new read/write/erase cycle
   10666  * can be started.
   10667  *
   10668  * sc - The pointer to the hw structure
   10669  ****************************************************************************/
   10670 static int32_t
   10671 wm_ich8_cycle_init(struct wm_softc *sc)
   10672 {
   10673 	uint16_t hsfsts;
   10674 	int32_t error = 1;
   10675 	int32_t i     = 0;
   10676 
   10677 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10678 
   10679 	/* May be check the Flash Des Valid bit in Hw status */
   10680 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10681 		return error;
   10682 	}
   10683 
   10684 	/* Clear FCERR in Hw status by writing 1 */
   10685 	/* Clear DAEL in Hw status by writing a 1 */
   10686 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10687 
   10688 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10689 
   10690 	/*
   10691 	 * Either we should have a hardware SPI cycle in progress bit to check
   10692 	 * against, in order to start a new cycle or FDONE bit should be
   10693 	 * changed in the hardware so that it is 1 after harware reset, which
   10694 	 * can then be used as an indication whether a cycle is in progress or
   10695 	 * has been completed .. we should also have some software semaphore
   10696 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   10697 	 * threads access to those bits can be sequentiallized or a way so that
   10698 	 * 2 threads dont start the cycle at the same time
   10699 	 */
   10700 
   10701 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10702 		/*
   10703 		 * There is no cycle running at present, so we can start a
   10704 		 * cycle
   10705 		 */
   10706 
   10707 		/* Begin by setting Flash Cycle Done. */
   10708 		hsfsts |= HSFSTS_DONE;
   10709 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10710 		error = 0;
   10711 	} else {
   10712 		/*
   10713 		 * otherwise poll for sometime so the current cycle has a
   10714 		 * chance to end before giving up.
   10715 		 */
   10716 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10717 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10718 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10719 				error = 0;
   10720 				break;
   10721 			}
   10722 			delay(1);
   10723 		}
   10724 		if (error == 0) {
   10725 			/*
   10726 			 * Successful in waiting for previous cycle to timeout,
   10727 			 * now set the Flash Cycle Done.
   10728 			 */
   10729 			hsfsts |= HSFSTS_DONE;
   10730 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10731 		}
   10732 	}
   10733 	return error;
   10734 }
   10735 
   10736 /******************************************************************************
   10737  * This function starts a flash cycle and waits for its completion
   10738  *
   10739  * sc - The pointer to the hw structure
   10740  ****************************************************************************/
   10741 static int32_t
   10742 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10743 {
   10744 	uint16_t hsflctl;
   10745 	uint16_t hsfsts;
   10746 	int32_t error = 1;
   10747 	uint32_t i = 0;
   10748 
   10749 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10750 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10751 	hsflctl |= HSFCTL_GO;
   10752 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10753 
   10754 	/* Wait till FDONE bit is set to 1 */
   10755 	do {
   10756 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10757 		if (hsfsts & HSFSTS_DONE)
   10758 			break;
   10759 		delay(1);
   10760 		i++;
   10761 	} while (i < timeout);
   10762 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   10763 		error = 0;
   10764 
   10765 	return error;
   10766 }
   10767 
   10768 /******************************************************************************
   10769  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10770  *
   10771  * sc - The pointer to the hw structure
   10772  * index - The index of the byte or word to read.
   10773  * size - Size of data to read, 1=byte 2=word, 4=dword
   10774  * data - Pointer to the word to store the value read.
   10775  *****************************************************************************/
   10776 static int32_t
   10777 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10778     uint32_t size, uint32_t *data)
   10779 {
   10780 	uint16_t hsfsts;
   10781 	uint16_t hsflctl;
   10782 	uint32_t flash_linear_address;
   10783 	uint32_t flash_data = 0;
   10784 	int32_t error = 1;
   10785 	int32_t count = 0;
   10786 
   10787 	if (size < 1  || size > 4 || data == 0x0 ||
   10788 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10789 		return error;
   10790 
   10791 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10792 	    sc->sc_ich8_flash_base;
   10793 
   10794 	do {
   10795 		delay(1);
   10796 		/* Steps */
   10797 		error = wm_ich8_cycle_init(sc);
   10798 		if (error)
   10799 			break;
   10800 
   10801 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10802 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10803 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10804 		    & HSFCTL_BCOUNT_MASK;
   10805 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10806 		if (sc->sc_type == WM_T_PCH_SPT) {
   10807 			/*
   10808 			 * In SPT, This register is in Lan memory space, not
   10809 			 * flash. Therefore, only 32 bit access is supported.
   10810 			 */
   10811 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10812 			    (uint32_t)hsflctl);
   10813 		} else
   10814 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10815 
   10816 		/*
   10817 		 * Write the last 24 bits of index into Flash Linear address
   10818 		 * field in Flash Address
   10819 		 */
   10820 		/* TODO: TBD maybe check the index against the size of flash */
   10821 
   10822 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10823 
   10824 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10825 
   10826 		/*
   10827 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   10828 		 * the whole sequence a few more times, else read in (shift in)
   10829 		 * the Flash Data0, the order is least significant byte first
   10830 		 * msb to lsb
   10831 		 */
   10832 		if (error == 0) {
   10833 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10834 			if (size == 1)
   10835 				*data = (uint8_t)(flash_data & 0x000000FF);
   10836 			else if (size == 2)
   10837 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10838 			else if (size == 4)
   10839 				*data = (uint32_t)flash_data;
   10840 			break;
   10841 		} else {
   10842 			/*
   10843 			 * If we've gotten here, then things are probably
   10844 			 * completely hosed, but if the error condition is
   10845 			 * detected, it won't hurt to give it another try...
   10846 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10847 			 */
   10848 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10849 			if (hsfsts & HSFSTS_ERR) {
   10850 				/* Repeat for some time before giving up. */
   10851 				continue;
   10852 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10853 				break;
   10854 		}
   10855 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10856 
   10857 	return error;
   10858 }
   10859 
   10860 /******************************************************************************
   10861  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10862  *
   10863  * sc - pointer to wm_hw structure
   10864  * index - The index of the byte to read.
   10865  * data - Pointer to a byte to store the value read.
   10866  *****************************************************************************/
   10867 static int32_t
   10868 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10869 {
   10870 	int32_t status;
   10871 	uint32_t word = 0;
   10872 
   10873 	status = wm_read_ich8_data(sc, index, 1, &word);
   10874 	if (status == 0)
   10875 		*data = (uint8_t)word;
   10876 	else
   10877 		*data = 0;
   10878 
   10879 	return status;
   10880 }
   10881 
   10882 /******************************************************************************
   10883  * Reads a word from the NVM using the ICH8 flash access registers.
   10884  *
   10885  * sc - pointer to wm_hw structure
   10886  * index - The starting byte index of the word to read.
   10887  * data - Pointer to a word to store the value read.
   10888  *****************************************************************************/
   10889 static int32_t
   10890 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10891 {
   10892 	int32_t status;
   10893 	uint32_t word = 0;
   10894 
   10895 	status = wm_read_ich8_data(sc, index, 2, &word);
   10896 	if (status == 0)
   10897 		*data = (uint16_t)word;
   10898 	else
   10899 		*data = 0;
   10900 
   10901 	return status;
   10902 }
   10903 
   10904 /******************************************************************************
   10905  * Reads a dword from the NVM using the ICH8 flash access registers.
   10906  *
   10907  * sc - pointer to wm_hw structure
   10908  * index - The starting byte index of the word to read.
   10909  * data - Pointer to a word to store the value read.
   10910  *****************************************************************************/
   10911 static int32_t
   10912 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10913 {
   10914 	int32_t status;
   10915 
   10916 	status = wm_read_ich8_data(sc, index, 4, data);
   10917 	return status;
   10918 }
   10919 
   10920 /******************************************************************************
   10921  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10922  * register.
   10923  *
   10924  * sc - Struct containing variables accessed by shared code
   10925  * offset - offset of word in the EEPROM to read
   10926  * data - word read from the EEPROM
   10927  * words - number of words to read
   10928  *****************************************************************************/
   10929 static int
   10930 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10931 {
   10932 	int32_t  error = 0;
   10933 	uint32_t flash_bank = 0;
   10934 	uint32_t act_offset = 0;
   10935 	uint32_t bank_offset = 0;
   10936 	uint16_t word = 0;
   10937 	uint16_t i = 0;
   10938 
   10939 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10940 		device_xname(sc->sc_dev), __func__));
   10941 
   10942 	/*
   10943 	 * We need to know which is the valid flash bank.  In the event
   10944 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10945 	 * managing flash_bank.  So it cannot be trusted and needs
   10946 	 * to be updated with each read.
   10947 	 */
   10948 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10949 	if (error) {
   10950 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10951 			device_xname(sc->sc_dev)));
   10952 		flash_bank = 0;
   10953 	}
   10954 
   10955 	/*
   10956 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10957 	 * size
   10958 	 */
   10959 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10960 
   10961 	error = wm_get_swfwhw_semaphore(sc);
   10962 	if (error) {
   10963 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10964 		    __func__);
   10965 		return error;
   10966 	}
   10967 
   10968 	for (i = 0; i < words; i++) {
   10969 		/* The NVM part needs a byte offset, hence * 2 */
   10970 		act_offset = bank_offset + ((offset + i) * 2);
   10971 		error = wm_read_ich8_word(sc, act_offset, &word);
   10972 		if (error) {
   10973 			aprint_error_dev(sc->sc_dev,
   10974 			    "%s: failed to read NVM\n", __func__);
   10975 			break;
   10976 		}
   10977 		data[i] = word;
   10978 	}
   10979 
   10980 	wm_put_swfwhw_semaphore(sc);
   10981 	return error;
   10982 }
   10983 
   10984 /******************************************************************************
   10985  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10986  * register.
   10987  *
   10988  * sc - Struct containing variables accessed by shared code
   10989  * offset - offset of word in the EEPROM to read
   10990  * data - word read from the EEPROM
   10991  * words - number of words to read
   10992  *****************************************************************************/
   10993 static int
   10994 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10995 {
   10996 	int32_t  error = 0;
   10997 	uint32_t flash_bank = 0;
   10998 	uint32_t act_offset = 0;
   10999 	uint32_t bank_offset = 0;
   11000 	uint32_t dword = 0;
   11001 	uint16_t i = 0;
   11002 
   11003 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11004 		device_xname(sc->sc_dev), __func__));
   11005 
   11006 	/*
   11007 	 * We need to know which is the valid flash bank.  In the event
   11008 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11009 	 * managing flash_bank.  So it cannot be trusted and needs
   11010 	 * to be updated with each read.
   11011 	 */
   11012 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11013 	if (error) {
   11014 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11015 			device_xname(sc->sc_dev)));
   11016 		flash_bank = 0;
   11017 	}
   11018 
   11019 	/*
   11020 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11021 	 * size
   11022 	 */
   11023 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11024 
   11025 	error = wm_get_swfwhw_semaphore(sc);
   11026 	if (error) {
   11027 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11028 		    __func__);
   11029 		return error;
   11030 	}
   11031 
   11032 	for (i = 0; i < words; i++) {
   11033 		/* The NVM part needs a byte offset, hence * 2 */
   11034 		act_offset = bank_offset + ((offset + i) * 2);
   11035 		/* but we must read dword aligned, so mask ... */
   11036 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11037 		if (error) {
   11038 			aprint_error_dev(sc->sc_dev,
   11039 			    "%s: failed to read NVM\n", __func__);
   11040 			break;
   11041 		}
   11042 		/* ... and pick out low or high word */
   11043 		if ((act_offset & 0x2) == 0)
   11044 			data[i] = (uint16_t)(dword & 0xFFFF);
   11045 		else
   11046 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11047 	}
   11048 
   11049 	wm_put_swfwhw_semaphore(sc);
   11050 	return error;
   11051 }
   11052 
   11053 /* iNVM */
   11054 
   11055 static int
   11056 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11057 {
   11058 	int32_t  rv = 0;
   11059 	uint32_t invm_dword;
   11060 	uint16_t i;
   11061 	uint8_t record_type, word_address;
   11062 
   11063 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11064 		device_xname(sc->sc_dev), __func__));
   11065 
   11066 	for (i = 0; i < INVM_SIZE; i++) {
   11067 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11068 		/* Get record type */
   11069 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11070 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11071 			break;
   11072 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11073 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11074 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11075 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11076 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11077 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11078 			if (word_address == address) {
   11079 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11080 				rv = 0;
   11081 				break;
   11082 			}
   11083 		}
   11084 	}
   11085 
   11086 	return rv;
   11087 }
   11088 
   11089 static int
   11090 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11091 {
   11092 	int rv = 0;
   11093 	int i;
   11094 
   11095 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11096 		device_xname(sc->sc_dev), __func__));
   11097 
   11098 	for (i = 0; i < words; i++) {
   11099 		switch (offset + i) {
   11100 		case NVM_OFF_MACADDR:
   11101 		case NVM_OFF_MACADDR1:
   11102 		case NVM_OFF_MACADDR2:
   11103 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11104 			if (rv != 0) {
   11105 				data[i] = 0xffff;
   11106 				rv = -1;
   11107 			}
   11108 			break;
   11109 		case NVM_OFF_CFG2:
   11110 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11111 			if (rv != 0) {
   11112 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11113 				rv = 0;
   11114 			}
   11115 			break;
   11116 		case NVM_OFF_CFG4:
   11117 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11118 			if (rv != 0) {
   11119 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11120 				rv = 0;
   11121 			}
   11122 			break;
   11123 		case NVM_OFF_LED_1_CFG:
   11124 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11125 			if (rv != 0) {
   11126 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11127 				rv = 0;
   11128 			}
   11129 			break;
   11130 		case NVM_OFF_LED_0_2_CFG:
   11131 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11132 			if (rv != 0) {
   11133 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11134 				rv = 0;
   11135 			}
   11136 			break;
   11137 		case NVM_OFF_ID_LED_SETTINGS:
   11138 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11139 			if (rv != 0) {
   11140 				*data = ID_LED_RESERVED_FFFF;
   11141 				rv = 0;
   11142 			}
   11143 			break;
   11144 		default:
   11145 			DPRINTF(WM_DEBUG_NVM,
   11146 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11147 			*data = NVM_RESERVED_WORD;
   11148 			break;
   11149 		}
   11150 	}
   11151 
   11152 	return rv;
   11153 }
   11154 
   11155 /* Lock, detecting NVM type, validate checksum, version and read */
   11156 
   11157 /*
   11158  * wm_nvm_acquire:
   11159  *
   11160  *	Perform the EEPROM handshake required on some chips.
   11161  */
   11162 static int
   11163 wm_nvm_acquire(struct wm_softc *sc)
   11164 {
   11165 	uint32_t reg;
   11166 	int x;
   11167 	int ret = 0;
   11168 
   11169 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11170 		device_xname(sc->sc_dev), __func__));
   11171 
   11172 	if (sc->sc_type >= WM_T_ICH8) {
   11173 		ret = wm_get_nvm_ich8lan(sc);
   11174 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11175 		ret = wm_get_swfwhw_semaphore(sc);
   11176 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11177 		/* This will also do wm_get_swsm_semaphore() if needed */
   11178 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11179 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11180 		ret = wm_get_swsm_semaphore(sc);
   11181 	}
   11182 
   11183 	if (ret) {
   11184 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11185 			__func__);
   11186 		return 1;
   11187 	}
   11188 
   11189 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11190 		reg = CSR_READ(sc, WMREG_EECD);
   11191 
   11192 		/* Request EEPROM access. */
   11193 		reg |= EECD_EE_REQ;
   11194 		CSR_WRITE(sc, WMREG_EECD, reg);
   11195 
   11196 		/* ..and wait for it to be granted. */
   11197 		for (x = 0; x < 1000; x++) {
   11198 			reg = CSR_READ(sc, WMREG_EECD);
   11199 			if (reg & EECD_EE_GNT)
   11200 				break;
   11201 			delay(5);
   11202 		}
   11203 		if ((reg & EECD_EE_GNT) == 0) {
   11204 			aprint_error_dev(sc->sc_dev,
   11205 			    "could not acquire EEPROM GNT\n");
   11206 			reg &= ~EECD_EE_REQ;
   11207 			CSR_WRITE(sc, WMREG_EECD, reg);
   11208 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11209 				wm_put_swfwhw_semaphore(sc);
   11210 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11211 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11212 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11213 				wm_put_swsm_semaphore(sc);
   11214 			return 1;
   11215 		}
   11216 	}
   11217 
   11218 	return 0;
   11219 }
   11220 
   11221 /*
   11222  * wm_nvm_release:
   11223  *
   11224  *	Release the EEPROM mutex.
   11225  */
   11226 static void
   11227 wm_nvm_release(struct wm_softc *sc)
   11228 {
   11229 	uint32_t reg;
   11230 
   11231 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11232 		device_xname(sc->sc_dev), __func__));
   11233 
   11234 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11235 		reg = CSR_READ(sc, WMREG_EECD);
   11236 		reg &= ~EECD_EE_REQ;
   11237 		CSR_WRITE(sc, WMREG_EECD, reg);
   11238 	}
   11239 
   11240 	if (sc->sc_type >= WM_T_ICH8) {
   11241 		wm_put_nvm_ich8lan(sc);
   11242 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11243 		wm_put_swfwhw_semaphore(sc);
   11244 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   11245 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11246 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11247 		wm_put_swsm_semaphore(sc);
   11248 }
   11249 
   11250 static int
   11251 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   11252 {
   11253 	uint32_t eecd = 0;
   11254 
   11255 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   11256 	    || sc->sc_type == WM_T_82583) {
   11257 		eecd = CSR_READ(sc, WMREG_EECD);
   11258 
   11259 		/* Isolate bits 15 & 16 */
   11260 		eecd = ((eecd >> 15) & 0x03);
   11261 
   11262 		/* If both bits are set, device is Flash type */
   11263 		if (eecd == 0x03)
   11264 			return 0;
   11265 	}
   11266 	return 1;
   11267 }
   11268 
   11269 static int
   11270 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   11271 {
   11272 	uint32_t eec;
   11273 
   11274 	eec = CSR_READ(sc, WMREG_EEC);
   11275 	if ((eec & EEC_FLASH_DETECTED) != 0)
   11276 		return 1;
   11277 
   11278 	return 0;
   11279 }
   11280 
   11281 /*
   11282  * wm_nvm_validate_checksum
   11283  *
   11284  * The checksum is defined as the sum of the first 64 (16 bit) words.
   11285  */
   11286 static int
   11287 wm_nvm_validate_checksum(struct wm_softc *sc)
   11288 {
   11289 	uint16_t checksum;
   11290 	uint16_t eeprom_data;
   11291 #ifdef WM_DEBUG
   11292 	uint16_t csum_wordaddr, valid_checksum;
   11293 #endif
   11294 	int i;
   11295 
   11296 	checksum = 0;
   11297 
   11298 	/* Don't check for I211 */
   11299 	if (sc->sc_type == WM_T_I211)
   11300 		return 0;
   11301 
   11302 #ifdef WM_DEBUG
   11303 	if (sc->sc_type == WM_T_PCH_LPT) {
   11304 		csum_wordaddr = NVM_OFF_COMPAT;
   11305 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11306 	} else {
   11307 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11308 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11309 	}
   11310 
   11311 	/* Dump EEPROM image for debug */
   11312 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11313 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11314 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11315 		/* XXX PCH_SPT? */
   11316 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11317 		if ((eeprom_data & valid_checksum) == 0) {
   11318 			DPRINTF(WM_DEBUG_NVM,
   11319 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11320 				device_xname(sc->sc_dev), eeprom_data,
   11321 				    valid_checksum));
   11322 		}
   11323 	}
   11324 
   11325 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11326 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11327 		for (i = 0; i < NVM_SIZE; i++) {
   11328 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11329 				printf("XXXX ");
   11330 			else
   11331 				printf("%04hx ", eeprom_data);
   11332 			if (i % 8 == 7)
   11333 				printf("\n");
   11334 		}
   11335 	}
   11336 
   11337 #endif /* WM_DEBUG */
   11338 
   11339 	for (i = 0; i < NVM_SIZE; i++) {
   11340 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11341 			return 1;
   11342 		checksum += eeprom_data;
   11343 	}
   11344 
   11345 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11346 #ifdef WM_DEBUG
   11347 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11348 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11349 #endif
   11350 	}
   11351 
   11352 	return 0;
   11353 }
   11354 
   11355 static void
   11356 wm_nvm_version_invm(struct wm_softc *sc)
   11357 {
   11358 	uint32_t dword;
   11359 
   11360 	/*
   11361 	 * Linux's code to decode version is very strange, so we don't
   11362 	 * obey that algorithm and just use word 61 as the document.
   11363 	 * Perhaps it's not perfect though...
   11364 	 *
   11365 	 * Example:
   11366 	 *
   11367 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11368 	 */
   11369 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11370 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11371 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11372 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11373 }
   11374 
   11375 static void
   11376 wm_nvm_version(struct wm_softc *sc)
   11377 {
   11378 	uint16_t major, minor, build, patch;
   11379 	uint16_t uid0, uid1;
   11380 	uint16_t nvm_data;
   11381 	uint16_t off;
   11382 	bool check_version = false;
   11383 	bool check_optionrom = false;
   11384 	bool have_build = false;
   11385 
   11386 	/*
   11387 	 * Version format:
   11388 	 *
   11389 	 * XYYZ
   11390 	 * X0YZ
   11391 	 * X0YY
   11392 	 *
   11393 	 * Example:
   11394 	 *
   11395 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11396 	 *	82571	0x50a6	5.10.6?
   11397 	 *	82572	0x506a	5.6.10?
   11398 	 *	82572EI	0x5069	5.6.9?
   11399 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11400 	 *		0x2013	2.1.3?
   11401 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
   11402 	 */
   11403 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11404 	switch (sc->sc_type) {
   11405 	case WM_T_82571:
   11406 	case WM_T_82572:
   11407 	case WM_T_82574:
   11408 	case WM_T_82583:
   11409 		check_version = true;
   11410 		check_optionrom = true;
   11411 		have_build = true;
   11412 		break;
   11413 	case WM_T_82575:
   11414 	case WM_T_82576:
   11415 	case WM_T_82580:
   11416 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11417 			check_version = true;
   11418 		break;
   11419 	case WM_T_I211:
   11420 		wm_nvm_version_invm(sc);
   11421 		goto printver;
   11422 	case WM_T_I210:
   11423 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11424 			wm_nvm_version_invm(sc);
   11425 			goto printver;
   11426 		}
   11427 		/* FALLTHROUGH */
   11428 	case WM_T_I350:
   11429 	case WM_T_I354:
   11430 		check_version = true;
   11431 		check_optionrom = true;
   11432 		break;
   11433 	default:
   11434 		return;
   11435 	}
   11436 	if (check_version) {
   11437 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11438 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11439 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11440 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11441 			build = nvm_data & NVM_BUILD_MASK;
   11442 			have_build = true;
   11443 		} else
   11444 			minor = nvm_data & 0x00ff;
   11445 
   11446 		/* Decimal */
   11447 		minor = (minor / 16) * 10 + (minor % 16);
   11448 		sc->sc_nvm_ver_major = major;
   11449 		sc->sc_nvm_ver_minor = minor;
   11450 
   11451 printver:
   11452 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11453 		    sc->sc_nvm_ver_minor);
   11454 		if (have_build) {
   11455 			sc->sc_nvm_ver_build = build;
   11456 			aprint_verbose(".%d", build);
   11457 		}
   11458 	}
   11459 	if (check_optionrom) {
   11460 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11461 		/* Option ROM Version */
   11462 		if ((off != 0x0000) && (off != 0xffff)) {
   11463 			off += NVM_COMBO_VER_OFF;
   11464 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11465 			wm_nvm_read(sc, off, 1, &uid0);
   11466 			if ((uid0 != 0) && (uid0 != 0xffff)
   11467 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11468 				/* 16bits */
   11469 				major = uid0 >> 8;
   11470 				build = (uid0 << 8) | (uid1 >> 8);
   11471 				patch = uid1 & 0x00ff;
   11472 				aprint_verbose(", option ROM Version %d.%d.%d",
   11473 				    major, build, patch);
   11474 			}
   11475 		}
   11476 	}
   11477 
   11478 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11479 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11480 }
   11481 
   11482 /*
   11483  * wm_nvm_read:
   11484  *
   11485  *	Read data from the serial EEPROM.
   11486  */
   11487 static int
   11488 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11489 {
   11490 	int rv;
   11491 
   11492 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11493 		device_xname(sc->sc_dev), __func__));
   11494 
   11495 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11496 		return 1;
   11497 
   11498 	if (wm_nvm_acquire(sc))
   11499 		return 1;
   11500 
   11501 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11502 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11503 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11504 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11505 	else if (sc->sc_type == WM_T_PCH_SPT)
   11506 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11507 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11508 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11509 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11510 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11511 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11512 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11513 	else
   11514 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11515 
   11516 	wm_nvm_release(sc);
   11517 	return rv;
   11518 }
   11519 
   11520 /*
   11521  * Hardware semaphores.
   11522  * Very complexed...
   11523  */
   11524 
   11525 static int
   11526 wm_get_null(struct wm_softc *sc)
   11527 {
   11528 
   11529 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11530 		device_xname(sc->sc_dev), __func__));
   11531 	return 0;
   11532 }
   11533 
   11534 static void
   11535 wm_put_null(struct wm_softc *sc)
   11536 {
   11537 
   11538 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11539 		device_xname(sc->sc_dev), __func__));
   11540 	return;
   11541 }
   11542 
   11543 /*
   11544  * Get hardware semaphore.
   11545  * Same as e1000_get_hw_semaphore_generic()
   11546  */
   11547 static int
   11548 wm_get_swsm_semaphore(struct wm_softc *sc)
   11549 {
   11550 	int32_t timeout;
   11551 	uint32_t swsm;
   11552 
   11553 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11554 		device_xname(sc->sc_dev), __func__));
   11555 	KASSERT(sc->sc_nvm_wordsize > 0);
   11556 
   11557 	/* Get the SW semaphore. */
   11558 	timeout = sc->sc_nvm_wordsize + 1;
   11559 	while (timeout) {
   11560 		swsm = CSR_READ(sc, WMREG_SWSM);
   11561 
   11562 		if ((swsm & SWSM_SMBI) == 0)
   11563 			break;
   11564 
   11565 		delay(50);
   11566 		timeout--;
   11567 	}
   11568 
   11569 	if (timeout == 0) {
   11570 		aprint_error_dev(sc->sc_dev,
   11571 		    "could not acquire SWSM SMBI\n");
   11572 		return 1;
   11573 	}
   11574 
   11575 	/* Get the FW semaphore. */
   11576 	timeout = sc->sc_nvm_wordsize + 1;
   11577 	while (timeout) {
   11578 		swsm = CSR_READ(sc, WMREG_SWSM);
   11579 		swsm |= SWSM_SWESMBI;
   11580 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11581 		/* If we managed to set the bit we got the semaphore. */
   11582 		swsm = CSR_READ(sc, WMREG_SWSM);
   11583 		if (swsm & SWSM_SWESMBI)
   11584 			break;
   11585 
   11586 		delay(50);
   11587 		timeout--;
   11588 	}
   11589 
   11590 	if (timeout == 0) {
   11591 		aprint_error_dev(sc->sc_dev,
   11592 		    "could not acquire SWSM SWESMBI\n");
   11593 		/* Release semaphores */
   11594 		wm_put_swsm_semaphore(sc);
   11595 		return 1;
   11596 	}
   11597 	return 0;
   11598 }
   11599 
   11600 /*
   11601  * Put hardware semaphore.
   11602  * Same as e1000_put_hw_semaphore_generic()
   11603  */
   11604 static void
   11605 wm_put_swsm_semaphore(struct wm_softc *sc)
   11606 {
   11607 	uint32_t swsm;
   11608 
   11609 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11610 		device_xname(sc->sc_dev), __func__));
   11611 
   11612 	swsm = CSR_READ(sc, WMREG_SWSM);
   11613 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11614 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11615 }
   11616 
   11617 /*
   11618  * Get SW/FW semaphore.
   11619  * Same as e1000_acquire_swfw_sync_82575().
   11620  */
   11621 static int
   11622 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11623 {
   11624 	uint32_t swfw_sync;
   11625 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11626 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   11627 	int timeout = 200;
   11628 
   11629 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11630 		device_xname(sc->sc_dev), __func__));
   11631 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11632 
   11633 	for (timeout = 0; timeout < 200; timeout++) {
   11634 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11635 			if (wm_get_swsm_semaphore(sc)) {
   11636 				aprint_error_dev(sc->sc_dev,
   11637 				    "%s: failed to get semaphore\n",
   11638 				    __func__);
   11639 				return 1;
   11640 			}
   11641 		}
   11642 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11643 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11644 			swfw_sync |= swmask;
   11645 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11646 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11647 				wm_put_swsm_semaphore(sc);
   11648 			return 0;
   11649 		}
   11650 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11651 			wm_put_swsm_semaphore(sc);
   11652 		delay(5000);
   11653 	}
   11654 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11655 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11656 	return 1;
   11657 }
   11658 
   11659 static void
   11660 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11661 {
   11662 	uint32_t swfw_sync;
   11663 
   11664 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11665 		device_xname(sc->sc_dev), __func__));
   11666 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11667 
   11668 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11669 		while (wm_get_swsm_semaphore(sc) != 0)
   11670 			continue;
   11671 	}
   11672 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11673 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11674 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11675 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11676 		wm_put_swsm_semaphore(sc);
   11677 }
   11678 
   11679 static int
   11680 wm_get_phy_82575(struct wm_softc *sc)
   11681 {
   11682 
   11683 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11684 		device_xname(sc->sc_dev), __func__));
   11685 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11686 }
   11687 
   11688 static void
   11689 wm_put_phy_82575(struct wm_softc *sc)
   11690 {
   11691 
   11692 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11693 		device_xname(sc->sc_dev), __func__));
   11694 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11695 }
   11696 
   11697 static int
   11698 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11699 {
   11700 	uint32_t ext_ctrl;
   11701 	int timeout = 200;
   11702 
   11703 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11704 		device_xname(sc->sc_dev), __func__));
   11705 
   11706 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11707 	for (timeout = 0; timeout < 200; timeout++) {
   11708 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11709 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11710 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11711 
   11712 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11713 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11714 			return 0;
   11715 		delay(5000);
   11716 	}
   11717 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11718 	    device_xname(sc->sc_dev), ext_ctrl);
   11719 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11720 	return 1;
   11721 }
   11722 
   11723 static void
   11724 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11725 {
   11726 	uint32_t ext_ctrl;
   11727 
   11728 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11729 		device_xname(sc->sc_dev), __func__));
   11730 
   11731 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11732 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11733 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11734 
   11735 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11736 }
   11737 
   11738 static int
   11739 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11740 {
   11741 	uint32_t ext_ctrl;
   11742 	int timeout;
   11743 
   11744 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11745 		device_xname(sc->sc_dev), __func__));
   11746 	mutex_enter(sc->sc_ich_phymtx);
   11747 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11748 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11749 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11750 			break;
   11751 		delay(1000);
   11752 	}
   11753 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11754 		printf("%s: SW has already locked the resource\n",
   11755 		    device_xname(sc->sc_dev));
   11756 		goto out;
   11757 	}
   11758 
   11759 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11760 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11761 	for (timeout = 0; timeout < 1000; timeout++) {
   11762 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11763 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11764 			break;
   11765 		delay(1000);
   11766 	}
   11767 	if (timeout >= 1000) {
   11768 		printf("%s: failed to acquire semaphore\n",
   11769 		    device_xname(sc->sc_dev));
   11770 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11771 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11772 		goto out;
   11773 	}
   11774 	return 0;
   11775 
   11776 out:
   11777 	mutex_exit(sc->sc_ich_phymtx);
   11778 	return 1;
   11779 }
   11780 
   11781 static void
   11782 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11783 {
   11784 	uint32_t ext_ctrl;
   11785 
   11786 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11787 		device_xname(sc->sc_dev), __func__));
   11788 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11789 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11790 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11791 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11792 	} else {
   11793 		printf("%s: Semaphore unexpectedly released\n",
   11794 		    device_xname(sc->sc_dev));
   11795 	}
   11796 
   11797 	mutex_exit(sc->sc_ich_phymtx);
   11798 }
   11799 
   11800 static int
   11801 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11802 {
   11803 
   11804 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11805 		device_xname(sc->sc_dev), __func__));
   11806 	mutex_enter(sc->sc_ich_nvmmtx);
   11807 
   11808 	return 0;
   11809 }
   11810 
   11811 static void
   11812 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11813 {
   11814 
   11815 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11816 		device_xname(sc->sc_dev), __func__));
   11817 	mutex_exit(sc->sc_ich_nvmmtx);
   11818 }
   11819 
   11820 static int
   11821 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11822 {
   11823 	int i = 0;
   11824 	uint32_t reg;
   11825 
   11826 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11827 		device_xname(sc->sc_dev), __func__));
   11828 
   11829 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11830 	do {
   11831 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11832 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11833 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11834 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11835 			break;
   11836 		delay(2*1000);
   11837 		i++;
   11838 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11839 
   11840 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11841 		wm_put_hw_semaphore_82573(sc);
   11842 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11843 		    device_xname(sc->sc_dev));
   11844 		return -1;
   11845 	}
   11846 
   11847 	return 0;
   11848 }
   11849 
   11850 static void
   11851 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11852 {
   11853 	uint32_t reg;
   11854 
   11855 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11856 		device_xname(sc->sc_dev), __func__));
   11857 
   11858 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11859 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11860 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11861 }
   11862 
   11863 /*
   11864  * Management mode and power management related subroutines.
   11865  * BMC, AMT, suspend/resume and EEE.
   11866  */
   11867 
   11868 #ifdef WM_WOL
   11869 static int
   11870 wm_check_mng_mode(struct wm_softc *sc)
   11871 {
   11872 	int rv;
   11873 
   11874 	switch (sc->sc_type) {
   11875 	case WM_T_ICH8:
   11876 	case WM_T_ICH9:
   11877 	case WM_T_ICH10:
   11878 	case WM_T_PCH:
   11879 	case WM_T_PCH2:
   11880 	case WM_T_PCH_LPT:
   11881 	case WM_T_PCH_SPT:
   11882 		rv = wm_check_mng_mode_ich8lan(sc);
   11883 		break;
   11884 	case WM_T_82574:
   11885 	case WM_T_82583:
   11886 		rv = wm_check_mng_mode_82574(sc);
   11887 		break;
   11888 	case WM_T_82571:
   11889 	case WM_T_82572:
   11890 	case WM_T_82573:
   11891 	case WM_T_80003:
   11892 		rv = wm_check_mng_mode_generic(sc);
   11893 		break;
   11894 	default:
   11895 		/* noting to do */
   11896 		rv = 0;
   11897 		break;
   11898 	}
   11899 
   11900 	return rv;
   11901 }
   11902 
   11903 static int
   11904 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11905 {
   11906 	uint32_t fwsm;
   11907 
   11908 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11909 
   11910 	if (((fwsm & FWSM_FW_VALID) != 0)
   11911 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11912 		return 1;
   11913 
   11914 	return 0;
   11915 }
   11916 
   11917 static int
   11918 wm_check_mng_mode_82574(struct wm_softc *sc)
   11919 {
   11920 	uint16_t data;
   11921 
   11922 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11923 
   11924 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11925 		return 1;
   11926 
   11927 	return 0;
   11928 }
   11929 
   11930 static int
   11931 wm_check_mng_mode_generic(struct wm_softc *sc)
   11932 {
   11933 	uint32_t fwsm;
   11934 
   11935 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11936 
   11937 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11938 		return 1;
   11939 
   11940 	return 0;
   11941 }
   11942 #endif /* WM_WOL */
   11943 
   11944 static int
   11945 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11946 {
   11947 	uint32_t manc, fwsm, factps;
   11948 
   11949 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11950 		return 0;
   11951 
   11952 	manc = CSR_READ(sc, WMREG_MANC);
   11953 
   11954 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11955 		device_xname(sc->sc_dev), manc));
   11956 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11957 		return 0;
   11958 
   11959 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11960 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11961 		factps = CSR_READ(sc, WMREG_FACTPS);
   11962 		if (((factps & FACTPS_MNGCG) == 0)
   11963 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11964 			return 1;
   11965 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   11966 		uint16_t data;
   11967 
   11968 		factps = CSR_READ(sc, WMREG_FACTPS);
   11969 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11970 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11971 			device_xname(sc->sc_dev), factps, data));
   11972 		if (((factps & FACTPS_MNGCG) == 0)
   11973 		    && ((data & NVM_CFG2_MNGM_MASK)
   11974 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11975 			return 1;
   11976 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11977 	    && ((manc & MANC_ASF_EN) == 0))
   11978 		return 1;
   11979 
   11980 	return 0;
   11981 }
   11982 
   11983 static bool
   11984 wm_phy_resetisblocked(struct wm_softc *sc)
   11985 {
   11986 	bool blocked = false;
   11987 	uint32_t reg;
   11988 	int i = 0;
   11989 
   11990 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11991 		device_xname(sc->sc_dev), __func__));
   11992 
   11993 	switch (sc->sc_type) {
   11994 	case WM_T_ICH8:
   11995 	case WM_T_ICH9:
   11996 	case WM_T_ICH10:
   11997 	case WM_T_PCH:
   11998 	case WM_T_PCH2:
   11999 	case WM_T_PCH_LPT:
   12000 	case WM_T_PCH_SPT:
   12001 		do {
   12002 			reg = CSR_READ(sc, WMREG_FWSM);
   12003 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12004 				blocked = true;
   12005 				delay(10*1000);
   12006 				continue;
   12007 			}
   12008 			blocked = false;
   12009 		} while (blocked && (i++ < 30));
   12010 		return blocked;
   12011 		break;
   12012 	case WM_T_82571:
   12013 	case WM_T_82572:
   12014 	case WM_T_82573:
   12015 	case WM_T_82574:
   12016 	case WM_T_82583:
   12017 	case WM_T_80003:
   12018 		reg = CSR_READ(sc, WMREG_MANC);
   12019 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12020 			return true;
   12021 		else
   12022 			return false;
   12023 		break;
   12024 	default:
   12025 		/* no problem */
   12026 		break;
   12027 	}
   12028 
   12029 	return false;
   12030 }
   12031 
   12032 static void
   12033 wm_get_hw_control(struct wm_softc *sc)
   12034 {
   12035 	uint32_t reg;
   12036 
   12037 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12038 		device_xname(sc->sc_dev), __func__));
   12039 
   12040 	if (sc->sc_type == WM_T_82573) {
   12041 		reg = CSR_READ(sc, WMREG_SWSM);
   12042 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12043 	} else if (sc->sc_type >= WM_T_82571) {
   12044 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12045 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12046 	}
   12047 }
   12048 
   12049 static void
   12050 wm_release_hw_control(struct wm_softc *sc)
   12051 {
   12052 	uint32_t reg;
   12053 
   12054 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12055 		device_xname(sc->sc_dev), __func__));
   12056 
   12057 	if (sc->sc_type == WM_T_82573) {
   12058 		reg = CSR_READ(sc, WMREG_SWSM);
   12059 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12060 	} else if (sc->sc_type >= WM_T_82571) {
   12061 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12062 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12063 	}
   12064 }
   12065 
   12066 static void
   12067 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12068 {
   12069 	uint32_t reg;
   12070 
   12071 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12072 		device_xname(sc->sc_dev), __func__));
   12073 
   12074 	if (sc->sc_type < WM_T_PCH2)
   12075 		return;
   12076 
   12077 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12078 
   12079 	if (gate)
   12080 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12081 	else
   12082 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12083 
   12084 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12085 }
   12086 
   12087 static void
   12088 wm_smbustopci(struct wm_softc *sc)
   12089 {
   12090 	uint32_t fwsm, reg;
   12091 	int rv = 0;
   12092 
   12093 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12094 		device_xname(sc->sc_dev), __func__));
   12095 
   12096 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12097 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12098 
   12099 	/* Disable ULP */
   12100 	wm_ulp_disable(sc);
   12101 
   12102 	/* Acquire PHY semaphore */
   12103 	sc->phy.acquire(sc);
   12104 
   12105 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12106 	switch (sc->sc_type) {
   12107 	case WM_T_PCH_LPT:
   12108 	case WM_T_PCH_SPT:
   12109 		if (wm_phy_is_accessible_pchlan(sc))
   12110 			break;
   12111 
   12112 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12113 		reg |= CTRL_EXT_FORCE_SMBUS;
   12114 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12115 #if 0
   12116 		/* XXX Isn't this required??? */
   12117 		CSR_WRITE_FLUSH(sc);
   12118 #endif
   12119 		delay(50 * 1000);
   12120 		/* FALLTHROUGH */
   12121 	case WM_T_PCH2:
   12122 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12123 			break;
   12124 		/* FALLTHROUGH */
   12125 	case WM_T_PCH:
   12126 		if (sc->sc_type == WM_T_PCH)
   12127 			if ((fwsm & FWSM_FW_VALID) != 0)
   12128 				break;
   12129 
   12130 		if (wm_phy_resetisblocked(sc) == true) {
   12131 			printf("XXX reset is blocked(3)\n");
   12132 			break;
   12133 		}
   12134 
   12135 		wm_toggle_lanphypc_pch_lpt(sc);
   12136 
   12137 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12138 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12139 				break;
   12140 
   12141 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12142 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12143 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12144 
   12145 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12146 				break;
   12147 			rv = -1;
   12148 		}
   12149 		break;
   12150 	default:
   12151 		break;
   12152 	}
   12153 
   12154 	/* Release semaphore */
   12155 	sc->phy.release(sc);
   12156 
   12157 	if (rv == 0) {
   12158 		if (wm_phy_resetisblocked(sc)) {
   12159 			printf("XXX reset is blocked(4)\n");
   12160 			goto out;
   12161 		}
   12162 		wm_reset_phy(sc);
   12163 		if (wm_phy_resetisblocked(sc))
   12164 			printf("XXX reset is blocked(4)\n");
   12165 	}
   12166 
   12167 out:
   12168 	/*
   12169 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12170 	 */
   12171 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12172 		delay(10*1000);
   12173 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12174 	}
   12175 }
   12176 
   12177 static void
   12178 wm_init_manageability(struct wm_softc *sc)
   12179 {
   12180 
   12181 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12182 		device_xname(sc->sc_dev), __func__));
   12183 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12184 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12185 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12186 
   12187 		/* Disable hardware interception of ARP */
   12188 		manc &= ~MANC_ARP_EN;
   12189 
   12190 		/* Enable receiving management packets to the host */
   12191 		if (sc->sc_type >= WM_T_82571) {
   12192 			manc |= MANC_EN_MNG2HOST;
   12193 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   12194 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12195 		}
   12196 
   12197 		CSR_WRITE(sc, WMREG_MANC, manc);
   12198 	}
   12199 }
   12200 
   12201 static void
   12202 wm_release_manageability(struct wm_softc *sc)
   12203 {
   12204 
   12205 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12206 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12207 
   12208 		manc |= MANC_ARP_EN;
   12209 		if (sc->sc_type >= WM_T_82571)
   12210 			manc &= ~MANC_EN_MNG2HOST;
   12211 
   12212 		CSR_WRITE(sc, WMREG_MANC, manc);
   12213 	}
   12214 }
   12215 
   12216 static void
   12217 wm_get_wakeup(struct wm_softc *sc)
   12218 {
   12219 
   12220 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   12221 	switch (sc->sc_type) {
   12222 	case WM_T_82573:
   12223 	case WM_T_82583:
   12224 		sc->sc_flags |= WM_F_HAS_AMT;
   12225 		/* FALLTHROUGH */
   12226 	case WM_T_80003:
   12227 	case WM_T_82575:
   12228 	case WM_T_82576:
   12229 	case WM_T_82580:
   12230 	case WM_T_I350:
   12231 	case WM_T_I354:
   12232 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   12233 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   12234 		/* FALLTHROUGH */
   12235 	case WM_T_82541:
   12236 	case WM_T_82541_2:
   12237 	case WM_T_82547:
   12238 	case WM_T_82547_2:
   12239 	case WM_T_82571:
   12240 	case WM_T_82572:
   12241 	case WM_T_82574:
   12242 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12243 		break;
   12244 	case WM_T_ICH8:
   12245 	case WM_T_ICH9:
   12246 	case WM_T_ICH10:
   12247 	case WM_T_PCH:
   12248 	case WM_T_PCH2:
   12249 	case WM_T_PCH_LPT:
   12250 	case WM_T_PCH_SPT:
   12251 		sc->sc_flags |= WM_F_HAS_AMT;
   12252 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   12253 		break;
   12254 	default:
   12255 		break;
   12256 	}
   12257 
   12258 	/* 1: HAS_MANAGE */
   12259 	if (wm_enable_mng_pass_thru(sc) != 0)
   12260 		sc->sc_flags |= WM_F_HAS_MANAGE;
   12261 
   12262 #ifdef WM_DEBUG
   12263 	printf("\n");
   12264 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   12265 		printf("HAS_AMT,");
   12266 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   12267 		printf("ARC_SUBSYS_VALID,");
   12268 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   12269 		printf("ASF_FIRMWARE_PRES,");
   12270 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   12271 		printf("HAS_MANAGE,");
   12272 	printf("\n");
   12273 #endif
   12274 	/*
   12275 	 * Note that the WOL flags is set after the resetting of the eeprom
   12276 	 * stuff
   12277 	 */
   12278 }
   12279 
   12280 /*
   12281  * Unconfigure Ultra Low Power mode.
   12282  * Only for I217 and newer (see below).
   12283  */
   12284 static void
   12285 wm_ulp_disable(struct wm_softc *sc)
   12286 {
   12287 	uint32_t reg;
   12288 	int i = 0;
   12289 
   12290 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12291 		device_xname(sc->sc_dev), __func__));
   12292 	/* Exclude old devices */
   12293 	if ((sc->sc_type < WM_T_PCH_LPT)
   12294 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   12295 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   12296 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   12297 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   12298 		return;
   12299 
   12300 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   12301 		/* Request ME un-configure ULP mode in the PHY */
   12302 		reg = CSR_READ(sc, WMREG_H2ME);
   12303 		reg &= ~H2ME_ULP;
   12304 		reg |= H2ME_ENFORCE_SETTINGS;
   12305 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12306 
   12307 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   12308 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   12309 			if (i++ == 30) {
   12310 				printf("%s timed out\n", __func__);
   12311 				return;
   12312 			}
   12313 			delay(10 * 1000);
   12314 		}
   12315 		reg = CSR_READ(sc, WMREG_H2ME);
   12316 		reg &= ~H2ME_ENFORCE_SETTINGS;
   12317 		CSR_WRITE(sc, WMREG_H2ME, reg);
   12318 
   12319 		return;
   12320 	}
   12321 
   12322 	/* Acquire semaphore */
   12323 	sc->phy.acquire(sc);
   12324 
   12325 	/* Toggle LANPHYPC */
   12326 	wm_toggle_lanphypc_pch_lpt(sc);
   12327 
   12328 	/* Unforce SMBus mode in PHY */
   12329 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12330 	if (reg == 0x0000 || reg == 0xffff) {
   12331 		uint32_t reg2;
   12332 
   12333 		printf("%s: Force SMBus first.\n", __func__);
   12334 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   12335 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   12336 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   12337 		delay(50 * 1000);
   12338 
   12339 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   12340 	}
   12341 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12342 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   12343 
   12344 	/* Unforce SMBus mode in MAC */
   12345 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12346 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   12347 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12348 
   12349 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   12350 	reg |= HV_PM_CTRL_K1_ENA;
   12351 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   12352 
   12353 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   12354 	reg &= ~(I218_ULP_CONFIG1_IND
   12355 	    | I218_ULP_CONFIG1_STICKY_ULP
   12356 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   12357 	    | I218_ULP_CONFIG1_WOL_HOST
   12358 	    | I218_ULP_CONFIG1_INBAND_EXIT
   12359 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   12360 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   12361 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   12362 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12363 	reg |= I218_ULP_CONFIG1_START;
   12364 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   12365 
   12366 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   12367 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   12368 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   12369 
   12370 	/* Release semaphore */
   12371 	sc->phy.release(sc);
   12372 	wm_gmii_reset(sc);
   12373 	delay(50 * 1000);
   12374 }
   12375 
   12376 /* WOL in the newer chipset interfaces (pchlan) */
   12377 static void
   12378 wm_enable_phy_wakeup(struct wm_softc *sc)
   12379 {
   12380 #if 0
   12381 	uint16_t preg;
   12382 
   12383 	/* Copy MAC RARs to PHY RARs */
   12384 
   12385 	/* Copy MAC MTA to PHY MTA */
   12386 
   12387 	/* Configure PHY Rx Control register */
   12388 
   12389 	/* Enable PHY wakeup in MAC register */
   12390 
   12391 	/* Configure and enable PHY wakeup in PHY registers */
   12392 
   12393 	/* Activate PHY wakeup */
   12394 
   12395 	/* XXX */
   12396 #endif
   12397 }
   12398 
   12399 /* Power down workaround on D3 */
   12400 static void
   12401 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   12402 {
   12403 	uint32_t reg;
   12404 	int i;
   12405 
   12406 	for (i = 0; i < 2; i++) {
   12407 		/* Disable link */
   12408 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12409 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12410 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12411 
   12412 		/*
   12413 		 * Call gig speed drop workaround on Gig disable before
   12414 		 * accessing any PHY registers
   12415 		 */
   12416 		if (sc->sc_type == WM_T_ICH8)
   12417 			wm_gig_downshift_workaround_ich8lan(sc);
   12418 
   12419 		/* Write VR power-down enable */
   12420 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12421 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12422 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12423 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12424 
   12425 		/* Read it back and test */
   12426 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12427 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12428 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12429 			break;
   12430 
   12431 		/* Issue PHY reset and repeat at most one more time */
   12432 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12433 	}
   12434 }
   12435 
   12436 static void
   12437 wm_enable_wakeup(struct wm_softc *sc)
   12438 {
   12439 	uint32_t reg, pmreg;
   12440 	pcireg_t pmode;
   12441 
   12442 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12443 		device_xname(sc->sc_dev), __func__));
   12444 
   12445 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12446 		&pmreg, NULL) == 0)
   12447 		return;
   12448 
   12449 	/* Advertise the wakeup capability */
   12450 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12451 	    | CTRL_SWDPIN(3));
   12452 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12453 
   12454 	/* ICH workaround */
   12455 	switch (sc->sc_type) {
   12456 	case WM_T_ICH8:
   12457 	case WM_T_ICH9:
   12458 	case WM_T_ICH10:
   12459 	case WM_T_PCH:
   12460 	case WM_T_PCH2:
   12461 	case WM_T_PCH_LPT:
   12462 	case WM_T_PCH_SPT:
   12463 		/* Disable gig during WOL */
   12464 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12465 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12466 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12467 		if (sc->sc_type == WM_T_PCH)
   12468 			wm_gmii_reset(sc);
   12469 
   12470 		/* Power down workaround */
   12471 		if (sc->sc_phytype == WMPHY_82577) {
   12472 			struct mii_softc *child;
   12473 
   12474 			/* Assume that the PHY is copper */
   12475 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12476 			if (child->mii_mpd_rev <= 2)
   12477 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12478 				    (768 << 5) | 25, 0x0444); /* magic num */
   12479 		}
   12480 		break;
   12481 	default:
   12482 		break;
   12483 	}
   12484 
   12485 	/* Keep the laser running on fiber adapters */
   12486 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12487 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12488 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12489 		reg |= CTRL_EXT_SWDPIN(3);
   12490 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12491 	}
   12492 
   12493 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12494 #if 0	/* for the multicast packet */
   12495 	reg |= WUFC_MC;
   12496 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12497 #endif
   12498 
   12499 	if (sc->sc_type >= WM_T_PCH)
   12500 		wm_enable_phy_wakeup(sc);
   12501 	else {
   12502 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   12503 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12504 	}
   12505 
   12506 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12507 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12508 		|| (sc->sc_type == WM_T_PCH2))
   12509 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12510 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12511 
   12512 	/* Request PME */
   12513 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12514 #if 0
   12515 	/* Disable WOL */
   12516 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12517 #else
   12518 	/* For WOL */
   12519 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12520 #endif
   12521 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12522 }
   12523 
   12524 /* LPLU */
   12525 
   12526 static void
   12527 wm_lplu_d0_disable(struct wm_softc *sc)
   12528 {
   12529 	uint32_t reg;
   12530 
   12531 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12532 		device_xname(sc->sc_dev), __func__));
   12533 
   12534 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12535 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12536 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12537 }
   12538 
   12539 static void
   12540 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12541 {
   12542 	uint32_t reg;
   12543 
   12544 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12545 		device_xname(sc->sc_dev), __func__));
   12546 
   12547 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12548 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12549 	reg |= HV_OEM_BITS_ANEGNOW;
   12550 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12551 }
   12552 
   12553 /* EEE */
   12554 
   12555 static void
   12556 wm_set_eee_i350(struct wm_softc *sc)
   12557 {
   12558 	uint32_t ipcnfg, eeer;
   12559 
   12560 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12561 	eeer = CSR_READ(sc, WMREG_EEER);
   12562 
   12563 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12564 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12565 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12566 		    | EEER_LPI_FC);
   12567 	} else {
   12568 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12569 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12570 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12571 		    | EEER_LPI_FC);
   12572 	}
   12573 
   12574 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12575 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12576 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12577 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12578 }
   12579 
   12580 /*
   12581  * Workarounds (mainly PHY related).
   12582  * Basically, PHY's workarounds are in the PHY drivers.
   12583  */
   12584 
   12585 /* Work-around for 82566 Kumeran PCS lock loss */
   12586 static void
   12587 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12588 {
   12589 #if 0
   12590 	int miistatus, active, i;
   12591 	int reg;
   12592 
   12593 	miistatus = sc->sc_mii.mii_media_status;
   12594 
   12595 	/* If the link is not up, do nothing */
   12596 	if ((miistatus & IFM_ACTIVE) == 0)
   12597 		return;
   12598 
   12599 	active = sc->sc_mii.mii_media_active;
   12600 
   12601 	/* Nothing to do if the link is other than 1Gbps */
   12602 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12603 		return;
   12604 
   12605 	for (i = 0; i < 10; i++) {
   12606 		/* read twice */
   12607 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12608 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12609 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12610 			goto out;	/* GOOD! */
   12611 
   12612 		/* Reset the PHY */
   12613 		wm_gmii_reset(sc);
   12614 		delay(5*1000);
   12615 	}
   12616 
   12617 	/* Disable GigE link negotiation */
   12618 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12619 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12620 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12621 
   12622 	/*
   12623 	 * Call gig speed drop workaround on Gig disable before accessing
   12624 	 * any PHY registers.
   12625 	 */
   12626 	wm_gig_downshift_workaround_ich8lan(sc);
   12627 
   12628 out:
   12629 	return;
   12630 #endif
   12631 }
   12632 
   12633 /* WOL from S5 stops working */
   12634 static void
   12635 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12636 {
   12637 	uint16_t kmrn_reg;
   12638 
   12639 	/* Only for igp3 */
   12640 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12641 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12642 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12643 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12644 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12645 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12646 	}
   12647 }
   12648 
   12649 /*
   12650  * Workaround for pch's PHYs
   12651  * XXX should be moved to new PHY driver?
   12652  */
   12653 static void
   12654 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12655 {
   12656 
   12657 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12658 		device_xname(sc->sc_dev), __func__));
   12659 	KASSERT(sc->sc_type == WM_T_PCH);
   12660 
   12661 	if (sc->sc_phytype == WMPHY_82577)
   12662 		wm_set_mdio_slow_mode_hv(sc);
   12663 
   12664 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12665 
   12666 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12667 
   12668 	/* 82578 */
   12669 	if (sc->sc_phytype == WMPHY_82578) {
   12670 		struct mii_softc *child;
   12671 
   12672 		/*
   12673 		 * Return registers to default by doing a soft reset then
   12674 		 * writing 0x3140 to the control register
   12675 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12676 		 */
   12677 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12678 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12679 			PHY_RESET(child);
   12680 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12681 			    0x3140);
   12682 		}
   12683 	}
   12684 
   12685 	/* Select page 0 */
   12686 	sc->phy.acquire(sc);
   12687 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12688 	sc->phy.release(sc);
   12689 
   12690 	/*
   12691 	 * Configure the K1 Si workaround during phy reset assuming there is
   12692 	 * link so that it disables K1 if link is in 1Gbps.
   12693 	 */
   12694 	wm_k1_gig_workaround_hv(sc, 1);
   12695 }
   12696 
   12697 static void
   12698 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12699 {
   12700 
   12701 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12702 		device_xname(sc->sc_dev), __func__));
   12703 	KASSERT(sc->sc_type == WM_T_PCH2);
   12704 
   12705 	wm_set_mdio_slow_mode_hv(sc);
   12706 }
   12707 
   12708 static int
   12709 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12710 {
   12711 	int k1_enable = sc->sc_nvm_k1_enabled;
   12712 
   12713 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12714 		device_xname(sc->sc_dev), __func__));
   12715 
   12716 	if (sc->phy.acquire(sc) != 0)
   12717 		return -1;
   12718 
   12719 	if (link) {
   12720 		k1_enable = 0;
   12721 
   12722 		/* Link stall fix for link up */
   12723 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12724 	} else {
   12725 		/* Link stall fix for link down */
   12726 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12727 	}
   12728 
   12729 	wm_configure_k1_ich8lan(sc, k1_enable);
   12730 	sc->phy.release(sc);
   12731 
   12732 	return 0;
   12733 }
   12734 
   12735 static void
   12736 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12737 {
   12738 	uint32_t reg;
   12739 
   12740 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12741 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12742 	    reg | HV_KMRN_MDIO_SLOW);
   12743 }
   12744 
   12745 static void
   12746 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12747 {
   12748 	uint32_t ctrl, ctrl_ext, tmp;
   12749 	uint16_t kmrn_reg;
   12750 
   12751 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12752 
   12753 	if (k1_enable)
   12754 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12755 	else
   12756 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12757 
   12758 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12759 
   12760 	delay(20);
   12761 
   12762 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12763 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12764 
   12765 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12766 	tmp |= CTRL_FRCSPD;
   12767 
   12768 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12769 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12770 	CSR_WRITE_FLUSH(sc);
   12771 	delay(20);
   12772 
   12773 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12774 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12775 	CSR_WRITE_FLUSH(sc);
   12776 	delay(20);
   12777 }
   12778 
   12779 /* special case - for 82575 - need to do manual init ... */
   12780 static void
   12781 wm_reset_init_script_82575(struct wm_softc *sc)
   12782 {
   12783 	/*
   12784 	 * remark: this is untested code - we have no board without EEPROM
   12785 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   12786 	 */
   12787 
   12788 	/* SerDes configuration via SERDESCTRL */
   12789 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12790 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12791 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12792 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12793 
   12794 	/* CCM configuration via CCMCTL register */
   12795 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12796 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12797 
   12798 	/* PCIe lanes configuration */
   12799 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12800 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12801 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12802 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12803 
   12804 	/* PCIe PLL Configuration */
   12805 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12806 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12807 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12808 }
   12809 
   12810 static void
   12811 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12812 {
   12813 	uint32_t reg;
   12814 	uint16_t nvmword;
   12815 	int rv;
   12816 
   12817 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12818 		return;
   12819 
   12820 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12821 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12822 	if (rv != 0) {
   12823 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12824 		    __func__);
   12825 		return;
   12826 	}
   12827 
   12828 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12829 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12830 		reg |= MDICNFG_DEST;
   12831 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12832 		reg |= MDICNFG_COM_MDIO;
   12833 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12834 }
   12835 
   12836 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   12837 
   12838 static bool
   12839 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   12840 {
   12841 	int i;
   12842 	uint32_t reg;
   12843 	uint16_t id1, id2;
   12844 
   12845 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12846 		device_xname(sc->sc_dev), __func__));
   12847 	id1 = id2 = 0xffff;
   12848 	for (i = 0; i < 2; i++) {
   12849 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   12850 		if (MII_INVALIDID(id1))
   12851 			continue;
   12852 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   12853 		if (MII_INVALIDID(id2))
   12854 			continue;
   12855 		break;
   12856 	}
   12857 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   12858 		goto out;
   12859 	}
   12860 
   12861 	if (sc->sc_type < WM_T_PCH_LPT) {
   12862 		sc->phy.release(sc);
   12863 		wm_set_mdio_slow_mode_hv(sc);
   12864 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   12865 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   12866 		sc->phy.acquire(sc);
   12867 	}
   12868 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   12869 		printf("XXX return with false\n");
   12870 		return false;
   12871 	}
   12872 out:
   12873 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   12874 		/* Only unforce SMBus if ME is not active */
   12875 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   12876 			/* Unforce SMBus mode in PHY */
   12877 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   12878 			    CV_SMB_CTRL);
   12879 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   12880 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   12881 			    CV_SMB_CTRL, reg);
   12882 
   12883 			/* Unforce SMBus mode in MAC */
   12884 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12885 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12886 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12887 		}
   12888 	}
   12889 	return true;
   12890 }
   12891 
   12892 static void
   12893 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   12894 {
   12895 	uint32_t reg;
   12896 	int i;
   12897 
   12898 	/* Set PHY Config Counter to 50msec */
   12899 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   12900 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   12901 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   12902 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   12903 
   12904 	/* Toggle LANPHYPC */
   12905 	reg = CSR_READ(sc, WMREG_CTRL);
   12906 	reg |= CTRL_LANPHYPC_OVERRIDE;
   12907 	reg &= ~CTRL_LANPHYPC_VALUE;
   12908 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12909 	CSR_WRITE_FLUSH(sc);
   12910 	delay(1000);
   12911 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   12912 	CSR_WRITE(sc, WMREG_CTRL, reg);
   12913 	CSR_WRITE_FLUSH(sc);
   12914 
   12915 	if (sc->sc_type < WM_T_PCH_LPT)
   12916 		delay(50 * 1000);
   12917 	else {
   12918 		i = 20;
   12919 
   12920 		do {
   12921 			delay(5 * 1000);
   12922 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   12923 		    && i--);
   12924 
   12925 		delay(30 * 1000);
   12926 	}
   12927 }
   12928 
   12929 static int
   12930 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   12931 {
   12932 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   12933 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   12934 	uint32_t rxa;
   12935 	uint16_t scale = 0, lat_enc = 0;
   12936 	int64_t lat_ns, value;
   12937 
   12938 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12939 		device_xname(sc->sc_dev), __func__));
   12940 
   12941 	if (link) {
   12942 		pcireg_t preg;
   12943 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   12944 
   12945 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   12946 
   12947 		/*
   12948 		 * Determine the maximum latency tolerated by the device.
   12949 		 *
   12950 		 * Per the PCIe spec, the tolerated latencies are encoded as
   12951 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   12952 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   12953 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   12954 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   12955 		 */
   12956 		lat_ns = ((int64_t)rxa * 1024 -
   12957 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   12958 		if (lat_ns < 0)
   12959 			lat_ns = 0;
   12960 		else {
   12961 			uint32_t status;
   12962 			uint16_t speed;
   12963 
   12964 			status = CSR_READ(sc, WMREG_STATUS);
   12965 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   12966 			case STATUS_SPEED_10:
   12967 				speed = 10;
   12968 				break;
   12969 			case STATUS_SPEED_100:
   12970 				speed = 100;
   12971 				break;
   12972 			case STATUS_SPEED_1000:
   12973 				speed = 1000;
   12974 				break;
   12975 			default:
   12976 				printf("%s: Unknown speed (status = %08x)\n",
   12977 				    device_xname(sc->sc_dev), status);
   12978 				return -1;
   12979 			}
   12980 			lat_ns /= speed;
   12981 		}
   12982 		value = lat_ns;
   12983 
   12984 		while (value > LTRV_VALUE) {
   12985 			scale ++;
   12986 			value = howmany(value, __BIT(5));
   12987 		}
   12988 		if (scale > LTRV_SCALE_MAX) {
   12989 			printf("%s: Invalid LTR latency scale %d\n",
   12990 			    device_xname(sc->sc_dev), scale);
   12991 			return -1;
   12992 		}
   12993 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   12994 
   12995 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12996 		    WM_PCI_LTR_CAP_LPT);
   12997 		max_snoop = preg & 0xffff;
   12998 		max_nosnoop = preg >> 16;
   12999 
   13000 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13001 
   13002 		if (lat_enc > max_ltr_enc) {
   13003 			lat_enc = max_ltr_enc;
   13004 		}
   13005 	}
   13006 	/* Snoop and No-Snoop latencies the same */
   13007 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13008 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13009 
   13010 	return 0;
   13011 }
   13012 
   13013 /*
   13014  * I210 Errata 25 and I211 Errata 10
   13015  * Slow System Clock.
   13016  */
   13017 static void
   13018 wm_pll_workaround_i210(struct wm_softc *sc)
   13019 {
   13020 	uint32_t mdicnfg, wuc;
   13021 	uint32_t reg;
   13022 	pcireg_t pcireg;
   13023 	uint32_t pmreg;
   13024 	uint16_t nvmword, tmp_nvmword;
   13025 	int phyval;
   13026 	bool wa_done = false;
   13027 	int i;
   13028 
   13029 	/* Save WUC and MDICNFG registers */
   13030 	wuc = CSR_READ(sc, WMREG_WUC);
   13031 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13032 
   13033 	reg = mdicnfg & ~MDICNFG_DEST;
   13034 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13035 
   13036 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13037 		nvmword = INVM_DEFAULT_AL;
   13038 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13039 
   13040 	/* Get Power Management cap offset */
   13041 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13042 		&pmreg, NULL) == 0)
   13043 		return;
   13044 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13045 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13046 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13047 
   13048 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13049 			break; /* OK */
   13050 		}
   13051 
   13052 		wa_done = true;
   13053 		/* Directly reset the internal PHY */
   13054 		reg = CSR_READ(sc, WMREG_CTRL);
   13055 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13056 
   13057 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13058 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13059 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13060 
   13061 		CSR_WRITE(sc, WMREG_WUC, 0);
   13062 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13063 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13064 
   13065 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13066 		    pmreg + PCI_PMCSR);
   13067 		pcireg |= PCI_PMCSR_STATE_D3;
   13068 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13069 		    pmreg + PCI_PMCSR, pcireg);
   13070 		delay(1000);
   13071 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13072 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13073 		    pmreg + PCI_PMCSR, pcireg);
   13074 
   13075 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13076 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13077 
   13078 		/* Restore WUC register */
   13079 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13080 	}
   13081 
   13082 	/* Restore MDICNFG setting */
   13083 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13084 	if (wa_done)
   13085 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13086 }
   13087