      1 /*	$NetBSD: if_wm.c,v 1.515 2017/06/26 04:15:06 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Split header buffer for newer descriptors
     79  *	- EEE (Energy Efficient Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.515 2017/06/26 04:15:06 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #include "opt_if_wm.h"
     92 #endif
     93 
     94 #include <sys/param.h>
     95 #include <sys/systm.h>
     96 #include <sys/callout.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/malloc.h>
     99 #include <sys/kmem.h>
    100 #include <sys/kernel.h>
    101 #include <sys/socket.h>
    102 #include <sys/ioctl.h>
    103 #include <sys/errno.h>
    104 #include <sys/device.h>
    105 #include <sys/queue.h>
    106 #include <sys/syslog.h>
    107 #include <sys/interrupt.h>
    108 #include <sys/cpu.h>
    109 #include <sys/pcq.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/miivar.h>
    132 #include <dev/mii/miidevs.h>
    133 #include <dev/mii/mii_bitbang.h>
    134 #include <dev/mii/ikphyreg.h>
    135 #include <dev/mii/igphyreg.h>
    136 #include <dev/mii/igphyvar.h>
    137 #include <dev/mii/inbmphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
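
        /*
         * Usage sketch (mirroring how DPRINTF is used throughout this
         * driver): the first argument selects a debug class, the second
         * is a parenthesized printf() argument list, e.g.
         *
         *	DPRINTF(WM_DEBUG_LINK,
         *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
         *
         * Narrowing the wm_debug mask (e.g. from ddb) silences the other
         * classes.
         */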
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
    171  * Maximum number of interrupts this driver can use (one per queue + link).
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
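
        /*
         * Illustrative sketch (not driver code): the descriptor and job
         * counts are powers of two, so the NEXT macros wrap with a cheap
         * mask instead of a modulo.  Advancing from the last slot:
         *
         *	int idx = WM_NTXDESC(txq) - 1;
         *	idx = WM_NEXTTX(txq, idx);	(yields 0)
         */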
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for normal-
    218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
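
        /*
         * Both limits can be overridden from the kernel config (sketch;
         * the exact option plumbing comes via opt_if_wm.h), e.g.
         *
         *	options WM_RX_PROCESS_LIMIT_DEFAULT=200
         */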
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
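
        /*
         * Sketch of the intended use (see wm_rxpbs_adjust_82580(),
         * prototyped below): the hardware RXPBS field is treated as an
         * index into this table to obtain a packet buffer size, with a
         * bounds check such as
         *
         *	if (val < __arraycount(wm_82580_rxpbs_table))
         *		rv = wm_82580_rxpbs_table[val];
         */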
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
    297 	} while (/*CONSTCOND*/0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
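
        /*
         * Expansion example (follows directly from the macros above):
         * WM_Q_EVCNT_DEFINE(txq, txdw) declares txq_txdw_evcnt_name[] and
         * struct evcnt txq_ev_txdw; attaching it with qnum 0 registers
         * the counter under the name "txq00txdw".
         */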
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
    320 	size_t txq_descsize;		/* a tx descriptor size */
    321 	txdescs_t *txq_descs_u;
    322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    346 	 * CPUs. This intermediate queue decouples them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
    351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    370 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
    376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
    395 	size_t rxq_descsize;		/* a rx descriptor size */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    399 	int rxq_desc_rseg;		/* real number of control segments */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of this Tx/Rx queue pair */
    428 	int wmq_intr_idx;		/* index into the MSI-X table */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 /*
    446  * Software state per device.
    447  */
    448 struct wm_softc {
    449 	device_t sc_dev;		/* generic device information */
    450 	bus_space_tag_t sc_st;		/* bus space tag */
    451 	bus_space_handle_t sc_sh;	/* bus space handle */
    452 	bus_size_t sc_ss;		/* bus space size */
    453 	bus_space_tag_t sc_iot;		/* I/O space tag */
    454 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    455 	bus_size_t sc_ios;		/* I/O space size */
    456 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    457 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    458 	bus_size_t sc_flashs;		/* flash registers space size */
    459 	off_t sc_flashreg_offset;	/*
    460 					 * offset to flash registers from
    461 					 * start of BAR
    462 					 */
    463 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    464 
    465 	struct ethercom sc_ethercom;	/* ethernet common data */
    466 	struct mii_data sc_mii;		/* MII/media information */
    467 
    468 	pci_chipset_tag_t sc_pc;
    469 	pcitag_t sc_pcitag;
    470 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    471 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    472 
    473 	uint16_t sc_pcidevid;		/* PCI device ID */
    474 	wm_chip_type sc_type;		/* MAC type */
    475 	int sc_rev;			/* MAC revision */
    476 	wm_phy_type sc_phytype;		/* PHY type */
    477 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    478 #define	WM_MEDIATYPE_UNKNOWN		0x00
    479 #define	WM_MEDIATYPE_FIBER		0x01
    480 #define	WM_MEDIATYPE_COPPER		0x02
    481 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    482 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    483 	int sc_flags;			/* flags; see below */
    484 	int sc_if_flags;		/* last if_flags */
    485 	int sc_flowflags;		/* 802.3x flow control flags */
    486 	int sc_align_tweak;
    487 
    488 	void *sc_ihs[WM_MAX_NINTR];	/*
    489 					 * interrupt cookie.
    490 					 * - legacy and msi use sc_ihs[0] only
    491 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    492 					 */
    493 	pci_intr_handle_t *sc_intrs;	/*
    494 					 * legacy and msi use sc_intrs[0] only
    495 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    496 					 */
    497 	int sc_nintrs;			/* number of interrupts */
    498 
    499 	int sc_link_intr_idx;		/* index into the MSI-X table */
    500 
    501 	callout_t sc_tick_ch;		/* tick callout */
    502 	bool sc_core_stopping;
    503 
    504 	int sc_nvm_ver_major;
    505 	int sc_nvm_ver_minor;
    506 	int sc_nvm_ver_build;
    507 	int sc_nvm_addrbits;		/* NVM address bits */
    508 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    509 	int sc_ich8_flash_base;
    510 	int sc_ich8_flash_bank_size;
    511 	int sc_nvm_k1_enabled;
    512 
    513 	int sc_nqueues;
    514 	struct wm_queue *sc_queue;
    515 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    516 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    517 
    518 	int sc_affinity_offset;
    519 
    520 #ifdef WM_EVENT_COUNTERS
    521 	/* Event counters. */
    522 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    523 
    524 	/* WM_T_82542_2_1 only */
    525 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    526 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    527 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    528 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    529 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    530 #endif /* WM_EVENT_COUNTERS */
    531 
    532 	/* This variable is used only on the 82547. */
    533 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    534 
    535 	uint32_t sc_ctrl;		/* prototype CTRL register */
    536 #if 0
    537 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    538 #endif
    539 	uint32_t sc_icr;		/* prototype interrupt bits */
    540 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    541 	uint32_t sc_tctl;		/* prototype TCTL register */
    542 	uint32_t sc_rctl;		/* prototype RCTL register */
    543 	uint32_t sc_txcw;		/* prototype TXCW register */
    544 	uint32_t sc_tipg;		/* prototype TIPG register */
    545 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    546 	uint32_t sc_pba;		/* prototype PBA register */
    547 
    548 	int sc_tbi_linkup;		/* TBI link status */
    549 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    550 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    551 
    552 	int sc_mchash_type;		/* multicast filter offset */
    553 
    554 	krndsource_t rnd_source;	/* random source */
    555 
    556 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    557 
    558 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    559 	kmutex_t *sc_ich_phymtx;	/*
    560 					 * 82574/82583/ICH/PCH specific PHY
    561 					 * mutex. For 82574/82583, the mutex
    562 					 * is used for both PHY and NVM.
    563 					 */
    564 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    565 
    566 	struct wm_phyop phy;
    567 };
    568 
    569 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    570 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    571 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
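
        /*
         * Locking sketch (assumed discipline, implied by the macros
         * above): callers bracket softc state changes with the core lock
         * and assert ownership where it must already be held:
         *
         *	WM_CORE_LOCK(sc);
         *	sc->sc_if_flags = ifp->if_flags;
         *	WM_CORE_UNLOCK(sc);
         *
         *	KASSERT(WM_CORE_LOCKED(sc));
         */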
    572 
    573 #define	WM_RXCHAIN_RESET(rxq)						\
    574 do {									\
    575 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    576 	*(rxq)->rxq_tailp = NULL;					\
    577 	(rxq)->rxq_len = 0;						\
    578 } while (/*CONSTCOND*/0)
    579 
    580 #define	WM_RXCHAIN_LINK(rxq, m)						\
    581 do {									\
    582 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    583 	(rxq)->rxq_tailp = &(m)->m_next;				\
    584 } while (/*CONSTCOND*/0)
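
        /*
         * Sketch of the intended use (assumption based on rxq_head and
         * rxq_tailp): the Rx path appends each 2k fragment of an
         * oversized packet with
         *
         *	WM_RXCHAIN_LINK(rxq, m);
         *
         * and calls WM_RXCHAIN_RESET(rxq) once the assembled chain is
         * handed up the stack or discarded.
         */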
    585 
    586 #ifdef WM_EVENT_COUNTERS
    587 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    588 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    589 
    590 #define WM_Q_EVCNT_INCR(qname, evname)			\
    591 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    592 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    593 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    594 #else /* !WM_EVENT_COUNTERS */
    595 #define	WM_EVCNT_INCR(ev)	/* nothing */
    596 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    597 
    598 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    600 #endif /* !WM_EVENT_COUNTERS */
    601 
    602 #define	CSR_READ(sc, reg)						\
    603 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    604 #define	CSR_WRITE(sc, reg, val)						\
    605 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    606 #define	CSR_WRITE_FLUSH(sc)						\
    607 	(void) CSR_READ((sc), WMREG_STATUS)
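
        /*
         * Typical access pattern (sketch): read-modify-write a register,
         * then force posted writes out by reading STATUS before any
         * delay that depends on the write having reached the device:
         *
         *	uint32_t reg = CSR_READ(sc, WMREG_CTRL);
         *	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_SLU);
         *	CSR_WRITE_FLUSH(sc);
         */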
    608 
    609 #define ICH8_FLASH_READ32(sc, reg)					\
    610 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    611 	    (reg) + sc->sc_flashreg_offset)
    612 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    613 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    614 	    (reg) + sc->sc_flashreg_offset, (data))
    615 
    616 #define ICH8_FLASH_READ16(sc, reg)					\
    617 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    620 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
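
        /*
         * Sketch: the ICH/PCH flash registers live in a separate BAR, so
         * accesses go through sc_flasht/sc_flashh with sc_flashreg_offset
         * applied, e.g. (register name as used by the NVM code below):
         *
         *	uint16_t hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
         */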
    622 
    623 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    624 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    625 
    626 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    627 #define	WM_CDTXADDR_HI(txq, x)						\
    628 	(sizeof(bus_addr_t) == 8 ?					\
    629 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    630 
    631 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    632 #define	WM_CDRXADDR_HI(rxq, x)						\
    633 	(sizeof(bus_addr_t) == 8 ?					\
    634 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
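
        /*
         * Sketch: splitting a descriptor ring's DMA address for the
         * 64-bit base address register pairs; the _HI half collapses to
         * 0 when bus_addr_t is 32 bits wide:
         *
         *	uint32_t lo = WM_CDTXADDR_LO(txq, 0);
         *	uint32_t hi = WM_CDTXADDR_HI(txq, 0);
         */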
    635 
    636 /*
    637  * Register read/write functions.
    638  * Other than CSR_{READ|WRITE}().
    639  */
    640 #if 0
    641 static inline uint32_t wm_io_read(struct wm_softc *, int);
    642 #endif
    643 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    644 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    645 	uint32_t, uint32_t);
    646 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    647 
    648 /*
    649  * Descriptor sync/init functions.
    650  */
    651 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    652 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    653 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    654 
    655 /*
    656  * Device driver interface functions and commonly used functions.
    657  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    658  */
    659 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    660 static int	wm_match(device_t, cfdata_t, void *);
    661 static void	wm_attach(device_t, device_t, void *);
    662 static int	wm_detach(device_t, int);
    663 static bool	wm_suspend(device_t, const pmf_qual_t *);
    664 static bool	wm_resume(device_t, const pmf_qual_t *);
    665 static void	wm_watchdog(struct ifnet *);
    666 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    667 static void	wm_tick(void *);
    668 static int	wm_ifflags_cb(struct ethercom *);
    669 static int	wm_ioctl(struct ifnet *, u_long, void *);
    670 /* MAC address related */
    671 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    672 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    673 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    674 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    675 static void	wm_set_filter(struct wm_softc *);
    676 /* Reset and init related */
    677 static void	wm_set_vlan(struct wm_softc *);
    678 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    679 static void	wm_get_auto_rd_done(struct wm_softc *);
    680 static void	wm_lan_init_done(struct wm_softc *);
    681 static void	wm_get_cfg_done(struct wm_softc *);
    682 static void	wm_initialize_hardware_bits(struct wm_softc *);
    683 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    684 static void	wm_reset_phy(struct wm_softc *);
    685 static void	wm_flush_desc_rings(struct wm_softc *);
    686 static void	wm_reset(struct wm_softc *);
    687 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    688 static void	wm_rxdrain(struct wm_rxqueue *);
    689 static void	wm_rss_getkey(uint8_t *);
    690 static void	wm_init_rss(struct wm_softc *);
    691 static void	wm_adjust_qnum(struct wm_softc *, int);
    692 static inline bool	wm_is_using_msix(struct wm_softc *);
    693 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    694 static int	wm_softint_establish(struct wm_softc *, int, int);
    695 static int	wm_setup_legacy(struct wm_softc *);
    696 static int	wm_setup_msix(struct wm_softc *);
    697 static int	wm_init(struct ifnet *);
    698 static int	wm_init_locked(struct ifnet *);
    699 static void	wm_turnon(struct wm_softc *);
    700 static void	wm_turnoff(struct wm_softc *);
    701 static void	wm_stop(struct ifnet *, int);
    702 static void	wm_stop_locked(struct ifnet *, int);
    703 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    704 static void	wm_82547_txfifo_stall(void *);
    705 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    706 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    707 /* DMA related */
    708 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    709 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    710 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    711 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    712     struct wm_txqueue *);
    713 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    714 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    715 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    716     struct wm_rxqueue *);
    717 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    718 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    720 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    721 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    722 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    723 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    724     struct wm_txqueue *);
    725 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_txrx_queues(struct wm_softc *);
    728 static void	wm_free_txrx_queues(struct wm_softc *);
    729 static int	wm_init_txrx_queues(struct wm_softc *);
    730 /* Start */
    731 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    732     struct wm_txsoft *, uint32_t *, uint8_t *);
    733 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    734 static void	wm_start(struct ifnet *);
    735 static void	wm_start_locked(struct ifnet *);
    736 static int	wm_transmit(struct ifnet *, struct mbuf *);
    737 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    738 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    739 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    740     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    741 static void	wm_nq_start(struct ifnet *);
    742 static void	wm_nq_start_locked(struct ifnet *);
    743 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    744 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    745 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    746 static void	wm_deferred_start_locked(struct wm_txqueue *);
    747 static void	wm_handle_queue(void *);
    748 /* Interrupt */
    749 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    751 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    752 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    753 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    754 static void	wm_linkintr(struct wm_softc *, uint32_t);
    755 static int	wm_intr_legacy(void *);
    756 static inline void	wm_txrxintr_disable(struct wm_queue *);
    757 static inline void	wm_txrxintr_enable(struct wm_queue *);
    758 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    759 static int	wm_txrxintr_msix(void *);
    760 static int	wm_linkintr_msix(void *);
    761 
    762 /*
    763  * Media related.
    764  * GMII, SGMII, TBI, SERDES and SFP.
    765  */
    766 /* Common */
    767 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    768 /* GMII related */
    769 static void	wm_gmii_reset(struct wm_softc *);
    770 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    771 static int	wm_get_phy_id_82575(struct wm_softc *);
    772 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    773 static int	wm_gmii_mediachange(struct ifnet *);
    774 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    775 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    776 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    777 static int	wm_gmii_i82543_readreg(device_t, int, int);
    778 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    779 static int	wm_gmii_mdic_readreg(device_t, int, int);
    780 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    781 static int	wm_gmii_i82544_readreg(device_t, int, int);
    782 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    783 static int	wm_gmii_i80003_readreg(device_t, int, int);
    784 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    785 static int	wm_gmii_bm_readreg(device_t, int, int);
    786 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    787 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    788 static int	wm_gmii_hv_readreg(device_t, int, int);
    789 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    790 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    791 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    792 static int	wm_gmii_82580_readreg(device_t, int, int);
    793 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    794 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    795 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    796 static void	wm_gmii_statchg(struct ifnet *);
    797 /*
    798  * Kumeran related (80003, ICH* and PCH*).
    799  * These functions are not for accessing MII registers but for accessing
    800  * Kumeran-specific registers.
    801  */
    802 static int	wm_kmrn_readreg(struct wm_softc *, int);
    803 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    804 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    805 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    806 /* SGMII */
    807 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    808 static int	wm_sgmii_readreg(device_t, int, int);
    809 static void	wm_sgmii_writereg(device_t, int, int, int);
    810 /* TBI related */
    811 static void	wm_tbi_mediainit(struct wm_softc *);
    812 static int	wm_tbi_mediachange(struct ifnet *);
    813 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    814 static int	wm_check_for_link(struct wm_softc *);
    815 static void	wm_tbi_tick(struct wm_softc *);
    816 /* SERDES related */
    817 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    818 static int	wm_serdes_mediachange(struct ifnet *);
    819 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    820 static void	wm_serdes_tick(struct wm_softc *);
    821 /* SFP related */
    822 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    823 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    824 
    825 /*
    826  * NVM related.
    827  * Microwire, SPI (w/wo EERD) and Flash.
    828  */
    829 /* Misc functions */
    830 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    831 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    832 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    833 /* Microwire */
    834 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    835 /* SPI */
    836 static int	wm_nvm_ready_spi(struct wm_softc *);
    837 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    838 /* Using with EERD */
    839 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    840 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    841 /* Flash */
    842 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    843     unsigned int *);
    844 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    845 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    846 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    847 	uint32_t *);
    848 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    849 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    850 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    851 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    852 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    853 /* iNVM */
    854 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    855 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    856 /* Lock, detecting NVM type, validate checksum and read */
    857 static int	wm_nvm_acquire(struct wm_softc *);
    858 static void	wm_nvm_release(struct wm_softc *);
    859 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    860 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    861 static int	wm_nvm_validate_checksum(struct wm_softc *);
    862 static void	wm_nvm_version_invm(struct wm_softc *);
    863 static void	wm_nvm_version(struct wm_softc *);
    864 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    865 
    866 /*
    867  * Hardware semaphores.
    868  * Very complex...
    869  */
    870 static int	wm_get_null(struct wm_softc *);
    871 static void	wm_put_null(struct wm_softc *);
    872 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    873 static void	wm_put_swsm_semaphore(struct wm_softc *);
    874 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    875 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    876 static int	wm_get_phy_82575(struct wm_softc *);
    877 static void	wm_put_phy_82575(struct wm_softc *);
    878 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    879 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    880 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    881 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    882 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    883 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    884 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    885 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    886 
    887 /*
    888  * Management mode and power management related subroutines.
    889  * BMC, AMT, suspend/resume and EEE.
    890  */
    891 #if 0
    892 static int	wm_check_mng_mode(struct wm_softc *);
    893 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    894 static int	wm_check_mng_mode_82574(struct wm_softc *);
    895 static int	wm_check_mng_mode_generic(struct wm_softc *);
    896 #endif
    897 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    898 static bool	wm_phy_resetisblocked(struct wm_softc *);
    899 static void	wm_get_hw_control(struct wm_softc *);
    900 static void	wm_release_hw_control(struct wm_softc *);
    901 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    902 static void	wm_smbustopci(struct wm_softc *);
    903 static void	wm_init_manageability(struct wm_softc *);
    904 static void	wm_release_manageability(struct wm_softc *);
    905 static void	wm_get_wakeup(struct wm_softc *);
    906 static void	wm_ulp_disable(struct wm_softc *);
    907 static void	wm_enable_phy_wakeup(struct wm_softc *);
    908 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    909 static void	wm_enable_wakeup(struct wm_softc *);
    910 /* LPLU (Low Power Link Up) */
    911 static void	wm_lplu_d0_disable(struct wm_softc *);
    912 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    913 /* EEE */
    914 static void	wm_set_eee_i350(struct wm_softc *);
    915 
    916 /*
    917  * Workarounds (mainly PHY related).
    918  * Most PHY workarounds belong in the PHY drivers themselves.
    919  */
    920 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    921 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    922 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    924 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    925 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    926 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    927 static void	wm_reset_init_script_82575(struct wm_softc *);
    928 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    929 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    930 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    931 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    932 static void	wm_pll_workaround_i210(struct wm_softc *);
    933 
    934 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    935     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    936 
    937 /*
    938  * Devices supported by this driver.
    939  */
    940 static const struct wm_product {
    941 	pci_vendor_id_t		wmp_vendor;
    942 	pci_product_id_t	wmp_product;
    943 	const char		*wmp_name;
    944 	wm_chip_type		wmp_type;
    945 	uint32_t		wmp_flags;
    946 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    947 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    948 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    949 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    950 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    951 } wm_products[] = {
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    953 	  "Intel i82542 1000BASE-X Ethernet",
    954 	  WM_T_82542_2_1,	WMP_F_FIBER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    957 	  "Intel i82543GC 1000BASE-X Ethernet",
    958 	  WM_T_82543,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    961 	  "Intel i82543GC 1000BASE-T Ethernet",
    962 	  WM_T_82543,		WMP_F_COPPER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    965 	  "Intel i82544EI 1000BASE-T Ethernet",
    966 	  WM_T_82544,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    969 	  "Intel i82544EI 1000BASE-X Ethernet",
    970 	  WM_T_82544,		WMP_F_FIBER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    973 	  "Intel i82544GC 1000BASE-T Ethernet",
    974 	  WM_T_82544,		WMP_F_COPPER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    977 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    978 	  WM_T_82544,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    981 	  "Intel i82540EM 1000BASE-T Ethernet",
    982 	  WM_T_82540,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    985 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    986 	  WM_T_82540,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    989 	  "Intel i82540EP 1000BASE-T Ethernet",
    990 	  WM_T_82540,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    993 	  "Intel i82540EP 1000BASE-T Ethernet",
    994 	  WM_T_82540,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    997 	  "Intel i82540EP 1000BASE-T Ethernet",
    998 	  WM_T_82540,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1001 	  "Intel i82545EM 1000BASE-T Ethernet",
   1002 	  WM_T_82545,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1005 	  "Intel i82545GM 1000BASE-T Ethernet",
   1006 	  WM_T_82545_3,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1009 	  "Intel i82545GM 1000BASE-X Ethernet",
   1010 	  WM_T_82545_3,		WMP_F_FIBER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1013 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1014 	  WM_T_82545_3,		WMP_F_SERDES },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1017 	  "Intel i82546EB 1000BASE-T Ethernet",
   1018 	  WM_T_82546,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1021 	  "Intel i82546EB 1000BASE-T Ethernet",
   1022 	  WM_T_82546,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1025 	  "Intel i82545EM 1000BASE-X Ethernet",
   1026 	  WM_T_82545,		WMP_F_FIBER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1029 	  "Intel i82546EB 1000BASE-X Ethernet",
   1030 	  WM_T_82546,		WMP_F_FIBER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1033 	  "Intel i82546GB 1000BASE-T Ethernet",
   1034 	  WM_T_82546_3,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1037 	  "Intel i82546GB 1000BASE-X Ethernet",
   1038 	  WM_T_82546_3,		WMP_F_FIBER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1041 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1042 	  WM_T_82546_3,		WMP_F_SERDES },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1045 	  "i82546GB quad-port Gigabit Ethernet",
   1046 	  WM_T_82546_3,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1049 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1050 	  WM_T_82546_3,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1053 	  "Intel PRO/1000MT (82546GB)",
   1054 	  WM_T_82546_3,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1057 	  "Intel i82541EI 1000BASE-T Ethernet",
   1058 	  WM_T_82541,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1061 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1062 	  WM_T_82541,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1065 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1066 	  WM_T_82541,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1069 	  "Intel i82541ER 1000BASE-T Ethernet",
   1070 	  WM_T_82541_2,		WMP_F_COPPER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1073 	  "Intel i82541GI 1000BASE-T Ethernet",
   1074 	  WM_T_82541_2,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1077 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1078 	  WM_T_82541_2,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1081 	  "Intel i82541PI 1000BASE-T Ethernet",
   1082 	  WM_T_82541_2,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1085 	  "Intel i82547EI 1000BASE-T Ethernet",
   1086 	  WM_T_82547,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1089 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1090 	  WM_T_82547,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1093 	  "Intel i82547GI 1000BASE-T Ethernet",
   1094 	  WM_T_82547_2,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1097 	  "Intel PRO/1000 PT (82571EB)",
   1098 	  WM_T_82571,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1101 	  "Intel PRO/1000 PF (82571EB)",
   1102 	  WM_T_82571,		WMP_F_FIBER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1105 	  "Intel PRO/1000 PB (82571EB)",
   1106 	  WM_T_82571,		WMP_F_SERDES },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1109 	  "Intel PRO/1000 QT (82571EB)",
   1110 	  WM_T_82571,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1113 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1114 	  WM_T_82571,		WMP_F_COPPER, },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1117 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1118 	  WM_T_82571,		WMP_F_COPPER, },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1121 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1122 	  WM_T_82571,		WMP_F_SERDES, },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1125 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1126 	  WM_T_82571,		WMP_F_SERDES, },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1129 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1130 	  WM_T_82571,		WMP_F_FIBER, },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1133 	  "Intel i82572EI 1000baseT Ethernet",
   1134 	  WM_T_82572,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1137 	  "Intel i82572EI 1000baseX Ethernet",
   1138 	  WM_T_82572,		WMP_F_FIBER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1141 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1142 	  WM_T_82572,		WMP_F_SERDES },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1145 	  "Intel i82572EI 1000baseT Ethernet",
   1146 	  WM_T_82572,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1149 	  "Intel i82573E",
   1150 	  WM_T_82573,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1153 	  "Intel i82573E IAMT",
   1154 	  WM_T_82573,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1157 	  "Intel i82573L Gigabit Ethernet",
   1158 	  WM_T_82573,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1161 	  "Intel i82574L",
   1162 	  WM_T_82574,		WMP_F_COPPER },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1165 	  "Intel i82574L",
   1166 	  WM_T_82574,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1169 	  "Intel i82583V",
   1170 	  WM_T_82583,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1173 	  "i80003 dual 1000baseT Ethernet",
   1174 	  WM_T_80003,		WMP_F_COPPER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1177 	  "i80003 dual 1000baseX Ethernet",
   1178 	  WM_T_80003,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1181 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1182 	  WM_T_80003,		WMP_F_SERDES },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1185 	  "Intel i80003 1000baseT Ethernet",
   1186 	  WM_T_80003,		WMP_F_COPPER },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1189 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1190 	  WM_T_80003,		WMP_F_SERDES },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1193 	  "Intel i82801H (M_AMT) LAN Controller",
   1194 	  WM_T_ICH8,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1196 	  "Intel i82801H (AMT) LAN Controller",
   1197 	  WM_T_ICH8,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1199 	  "Intel i82801H LAN Controller",
   1200 	  WM_T_ICH8,		WMP_F_COPPER },
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1202 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1203 	  WM_T_ICH8,		WMP_F_COPPER },
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1205 	  "Intel i82801H (M) LAN Controller",
   1206 	  WM_T_ICH8,		WMP_F_COPPER },
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1208 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1209 	  WM_T_ICH8,		WMP_F_COPPER },
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1211 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1212 	  WM_T_ICH8,		WMP_F_COPPER },
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1214 	  "82567V-3 LAN Controller",
   1215 	  WM_T_ICH8,		WMP_F_COPPER },
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1217 	  "82801I (AMT) LAN Controller",
   1218 	  WM_T_ICH9,		WMP_F_COPPER },
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1220 	  "82801I 10/100 LAN Controller",
   1221 	  WM_T_ICH9,		WMP_F_COPPER },
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1223 	  "82801I (G) 10/100 LAN Controller",
   1224 	  WM_T_ICH9,		WMP_F_COPPER },
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1226 	  "82801I (GT) 10/100 LAN Controller",
   1227 	  WM_T_ICH9,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1229 	  "82801I (C) LAN Controller",
   1230 	  WM_T_ICH9,		WMP_F_COPPER },
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1232 	  "82801I mobile LAN Controller",
   1233 	  WM_T_ICH9,		WMP_F_COPPER },
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1235 	  "82801I mobile (V) LAN Controller",
   1236 	  WM_T_ICH9,		WMP_F_COPPER },
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1238 	  "82801I mobile (AMT) LAN Controller",
   1239 	  WM_T_ICH9,		WMP_F_COPPER },
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1241 	  "82567LM-4 LAN Controller",
   1242 	  WM_T_ICH9,		WMP_F_COPPER },
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1244 	  "82567LM-2 LAN Controller",
   1245 	  WM_T_ICH10,		WMP_F_COPPER },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1247 	  "82567LF-2 LAN Controller",
   1248 	  WM_T_ICH10,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1250 	  "82567LM-3 LAN Controller",
   1251 	  WM_T_ICH10,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1253 	  "82567LF-3 LAN Controller",
   1254 	  WM_T_ICH10,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1256 	  "82567V-2 LAN Controller",
   1257 	  WM_T_ICH10,		WMP_F_COPPER },
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1259 	  "82567V-3? LAN Controller",
   1260 	  WM_T_ICH10,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1262 	  "HANKSVILLE LAN Controller",
   1263 	  WM_T_ICH10,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1265 	  "PCH LAN (82577LM) Controller",
   1266 	  WM_T_PCH,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1268 	  "PCH LAN (82577LC) Controller",
   1269 	  WM_T_PCH,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1271 	  "PCH LAN (82578DM) Controller",
   1272 	  WM_T_PCH,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1274 	  "PCH LAN (82578DC) Controller",
   1275 	  WM_T_PCH,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1277 	  "PCH2 LAN (82579LM) Controller",
   1278 	  WM_T_PCH2,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1280 	  "PCH2 LAN (82579V) Controller",
   1281 	  WM_T_PCH2,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1283 	  "82575EB dual-1000baseT Ethernet",
   1284 	  WM_T_82575,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1286 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1287 	  WM_T_82575,		WMP_F_SERDES },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1289 	  "82575GB quad-1000baseT Ethernet",
   1290 	  WM_T_82575,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1292 	  "82575GB quad-1000baseT Ethernet (PM)",
   1293 	  WM_T_82575,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1295 	  "82576 1000BaseT Ethernet",
   1296 	  WM_T_82576,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1298 	  "82576 1000BaseX Ethernet",
   1299 	  WM_T_82576,		WMP_F_FIBER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1302 	  "82576 gigabit Ethernet (SERDES)",
   1303 	  WM_T_82576,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1306 	  "82576 quad-1000BaseT Ethernet",
   1307 	  WM_T_82576,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1310 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1311 	  WM_T_82576,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1314 	  "82576 gigabit Ethernet",
   1315 	  WM_T_82576,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1318 	  "82576 gigabit Ethernet (SERDES)",
   1319 	  WM_T_82576,		WMP_F_SERDES },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1321 	  "82576 quad-gigabit Ethernet (SERDES)",
   1322 	  WM_T_82576,		WMP_F_SERDES },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1325 	  "82580 1000BaseT Ethernet",
   1326 	  WM_T_82580,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1328 	  "82580 1000BaseX Ethernet",
   1329 	  WM_T_82580,		WMP_F_FIBER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1332 	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
   1487 /*
   1488  * Register read/write functions.
   1489  * Other than CSR_{READ|WRITE}().
   1490  */
   1491 
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
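
/*
 * An illustrative sketch, not compiled: the I/O BAR is a two-register
 * indirect window -- offset 0 latches the target register address and
 * offset 4 carries the data -- so each indirect access is a pair of
 * 4-byte bus_space operations.  Reading the device control register
 * through I/O space would look like this:
 */
#if 0
	uint32_t ctrl = wm_io_read(sc, WMREG_CTRL);	/* latch, then read */
#endif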
   1509 
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
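
/*
 * A worked example of the encoding above (values invented): writing data
 * 0xab to 8-bit register offset 0x02 packs both into one CSR write as
 * (0xab & SCTL_CTL_DATA_MASK) | (0x02 << SCTL_CTL_ADDR_SHIFT), after
 * which the READY bit is polled in 5us steps, for at most
 * SCTL_CTL_POLL_TIMEOUT iterations.
 */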
   1532 
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
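
/*
 * For example (address invented): on a 64-bit configuration a bus
 * address of 0x0000000123456000 is stored as wa_low = 0x23456000 and
 * wa_high = 0x00000001, both in little-endian byte order; a 32-bit
 * configuration always stores 0 in the high word.
 */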
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
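
/*
 * An example of the wrap-around case above: in a 256-descriptor ring,
 * start == 250 with num == 10 syncs descriptors 250..255 first and then
 * descriptors 0..3 with the second bus_dmamap_sync() call.
 */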
   1564 
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
   1584 	 * to a 4-byte boundary.
    1585 	 *
   1586 	 * XXX BRAINDAMAGE ALERT!
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
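
/*
 * Why the 2-byte scoot above helps: an Ethernet header is 14 bytes, so
 * offsetting the buffer by 2 makes the payload (e.g. an IPv4 header)
 * begin at byte 16, a 4-byte boundary.
 */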
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
    1630 /* Look up the device in our table of supported devices */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	char buf[256];
   1678 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1679 	pcireg_t preg, memtype;
   1680 	uint16_t eeprom_data, apme_mask;
   1681 	bool force_clear_smbi;
   1682 	uint32_t link_mode;
   1683 	uint32_t reg;
   1684 
   1685 	sc->sc_dev = self;
   1686 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1687 	sc->sc_core_stopping = false;
   1688 
   1689 	wmp = wm_lookup(pa);
   1690 #ifdef DIAGNOSTIC
   1691 	if (wmp == NULL) {
   1692 		printf("\n");
   1693 		panic("wm_attach: impossible");
   1694 	}
   1695 #endif
   1696 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1697 
   1698 	sc->sc_pc = pa->pa_pc;
   1699 	sc->sc_pcitag = pa->pa_tag;
   1700 
   1701 	if (pci_dma64_available(pa))
   1702 		sc->sc_dmat = pa->pa_dmat64;
   1703 	else
   1704 		sc->sc_dmat = pa->pa_dmat;
   1705 
   1706 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1707 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1708 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1709 
   1710 	sc->sc_type = wmp->wmp_type;
   1711 
   1712 	/* Set default function pointers */
   1713 	sc->phy.acquire = wm_get_null;
   1714 	sc->phy.release = wm_put_null;
   1715 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1716 
   1717 	if (sc->sc_type < WM_T_82543) {
   1718 		if (sc->sc_rev < 2) {
   1719 			aprint_error_dev(sc->sc_dev,
   1720 			    "i82542 must be at least rev. 2\n");
   1721 			return;
   1722 		}
   1723 		if (sc->sc_rev < 3)
   1724 			sc->sc_type = WM_T_82542_2_0;
   1725 	}
   1726 
   1727 	/*
   1728 	 * Disable MSI for Errata:
   1729 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1730 	 *
   1731 	 *  82544: Errata 25
   1732 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1733 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1734 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1735 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1736 	 *
   1737 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1738 	 *
   1739 	 *  82571 & 82572: Errata 63
   1740 	 */
   1741 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1742 	    || (sc->sc_type == WM_T_82572))
   1743 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1744 
   1745 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1746 	    || (sc->sc_type == WM_T_82580)
   1747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1748 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1749 		sc->sc_flags |= WM_F_NEWQUEUE;
   1750 
   1751 	/* Set device properties (mactype) */
   1752 	dict = device_properties(sc->sc_dev);
   1753 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1754 
   1755 	/*
    1756 	 * Map the device.  All devices support memory-mapped access,
   1757 	 * and it is really required for normal operation.
   1758 	 */
   1759 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1760 	switch (memtype) {
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1763 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1764 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1765 		break;
   1766 	default:
   1767 		memh_valid = 0;
   1768 		break;
   1769 	}
   1770 
   1771 	if (memh_valid) {
   1772 		sc->sc_st = memt;
   1773 		sc->sc_sh = memh;
   1774 		sc->sc_ss = memsize;
   1775 	} else {
   1776 		aprint_error_dev(sc->sc_dev,
   1777 		    "unable to map device registers\n");
   1778 		return;
   1779 	}
   1780 
   1781 	/*
   1782 	 * In addition, i82544 and later support I/O mapped indirect
   1783 	 * register access.  It is not desirable (nor supported in
   1784 	 * this driver) to use it for normal operation, though it is
   1785 	 * required to work around bugs in some chip versions.
   1786 	 */
   1787 	if (sc->sc_type >= WM_T_82544) {
   1788 		/* First we have to find the I/O BAR. */
   1789 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1790 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1791 			if (memtype == PCI_MAPREG_TYPE_IO)
   1792 				break;
   1793 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1794 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1795 				i += 4;	/* skip high bits, too */
   1796 		}
   1797 		if (i < PCI_MAPREG_END) {
   1798 			/*
    1799 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1800 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1801 			 * that's not a problem, because those newer chips
    1802 			 * don't have this bug in the first place.
    1803 			 *
    1804 			 * The i8254x apparently doesn't respond when the
    1805 			 * I/O BAR is 0, which looks somewhat like it has
    1806 			 * not been configured.
   1807 			 */
   1808 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1809 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1810 				aprint_error_dev(sc->sc_dev,
   1811 				    "WARNING: I/O BAR at zero.\n");
   1812 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1813 					0, &sc->sc_iot, &sc->sc_ioh,
   1814 					NULL, &sc->sc_ios) == 0) {
   1815 				sc->sc_flags |= WM_F_IOH_VALID;
   1816 			} else {
   1817 				aprint_error_dev(sc->sc_dev,
   1818 				    "WARNING: unable to map I/O space\n");
   1819 			}
   1820 		}
   1821 
   1822 	}
   1823 
   1824 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1825 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1826 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1827 	if (sc->sc_type < WM_T_82542_2_1)
   1828 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1829 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1830 
   1831 	/* power up chip */
   1832 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1833 	    NULL)) && error != EOPNOTSUPP) {
   1834 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1835 		return;
   1836 	}
   1837 
   1838 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1839 
   1840 	/* Allocation settings */
   1841 	max_type = PCI_INTR_TYPE_MSIX;
   1842 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1843 	counts[PCI_INTR_TYPE_MSI] = 1;
   1844 	counts[PCI_INTR_TYPE_INTX] = 1;
   1845 	/* overridden by disable flags */
   1846 	if (wm_disable_msi != 0) {
   1847 		counts[PCI_INTR_TYPE_MSI] = 0;
   1848 		if (wm_disable_msix != 0) {
   1849 			max_type = PCI_INTR_TYPE_INTX;
   1850 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1851 		}
   1852 	} else if (wm_disable_msix != 0) {
   1853 		max_type = PCI_INTR_TYPE_MSI;
   1854 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1855 	}
   1856 
   1857 alloc_retry:
   1858 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1859 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1860 		return;
   1861 	}
   1862 
   1863 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1864 		error = wm_setup_msix(sc);
   1865 		if (error) {
   1866 			pci_intr_release(pc, sc->sc_intrs,
   1867 			    counts[PCI_INTR_TYPE_MSIX]);
   1868 
   1869 			/* Setup for MSI: Disable MSI-X */
   1870 			max_type = PCI_INTR_TYPE_MSI;
   1871 			counts[PCI_INTR_TYPE_MSI] = 1;
   1872 			counts[PCI_INTR_TYPE_INTX] = 1;
   1873 			goto alloc_retry;
   1874 		}
    1875 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1876 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1877 		error = wm_setup_legacy(sc);
   1878 		if (error) {
   1879 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1880 			    counts[PCI_INTR_TYPE_MSI]);
   1881 
   1882 			/* The next try is for INTx: Disable MSI */
   1883 			max_type = PCI_INTR_TYPE_INTX;
   1884 			counts[PCI_INTR_TYPE_INTX] = 1;
   1885 			goto alloc_retry;
   1886 		}
   1887 	} else {
   1888 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1889 		error = wm_setup_legacy(sc);
   1890 		if (error) {
   1891 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1892 			    counts[PCI_INTR_TYPE_INTX]);
   1893 			return;
   1894 		}
   1895 	}
   1896 
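	/*
	 * At this point the fallback ladder above has settled on one of
	 * MSI-X (sc_nqueues + 1 vectors: one per queue plus one for link
	 * status), MSI (one vector), or INTx (one shared line), in that
	 * order of preference.
	 */
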
   1897 	/*
   1898 	 * Check the function ID (unit number of the chip).
   1899 	 */
   1900 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1901 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1902 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1903 	    || (sc->sc_type == WM_T_82580)
   1904 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1905 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1906 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1907 	else
   1908 		sc->sc_funcid = 0;
   1909 
   1910 	/*
   1911 	 * Determine a few things about the bus we're connected to.
   1912 	 */
   1913 	if (sc->sc_type < WM_T_82543) {
   1914 		/* We don't really know the bus characteristics here. */
   1915 		sc->sc_bus_speed = 33;
   1916 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1917 		/*
    1918 		 * CSA (Communication Streaming Architecture) is about as
    1919 		 * fast as a 32-bit 66MHz PCI bus.
   1920 		 */
   1921 		sc->sc_flags |= WM_F_CSA;
   1922 		sc->sc_bus_speed = 66;
   1923 		aprint_verbose_dev(sc->sc_dev,
   1924 		    "Communication Streaming Architecture\n");
   1925 		if (sc->sc_type == WM_T_82547) {
   1926 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1927 			callout_setfunc(&sc->sc_txfifo_ch,
   1928 					wm_82547_txfifo_stall, sc);
   1929 			aprint_verbose_dev(sc->sc_dev,
   1930 			    "using 82547 Tx FIFO stall work-around\n");
   1931 		}
   1932 	} else if (sc->sc_type >= WM_T_82571) {
   1933 		sc->sc_flags |= WM_F_PCIE;
   1934 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1935 		    && (sc->sc_type != WM_T_ICH10)
   1936 		    && (sc->sc_type != WM_T_PCH)
   1937 		    && (sc->sc_type != WM_T_PCH2)
   1938 		    && (sc->sc_type != WM_T_PCH_LPT)
   1939 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1940 			/* ICH* and PCH* have no PCIe capability registers */
   1941 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1942 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1943 				NULL) == 0)
   1944 				aprint_error_dev(sc->sc_dev,
   1945 				    "unable to find PCIe capability\n");
   1946 		}
   1947 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1948 	} else {
   1949 		reg = CSR_READ(sc, WMREG_STATUS);
   1950 		if (reg & STATUS_BUS64)
   1951 			sc->sc_flags |= WM_F_BUS64;
   1952 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1953 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1954 
   1955 			sc->sc_flags |= WM_F_PCIX;
   1956 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1957 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIX capability\n");
   1960 			else if (sc->sc_type != WM_T_82545_3 &&
   1961 				 sc->sc_type != WM_T_82546_3) {
   1962 				/*
   1963 				 * Work around a problem caused by the BIOS
   1964 				 * setting the max memory read byte count
   1965 				 * incorrectly.
   1966 				 */
   1967 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1968 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1969 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1970 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1971 
   1972 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1973 				    PCIX_CMD_BYTECNT_SHIFT;
   1974 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1975 				    PCIX_STATUS_MAXB_SHIFT;
   1976 				if (bytecnt > maxb) {
   1977 					aprint_verbose_dev(sc->sc_dev,
   1978 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1979 					    512 << bytecnt, 512 << maxb);
   1980 					pcix_cmd = (pcix_cmd &
   1981 					    ~PCIX_CMD_BYTECNT_MASK) |
   1982 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1983 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1984 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1985 					    pcix_cmd);
   1986 				}
   1987 			}
   1988 		}
   1989 		/*
   1990 		 * The quad port adapter is special; it has a PCIX-PCIX
   1991 		 * bridge on the board, and can run the secondary bus at
   1992 		 * a higher speed.
   1993 		 */
   1994 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1995 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1996 								      : 66;
   1997 		} else if (sc->sc_flags & WM_F_PCIX) {
   1998 			switch (reg & STATUS_PCIXSPD_MASK) {
   1999 			case STATUS_PCIXSPD_50_66:
   2000 				sc->sc_bus_speed = 66;
   2001 				break;
   2002 			case STATUS_PCIXSPD_66_100:
   2003 				sc->sc_bus_speed = 100;
   2004 				break;
   2005 			case STATUS_PCIXSPD_100_133:
   2006 				sc->sc_bus_speed = 133;
   2007 				break;
   2008 			default:
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2011 				    reg & STATUS_PCIXSPD_MASK);
   2012 				sc->sc_bus_speed = 66;
   2013 				break;
   2014 			}
   2015 		} else
   2016 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2017 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2018 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2019 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2020 	}
   2021 
   2022 	/* clear interesting stat counters */
   2023 	CSR_READ(sc, WMREG_COLC);
   2024 	CSR_READ(sc, WMREG_RXERRC);
   2025 
   2026 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2027 	    || (sc->sc_type >= WM_T_ICH8))
   2028 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2029 	if (sc->sc_type >= WM_T_ICH8)
   2030 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2031 
   2032 	/* Set PHY, NVM mutex related stuff */
   2033 	switch (sc->sc_type) {
   2034 	case WM_T_82542_2_0:
   2035 	case WM_T_82542_2_1:
   2036 	case WM_T_82543:
   2037 	case WM_T_82544:
   2038 		/* Microwire */
   2039 		sc->sc_nvm_wordsize = 64;
   2040 		sc->sc_nvm_addrbits = 6;
   2041 		break;
   2042 	case WM_T_82540:
   2043 	case WM_T_82545:
   2044 	case WM_T_82545_3:
   2045 	case WM_T_82546:
   2046 	case WM_T_82546_3:
   2047 		/* Microwire */
   2048 		reg = CSR_READ(sc, WMREG_EECD);
   2049 		if (reg & EECD_EE_SIZE) {
   2050 			sc->sc_nvm_wordsize = 256;
   2051 			sc->sc_nvm_addrbits = 8;
   2052 		} else {
   2053 			sc->sc_nvm_wordsize = 64;
   2054 			sc->sc_nvm_addrbits = 6;
   2055 		}
   2056 		sc->sc_flags |= WM_F_LOCK_EECD;
   2057 		break;
   2058 	case WM_T_82541:
   2059 	case WM_T_82541_2:
   2060 	case WM_T_82547:
   2061 	case WM_T_82547_2:
   2062 		sc->sc_flags |= WM_F_LOCK_EECD;
   2063 		reg = CSR_READ(sc, WMREG_EECD);
   2064 		if (reg & EECD_EE_TYPE) {
   2065 			/* SPI */
   2066 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2067 			wm_nvm_set_addrbits_size_eecd(sc);
   2068 		} else {
   2069 			/* Microwire */
   2070 			if ((reg & EECD_EE_ABITS) != 0) {
   2071 				sc->sc_nvm_wordsize = 256;
   2072 				sc->sc_nvm_addrbits = 8;
   2073 			} else {
   2074 				sc->sc_nvm_wordsize = 64;
   2075 				sc->sc_nvm_addrbits = 6;
   2076 			}
   2077 		}
   2078 		break;
   2079 	case WM_T_82571:
   2080 	case WM_T_82572:
   2081 		/* SPI */
   2082 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2083 		wm_nvm_set_addrbits_size_eecd(sc);
   2084 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2085 		sc->phy.acquire = wm_get_swsm_semaphore;
   2086 		sc->phy.release = wm_put_swsm_semaphore;
   2087 		break;
   2088 	case WM_T_82573:
   2089 	case WM_T_82574:
   2090 	case WM_T_82583:
   2091 		if (sc->sc_type == WM_T_82573) {
   2092 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2093 			sc->phy.acquire = wm_get_swsm_semaphore;
   2094 			sc->phy.release = wm_put_swsm_semaphore;
   2095 		} else {
   2096 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2097 			/* Both PHY and NVM use the same semaphore. */
    2098 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2099 			sc->phy.release = wm_put_swfwhw_semaphore;
   2102 		}
   2103 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2104 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2105 			sc->sc_nvm_wordsize = 2048;
   2106 		} else {
   2107 			/* SPI */
   2108 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2109 			wm_nvm_set_addrbits_size_eecd(sc);
   2110 		}
   2111 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2112 		break;
   2113 	case WM_T_82575:
   2114 	case WM_T_82576:
   2115 	case WM_T_82580:
   2116 	case WM_T_I350:
   2117 	case WM_T_I354:
   2118 	case WM_T_80003:
   2119 		/* SPI */
   2120 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2121 		wm_nvm_set_addrbits_size_eecd(sc);
   2122 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2123 		    | WM_F_LOCK_SWSM;
   2124 		sc->phy.acquire = wm_get_phy_82575;
   2125 		sc->phy.release = wm_put_phy_82575;
   2126 		break;
   2127 	case WM_T_ICH8:
   2128 	case WM_T_ICH9:
   2129 	case WM_T_ICH10:
   2130 	case WM_T_PCH:
   2131 	case WM_T_PCH2:
   2132 	case WM_T_PCH_LPT:
   2133 		/* FLASH */
   2134 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2135 		sc->sc_nvm_wordsize = 2048;
   2136 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2137 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2138 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2139 			aprint_error_dev(sc->sc_dev,
   2140 			    "can't map FLASH registers\n");
   2141 			goto out;
   2142 		}
   2143 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2144 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2145 		    ICH_FLASH_SECTOR_SIZE;
   2146 		sc->sc_ich8_flash_bank_size =
   2147 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2148 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2149 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2150 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
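		/*
		 * A worked example of the GFPREG decoding above (register
		 * value invented, assuming a 4KB ICH_FLASH_SECTOR_SIZE): a
		 * base field of 0x01 and a limit field of 0x02 describe a
		 * two-sector, 8192-byte region; 8192 / (2 banks * 2 bytes
		 * per word) gives 2048 NVM words per bank.
		 */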
   2151 		sc->sc_flashreg_offset = 0;
   2152 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2153 		sc->phy.release = wm_put_swflag_ich8lan;
   2154 		break;
   2155 	case WM_T_PCH_SPT:
   2156 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2157 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2158 		sc->sc_flasht = sc->sc_st;
   2159 		sc->sc_flashh = sc->sc_sh;
   2160 		sc->sc_ich8_flash_base = 0;
   2161 		sc->sc_nvm_wordsize =
   2162 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2163 			* NVM_SIZE_MULTIPLIER;
    2164 		/* It is the size in bytes; we want words */
   2165 		sc->sc_nvm_wordsize /= 2;
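		/*
		 * A worked example (strap value invented, assuming a 4KB
		 * NVM_SIZE_MULTIPLIER): a 5-bit strap field of 7 gives
		 * (7 + 1) * 4096 = 32768 bytes, i.e. 16384 NVM words.
		 */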
   2166 		/* assume 2 banks */
   2167 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2168 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2169 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2170 		sc->phy.release = wm_put_swflag_ich8lan;
   2171 		break;
   2172 	case WM_T_I210:
   2173 	case WM_T_I211:
   2174 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2177 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2178 		} else {
   2179 			sc->sc_nvm_wordsize = INVM_SIZE;
   2180 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2181 		}
   2182 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2183 		sc->phy.acquire = wm_get_phy_82575;
   2184 		sc->phy.release = wm_put_phy_82575;
   2185 		break;
   2186 	default:
   2187 		break;
   2188 	}
   2189 
   2190 	/* Reset the chip to a known state. */
   2191 	wm_reset(sc);
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
    2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
   2231 		/*
    2232 		 * Try the read again, because some PCI-e parts fail the
    2233 		 * first check due to the link being in a sleep state.
   2234 		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	/* Set device properties (macflags) */
   2240 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2241 
   2242 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2243 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2244 	else {
   2245 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2246 		    sc->sc_nvm_wordsize);
   2247 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2248 			aprint_verbose("iNVM");
   2249 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2250 			aprint_verbose("FLASH(HW)");
   2251 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2252 			aprint_verbose("FLASH");
   2253 		else {
   2254 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2255 				eetype = "SPI";
   2256 			else
   2257 				eetype = "MicroWire";
   2258 			aprint_verbose("(%d address bits) %s EEPROM",
   2259 			    sc->sc_nvm_addrbits, eetype);
   2260 		}
   2261 	}
   2262 	wm_nvm_version(sc);
   2263 	aprint_verbose("\n");
   2264 
   2265 	/* Check for I21[01] PLL workaround */
   2266 	if (sc->sc_type == WM_T_I210)
   2267 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2268 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
    2269 		/* NVM image release 3.25 and later include a workaround */
   2270 		if ((sc->sc_nvm_ver_major < 3)
   2271 		    || ((sc->sc_nvm_ver_major == 3)
   2272 			&& (sc->sc_nvm_ver_minor < 25))) {
   2273 			aprint_verbose_dev(sc->sc_dev,
   2274 			    "ROM image version %d.%d is older than 3.25\n",
   2275 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2276 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2277 		}
   2278 	}
   2279 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2280 		wm_pll_workaround_i210(sc);
   2281 
   2282 	wm_get_wakeup(sc);
   2283 
   2284 	/* Non-AMT based hardware can now take control from firmware */
   2285 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2286 		wm_get_hw_control(sc);
   2287 
   2288 	/*
    2289 	 * Read the Ethernet address from the EEPROM, if it was not found
    2290 	 * first in the device properties.
   2291 	 */
   2292 	ea = prop_dictionary_get(dict, "mac-address");
   2293 	if (ea != NULL) {
   2294 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2295 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2296 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2297 	} else {
   2298 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2299 			aprint_error_dev(sc->sc_dev,
   2300 			    "unable to read Ethernet address\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2306 	    ether_sprintf(enaddr));
   2307 
   2308 	/*
   2309 	 * Read the config info from the EEPROM, and set up various
   2310 	 * bits in the control registers based on their contents.
   2311 	 */
   2312 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2313 	if (pn != NULL) {
   2314 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2315 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2316 	} else {
   2317 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2318 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2319 			goto out;
   2320 		}
   2321 	}
   2322 
   2323 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2324 	if (pn != NULL) {
   2325 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2326 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2327 	} else {
   2328 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2329 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2330 			goto out;
   2331 		}
   2332 	}
   2333 
   2334 	/* check for WM_F_WOL */
   2335 	switch (sc->sc_type) {
   2336 	case WM_T_82542_2_0:
   2337 	case WM_T_82542_2_1:
   2338 	case WM_T_82543:
   2339 		/* dummy? */
   2340 		eeprom_data = 0;
   2341 		apme_mask = NVM_CFG3_APME;
   2342 		break;
   2343 	case WM_T_82544:
   2344 		apme_mask = NVM_CFG2_82544_APM_EN;
   2345 		eeprom_data = cfg2;
   2346 		break;
   2347 	case WM_T_82546:
   2348 	case WM_T_82546_3:
   2349 	case WM_T_82571:
   2350 	case WM_T_82572:
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 	case WM_T_80003:
   2355 	default:
   2356 		apme_mask = NVM_CFG3_APME;
   2357 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2358 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2359 		break;
   2360 	case WM_T_82575:
   2361 	case WM_T_82576:
   2362 	case WM_T_82580:
   2363 	case WM_T_I350:
   2364 	case WM_T_I354: /* XXX ok? */
   2365 	case WM_T_ICH8:
   2366 	case WM_T_ICH9:
   2367 	case WM_T_ICH10:
   2368 	case WM_T_PCH:
   2369 	case WM_T_PCH2:
   2370 	case WM_T_PCH_LPT:
   2371 	case WM_T_PCH_SPT:
   2372 		/* XXX The funcid should be checked on some devices */
   2373 		apme_mask = WUC_APME;
   2374 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2375 		break;
   2376 	}
   2377 
   2378 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2379 	if ((eeprom_data & apme_mask) != 0)
   2380 		sc->sc_flags |= WM_F_WOL;
   2381 
   2382 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2383 		/* Check NVM for autonegotiation */
   2384 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2385 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2386 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2387 		}
   2388 	}
   2389 
   2390 	/*
    2391 	 * XXX need special handling for some multiple-port cards
    2392 	 * to disable a particular port.
   2393 	 */
   2394 
   2395 	if (sc->sc_type >= WM_T_82544) {
   2396 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2397 		if (pn != NULL) {
   2398 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2399 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2400 		} else {
   2401 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2402 				aprint_error_dev(sc->sc_dev,
   2403 				    "unable to read SWDPIN\n");
   2404 				goto out;
   2405 			}
   2406 		}
   2407 	}
   2408 
   2409 	if (cfg1 & NVM_CFG1_ILOS)
   2410 		sc->sc_ctrl |= CTRL_ILOS;
   2411 
   2412 	/*
   2413 	 * XXX
    2414 	 * This code isn't correct because pins 2 and 3 are located
    2415 	 * in different positions on newer chips.  Check all the datasheets.
    2416 	 *
    2417 	 * Until this problem is resolved, restrict this to chips up to
    2418 	 * the 82580.
   2418 	 */
   2419 	if (sc->sc_type <= WM_T_82580) {
   2420 		if (sc->sc_type >= WM_T_82544) {
   2421 			sc->sc_ctrl |=
   2422 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2423 			    CTRL_SWDPIO_SHIFT;
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPINS_SHIFT;
   2427 		} else {
   2428 			sc->sc_ctrl |=
   2429 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2430 			    CTRL_SWDPIO_SHIFT;
   2431 		}
   2432 	}
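
	/*
	 * The NVM SWDPIN word above carries two 4-bit fields -- the
	 * software-definable pin values (SWDPIN) and their directions
	 * (SWDPIO) -- each masked out and shifted to its position in CTRL.
	 */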
   2433 
   2434 	/* XXX For other than 82580? */
   2435 	if (sc->sc_type == WM_T_82580) {
   2436 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2437 		if (nvmword & __BIT(13))
   2438 			sc->sc_ctrl |= CTRL_ILOS;
   2439 	}
   2440 
   2441 #if 0
   2442 	if (sc->sc_type >= WM_T_82544) {
   2443 		if (cfg1 & NVM_CFG1_IPS0)
   2444 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2445 		if (cfg1 & NVM_CFG1_IPS1)
   2446 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2447 		sc->sc_ctrl_ext |=
   2448 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2449 		    CTRL_EXT_SWDPIO_SHIFT;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPINS_SHIFT;
   2453 	} else {
   2454 		sc->sc_ctrl_ext |=
   2455 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2456 		    CTRL_EXT_SWDPIO_SHIFT;
   2457 	}
   2458 #endif
   2459 
   2460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2461 #if 0
   2462 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2463 #endif
   2464 
   2465 	if (sc->sc_type == WM_T_PCH) {
   2466 		uint16_t val;
   2467 
   2468 		/* Save the NVM K1 bit setting */
   2469 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2470 
   2471 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2472 			sc->sc_nvm_k1_enabled = 1;
   2473 		else
   2474 			sc->sc_nvm_k1_enabled = 0;
   2475 	}
   2476 
   2477 	/*
    2478 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2479 	 * media structures accordingly.
   2480 	 */
   2481 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2482 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2483 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2484 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2485 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2486 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2487 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2488 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2489 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2490 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2491 	    || (sc->sc_type == WM_T_I211)) {
   2492 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2493 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2494 		switch (link_mode) {
   2495 		case CTRL_EXT_LINK_MODE_1000KX:
   2496 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2497 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2498 			break;
   2499 		case CTRL_EXT_LINK_MODE_SGMII:
   2500 			if (wm_sgmii_uses_mdio(sc)) {
   2501 				aprint_verbose_dev(sc->sc_dev,
   2502 				    "SGMII(MDIO)\n");
   2503 				sc->sc_flags |= WM_F_SGMII;
   2504 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2505 				break;
   2506 			}
   2507 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2508 			/*FALLTHROUGH*/
   2509 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2510 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2511 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2512 				if (link_mode
   2513 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2514 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2515 					sc->sc_flags |= WM_F_SGMII;
   2516 				} else {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2518 					aprint_verbose_dev(sc->sc_dev,
   2519 					    "SERDES\n");
   2520 				}
   2521 				break;
   2522 			}
   2523 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2524 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2525 
   2526 			/* Change current link mode setting */
   2527 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2528 			switch (sc->sc_mediatype) {
   2529 			case WM_MEDIATYPE_COPPER:
   2530 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2531 				break;
   2532 			case WM_MEDIATYPE_SERDES:
   2533 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2534 				break;
   2535 			default:
   2536 				break;
   2537 			}
   2538 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2539 			break;
   2540 		case CTRL_EXT_LINK_MODE_GMII:
   2541 		default:
   2542 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2543 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2544 			break;
   2545 		}
   2546 
    2548 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2549 			reg |= CTRL_EXT_I2C_ENA;
    2550 		else
    2551 			reg &= ~CTRL_EXT_I2C_ENA;
   2552 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2553 
   2554 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2555 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2556 		else
   2557 			wm_tbi_mediainit(sc);
   2558 	} else if (sc->sc_type < WM_T_82543 ||
   2559 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2560 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2561 			aprint_error_dev(sc->sc_dev,
   2562 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2563 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2564 		}
   2565 		wm_tbi_mediainit(sc);
   2566 	} else {
   2567 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2568 			aprint_error_dev(sc->sc_dev,
   2569 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2570 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2571 		}
   2572 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2573 	}
   2574 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2575 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2576 
   2577 	ifp = &sc->sc_ethercom.ec_if;
   2578 	xname = device_xname(sc->sc_dev);
   2579 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2580 	ifp->if_softc = sc;
   2581 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2582 #ifdef WM_MPSAFE
   2583 	ifp->if_extflags = IFEF_START_MPSAFE;
   2584 #endif
   2585 	ifp->if_ioctl = wm_ioctl;
   2586 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2587 		ifp->if_start = wm_nq_start;
   2588 		/*
   2589 		 * When the number of CPUs is one and the controller can use
    2590 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2591 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2592 		 * the other for link status changes.
   2593 		 * In this situation, wm_nq_transmit() is disadvantageous
   2594 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2595 		 */
   2596 		if (wm_is_using_multiqueue(sc))
   2597 			ifp->if_transmit = wm_nq_transmit;
   2598 	} else {
   2599 		ifp->if_start = wm_start;
   2600 		/*
    2601 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2602 		 */
   2603 		if (wm_is_using_multiqueue(sc))
   2604 			ifp->if_transmit = wm_transmit;
   2605 	}
   2606 	ifp->if_watchdog = wm_watchdog;
   2607 	ifp->if_init = wm_init;
   2608 	ifp->if_stop = wm_stop;
   2609 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2610 	IFQ_SET_READY(&ifp->if_snd);
   2611 
   2612 	/* Check for jumbo frame */
   2613 	switch (sc->sc_type) {
   2614 	case WM_T_82573:
   2615 		/* XXX limited to 9234 if ASPM is disabled */
   2616 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2617 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2618 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2619 		break;
   2620 	case WM_T_82571:
   2621 	case WM_T_82572:
   2622 	case WM_T_82574:
   2623 	case WM_T_82575:
   2624 	case WM_T_82576:
   2625 	case WM_T_82580:
   2626 	case WM_T_I350:
    2627 	case WM_T_I354: /* XXX ok? */
   2628 	case WM_T_I210:
   2629 	case WM_T_I211:
   2630 	case WM_T_80003:
   2631 	case WM_T_ICH9:
   2632 	case WM_T_ICH10:
   2633 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2634 	case WM_T_PCH_LPT:
   2635 	case WM_T_PCH_SPT:
   2636 		/* XXX limited to 9234 */
   2637 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2638 		break;
   2639 	case WM_T_PCH:
   2640 		/* XXX limited to 4096 */
   2641 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2642 		break;
   2643 	case WM_T_82542_2_0:
   2644 	case WM_T_82542_2_1:
   2645 	case WM_T_82583:
   2646 	case WM_T_ICH8:
   2647 		/* No support for jumbo frame */
   2648 		break;
   2649 	default:
   2650 		/* ETHER_MAX_LEN_JUMBO */
   2651 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2652 		break;
   2653 	}
   2654 
    2655 	/* If we're an i82543 or greater, we can support VLANs. */
   2656 	if (sc->sc_type >= WM_T_82543)
   2657 		sc->sc_ethercom.ec_capabilities |=
   2658 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2659 
   2660 	/*
    2661 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2662 	 * on i82543 and later.
   2663 	 */
   2664 	if (sc->sc_type >= WM_T_82543) {
   2665 		ifp->if_capabilities |=
   2666 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2667 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2668 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2669 		    IFCAP_CSUM_TCPv6_Tx |
   2670 		    IFCAP_CSUM_UDPv6_Tx;
   2671 	}
   2672 
   2673 	/*
   2674 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2675 	 *
   2676 	 *	82541GI (8086:1076) ... no
   2677 	 *	82572EI (8086:10b9) ... yes
   2678 	 */
   2679 	if (sc->sc_type >= WM_T_82571) {
   2680 		ifp->if_capabilities |=
   2681 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2682 	}
   2683 
   2684 	/*
    2685 	 * If we're an i82544 or greater (except i82547), we can do
   2686 	 * TCP segmentation offload.
   2687 	 */
   2688 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2689 		ifp->if_capabilities |= IFCAP_TSOv4;
   2690 	}
   2691 
   2692 	if (sc->sc_type >= WM_T_82571) {
   2693 		ifp->if_capabilities |= IFCAP_TSOv6;
   2694 	}
   2695 
   2696 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2697 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2698 
   2699 #ifdef WM_MPSAFE
   2700 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2701 #else
   2702 	sc->sc_core_lock = NULL;
   2703 #endif
   2704 
   2705 	/* Attach the interface. */
   2706 	if_initialize(ifp);
   2707 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2708 	ether_ifattach(ifp, enaddr);
   2709 	if_register(ifp);
   2710 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2711 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2712 			  RND_FLAG_DEFAULT);
   2713 
   2714 #ifdef WM_EVENT_COUNTERS
   2715 	/* Attach event counters. */
   2716 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2717 	    NULL, xname, "linkintr");
   2718 
   2719 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2720 	    NULL, xname, "tx_xoff");
   2721 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2722 	    NULL, xname, "tx_xon");
   2723 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "rx_xoff");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "rx_xon");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_macctl");
   2729 #endif /* WM_EVENT_COUNTERS */
   2730 
   2731 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2732 		pmf_class_network_register(self, ifp);
   2733 	else
   2734 		aprint_error_dev(self, "couldn't establish power handler\n");
   2735 
   2736 	sc->sc_flags |= WM_F_ATTACHED;
   2737  out:
   2738 	return;
   2739 }
   2740 
   2741 /* The detach function (ca_detach) */
   2742 static int
   2743 wm_detach(device_t self, int flags __unused)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2747 	int i;
   2748 
   2749 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2750 		return 0;
   2751 
    2752 	/* Stop the interface.  Callouts are stopped within wm_stop(). */
   2753 	wm_stop(ifp, 1);
   2754 
   2755 	pmf_device_deregister(self);
   2756 
   2757 #ifdef WM_EVENT_COUNTERS
   2758 	evcnt_detach(&sc->sc_ev_linkintr);
   2759 
   2760 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2761 	evcnt_detach(&sc->sc_ev_tx_xon);
   2762 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2763 	evcnt_detach(&sc->sc_ev_rx_xon);
   2764 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2765 #endif /* WM_EVENT_COUNTERS */
   2766 
   2767 	/* Tell the firmware about the release */
   2768 	WM_CORE_LOCK(sc);
   2769 	wm_release_manageability(sc);
   2770 	wm_release_hw_control(sc);
   2771 	wm_enable_wakeup(sc);
   2772 	WM_CORE_UNLOCK(sc);
   2773 
   2774 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2775 
   2776 	/* Delete all remaining media. */
   2777 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2778 
   2779 	ether_ifdetach(ifp);
   2780 	if_detach(ifp);
   2781 	if_percpuq_destroy(sc->sc_ipq);
   2782 
   2783 	/* Unload RX dmamaps and free mbufs */
   2784 	for (i = 0; i < sc->sc_nqueues; i++) {
   2785 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2786 		mutex_enter(rxq->rxq_lock);
   2787 		wm_rxdrain(rxq);
   2788 		mutex_exit(rxq->rxq_lock);
   2789 	}
   2790 	/* Must unlock here */
   2791 
   2792 	/* Disestablish the interrupt handler */
   2793 	for (i = 0; i < sc->sc_nintrs; i++) {
   2794 		if (sc->sc_ihs[i] != NULL) {
   2795 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2796 			sc->sc_ihs[i] = NULL;
   2797 		}
   2798 	}
   2799 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2800 
   2801 	wm_free_txrx_queues(sc);
   2802 
   2803 	/* Unmap the registers */
   2804 	if (sc->sc_ss) {
   2805 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2806 		sc->sc_ss = 0;
   2807 	}
   2808 	if (sc->sc_ios) {
   2809 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2810 		sc->sc_ios = 0;
   2811 	}
   2812 	if (sc->sc_flashs) {
   2813 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2814 		sc->sc_flashs = 0;
   2815 	}
   2816 
   2817 	if (sc->sc_core_lock)
   2818 		mutex_obj_free(sc->sc_core_lock);
   2819 	if (sc->sc_ich_phymtx)
   2820 		mutex_obj_free(sc->sc_ich_phymtx);
   2821 	if (sc->sc_ich_nvmmtx)
   2822 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2823 
   2824 	return 0;
   2825 }
   2826 
   2827 static bool
   2828 wm_suspend(device_t self, const pmf_qual_t *qual)
   2829 {
   2830 	struct wm_softc *sc = device_private(self);
   2831 
   2832 	wm_release_manageability(sc);
   2833 	wm_release_hw_control(sc);
   2834 	wm_enable_wakeup(sc);
   2835 
   2836 	return true;
   2837 }
   2838 
   2839 static bool
   2840 wm_resume(device_t self, const pmf_qual_t *qual)
   2841 {
   2842 	struct wm_softc *sc = device_private(self);
   2843 
   2844 	wm_init_manageability(sc);
   2845 
   2846 	return true;
   2847 }
   2848 
   2849 /*
   2850  * wm_watchdog:		[ifnet interface function]
   2851  *
   2852  *	Watchdog timer handler.
   2853  */
   2854 static void
   2855 wm_watchdog(struct ifnet *ifp)
   2856 {
   2857 	int qid;
   2858 	struct wm_softc *sc = ifp->if_softc;
   2859 
   2860 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2861 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2862 
   2863 		wm_watchdog_txq(ifp, txq);
   2864 	}
   2865 
   2866 	/* Reset the interface. */
   2867 	(void) wm_init(ifp);
   2868 
   2869 	/*
    2870 	 * Some upper-layer processing (e.g. ALTQ, or single-CPU
    2871 	 * systems) still calls ifp->if_start() directly.
   2872 	 */
   2873 	/* Try to get more packets going. */
   2874 	ifp->if_start(ifp);
   2875 }
   2876 
   2877 static void
   2878 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2879 {
   2880 	struct wm_softc *sc = ifp->if_softc;
   2881 
   2882 	/*
   2883 	 * Since we're using delayed interrupts, sweep up
   2884 	 * before we report an error.
   2885 	 */
   2886 	mutex_enter(txq->txq_lock);
   2887 	wm_txeof(sc, txq);
   2888 	mutex_exit(txq->txq_lock);
   2889 
   2890 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2891 #ifdef WM_DEBUG
   2892 		int i, j;
   2893 		struct wm_txsoft *txs;
   2894 #endif
   2895 		log(LOG_ERR,
   2896 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2897 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2898 		    txq->txq_next);
   2899 		ifp->if_oerrors++;
   2900 #ifdef WM_DEBUG
   2901 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2902 		    i = WM_NEXTTXS(txq, i)) {
   2903 		    txs = &txq->txq_soft[i];
   2904 		    printf("txs %d tx %d -> %d\n",
   2905 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2906 		    for (j = txs->txs_firstdesc; ;
   2907 			j = WM_NEXTTX(txq, j)) {
   2908 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2909 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2910 			printf("\t %#08x%08x\n",
   2911 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2912 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2913 			if (j == txs->txs_lastdesc)
   2914 				break;
   2915 			}
   2916 		}
   2917 #endif
   2918 	}
   2919 }
   2920 
   2921 /*
   2922  * wm_tick:
   2923  *
   2924  *	One second timer, used to check link status, sweep up
   2925  *	completed transmit jobs, etc.
   2926  */
   2927 static void
   2928 wm_tick(void *arg)
   2929 {
   2930 	struct wm_softc *sc = arg;
   2931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2932 #ifndef WM_MPSAFE
   2933 	int s = splnet();
   2934 #endif
   2935 
   2936 	WM_CORE_LOCK(sc);
   2937 
   2938 	if (sc->sc_core_stopping)
   2939 		goto out;
   2940 
   2941 	if (sc->sc_type >= WM_T_82542_2_1) {
   2942 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2943 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2947 	}
   2948 
   2949 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2950 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2951 	    + CSR_READ(sc, WMREG_CRCERRS)
   2952 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2953 	    + CSR_READ(sc, WMREG_SYMERRC)
   2954 	    + CSR_READ(sc, WMREG_RXERRC)
   2955 	    + CSR_READ(sc, WMREG_SEC)
   2956 	    + CSR_READ(sc, WMREG_CEXTERR)
   2957 	    + CSR_READ(sc, WMREG_RLEC);
   2958 	/*
    2959 	 * WMREG_RNBC is incremented when no receive buffers are available
    2960 	 * in host memory.  It is not a count of dropped packets: the
    2961 	 * ethernet controller can still receive packets in that state as
    2962 	 * long as there is space in the PHY's FIFO.
    2963 	 *
    2964 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT for it
    2965 	 * instead of if_iqdrops.
   2966 	 */
   2967 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2968 
   2969 	if (sc->sc_flags & WM_F_HAS_MII)
   2970 		mii_tick(&sc->sc_mii);
   2971 	else if ((sc->sc_type >= WM_T_82575)
   2972 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2973 		wm_serdes_tick(sc);
   2974 	else
   2975 		wm_tbi_tick(sc);
   2976 
   2977 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2978 out:
   2979 	WM_CORE_UNLOCK(sc);
   2980 #ifndef WM_MPSAFE
   2981 	splx(s);
   2982 #endif
   2983 }
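
/*
 * Note (an assumption inferred from the read-and-accumulate pattern
 * above, not stated in the original source): the hardware statistics
 * registers appear to be clear-on-read, so sampling them once per
 * one-second tick is sufficient as long as no counter can wrap within
 * that interval.
 */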
   2984 
   2985 static int
   2986 wm_ifflags_cb(struct ethercom *ec)
   2987 {
   2988 	struct ifnet *ifp = &ec->ec_if;
   2989 	struct wm_softc *sc = ifp->if_softc;
   2990 	int rc = 0;
   2991 
   2992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2993 		device_xname(sc->sc_dev), __func__));
   2994 
   2995 	WM_CORE_LOCK(sc);
   2996 
   2997 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2998 	sc->sc_if_flags = ifp->if_flags;
   2999 
   3000 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3001 		rc = ENETRESET;
   3002 		goto out;
   3003 	}
   3004 
   3005 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3006 		wm_set_filter(sc);
   3007 
   3008 	wm_set_vlan(sc);
   3009 
   3010 out:
   3011 	WM_CORE_UNLOCK(sc);
   3012 
   3013 	return rc;
   3014 }
   3015 
   3016 /*
   3017  * wm_ioctl:		[ifnet interface function]
   3018  *
   3019  *	Handle control requests from the operator.
   3020  */
   3021 static int
   3022 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct ifreq *ifr = (struct ifreq *) data;
   3026 	struct ifaddr *ifa = (struct ifaddr *)data;
   3027 	struct sockaddr_dl *sdl;
   3028 	int s, error;
   3029 
   3030 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3031 		device_xname(sc->sc_dev), __func__));
   3032 
   3033 #ifndef WM_MPSAFE
   3034 	s = splnet();
   3035 #endif
   3036 	switch (cmd) {
   3037 	case SIOCSIFMEDIA:
   3038 	case SIOCGIFMEDIA:
   3039 		WM_CORE_LOCK(sc);
   3040 		/* Flow control requires full-duplex mode. */
   3041 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3042 		    (ifr->ifr_media & IFM_FDX) == 0)
   3043 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3044 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3045 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3046 				/* We can do both TXPAUSE and RXPAUSE. */
   3047 				ifr->ifr_media |=
   3048 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3049 			}
   3050 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3051 		}
   3052 		WM_CORE_UNLOCK(sc);
   3053 #ifdef WM_MPSAFE
   3054 		s = splnet();
   3055 #endif
   3056 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3057 #ifdef WM_MPSAFE
   3058 		splx(s);
   3059 #endif
   3060 		break;
   3061 	case SIOCINITIFADDR:
   3062 		WM_CORE_LOCK(sc);
   3063 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3064 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3065 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3066 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3067 			/* unicast address is first multicast entry */
   3068 			wm_set_filter(sc);
   3069 			error = 0;
   3070 			WM_CORE_UNLOCK(sc);
   3071 			break;
   3072 		}
   3073 		WM_CORE_UNLOCK(sc);
   3074 		/*FALLTHROUGH*/
   3075 	default:
   3076 #ifdef WM_MPSAFE
   3077 		s = splnet();
   3078 #endif
   3079 		/* It may call wm_start, so unlock here */
   3080 		error = ether_ioctl(ifp, cmd, data);
   3081 #ifdef WM_MPSAFE
   3082 		splx(s);
   3083 #endif
   3084 		if (error != ENETRESET)
   3085 			break;
   3086 
   3087 		error = 0;
   3088 
   3089 		if (cmd == SIOCSIFCAP) {
   3090 			error = (*ifp->if_init)(ifp);
   3091 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3092 			;
   3093 		else if (ifp->if_flags & IFF_RUNNING) {
   3094 			/*
   3095 			 * Multicast list has changed; set the hardware filter
   3096 			 * accordingly.
   3097 			 */
   3098 			WM_CORE_LOCK(sc);
   3099 			wm_set_filter(sc);
   3100 			WM_CORE_UNLOCK(sc);
   3101 		}
   3102 		break;
   3103 	}
   3104 
   3105 #ifndef WM_MPSAFE
   3106 	splx(s);
   3107 #endif
   3108 	return error;
   3109 }
   3110 
   3111 /* MAC address related */
   3112 
   3113 /*
    3114  * Get the offset of the MAC address and return it.
    3115  * If an error occurs, offset 0 is used.
   3116  */
   3117 static uint16_t
   3118 wm_check_alt_mac_addr(struct wm_softc *sc)
   3119 {
   3120 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3121 	uint16_t offset = NVM_OFF_MACADDR;
   3122 
   3123 	/* Try to read alternative MAC address pointer */
   3124 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3125 		return 0;
   3126 
    3127 	/* Check whether the pointer is valid. */
   3128 	if ((offset == 0x0000) || (offset == 0xffff))
   3129 		return 0;
   3130 
   3131 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3132 	/*
    3133 	 * Check whether the alternative MAC address is valid.  Some
    3134 	 * cards have a pointer other than 0xffff but don't actually use
    3135 	 * an alternative MAC address.
    3136 	 *
    3137 	 * Check whether the multicast/broadcast bit is set.
   3138 	 */
   3139 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3140 		if (((myea[0] & 0xff) & 0x01) == 0)
   3141 			return offset; /* Found */
   3142 
   3143 	/* Not found */
   3144 	return 0;
   3145 }
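
/*
 * Note (illustrative, not from the original source): myea[0] & 0xff is
 * the first octet of the candidate address, and its least significant
 * bit is the Ethernet group (multicast/broadcast) bit, which must be 0
 * in a valid unicast station address; hence the check above.
 */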
   3146 
   3147 static int
   3148 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3149 {
   3150 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3151 	uint16_t offset = NVM_OFF_MACADDR;
   3152 	int do_invert = 0;
   3153 
   3154 	switch (sc->sc_type) {
   3155 	case WM_T_82580:
   3156 	case WM_T_I350:
   3157 	case WM_T_I354:
   3158 		/* EEPROM Top Level Partitioning */
   3159 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3160 		break;
   3161 	case WM_T_82571:
   3162 	case WM_T_82575:
   3163 	case WM_T_82576:
   3164 	case WM_T_80003:
   3165 	case WM_T_I210:
   3166 	case WM_T_I211:
   3167 		offset = wm_check_alt_mac_addr(sc);
   3168 		if (offset == 0)
   3169 			if ((sc->sc_funcid & 0x01) == 1)
   3170 				do_invert = 1;
   3171 		break;
   3172 	default:
   3173 		if ((sc->sc_funcid & 0x01) == 1)
   3174 			do_invert = 1;
   3175 		break;
   3176 	}
   3177 
   3178 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3179 		goto bad;
   3180 
   3181 	enaddr[0] = myea[0] & 0xff;
   3182 	enaddr[1] = myea[0] >> 8;
   3183 	enaddr[2] = myea[1] & 0xff;
   3184 	enaddr[3] = myea[1] >> 8;
   3185 	enaddr[4] = myea[2] & 0xff;
   3186 	enaddr[5] = myea[2] >> 8;
   3187 
   3188 	/*
   3189 	 * Toggle the LSB of the MAC address on the second port
   3190 	 * of some dual port cards.
   3191 	 */
   3192 	if (do_invert != 0)
   3193 		enaddr[5] ^= 1;
   3194 
   3195 	return 0;
   3196 
   3197  bad:
   3198 	return -1;
   3199 }
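
/*
 * Worked example (illustrative only): if the three NVM words read into
 * myea[] are { 0x1100, 0x3322, 0x5544 }, the unpacking above yields the
 * station address 00:11:22:33:44:55; each 16-bit NVM word holds two
 * address octets in little-endian order.
 */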
   3200 
   3201 /*
   3202  * wm_set_ral:
   3203  *
    3204  *	Set an entry in the receive address list.
   3205  */
   3206 static void
   3207 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3208 {
   3209 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3210 	uint32_t wlock_mac;
   3211 	int rv;
   3212 
   3213 	if (enaddr != NULL) {
   3214 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3215 		    (enaddr[3] << 24);
   3216 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3217 		ral_hi |= RAL_AV;
   3218 	} else {
   3219 		ral_lo = 0;
   3220 		ral_hi = 0;
   3221 	}
   3222 
   3223 	switch (sc->sc_type) {
   3224 	case WM_T_82542_2_0:
   3225 	case WM_T_82542_2_1:
   3226 	case WM_T_82543:
   3227 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3228 		CSR_WRITE_FLUSH(sc);
   3229 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3230 		CSR_WRITE_FLUSH(sc);
   3231 		break;
   3232 	case WM_T_PCH2:
   3233 	case WM_T_PCH_LPT:
   3234 	case WM_T_PCH_SPT:
   3235 		if (idx == 0) {
   3236 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3237 			CSR_WRITE_FLUSH(sc);
   3238 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3239 			CSR_WRITE_FLUSH(sc);
   3240 			return;
   3241 		}
   3242 		if (sc->sc_type != WM_T_PCH2) {
   3243 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3244 			    FWSM_WLOCK_MAC);
   3245 			addrl = WMREG_SHRAL(idx - 1);
   3246 			addrh = WMREG_SHRAH(idx - 1);
   3247 		} else {
   3248 			wlock_mac = 0;
   3249 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3250 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3251 		}
   3252 
   3253 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3254 			rv = wm_get_swflag_ich8lan(sc);
   3255 			if (rv != 0)
   3256 				return;
   3257 			CSR_WRITE(sc, addrl, ral_lo);
   3258 			CSR_WRITE_FLUSH(sc);
   3259 			CSR_WRITE(sc, addrh, ral_hi);
   3260 			CSR_WRITE_FLUSH(sc);
   3261 			wm_put_swflag_ich8lan(sc);
   3262 		}
   3263 
   3264 		break;
   3265 	default:
   3266 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3267 		CSR_WRITE_FLUSH(sc);
   3268 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3269 		CSR_WRITE_FLUSH(sc);
   3270 		break;
   3271 	}
   3272 }
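
/*
 * Worked example (illustrative only): for the station address
 * 00:11:22:33:44:55, the code above computes ral_lo = 0x33221100 and
 * ral_hi = 0x00005544 | RAL_AV, i.e. the address is stored
 * little-endian across the RAL/RAH pair with the Address Valid bit set
 * in the high word.
 */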
   3273 
   3274 /*
   3275  * wm_mchash:
   3276  *
   3277  *	Compute the hash of the multicast address for the 4096-bit
   3278  *	multicast filter.
   3279  */
   3280 static uint32_t
   3281 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3282 {
   3283 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3284 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3285 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3286 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3287 	uint32_t hash;
   3288 
   3289 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3290 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3291 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3292 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3293 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3294 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3295 		return (hash & 0x3ff);
   3296 	}
   3297 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3298 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3299 
   3300 	return (hash & 0xfff);
   3301 }
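
/*
 * Worked example (illustrative only): with sc_mchash_type == 0 on a
 * non-ICH/PCH chip, both shifts above are 4, so for the mDNS group
 * address 01:00:5e:00:00:fb we get hash = (0x00 >> 4) | (0xfb << 4) =
 * 0xfb0.  wm_set_filter() then selects MTA register hash >> 5 = 0x7d
 * and bit hash & 0x1f = 16.
 */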
   3302 
   3303 /*
   3304  * wm_set_filter:
   3305  *
   3306  *	Set up the receive filter.
   3307  */
   3308 static void
   3309 wm_set_filter(struct wm_softc *sc)
   3310 {
   3311 	struct ethercom *ec = &sc->sc_ethercom;
   3312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3313 	struct ether_multi *enm;
   3314 	struct ether_multistep step;
   3315 	bus_addr_t mta_reg;
   3316 	uint32_t hash, reg, bit;
   3317 	int i, size, ralmax;
   3318 
   3319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3320 		device_xname(sc->sc_dev), __func__));
   3321 
   3322 	if (sc->sc_type >= WM_T_82544)
   3323 		mta_reg = WMREG_CORDOVA_MTA;
   3324 	else
   3325 		mta_reg = WMREG_MTA;
   3326 
   3327 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3328 
   3329 	if (ifp->if_flags & IFF_BROADCAST)
   3330 		sc->sc_rctl |= RCTL_BAM;
   3331 	if (ifp->if_flags & IFF_PROMISC) {
   3332 		sc->sc_rctl |= RCTL_UPE;
   3333 		goto allmulti;
   3334 	}
   3335 
   3336 	/*
   3337 	 * Set the station address in the first RAL slot, and
   3338 	 * clear the remaining slots.
   3339 	 */
   3340 	if (sc->sc_type == WM_T_ICH8)
    3341 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3342 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3343 	    || (sc->sc_type == WM_T_PCH))
   3344 		size = WM_RAL_TABSIZE_ICH8;
   3345 	else if (sc->sc_type == WM_T_PCH2)
   3346 		size = WM_RAL_TABSIZE_PCH2;
    3347 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3348 		size = WM_RAL_TABSIZE_PCH_LPT;
   3349 	else if (sc->sc_type == WM_T_82575)
   3350 		size = WM_RAL_TABSIZE_82575;
   3351 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3352 		size = WM_RAL_TABSIZE_82576;
   3353 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3354 		size = WM_RAL_TABSIZE_I350;
   3355 	else
   3356 		size = WM_RAL_TABSIZE;
   3357 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3358 
   3359 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3360 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3361 		switch (i) {
   3362 		case 0:
   3363 			/* We can use all entries */
   3364 			ralmax = size;
   3365 			break;
   3366 		case 1:
   3367 			/* Only RAR[0] */
   3368 			ralmax = 1;
   3369 			break;
   3370 		default:
   3371 			/* available SHRA + RAR[0] */
   3372 			ralmax = i + 1;
   3373 		}
   3374 	} else
   3375 		ralmax = size;
   3376 	for (i = 1; i < size; i++) {
   3377 		if (i < ralmax)
   3378 			wm_set_ral(sc, NULL, i);
   3379 	}
   3380 
   3381 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3382 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3383 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3384 	    || (sc->sc_type == WM_T_PCH_SPT))
   3385 		size = WM_ICH8_MC_TABSIZE;
   3386 	else
   3387 		size = WM_MC_TABSIZE;
   3388 	/* Clear out the multicast table. */
   3389 	for (i = 0; i < size; i++) {
   3390 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3391 		CSR_WRITE_FLUSH(sc);
   3392 	}
   3393 
   3394 	ETHER_LOCK(ec);
   3395 	ETHER_FIRST_MULTI(step, ec, enm);
   3396 	while (enm != NULL) {
   3397 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3398 			ETHER_UNLOCK(ec);
   3399 			/*
   3400 			 * We must listen to a range of multicast addresses.
   3401 			 * For now, just accept all multicasts, rather than
   3402 			 * trying to set only those filter bits needed to match
   3403 			 * the range.  (At this time, the only use of address
   3404 			 * ranges is for IP multicast routing, for which the
   3405 			 * range is big enough to require all bits set.)
   3406 			 */
   3407 			goto allmulti;
   3408 		}
   3409 
   3410 		hash = wm_mchash(sc, enm->enm_addrlo);
   3411 
   3412 		reg = (hash >> 5);
   3413 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3414 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3415 		    || (sc->sc_type == WM_T_PCH2)
   3416 		    || (sc->sc_type == WM_T_PCH_LPT)
   3417 		    || (sc->sc_type == WM_T_PCH_SPT))
   3418 			reg &= 0x1f;
   3419 		else
   3420 			reg &= 0x7f;
   3421 		bit = hash & 0x1f;
   3422 
   3423 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3424 		hash |= 1U << bit;
   3425 
   3426 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3427 			/*
    3428 			 * 82544 Errata 9: Certain registers cannot be written
   3429 			 * with particular alignments in PCI-X bus operation
   3430 			 * (FCAH, MTA and VFTA).
   3431 			 */
   3432 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3433 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3434 			CSR_WRITE_FLUSH(sc);
   3435 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3436 			CSR_WRITE_FLUSH(sc);
   3437 		} else {
   3438 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3439 			CSR_WRITE_FLUSH(sc);
   3440 		}
   3441 
   3442 		ETHER_NEXT_MULTI(step, enm);
   3443 	}
   3444 	ETHER_UNLOCK(ec);
   3445 
   3446 	ifp->if_flags &= ~IFF_ALLMULTI;
   3447 	goto setit;
   3448 
   3449  allmulti:
   3450 	ifp->if_flags |= IFF_ALLMULTI;
   3451 	sc->sc_rctl |= RCTL_MPE;
   3452 
   3453  setit:
   3454 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3455 }
   3456 
   3457 /* Reset and init related */
   3458 
   3459 static void
   3460 wm_set_vlan(struct wm_softc *sc)
   3461 {
   3462 
   3463 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3464 		device_xname(sc->sc_dev), __func__));
   3465 
   3466 	/* Deal with VLAN enables. */
   3467 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3468 		sc->sc_ctrl |= CTRL_VME;
   3469 	else
   3470 		sc->sc_ctrl &= ~CTRL_VME;
   3471 
   3472 	/* Write the control registers. */
   3473 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3474 }
   3475 
   3476 static void
   3477 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3478 {
   3479 	uint32_t gcr;
   3480 	pcireg_t ctrl2;
   3481 
   3482 	gcr = CSR_READ(sc, WMREG_GCR);
   3483 
   3484 	/* Only take action if timeout value is defaulted to 0 */
   3485 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3486 		goto out;
   3487 
   3488 	if ((gcr & GCR_CAP_VER2) == 0) {
   3489 		gcr |= GCR_CMPL_TMOUT_10MS;
   3490 		goto out;
   3491 	}
   3492 
   3493 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3494 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3495 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3496 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3497 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3498 
   3499 out:
   3500 	/* Disable completion timeout resend */
   3501 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3502 
   3503 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3504 }
   3505 
   3506 void
   3507 wm_get_auto_rd_done(struct wm_softc *sc)
   3508 {
   3509 	int i;
   3510 
   3511 	/* wait for eeprom to reload */
   3512 	switch (sc->sc_type) {
   3513 	case WM_T_82571:
   3514 	case WM_T_82572:
   3515 	case WM_T_82573:
   3516 	case WM_T_82574:
   3517 	case WM_T_82583:
   3518 	case WM_T_82575:
   3519 	case WM_T_82576:
   3520 	case WM_T_82580:
   3521 	case WM_T_I350:
   3522 	case WM_T_I354:
   3523 	case WM_T_I210:
   3524 	case WM_T_I211:
   3525 	case WM_T_80003:
   3526 	case WM_T_ICH8:
   3527 	case WM_T_ICH9:
   3528 		for (i = 0; i < 10; i++) {
   3529 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3530 				break;
   3531 			delay(1000);
   3532 		}
   3533 		if (i == 10) {
   3534 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3535 			    "complete\n", device_xname(sc->sc_dev));
   3536 		}
   3537 		break;
   3538 	default:
   3539 		break;
   3540 	}
   3541 }
   3542 
   3543 void
   3544 wm_lan_init_done(struct wm_softc *sc)
   3545 {
   3546 	uint32_t reg = 0;
   3547 	int i;
   3548 
   3549 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3550 		device_xname(sc->sc_dev), __func__));
   3551 
   3552 	/* Wait for eeprom to reload */
   3553 	switch (sc->sc_type) {
   3554 	case WM_T_ICH10:
   3555 	case WM_T_PCH:
   3556 	case WM_T_PCH2:
   3557 	case WM_T_PCH_LPT:
   3558 	case WM_T_PCH_SPT:
   3559 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3560 			reg = CSR_READ(sc, WMREG_STATUS);
   3561 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3562 				break;
   3563 			delay(100);
   3564 		}
   3565 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3566 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3567 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3568 		}
   3569 		break;
   3570 	default:
   3571 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3572 		    __func__);
   3573 		break;
   3574 	}
   3575 
   3576 	reg &= ~STATUS_LAN_INIT_DONE;
   3577 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3578 }
   3579 
   3580 void
   3581 wm_get_cfg_done(struct wm_softc *sc)
   3582 {
   3583 	int mask;
   3584 	uint32_t reg;
   3585 	int i;
   3586 
   3587 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3588 		device_xname(sc->sc_dev), __func__));
   3589 
   3590 	/* Wait for eeprom to reload */
   3591 	switch (sc->sc_type) {
   3592 	case WM_T_82542_2_0:
   3593 	case WM_T_82542_2_1:
   3594 		/* null */
   3595 		break;
   3596 	case WM_T_82543:
   3597 	case WM_T_82544:
   3598 	case WM_T_82540:
   3599 	case WM_T_82545:
   3600 	case WM_T_82545_3:
   3601 	case WM_T_82546:
   3602 	case WM_T_82546_3:
   3603 	case WM_T_82541:
   3604 	case WM_T_82541_2:
   3605 	case WM_T_82547:
   3606 	case WM_T_82547_2:
   3607 	case WM_T_82573:
   3608 	case WM_T_82574:
   3609 	case WM_T_82583:
   3610 		/* generic */
   3611 		delay(10*1000);
   3612 		break;
   3613 	case WM_T_80003:
   3614 	case WM_T_82571:
   3615 	case WM_T_82572:
   3616 	case WM_T_82575:
   3617 	case WM_T_82576:
   3618 	case WM_T_82580:
   3619 	case WM_T_I350:
   3620 	case WM_T_I354:
   3621 	case WM_T_I210:
   3622 	case WM_T_I211:
   3623 		if (sc->sc_type == WM_T_82571) {
   3624 			/* Only 82571 shares port 0 */
   3625 			mask = EEMNGCTL_CFGDONE_0;
   3626 		} else
   3627 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3628 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3629 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3630 				break;
   3631 			delay(1000);
   3632 		}
   3633 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3634 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3635 				device_xname(sc->sc_dev), __func__));
   3636 		}
   3637 		break;
   3638 	case WM_T_ICH8:
   3639 	case WM_T_ICH9:
   3640 	case WM_T_ICH10:
   3641 	case WM_T_PCH:
   3642 	case WM_T_PCH2:
   3643 	case WM_T_PCH_LPT:
   3644 	case WM_T_PCH_SPT:
   3645 		delay(10*1000);
   3646 		if (sc->sc_type >= WM_T_ICH10)
   3647 			wm_lan_init_done(sc);
   3648 		else
   3649 			wm_get_auto_rd_done(sc);
   3650 
   3651 		reg = CSR_READ(sc, WMREG_STATUS);
   3652 		if ((reg & STATUS_PHYRA) != 0)
   3653 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3654 		break;
   3655 	default:
   3656 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3657 		    __func__);
   3658 		break;
   3659 	}
   3660 }
   3661 
   3662 /* Init hardware bits */
   3663 void
   3664 wm_initialize_hardware_bits(struct wm_softc *sc)
   3665 {
   3666 	uint32_t tarc0, tarc1, reg;
   3667 
   3668 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3669 		device_xname(sc->sc_dev), __func__));
   3670 
   3671 	/* For 82571 variant, 80003 and ICHs */
   3672 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3673 	    || (sc->sc_type >= WM_T_80003)) {
   3674 
   3675 		/* Transmit Descriptor Control 0 */
   3676 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3677 		reg |= TXDCTL_COUNT_DESC;
   3678 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3679 
   3680 		/* Transmit Descriptor Control 1 */
   3681 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3682 		reg |= TXDCTL_COUNT_DESC;
   3683 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3684 
   3685 		/* TARC0 */
   3686 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3687 		switch (sc->sc_type) {
   3688 		case WM_T_82571:
   3689 		case WM_T_82572:
   3690 		case WM_T_82573:
   3691 		case WM_T_82574:
   3692 		case WM_T_82583:
   3693 		case WM_T_80003:
   3694 			/* Clear bits 30..27 */
   3695 			tarc0 &= ~__BITS(30, 27);
   3696 			break;
   3697 		default:
   3698 			break;
   3699 		}
   3700 
   3701 		switch (sc->sc_type) {
   3702 		case WM_T_82571:
   3703 		case WM_T_82572:
   3704 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3705 
   3706 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3707 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3708 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3709 			/* 8257[12] Errata No.7 */
    3710 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3711 
   3712 			/* TARC1 bit 28 */
   3713 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3714 				tarc1 &= ~__BIT(28);
   3715 			else
   3716 				tarc1 |= __BIT(28);
   3717 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3718 
   3719 			/*
   3720 			 * 8257[12] Errata No.13
    3721 			 * Disable Dynamic Clock Gating.
   3722 			 */
   3723 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3724 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3725 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3726 			break;
   3727 		case WM_T_82573:
   3728 		case WM_T_82574:
   3729 		case WM_T_82583:
   3730 			if ((sc->sc_type == WM_T_82574)
   3731 			    || (sc->sc_type == WM_T_82583))
   3732 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3733 
   3734 			/* Extended Device Control */
   3735 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3736 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3737 			reg |= __BIT(22);	/* Set bit 22 */
   3738 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3739 
   3740 			/* Device Control */
   3741 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3742 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3743 
   3744 			/* PCIe Control Register */
   3745 			/*
   3746 			 * 82573 Errata (unknown).
   3747 			 *
   3748 			 * 82574 Errata 25 and 82583 Errata 12
   3749 			 * "Dropped Rx Packets":
    3750 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3751 			 */
   3752 			reg = CSR_READ(sc, WMREG_GCR);
   3753 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3754 			CSR_WRITE(sc, WMREG_GCR, reg);
   3755 
   3756 			if ((sc->sc_type == WM_T_82574)
   3757 			    || (sc->sc_type == WM_T_82583)) {
   3758 				/*
   3759 				 * Document says this bit must be set for
   3760 				 * proper operation.
   3761 				 */
   3762 				reg = CSR_READ(sc, WMREG_GCR);
   3763 				reg |= __BIT(22);
   3764 				CSR_WRITE(sc, WMREG_GCR, reg);
   3765 
   3766 				/*
    3767 				 * Apply a workaround for a documented
    3768 				 * hardware erratum: some error-prone or
    3769 				 * unreliable PCIe completions occur,
    3770 				 * particularly with ASPM enabled.
    3771 				 * Without the fix, the issue can cause
    3772 				 * Tx timeouts.
   3773 				 */
   3774 				reg = CSR_READ(sc, WMREG_GCR2);
   3775 				reg |= __BIT(0);
   3776 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3777 			}
   3778 			break;
   3779 		case WM_T_80003:
   3780 			/* TARC0 */
   3781 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3782 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3783 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3784 
   3785 			/* TARC1 bit 28 */
   3786 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3787 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3788 				tarc1 &= ~__BIT(28);
   3789 			else
   3790 				tarc1 |= __BIT(28);
   3791 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3792 			break;
   3793 		case WM_T_ICH8:
   3794 		case WM_T_ICH9:
   3795 		case WM_T_ICH10:
   3796 		case WM_T_PCH:
   3797 		case WM_T_PCH2:
   3798 		case WM_T_PCH_LPT:
   3799 		case WM_T_PCH_SPT:
   3800 			/* TARC0 */
   3801 			if ((sc->sc_type == WM_T_ICH8)
   3802 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3803 				/* Set TARC0 bits 29 and 28 */
   3804 				tarc0 |= __BITS(29, 28);
   3805 			}
   3806 			/* Set TARC0 bits 23,24,26,27 */
   3807 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3808 
   3809 			/* CTRL_EXT */
   3810 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3811 			reg |= __BIT(22);	/* Set bit 22 */
   3812 			/*
   3813 			 * Enable PHY low-power state when MAC is at D3
   3814 			 * w/o WoL
   3815 			 */
   3816 			if (sc->sc_type >= WM_T_PCH)
   3817 				reg |= CTRL_EXT_PHYPDEN;
   3818 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3819 
   3820 			/* TARC1 */
   3821 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3822 			/* bit 28 */
   3823 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3824 				tarc1 &= ~__BIT(28);
   3825 			else
   3826 				tarc1 |= __BIT(28);
   3827 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3828 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3829 
   3830 			/* Device Status */
   3831 			if (sc->sc_type == WM_T_ICH8) {
   3832 				reg = CSR_READ(sc, WMREG_STATUS);
   3833 				reg &= ~__BIT(31);
   3834 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3835 
   3836 			}
   3837 
   3838 			/* IOSFPC */
   3839 			if (sc->sc_type == WM_T_PCH_SPT) {
   3840 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3841 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3842 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3843 			}
   3844 			/*
    3845 			 * Work around a descriptor data corruption issue
    3846 			 * seen with NFSv2 UDP traffic by simply disabling
    3847 			 * the NFS filtering capability.
   3848 			 */
   3849 			reg = CSR_READ(sc, WMREG_RFCTL);
   3850 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3851 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3852 			break;
   3853 		default:
   3854 			break;
   3855 		}
   3856 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3857 
   3858 		switch (sc->sc_type) {
   3859 		/*
   3860 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3861 		 * Avoid RSS Hash Value bug.
   3862 		 */
   3863 		case WM_T_82571:
   3864 		case WM_T_82572:
   3865 		case WM_T_82573:
   3866 		case WM_T_80003:
   3867 		case WM_T_ICH8:
   3868 			reg = CSR_READ(sc, WMREG_RFCTL);
    3869 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3870 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3871 			break;
   3872 		case WM_T_82574:
    3873 			/* Use extended Rx descriptors. */
   3874 			reg = CSR_READ(sc, WMREG_RFCTL);
   3875 			reg |= WMREG_RFCTL_EXSTEN;
   3876 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3877 			break;
   3878 		default:
   3879 			break;
   3880 		}
   3881 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3882 		/*
   3883 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3884 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3885 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3886 		 * Correctly by the Device"
   3887 		 *
   3888 		 * I354(C2000) Errata AVR53:
   3889 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3890 		 * Hang"
   3891 		 */
   3892 		reg = CSR_READ(sc, WMREG_RFCTL);
   3893 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3894 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3895 	}
   3896 }
   3897 
   3898 static uint32_t
   3899 wm_rxpbs_adjust_82580(uint32_t val)
   3900 {
   3901 	uint32_t rv = 0;
   3902 
   3903 	if (val < __arraycount(wm_82580_rxpbs_table))
   3904 		rv = wm_82580_rxpbs_table[val];
   3905 
   3906 	return rv;
   3907 }
   3908 
   3909 /*
   3910  * wm_reset_phy:
   3911  *
   3912  *	generic PHY reset function.
   3913  *	Same as e1000_phy_hw_reset_generic()
   3914  */
   3915 static void
   3916 wm_reset_phy(struct wm_softc *sc)
   3917 {
   3918 	uint32_t reg;
   3919 
   3920 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3921 		device_xname(sc->sc_dev), __func__));
   3922 	if (wm_phy_resetisblocked(sc))
   3923 		return;
   3924 
   3925 	sc->phy.acquire(sc);
   3926 
   3927 	reg = CSR_READ(sc, WMREG_CTRL);
   3928 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3929 	CSR_WRITE_FLUSH(sc);
   3930 
   3931 	delay(sc->phy.reset_delay_us);
   3932 
   3933 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3934 	CSR_WRITE_FLUSH(sc);
   3935 
   3936 	delay(150);
   3937 
   3938 	sc->phy.release(sc);
   3939 
   3940 	wm_get_cfg_done(sc);
   3941 }
   3942 
   3943 static void
   3944 wm_flush_desc_rings(struct wm_softc *sc)
   3945 {
   3946 	pcireg_t preg;
   3947 	uint32_t reg;
   3948 	int nexttx;
   3949 
   3950 	/* First, disable MULR fix in FEXTNVM11 */
   3951 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3952 	reg |= FEXTNVM11_DIS_MULRFIX;
   3953 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3954 
   3955 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3956 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3957 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3958 		struct wm_txqueue *txq;
   3959 		wiseman_txdesc_t *txd;
   3960 
   3961 		/* TX */
   3962 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3963 		    device_xname(sc->sc_dev), preg, reg);
   3964 		reg = CSR_READ(sc, WMREG_TCTL);
   3965 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3966 
   3967 		txq = &sc->sc_queue[0].wmq_txq;
   3968 		nexttx = txq->txq_next;
   3969 		txd = &txq->txq_descs[nexttx];
   3970 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3971 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3972 		txd->wtx_fields.wtxu_status = 0;
   3973 		txd->wtx_fields.wtxu_options = 0;
   3974 		txd->wtx_fields.wtxu_vlan = 0;
   3975 
   3976 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3977 			BUS_SPACE_BARRIER_WRITE);
   3978 
   3979 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3980 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3981 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3982 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3983 		delay(250);
   3984 	}
   3985 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3986 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3987 		uint32_t rctl;
   3988 
   3989 		/* RX */
   3990 		printf("%s: Need RX flush (reg = %08x)\n",
   3991 		    device_xname(sc->sc_dev), preg);
   3992 		rctl = CSR_READ(sc, WMREG_RCTL);
   3993 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3994 		CSR_WRITE_FLUSH(sc);
   3995 		delay(150);
   3996 
   3997 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3998 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3999 		reg &= 0xffffc000;
   4000 		/*
   4001 		 * update thresholds: prefetch threshold to 31, host threshold
   4002 		 * to 1 and make sure the granularity is "descriptors" and not
   4003 		 * "cache lines"
   4004 		 */
   4005 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4006 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4007 
   4008 		/*
   4009 		 * momentarily enable the RX ring for the changes to take
   4010 		 * effect
   4011 		 */
   4012 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4013 		CSR_WRITE_FLUSH(sc);
   4014 		delay(150);
   4015 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4016 	}
   4017 }
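
/*
 * Note (an assumption, mirroring the equivalent I219/PCH_SPT descriptor
 * ring flush workaround found in other e1000-family drivers): when the
 * device reports DESCRING_STATUS_FLUSH_REQ, the rings must be drained
 * as above before a reset, otherwise the hardware may hang on the next
 * initialization.
 */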
   4018 
   4019 /*
   4020  * wm_reset:
   4021  *
   4022  *	Reset the i82542 chip.
   4023  */
   4024 static void
   4025 wm_reset(struct wm_softc *sc)
   4026 {
   4027 	int phy_reset = 0;
   4028 	int i, error = 0;
   4029 	uint32_t reg;
   4030 
   4031 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4032 		device_xname(sc->sc_dev), __func__));
   4033 	KASSERT(sc->sc_type != 0);
   4034 
   4035 	/*
   4036 	 * Allocate on-chip memory according to the MTU size.
   4037 	 * The Packet Buffer Allocation register must be written
   4038 	 * before the chip is reset.
   4039 	 */
   4040 	switch (sc->sc_type) {
   4041 	case WM_T_82547:
   4042 	case WM_T_82547_2:
   4043 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4044 		    PBA_22K : PBA_30K;
   4045 		for (i = 0; i < sc->sc_nqueues; i++) {
   4046 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4047 			txq->txq_fifo_head = 0;
   4048 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4049 			txq->txq_fifo_size =
   4050 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4051 			txq->txq_fifo_stall = 0;
   4052 		}
   4053 		break;
   4054 	case WM_T_82571:
   4055 	case WM_T_82572:
    4056 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4057 	case WM_T_80003:
   4058 		sc->sc_pba = PBA_32K;
   4059 		break;
   4060 	case WM_T_82573:
   4061 		sc->sc_pba = PBA_12K;
   4062 		break;
   4063 	case WM_T_82574:
   4064 	case WM_T_82583:
   4065 		sc->sc_pba = PBA_20K;
   4066 		break;
   4067 	case WM_T_82576:
   4068 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4069 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4070 		break;
   4071 	case WM_T_82580:
   4072 	case WM_T_I350:
   4073 	case WM_T_I354:
   4074 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4075 		break;
   4076 	case WM_T_I210:
   4077 	case WM_T_I211:
   4078 		sc->sc_pba = PBA_34K;
   4079 		break;
   4080 	case WM_T_ICH8:
   4081 		/* Workaround for a bit corruption issue in FIFO memory */
   4082 		sc->sc_pba = PBA_8K;
   4083 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4084 		break;
   4085 	case WM_T_ICH9:
   4086 	case WM_T_ICH10:
   4087 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4088 		    PBA_14K : PBA_10K;
   4089 		break;
   4090 	case WM_T_PCH:
   4091 	case WM_T_PCH2:
   4092 	case WM_T_PCH_LPT:
   4093 	case WM_T_PCH_SPT:
   4094 		sc->sc_pba = PBA_26K;
   4095 		break;
   4096 	default:
   4097 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4098 		    PBA_40K : PBA_48K;
   4099 		break;
   4100 	}
   4101 	/*
    4102 	 * Only old or non-multiqueue devices have the PBA register.
   4103 	 * XXX Need special handling for 82575.
   4104 	 */
   4105 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4106 	    || (sc->sc_type == WM_T_82575))
   4107 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4108 
   4109 	/* Prevent the PCI-E bus from sticking */
   4110 	if (sc->sc_flags & WM_F_PCIE) {
   4111 		int timeout = 800;
   4112 
   4113 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4114 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4115 
   4116 		while (timeout--) {
   4117 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4118 			    == 0)
   4119 				break;
   4120 			delay(100);
   4121 		}
   4122 		if (timeout == 0)
   4123 			device_printf(sc->sc_dev,
   4124 			    "failed to disable busmastering\n");
   4125 	}
   4126 
   4127 	/* Set the completion timeout for interface */
   4128 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4129 	    || (sc->sc_type == WM_T_82580)
   4130 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4131 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4132 		wm_set_pcie_completion_timeout(sc);
   4133 
   4134 	/* Clear interrupt */
   4135 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4136 	if (wm_is_using_msix(sc)) {
   4137 		if (sc->sc_type != WM_T_82574) {
   4138 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4139 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4140 		} else {
   4141 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4142 		}
   4143 	}
   4144 
   4145 	/* Stop the transmit and receive processes. */
   4146 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4147 	sc->sc_rctl &= ~RCTL_EN;
   4148 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4149 	CSR_WRITE_FLUSH(sc);
   4150 
   4151 	/* XXX set_tbi_sbp_82543() */
   4152 
   4153 	delay(10*1000);
   4154 
   4155 	/* Must acquire the MDIO ownership before MAC reset */
   4156 	switch (sc->sc_type) {
   4157 	case WM_T_82573:
   4158 	case WM_T_82574:
   4159 	case WM_T_82583:
   4160 		error = wm_get_hw_semaphore_82573(sc);
   4161 		break;
   4162 	default:
   4163 		break;
   4164 	}
   4165 
   4166 	/*
   4167 	 * 82541 Errata 29? & 82547 Errata 28?
   4168 	 * See also the description about PHY_RST bit in CTRL register
   4169 	 * in 8254x_GBe_SDM.pdf.
   4170 	 */
   4171 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4172 		CSR_WRITE(sc, WMREG_CTRL,
   4173 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4174 		CSR_WRITE_FLUSH(sc);
   4175 		delay(5000);
   4176 	}
   4177 
   4178 	switch (sc->sc_type) {
   4179 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4180 	case WM_T_82541:
   4181 	case WM_T_82541_2:
   4182 	case WM_T_82547:
   4183 	case WM_T_82547_2:
   4184 		/*
   4185 		 * On some chipsets, a reset through a memory-mapped write
   4186 		 * cycle can cause the chip to reset before completing the
    4187 		 * write cycle.  This causes a major headache that can be
   4188 		 * avoided by issuing the reset via indirect register writes
   4189 		 * through I/O space.
   4190 		 *
   4191 		 * So, if we successfully mapped the I/O BAR at attach time,
   4192 		 * use that.  Otherwise, try our luck with a memory-mapped
   4193 		 * reset.
   4194 		 */
   4195 		if (sc->sc_flags & WM_F_IOH_VALID)
   4196 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4197 		else
   4198 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4199 		break;
   4200 	case WM_T_82545_3:
   4201 	case WM_T_82546_3:
   4202 		/* Use the shadow control register on these chips. */
   4203 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4204 		break;
   4205 	case WM_T_80003:
   4206 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4207 		sc->phy.acquire(sc);
   4208 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4209 		sc->phy.release(sc);
   4210 		break;
   4211 	case WM_T_ICH8:
   4212 	case WM_T_ICH9:
   4213 	case WM_T_ICH10:
   4214 	case WM_T_PCH:
   4215 	case WM_T_PCH2:
   4216 	case WM_T_PCH_LPT:
   4217 	case WM_T_PCH_SPT:
   4218 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4219 		if (wm_phy_resetisblocked(sc) == false) {
   4220 			/*
   4221 			 * Gate automatic PHY configuration by hardware on
   4222 			 * non-managed 82579
   4223 			 */
   4224 			if ((sc->sc_type == WM_T_PCH2)
   4225 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4226 				== 0))
   4227 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4228 
   4229 			reg |= CTRL_PHY_RESET;
   4230 			phy_reset = 1;
   4231 		} else
   4232 			printf("XXX reset is blocked!!!\n");
   4233 		sc->phy.acquire(sc);
   4234 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4235 		/* Don't insert a completion barrier during the reset */
   4236 		delay(20*1000);
   4237 		mutex_exit(sc->sc_ich_phymtx);
   4238 		break;
   4239 	case WM_T_82580:
   4240 	case WM_T_I350:
   4241 	case WM_T_I354:
   4242 	case WM_T_I210:
   4243 	case WM_T_I211:
   4244 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4245 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4246 			CSR_WRITE_FLUSH(sc);
   4247 		delay(5000);
   4248 		break;
   4249 	case WM_T_82542_2_0:
   4250 	case WM_T_82542_2_1:
   4251 	case WM_T_82543:
   4252 	case WM_T_82540:
   4253 	case WM_T_82545:
   4254 	case WM_T_82546:
   4255 	case WM_T_82571:
   4256 	case WM_T_82572:
   4257 	case WM_T_82573:
   4258 	case WM_T_82574:
   4259 	case WM_T_82575:
   4260 	case WM_T_82576:
   4261 	case WM_T_82583:
   4262 	default:
   4263 		/* Everything else can safely use the documented method. */
   4264 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4265 		break;
   4266 	}
   4267 
   4268 	/* Must release the MDIO ownership after MAC reset */
   4269 	switch (sc->sc_type) {
   4270 	case WM_T_82573:
   4271 	case WM_T_82574:
   4272 	case WM_T_82583:
   4273 		if (error == 0)
   4274 			wm_put_hw_semaphore_82573(sc);
   4275 		break;
   4276 	default:
   4277 		break;
   4278 	}
   4279 
   4280 	if (phy_reset != 0)
   4281 		wm_get_cfg_done(sc);
   4282 
   4283 	/* reload EEPROM */
   4284 	switch (sc->sc_type) {
   4285 	case WM_T_82542_2_0:
   4286 	case WM_T_82542_2_1:
   4287 	case WM_T_82543:
   4288 	case WM_T_82544:
   4289 		delay(10);
   4290 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4291 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4292 		CSR_WRITE_FLUSH(sc);
   4293 		delay(2000);
   4294 		break;
   4295 	case WM_T_82540:
   4296 	case WM_T_82545:
   4297 	case WM_T_82545_3:
   4298 	case WM_T_82546:
   4299 	case WM_T_82546_3:
   4300 		delay(5*1000);
   4301 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4302 		break;
   4303 	case WM_T_82541:
   4304 	case WM_T_82541_2:
   4305 	case WM_T_82547:
   4306 	case WM_T_82547_2:
   4307 		delay(20000);
   4308 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4309 		break;
   4310 	case WM_T_82571:
   4311 	case WM_T_82572:
   4312 	case WM_T_82573:
   4313 	case WM_T_82574:
   4314 	case WM_T_82583:
   4315 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4316 			delay(10);
   4317 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4318 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4319 			CSR_WRITE_FLUSH(sc);
   4320 		}
   4321 		/* check EECD_EE_AUTORD */
   4322 		wm_get_auto_rd_done(sc);
   4323 		/*
    4324 		 * PHY configuration from the NVM starts just after
    4325 		 * EECD_AUTO_RD is set.
   4326 		 */
   4327 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4328 		    || (sc->sc_type == WM_T_82583))
   4329 			delay(25*1000);
   4330 		break;
   4331 	case WM_T_82575:
   4332 	case WM_T_82576:
   4333 	case WM_T_82580:
   4334 	case WM_T_I350:
   4335 	case WM_T_I354:
   4336 	case WM_T_I210:
   4337 	case WM_T_I211:
   4338 	case WM_T_80003:
   4339 		/* check EECD_EE_AUTORD */
   4340 		wm_get_auto_rd_done(sc);
   4341 		break;
   4342 	case WM_T_ICH8:
   4343 	case WM_T_ICH9:
   4344 	case WM_T_ICH10:
   4345 	case WM_T_PCH:
   4346 	case WM_T_PCH2:
   4347 	case WM_T_PCH_LPT:
   4348 	case WM_T_PCH_SPT:
   4349 		break;
   4350 	default:
   4351 		panic("%s: unknown type\n", __func__);
   4352 	}
   4353 
   4354 	/* Check whether EEPROM is present or not */
   4355 	switch (sc->sc_type) {
   4356 	case WM_T_82575:
   4357 	case WM_T_82576:
   4358 	case WM_T_82580:
   4359 	case WM_T_I350:
   4360 	case WM_T_I354:
   4361 	case WM_T_ICH8:
   4362 	case WM_T_ICH9:
   4363 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4364 			/* Not found */
   4365 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4366 			if (sc->sc_type == WM_T_82575)
   4367 				wm_reset_init_script_82575(sc);
   4368 		}
   4369 		break;
   4370 	default:
   4371 		break;
   4372 	}
   4373 
   4374 	if ((sc->sc_type == WM_T_82580)
   4375 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4376 		/* clear global device reset status bit */
   4377 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4378 	}
   4379 
   4380 	/* Clear any pending interrupt events. */
   4381 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4382 	reg = CSR_READ(sc, WMREG_ICR);
   4383 	if (wm_is_using_msix(sc)) {
   4384 		if (sc->sc_type != WM_T_82574) {
   4385 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4386 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4387 		} else
   4388 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4389 	}
   4390 
   4391 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4392 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4393 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4394 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4395 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4396 		reg |= KABGTXD_BGSQLBIAS;
   4397 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4398 	}
   4399 
   4400 	/* reload sc_ctrl */
   4401 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4402 
   4403 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4404 		wm_set_eee_i350(sc);
   4405 
   4406 	/* Clear the host wakeup bit after lcd reset */
   4407 	if (sc->sc_type >= WM_T_PCH) {
   4408 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4409 		    BM_PORT_GEN_CFG);
   4410 		reg &= ~BM_WUC_HOST_WU_BIT;
   4411 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4412 		    BM_PORT_GEN_CFG, reg);
   4413 	}
   4414 
   4415 	/*
   4416 	 * For PCH, this write will make sure that any noise will be detected
   4417 	 * as a CRC error and be dropped rather than show up as a bad packet
   4418 	 * to the DMA engine
   4419 	 */
   4420 	if (sc->sc_type == WM_T_PCH)
   4421 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4422 
   4423 	if (sc->sc_type >= WM_T_82544)
   4424 		CSR_WRITE(sc, WMREG_WUC, 0);
   4425 
   4426 	wm_reset_mdicnfg_82580(sc);
   4427 
   4428 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4429 		wm_pll_workaround_i210(sc);
   4430 }
   4431 
   4432 /*
   4433  * wm_add_rxbuf:
   4434  *
    4435  *	Add a receive buffer to the indicated descriptor.
   4436  */
   4437 static int
   4438 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4439 {
   4440 	struct wm_softc *sc = rxq->rxq_sc;
   4441 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4442 	struct mbuf *m;
   4443 	int error;
   4444 
   4445 	KASSERT(mutex_owned(rxq->rxq_lock));
   4446 
   4447 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4448 	if (m == NULL)
   4449 		return ENOBUFS;
   4450 
   4451 	MCLGET(m, M_DONTWAIT);
   4452 	if ((m->m_flags & M_EXT) == 0) {
   4453 		m_freem(m);
   4454 		return ENOBUFS;
   4455 	}
   4456 
   4457 	if (rxs->rxs_mbuf != NULL)
   4458 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4459 
   4460 	rxs->rxs_mbuf = m;
   4461 
   4462 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4463 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4464 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4465 	if (error) {
   4466 		/* XXX XXX XXX */
   4467 		aprint_error_dev(sc->sc_dev,
   4468 		    "unable to load rx DMA map %d, error = %d\n",
   4469 		    idx, error);
   4470 		panic("wm_add_rxbuf");
   4471 	}
   4472 
   4473 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4474 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4475 
   4476 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4477 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4478 			wm_init_rxdesc(rxq, idx);
   4479 	} else
   4480 		wm_init_rxdesc(rxq, idx);
   4481 
   4482 	return 0;
   4483 }
   4484 
   4485 /*
   4486  * wm_rxdrain:
   4487  *
   4488  *	Drain the receive queue.
   4489  */
   4490 static void
   4491 wm_rxdrain(struct wm_rxqueue *rxq)
   4492 {
   4493 	struct wm_softc *sc = rxq->rxq_sc;
   4494 	struct wm_rxsoft *rxs;
   4495 	int i;
   4496 
   4497 	KASSERT(mutex_owned(rxq->rxq_lock));
   4498 
   4499 	for (i = 0; i < WM_NRXDESC; i++) {
   4500 		rxs = &rxq->rxq_soft[i];
   4501 		if (rxs->rxs_mbuf != NULL) {
   4502 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4503 			m_freem(rxs->rxs_mbuf);
   4504 			rxs->rxs_mbuf = NULL;
   4505 		}
   4506 	}
   4507 }
   4508 
   4509 
   4510 /*
   4511  * XXX copy from FreeBSD's sys/net/rss_config.c
    4512  * XXX copied from FreeBSD's sys/net/rss_config.c
   4513 /*
   4514  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4515  * effectiveness may be limited by algorithm choice and available entropy
   4516  * during the boot.
   4517  *
   4518  * XXXRW: And that we don't randomize it yet!
   4519  *
   4520  * This is the default Microsoft RSS specification key which is also
   4521  * the Chelsio T5 firmware default key.
   4522  */
   4523 #define RSS_KEYSIZE 40
   4524 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4525 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4526 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4527 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4528 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4529 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4530 };
   4531 
   4532 /*
    4533  * The caller must pass an array of sizeof(wm_rss_key) bytes.
    4534  *
    4535  * XXX
    4536  * As if_ixgbe may also use this function, it should not be an
    4537  * if_wm-specific function.
   4538  */
   4539 static void
   4540 wm_rss_getkey(uint8_t *key)
   4541 {
   4542 
   4543 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4544 }
   4545 
   4546 /*
    4547  * Set up the registers for RSS.
    4548  *
    4549  * XXX No VMDq support yet.
   4550  */
   4551 static void
   4552 wm_init_rss(struct wm_softc *sc)
   4553 {
   4554 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4555 	int i;
   4556 
   4557 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4558 
   4559 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4560 		int qid, reta_ent;
   4561 
   4562 		qid  = i % sc->sc_nqueues;
    4563 		switch (sc->sc_type) {
   4564 		case WM_T_82574:
   4565 			reta_ent = __SHIFTIN(qid,
   4566 			    RETA_ENT_QINDEX_MASK_82574);
   4567 			break;
   4568 		case WM_T_82575:
   4569 			reta_ent = __SHIFTIN(qid,
   4570 			    RETA_ENT_QINDEX1_MASK_82575);
   4571 			break;
   4572 		default:
   4573 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4574 			break;
   4575 		}
   4576 
   4577 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4578 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4579 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4580 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4581 	}
   4582 
   4583 	wm_rss_getkey((uint8_t *)rss_key);
   4584 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4585 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4586 
   4587 	if (sc->sc_type == WM_T_82574)
   4588 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4589 	else
   4590 		mrqc = MRQC_ENABLE_RSS_MQ;
   4591 
   4592 	/*
   4593 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4594 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4595 	 */
   4596 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4597 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4598 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4599 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4600 
   4601 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4602 }
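
/*
 * Illustrative sketch of the result (hypothetical sc_nqueues == 4):
 * the RETA loop above maps entry i to queue (i % 4), so successive
 * table entries spread incoming flows round-robin as 0,1,2,3,0,1,...
 * The 40-byte wm_rss_key fills the RSSRK registers as 32-bit words.
 */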
   4603 
   4604 /*
    4605  * Adjust the TX and RX queue numbers which the system actually uses.
    4606  *
    4607  * The numbers are affected by the following parameters:
    4608  *     - The number of hardware queues
   4609  *     - The number of MSI-X vectors (= "nvectors" argument)
   4610  *     - ncpu
   4611  */
   4612 static void
   4613 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4614 {
   4615 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4616 
   4617 	if (nvectors < 2) {
   4618 		sc->sc_nqueues = 1;
   4619 		return;
   4620 	}
   4621 
    4622 	switch (sc->sc_type) {
   4623 	case WM_T_82572:
   4624 		hw_ntxqueues = 2;
   4625 		hw_nrxqueues = 2;
   4626 		break;
   4627 	case WM_T_82574:
   4628 		hw_ntxqueues = 2;
   4629 		hw_nrxqueues = 2;
   4630 		break;
   4631 	case WM_T_82575:
   4632 		hw_ntxqueues = 4;
   4633 		hw_nrxqueues = 4;
   4634 		break;
   4635 	case WM_T_82576:
   4636 		hw_ntxqueues = 16;
   4637 		hw_nrxqueues = 16;
   4638 		break;
   4639 	case WM_T_82580:
   4640 	case WM_T_I350:
   4641 	case WM_T_I354:
   4642 		hw_ntxqueues = 8;
   4643 		hw_nrxqueues = 8;
   4644 		break;
   4645 	case WM_T_I210:
   4646 		hw_ntxqueues = 4;
   4647 		hw_nrxqueues = 4;
   4648 		break;
   4649 	case WM_T_I211:
   4650 		hw_ntxqueues = 2;
   4651 		hw_nrxqueues = 2;
   4652 		break;
   4653 		/*
    4654 		 * As the ethernet controllers below do not support MSI-X,
    4655 		 * this driver does not use multiqueue on them.
   4656 		 *     - WM_T_80003
   4657 		 *     - WM_T_ICH8
   4658 		 *     - WM_T_ICH9
   4659 		 *     - WM_T_ICH10
   4660 		 *     - WM_T_PCH
   4661 		 *     - WM_T_PCH2
   4662 		 *     - WM_T_PCH_LPT
   4663 		 */
   4664 	default:
   4665 		hw_ntxqueues = 1;
   4666 		hw_nrxqueues = 1;
   4667 		break;
   4668 	}
   4669 
   4670 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4671 
   4672 	/*
    4673 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4674 	 * limit the number of queues actually used.
   4675 	 */
   4676 	if (nvectors < hw_nqueues + 1) {
   4677 		sc->sc_nqueues = nvectors - 1;
   4678 	} else {
   4679 		sc->sc_nqueues = hw_nqueues;
   4680 	}
   4681 
   4682 	/*
    4683 	 * As more queues than CPUs cannot improve scaling, we limit
    4684 	 * the number of queues actually used.
   4685 	 */
   4686 	if (ncpu < sc->sc_nqueues)
   4687 		sc->sc_nqueues = ncpu;
   4688 }
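
/*
 * Worked example (hypothetical configuration): an 82576 has 16 hardware
 * queues.  With nvectors == 5 (four Tx/Rx vectors plus one link vector)
 * and ncpu == 8, the clamps above yield
 * sc_nqueues == min(16, 5 - 1, 8) == 4.
 */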
   4689 
   4690 static inline bool
   4691 wm_is_using_msix(struct wm_softc *sc)
   4692 {
   4693 
   4694 	return (sc->sc_nintrs > 1);
   4695 }
   4696 
   4697 static inline bool
   4698 wm_is_using_multiqueue(struct wm_softc *sc)
   4699 {
   4700 
   4701 	return (sc->sc_nqueues > 1);
   4702 }
   4703 
   4704 static int
   4705 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4706 {
   4707 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4708 	wmq->wmq_id = qidx;
   4709 	wmq->wmq_intr_idx = intr_idx;
   4710 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4711 #ifdef WM_MPSAFE
   4712 	    | SOFTINT_MPSAFE
   4713 #endif
   4714 	    , wm_handle_queue, wmq);
   4715 	if (wmq->wmq_si != NULL)
   4716 		return 0;
   4717 
   4718 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4719 	    wmq->wmq_id);
   4720 
   4721 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4722 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4723 	return ENOMEM;
   4724 }
   4725 
   4726 /*
    4727  * Both single-interrupt MSI and INTx can use this function.
   4728  */
   4729 static int
   4730 wm_setup_legacy(struct wm_softc *sc)
   4731 {
   4732 	pci_chipset_tag_t pc = sc->sc_pc;
   4733 	const char *intrstr = NULL;
   4734 	char intrbuf[PCI_INTRSTR_LEN];
   4735 	int error;
   4736 
   4737 	error = wm_alloc_txrx_queues(sc);
   4738 	if (error) {
   4739 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4740 		    error);
   4741 		return ENOMEM;
   4742 	}
   4743 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4744 	    sizeof(intrbuf));
   4745 #ifdef WM_MPSAFE
   4746 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4747 #endif
   4748 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4749 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4750 	if (sc->sc_ihs[0] == NULL) {
   4751 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4752 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4753 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4754 		return ENOMEM;
   4755 	}
   4756 
   4757 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4758 	sc->sc_nintrs = 1;
   4759 
   4760 	return wm_softint_establish(sc, 0, 0);
   4761 }
   4762 
   4763 static int
   4764 wm_setup_msix(struct wm_softc *sc)
   4765 {
   4766 	void *vih;
   4767 	kcpuset_t *affinity;
   4768 	int qidx, error, intr_idx, txrx_established;
   4769 	pci_chipset_tag_t pc = sc->sc_pc;
   4770 	const char *intrstr = NULL;
   4771 	char intrbuf[PCI_INTRSTR_LEN];
   4772 	char intr_xname[INTRDEVNAMEBUF];
   4773 
   4774 	if (sc->sc_nqueues < ncpu) {
   4775 		/*
    4776 		 * To avoid other devices' interrupts, the affinity of the
    4777 		 * Tx/Rx interrupts starts from CPU#1.
   4778 		 */
   4779 		sc->sc_affinity_offset = 1;
   4780 	} else {
   4781 		/*
    4782 		 * In this case, this device uses all CPUs.  So, we unify the
    4783 		 * affinity cpu_index with the MSI-X vector number for readability.
   4784 		 */
   4785 		sc->sc_affinity_offset = 0;
   4786 	}
   4787 
   4788 	error = wm_alloc_txrx_queues(sc);
   4789 	if (error) {
   4790 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4791 		    error);
   4792 		return ENOMEM;
   4793 	}
   4794 
   4795 	kcpuset_create(&affinity, false);
   4796 	intr_idx = 0;
   4797 
   4798 	/*
   4799 	 * TX and RX
   4800 	 */
   4801 	txrx_established = 0;
   4802 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4803 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4804 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4805 
   4806 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4807 		    sizeof(intrbuf));
   4808 #ifdef WM_MPSAFE
   4809 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4810 		    PCI_INTR_MPSAFE, true);
   4811 #endif
   4812 		memset(intr_xname, 0, sizeof(intr_xname));
   4813 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4814 		    device_xname(sc->sc_dev), qidx);
   4815 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4816 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4817 		if (vih == NULL) {
   4818 			aprint_error_dev(sc->sc_dev,
   4819 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4820 			    intrstr ? " at " : "",
   4821 			    intrstr ? intrstr : "");
   4822 
   4823 			goto fail;
   4824 		}
   4825 		kcpuset_zero(affinity);
   4826 		/* Round-robin affinity */
   4827 		kcpuset_set(affinity, affinity_to);
   4828 		error = interrupt_distribute(vih, affinity, NULL);
   4829 		if (error == 0) {
   4830 			aprint_normal_dev(sc->sc_dev,
   4831 			    "for TX and RX interrupting at %s affinity to %u\n",
   4832 			    intrstr, affinity_to);
   4833 		} else {
   4834 			aprint_normal_dev(sc->sc_dev,
   4835 			    "for TX and RX interrupting at %s\n", intrstr);
   4836 		}
   4837 		sc->sc_ihs[intr_idx] = vih;
   4838 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4839 			goto fail;
   4840 		txrx_established++;
   4841 		intr_idx++;
   4842 	}
   4843 
   4844 	/*
   4845 	 * LINK
   4846 	 */
   4847 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4848 	    sizeof(intrbuf));
   4849 #ifdef WM_MPSAFE
   4850 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4851 #endif
   4852 	memset(intr_xname, 0, sizeof(intr_xname));
   4853 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4854 	    device_xname(sc->sc_dev));
   4855 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4856 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4857 	if (vih == NULL) {
   4858 		aprint_error_dev(sc->sc_dev,
   4859 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4860 		    intrstr ? " at " : "",
   4861 		    intrstr ? intrstr : "");
   4862 
   4863 		goto fail;
   4864 	}
    4865 	/* Keep the default affinity for the LINK interrupt */
   4866 	aprint_normal_dev(sc->sc_dev,
   4867 	    "for LINK interrupting at %s\n", intrstr);
   4868 	sc->sc_ihs[intr_idx] = vih;
   4869 	sc->sc_link_intr_idx = intr_idx;
   4870 
   4871 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4872 	kcpuset_destroy(affinity);
   4873 	return 0;
   4874 
   4875  fail:
   4876 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4877 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4878 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4879 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4880 	}
   4881 
   4882 	kcpuset_destroy(affinity);
   4883 	return ENOMEM;
   4884 }
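
/*
 * Illustrative layout (hypothetical sc_nqueues == 4, ncpu == 8): vectors
 * 0..3 are established as e.g. "wm0TXRX0".."wm0TXRX3" and bound
 * round-robin to CPU#1..CPU#4 (sc_affinity_offset == 1), vector 4 is
 * "wm0LINK" with the default affinity, and sc_nintrs becomes 5.
 */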
   4885 
   4886 static void
   4887 wm_turnon(struct wm_softc *sc)
   4888 {
   4889 	int i;
   4890 
   4891 	KASSERT(WM_CORE_LOCKED(sc));
   4892 
   4893 	/*
    4894 	 * The stopping flags must be unset in ascending order.
    4895 	 */
    4896 	for (i = 0; i < sc->sc_nqueues; i++) {
   4897 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4898 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4899 
   4900 		mutex_enter(txq->txq_lock);
   4901 		txq->txq_stopping = false;
   4902 		mutex_exit(txq->txq_lock);
   4903 
   4904 		mutex_enter(rxq->rxq_lock);
   4905 		rxq->rxq_stopping = false;
   4906 		mutex_exit(rxq->rxq_lock);
   4907 	}
   4908 
   4909 	sc->sc_core_stopping = false;
   4910 }
   4911 
   4912 static void
   4913 wm_turnoff(struct wm_softc *sc)
   4914 {
   4915 	int i;
   4916 
   4917 	KASSERT(WM_CORE_LOCKED(sc));
   4918 
   4919 	sc->sc_core_stopping = true;
   4920 
   4921 	/*
    4922 	 * The stopping flags must be set in ascending order.
    4923 	 */
    4924 	for (i = 0; i < sc->sc_nqueues; i++) {
   4925 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4926 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4927 
   4928 		mutex_enter(rxq->rxq_lock);
   4929 		rxq->rxq_stopping = true;
   4930 		mutex_exit(rxq->rxq_lock);
   4931 
   4932 		mutex_enter(txq->txq_lock);
   4933 		txq->txq_stopping = true;
   4934 		mutex_exit(txq->txq_lock);
   4935 	}
   4936 }
   4937 
   4938 /*
    4939  * Write the interrupt interval value to the ITR or EITR register.
   4940  */
   4941 static void
   4942 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4943 {
   4944 
   4945 	if (!wmq->wmq_set_itr)
   4946 		return;
   4947 
   4948 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4949 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4950 
   4951 		/*
    4952 		 * The 82575 doesn't have the CNT_INGR field, so
    4953 		 * overwrite the counter field in software.
   4954 		 */
   4955 		if (sc->sc_type == WM_T_82575)
   4956 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4957 		else
   4958 			eitr |= EITR_CNT_INGR;
   4959 
   4960 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4961 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   4962 		/*
    4963 		 * The 82574 has both ITR and EITR.  Set EITR when we use
    4964 		 * the multiqueue function with MSI-X.
   4965 		 */
   4966 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4967 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4968 	} else {
   4969 		KASSERT(wmq->wmq_id == 0);
   4970 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4971 	}
   4972 
   4973 	wmq->wmq_set_itr = false;
   4974 }
   4975 
   4976 /*
   4977  * TODO
    4978  * The dynamic ITR calculation below is almost the same as Linux igb's;
    4979  * however, it does not fit wm(4), so AIM is kept disabled until we
    4980  * find an appropriate ITR calculation.
   4981  */
   4982 /*
    4983  * Calculate the interrupt interval value that wm_itrs_writereg() will
    4984  * write; this function does not write the ITR/EITR register itself.
   4985  */
   4986 static void
   4987 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4988 {
   4989 #ifdef NOTYET
   4990 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4991 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4992 	uint32_t avg_size = 0;
   4993 	uint32_t new_itr;
   4994 
   4995 	if (rxq->rxq_packets)
   4996 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4997 	if (txq->txq_packets)
   4998 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4999 
   5000 	if (avg_size == 0) {
   5001 		new_itr = 450; /* restore default value */
   5002 		goto out;
   5003 	}
   5004 
   5005 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5006 	avg_size += 24;
   5007 
   5008 	/* Don't starve jumbo frames */
   5009 	avg_size = min(avg_size, 3000);
   5010 
   5011 	/* Give a little boost to mid-size frames */
   5012 	if ((avg_size > 300) && (avg_size < 1200))
   5013 		new_itr = avg_size / 3;
   5014 	else
   5015 		new_itr = avg_size / 2;
   5016 
   5017 out:
   5018 	/*
    5019 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5020 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5021 	 */
   5022 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5023 		new_itr *= 4;
   5024 
   5025 	if (new_itr != wmq->wmq_itr) {
   5026 		wmq->wmq_itr = new_itr;
   5027 		wmq->wmq_set_itr = true;
   5028 	} else
   5029 		wmq->wmq_set_itr = false;
   5030 
   5031 	rxq->rxq_packets = 0;
   5032 	rxq->rxq_bytes = 0;
   5033 	txq->txq_packets = 0;
   5034 	txq->txq_bytes = 0;
   5035 #endif
   5036 }
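
/*
 * Sketch of the disabled AIM heuristic above (illustrative numbers only):
 * an average frame of 600 bytes becomes 624 after the 24-byte
 * CRC/preamble/gap adjustment, falls in the mid-size band, and yields
 * new_itr = 624 / 3 = 208; on controllers other than the 82575 that is
 * then scaled by 4 to 832.
 */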
   5037 
   5038 /*
   5039  * wm_init:		[ifnet interface function]
   5040  *
   5041  *	Initialize the interface.
   5042  */
   5043 static int
   5044 wm_init(struct ifnet *ifp)
   5045 {
   5046 	struct wm_softc *sc = ifp->if_softc;
   5047 	int ret;
   5048 
   5049 	WM_CORE_LOCK(sc);
   5050 	ret = wm_init_locked(ifp);
   5051 	WM_CORE_UNLOCK(sc);
   5052 
   5053 	return ret;
   5054 }
   5055 
   5056 static int
   5057 wm_init_locked(struct ifnet *ifp)
   5058 {
   5059 	struct wm_softc *sc = ifp->if_softc;
   5060 	int i, j, trynum, error = 0;
   5061 	uint32_t reg;
   5062 
   5063 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5064 		device_xname(sc->sc_dev), __func__));
   5065 	KASSERT(WM_CORE_LOCKED(sc));
   5066 
   5067 	/*
    5068 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5069 	 * There is a small but measurable benefit to avoiding the adjustment
   5070 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5071 	 * on such platforms.  One possibility is that the DMA itself is
   5072 	 * slightly more efficient if the front of the entire packet (instead
   5073 	 * of the front of the headers) is aligned.
   5074 	 *
   5075 	 * Note we must always set align_tweak to 0 if we are using
   5076 	 * jumbo frames.
   5077 	 */
   5078 #ifdef __NO_STRICT_ALIGNMENT
   5079 	sc->sc_align_tweak = 0;
   5080 #else
   5081 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5082 		sc->sc_align_tweak = 0;
   5083 	else
   5084 		sc->sc_align_tweak = 2;
   5085 #endif /* __NO_STRICT_ALIGNMENT */
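
	/*
	 * Example of why the tweak is 2 (the standard ETHER_ALIGN argument,
	 * not chip-specific): the 14-byte Ethernet header would leave the
	 * IP header misaligned in a naturally aligned buffer, so starting
	 * the buffer 2 bytes in puts the IP header on a 4-byte boundary.
	 */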
   5086 
   5087 	/* Cancel any pending I/O. */
   5088 	wm_stop_locked(ifp, 0);
   5089 
   5090 	/* update statistics before reset */
   5091 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5092 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5093 
   5094 	/* PCH_SPT hardware workaround */
   5095 	if (sc->sc_type == WM_T_PCH_SPT)
   5096 		wm_flush_desc_rings(sc);
   5097 
   5098 	/* Reset the chip to a known state. */
   5099 	wm_reset(sc);
   5100 
   5101 	/* AMT based hardware can now take control from firmware */
   5102 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5103 		wm_get_hw_control(sc);
   5104 
   5105 	/* Init hardware bits */
   5106 	wm_initialize_hardware_bits(sc);
   5107 
   5108 	/* Reset the PHY. */
   5109 	if (sc->sc_flags & WM_F_HAS_MII)
   5110 		wm_gmii_reset(sc);
   5111 
   5112 	/* Calculate (E)ITR value */
   5113 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5114 		/*
    5115 		 * For NEWQUEUE controllers' EITR (except for the 82575).
    5116 		 * The 82575's EITR should be set to the same throttling
    5117 		 * value as the older controllers' ITR because the
    5118 		 * interrupts/sec calculation is the same, that is,
    5119 		 * 1,000,000,000 / (N * 256).
    5120 		 *
    5121 		 * The 82574's EITR should be set to the same value as ITR.
    5122 		 *
    5123 		 * For N interrupts/sec, set this value to 1,000,000 / N.
   5124 		 */
   5125 		sc->sc_itr_init = 450;
   5126 	} else if (sc->sc_type >= WM_T_82543) {
   5127 		/*
   5128 		 * Set up the interrupt throttling register (units of 256ns)
   5129 		 * Note that a footnote in Intel's documentation says this
   5130 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5131 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5132 		 * that that is also true for the 1024ns units of the other
   5133 		 * interrupt-related timer registers -- so, really, we ought
   5134 		 * to divide this value by 4 when the link speed is low.
   5135 		 *
   5136 		 * XXX implement this division at link speed change!
   5137 		 */
   5138 
   5139 		/*
   5140 		 * For N interrupts/sec, set this value to:
   5141 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5142 		 * absolute and packet timer values to this value
   5143 		 * divided by 4 to get "simple timer" behavior.
   5144 		 */
   5145 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5146 	}
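
	/*
	 * Worked arithmetic for the two unit systems (illustration only):
	 * the legacy ITR value 1500 throttles to
	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, while the
	 * NEWQUEUE EITR value 450 throttles to
	 * 1,000,000 / 450 ~= 2222 interrupts/sec.
	 */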
   5147 
   5148 	error = wm_init_txrx_queues(sc);
   5149 	if (error)
   5150 		goto out;
   5151 
   5152 	/*
   5153 	 * Clear out the VLAN table -- we don't use it (yet).
   5154 	 */
   5155 	CSR_WRITE(sc, WMREG_VET, 0);
   5156 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5157 		trynum = 10; /* Due to hw errata */
   5158 	else
   5159 		trynum = 1;
   5160 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5161 		for (j = 0; j < trynum; j++)
   5162 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5163 
   5164 	/*
   5165 	 * Set up flow-control parameters.
   5166 	 *
   5167 	 * XXX Values could probably stand some tuning.
   5168 	 */
   5169 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5170 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5171 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5172 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5173 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5174 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5175 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5176 	}
   5177 
   5178 	sc->sc_fcrtl = FCRTL_DFLT;
   5179 	if (sc->sc_type < WM_T_82543) {
   5180 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5181 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5182 	} else {
   5183 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5184 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5185 	}
   5186 
   5187 	if (sc->sc_type == WM_T_80003)
   5188 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5189 	else
   5190 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5191 
   5192 	/* Writes the control register. */
   5193 	wm_set_vlan(sc);
   5194 
   5195 	if (sc->sc_flags & WM_F_HAS_MII) {
   5196 		int val;
   5197 
   5198 		switch (sc->sc_type) {
   5199 		case WM_T_80003:
   5200 		case WM_T_ICH8:
   5201 		case WM_T_ICH9:
   5202 		case WM_T_ICH10:
   5203 		case WM_T_PCH:
   5204 		case WM_T_PCH2:
   5205 		case WM_T_PCH_LPT:
   5206 		case WM_T_PCH_SPT:
   5207 			/*
   5208 			 * Set the mac to wait the maximum time between each
   5209 			 * iteration and increase the max iterations when
   5210 			 * polling the phy; this fixes erroneous timeouts at
   5211 			 * 10Mbps.
   5212 			 */
   5213 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5214 			    0xFFFF);
   5215 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5216 			val |= 0x3F;
   5217 			wm_kmrn_writereg(sc,
   5218 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5219 			break;
   5220 		default:
   5221 			break;
   5222 		}
   5223 
   5224 		if (sc->sc_type == WM_T_80003) {
   5225 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5226 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5227 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5228 
   5229 			/* Bypass RX and TX FIFO's */
   5230 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5231 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5232 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5233 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5234 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5235 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5236 		}
   5237 	}
   5238 #if 0
   5239 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5240 #endif
   5241 
   5242 	/* Set up checksum offload parameters. */
   5243 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5244 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5245 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5246 		reg |= RXCSUM_IPOFL;
   5247 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5248 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5249 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5250 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5251 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5252 
   5253 	/* Set registers about MSI-X */
   5254 	if (wm_is_using_msix(sc)) {
   5255 		uint32_t ivar;
   5256 		struct wm_queue *wmq;
   5257 		int qid, qintr_idx;
   5258 
   5259 		if (sc->sc_type == WM_T_82575) {
   5260 			/* Interrupt control */
   5261 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5262 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5263 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5264 
   5265 			/* TX and RX */
   5266 			for (i = 0; i < sc->sc_nqueues; i++) {
   5267 				wmq = &sc->sc_queue[i];
   5268 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5269 				    EITR_TX_QUEUE(wmq->wmq_id)
   5270 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5271 			}
   5272 			/* Link status */
   5273 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5274 			    EITR_OTHER);
   5275 		} else if (sc->sc_type == WM_T_82574) {
   5276 			/* Interrupt control */
   5277 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5278 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5279 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5280 
   5281 			/*
    5282 			 * Work around an issue with spurious interrupts
    5283 			 * in MSI-X mode.
    5284 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5285 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5286 			 */
   5287 			reg = CSR_READ(sc, WMREG_RFCTL);
   5288 			reg |= WMREG_RFCTL_ACKDIS;
   5289 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5290 
   5291 			ivar = 0;
   5292 			/* TX and RX */
   5293 			for (i = 0; i < sc->sc_nqueues; i++) {
   5294 				wmq = &sc->sc_queue[i];
   5295 				qid = wmq->wmq_id;
   5296 				qintr_idx = wmq->wmq_intr_idx;
   5297 
   5298 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5299 				    IVAR_TX_MASK_Q_82574(qid));
   5300 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5301 				    IVAR_RX_MASK_Q_82574(qid));
   5302 			}
   5303 			/* Link status */
   5304 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5305 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5306 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5307 		} else {
   5308 			/* Interrupt control */
   5309 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5310 			    | GPIE_EIAME | GPIE_PBA);
   5311 
   5312 			switch (sc->sc_type) {
   5313 			case WM_T_82580:
   5314 			case WM_T_I350:
   5315 			case WM_T_I354:
   5316 			case WM_T_I210:
   5317 			case WM_T_I211:
   5318 				/* TX and RX */
   5319 				for (i = 0; i < sc->sc_nqueues; i++) {
   5320 					wmq = &sc->sc_queue[i];
   5321 					qid = wmq->wmq_id;
   5322 					qintr_idx = wmq->wmq_intr_idx;
   5323 
   5324 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5325 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5326 					ivar |= __SHIFTIN((qintr_idx
   5327 						| IVAR_VALID),
   5328 					    IVAR_TX_MASK_Q(qid));
   5329 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5330 					ivar |= __SHIFTIN((qintr_idx
   5331 						| IVAR_VALID),
   5332 					    IVAR_RX_MASK_Q(qid));
   5333 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5334 				}
   5335 				break;
   5336 			case WM_T_82576:
   5337 				/* TX and RX */
   5338 				for (i = 0; i < sc->sc_nqueues; i++) {
   5339 					wmq = &sc->sc_queue[i];
   5340 					qid = wmq->wmq_id;
   5341 					qintr_idx = wmq->wmq_intr_idx;
   5342 
   5343 					ivar = CSR_READ(sc,
   5344 					    WMREG_IVAR_Q_82576(qid));
   5345 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5346 					ivar |= __SHIFTIN((qintr_idx
   5347 						| IVAR_VALID),
   5348 					    IVAR_TX_MASK_Q_82576(qid));
   5349 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5350 					ivar |= __SHIFTIN((qintr_idx
   5351 						| IVAR_VALID),
   5352 					    IVAR_RX_MASK_Q_82576(qid));
   5353 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5354 					    ivar);
   5355 				}
   5356 				break;
   5357 			default:
   5358 				break;
   5359 			}
   5360 
   5361 			/* Link status */
   5362 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5363 			    IVAR_MISC_OTHER);
   5364 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5365 		}
   5366 
   5367 		if (wm_is_using_multiqueue(sc)) {
   5368 			wm_init_rss(sc);
   5369 
   5370 			/*
    5371 			 * NOTE: Receive Full-Packet Checksum Offload
    5372 			 * is mutually exclusive with Multiqueue.  However,
    5373 			 * this is not the same as TCP/IP checksums, which
    5374 			 * still work.
    5375 			 */
   5376 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5377 			reg |= RXCSUM_PCSD;
   5378 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5379 		}
   5380 	}
   5381 
   5382 	/* Set up the interrupt registers. */
   5383 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5384 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5385 	    ICR_RXO | ICR_RXT0;
   5386 	if (wm_is_using_msix(sc)) {
   5387 		uint32_t mask;
   5388 		struct wm_queue *wmq;
   5389 
   5390 		switch (sc->sc_type) {
   5391 		case WM_T_82574:
   5392 			mask = 0;
   5393 			for (i = 0; i < sc->sc_nqueues; i++) {
   5394 				wmq = &sc->sc_queue[i];
   5395 				mask |= ICR_TXQ(wmq->wmq_id);
   5396 				mask |= ICR_RXQ(wmq->wmq_id);
   5397 			}
   5398 			mask |= ICR_OTHER;
   5399 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5400 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5401 			break;
   5402 		default:
   5403 			if (sc->sc_type == WM_T_82575) {
   5404 				mask = 0;
   5405 				for (i = 0; i < sc->sc_nqueues; i++) {
   5406 					wmq = &sc->sc_queue[i];
   5407 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5408 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5409 				}
   5410 				mask |= EITR_OTHER;
   5411 			} else {
   5412 				mask = 0;
   5413 				for (i = 0; i < sc->sc_nqueues; i++) {
   5414 					wmq = &sc->sc_queue[i];
   5415 					mask |= 1 << wmq->wmq_intr_idx;
   5416 				}
   5417 				mask |= 1 << sc->sc_link_intr_idx;
   5418 			}
   5419 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5420 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5421 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5422 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5423 			break;
   5424 		}
   5425 	} else
   5426 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5427 
   5428 	/* Set up the inter-packet gap. */
   5429 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5430 
   5431 	if (sc->sc_type >= WM_T_82543) {
   5432 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5433 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5434 			wm_itrs_writereg(sc, wmq);
   5435 		}
   5436 		/*
    5437 		 * Link interrupts occur much less often than TX
    5438 		 * and RX interrupts.  So, we don't
   5439 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5440 		 * FreeBSD's if_igb.
   5441 		 */
   5442 	}
   5443 
   5444 	/* Set the VLAN ethernetype. */
   5445 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5446 
   5447 	/*
   5448 	 * Set up the transmit control register; we start out with
    5449 	 * a collision distance suitable for FDX, but update it when
   5450 	 * we resolve the media type.
   5451 	 */
   5452 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5453 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5454 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5455 	if (sc->sc_type >= WM_T_82571)
   5456 		sc->sc_tctl |= TCTL_MULR;
   5457 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5458 
   5459 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5460 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5461 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5462 	}
   5463 
   5464 	if (sc->sc_type == WM_T_80003) {
   5465 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5466 		reg &= ~TCTL_EXT_GCEX_MASK;
   5467 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5468 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5469 	}
   5470 
   5471 	/* Set the media. */
   5472 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5473 		goto out;
   5474 
   5475 	/* Configure for OS presence */
   5476 	wm_init_manageability(sc);
   5477 
   5478 	/*
   5479 	 * Set up the receive control register; we actually program
   5480 	 * the register when we set the receive filter.  Use multicast
   5481 	 * address offset type 0.
   5482 	 *
   5483 	 * Only the i82544 has the ability to strip the incoming
   5484 	 * CRC, so we don't enable that feature.
   5485 	 */
   5486 	sc->sc_mchash_type = 0;
   5487 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5488 	    | RCTL_MO(sc->sc_mchash_type);
   5489 
   5490 	/*
    5491 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5492 	 */
   5493 	if (sc->sc_type == WM_T_82574)
   5494 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5495 
   5496 	/*
   5497 	 * The I350 has a bug where it always strips the CRC whether
    5498 	 * asked to or not.  So ask for a stripped CRC here and cope in rxeof.
   5499 	 */
   5500 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5501 	    || (sc->sc_type == WM_T_I210))
   5502 		sc->sc_rctl |= RCTL_SECRC;
   5503 
   5504 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5505 	    && (ifp->if_mtu > ETHERMTU)) {
   5506 		sc->sc_rctl |= RCTL_LPE;
   5507 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5508 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5509 	}
   5510 
   5511 	if (MCLBYTES == 2048) {
   5512 		sc->sc_rctl |= RCTL_2k;
   5513 	} else {
   5514 		if (sc->sc_type >= WM_T_82543) {
   5515 			switch (MCLBYTES) {
   5516 			case 4096:
   5517 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5518 				break;
   5519 			case 8192:
   5520 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5521 				break;
   5522 			case 16384:
   5523 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5524 				break;
   5525 			default:
   5526 				panic("wm_init: MCLBYTES %d unsupported",
   5527 				    MCLBYTES);
   5528 				break;
   5529 			}
   5530 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5531 	}
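
	/*
	 * For example (illustrative): a kernel built with MCLBYTES == 4096
	 * takes the RCTL_BSEX | RCTL_BSEX_4k branch above on 82543 and
	 * later chips, telling the chip to expect 4 KB receive buffers.
	 */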
   5532 
   5533 	/* Enable ECC */
   5534 	switch (sc->sc_type) {
   5535 	case WM_T_82571:
   5536 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5537 		reg |= PBA_ECC_CORR_EN;
   5538 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5539 		break;
   5540 	case WM_T_PCH_LPT:
   5541 	case WM_T_PCH_SPT:
   5542 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5543 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5544 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5545 
   5546 		sc->sc_ctrl |= CTRL_MEHE;
   5547 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5548 		break;
   5549 	default:
   5550 		break;
   5551 	}
   5552 
    5553 	/* On the 82575 and later, set RDT only if RX is enabled */
   5554 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5555 		int qidx;
   5556 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5557 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5558 			for (i = 0; i < WM_NRXDESC; i++) {
   5559 				mutex_enter(rxq->rxq_lock);
   5560 				wm_init_rxdesc(rxq, i);
   5561 				mutex_exit(rxq->rxq_lock);
   5562 
   5563 			}
   5564 		}
   5565 	}
   5566 
   5567 	/* Set the receive filter. */
   5568 	wm_set_filter(sc);
   5569 
   5570 	wm_turnon(sc);
   5571 
   5572 	/* Start the one second link check clock. */
   5573 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5574 
   5575 	/* ...all done! */
   5576 	ifp->if_flags |= IFF_RUNNING;
   5577 	ifp->if_flags &= ~IFF_OACTIVE;
   5578 
   5579  out:
   5580 	sc->sc_if_flags = ifp->if_flags;
   5581 	if (error)
   5582 		log(LOG_ERR, "%s: interface not running\n",
   5583 		    device_xname(sc->sc_dev));
   5584 	return error;
   5585 }
   5586 
   5587 /*
   5588  * wm_stop:		[ifnet interface function]
   5589  *
   5590  *	Stop transmission on the interface.
   5591  */
   5592 static void
   5593 wm_stop(struct ifnet *ifp, int disable)
   5594 {
   5595 	struct wm_softc *sc = ifp->if_softc;
   5596 
   5597 	WM_CORE_LOCK(sc);
   5598 	wm_stop_locked(ifp, disable);
   5599 	WM_CORE_UNLOCK(sc);
   5600 }
   5601 
   5602 static void
   5603 wm_stop_locked(struct ifnet *ifp, int disable)
   5604 {
   5605 	struct wm_softc *sc = ifp->if_softc;
   5606 	struct wm_txsoft *txs;
   5607 	int i, qidx;
   5608 
   5609 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5610 		device_xname(sc->sc_dev), __func__));
   5611 	KASSERT(WM_CORE_LOCKED(sc));
   5612 
   5613 	wm_turnoff(sc);
   5614 
   5615 	/* Stop the one second clock. */
   5616 	callout_stop(&sc->sc_tick_ch);
   5617 
   5618 	/* Stop the 82547 Tx FIFO stall check timer. */
   5619 	if (sc->sc_type == WM_T_82547)
   5620 		callout_stop(&sc->sc_txfifo_ch);
   5621 
   5622 	if (sc->sc_flags & WM_F_HAS_MII) {
   5623 		/* Down the MII. */
   5624 		mii_down(&sc->sc_mii);
   5625 	} else {
   5626 #if 0
   5627 		/* Should we clear PHY's status properly? */
   5628 		wm_reset(sc);
   5629 #endif
   5630 	}
   5631 
   5632 	/* Stop the transmit and receive processes. */
   5633 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5634 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5635 	sc->sc_rctl &= ~RCTL_EN;
   5636 
   5637 	/*
   5638 	 * Clear the interrupt mask to ensure the device cannot assert its
   5639 	 * interrupt line.
   5640 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5641 	 * service any currently pending or shared interrupt.
   5642 	 */
   5643 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5644 	sc->sc_icr = 0;
   5645 	if (wm_is_using_msix(sc)) {
   5646 		if (sc->sc_type != WM_T_82574) {
   5647 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5648 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5649 		} else
   5650 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5651 	}
   5652 
   5653 	/* Release any queued transmit buffers. */
   5654 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5655 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5656 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5657 		mutex_enter(txq->txq_lock);
   5658 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5659 			txs = &txq->txq_soft[i];
   5660 			if (txs->txs_mbuf != NULL) {
   5661 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5662 				m_freem(txs->txs_mbuf);
   5663 				txs->txs_mbuf = NULL;
   5664 			}
   5665 		}
   5666 		mutex_exit(txq->txq_lock);
   5667 	}
   5668 
   5669 	/* Mark the interface as down and cancel the watchdog timer. */
   5670 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5671 	ifp->if_timer = 0;
   5672 
   5673 	if (disable) {
   5674 		for (i = 0; i < sc->sc_nqueues; i++) {
   5675 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5676 			mutex_enter(rxq->rxq_lock);
   5677 			wm_rxdrain(rxq);
   5678 			mutex_exit(rxq->rxq_lock);
   5679 		}
   5680 	}
   5681 
   5682 #if 0 /* notyet */
   5683 	if (sc->sc_type >= WM_T_82544)
   5684 		CSR_WRITE(sc, WMREG_WUC, 0);
   5685 #endif
   5686 }
   5687 
   5688 static void
   5689 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5690 {
   5691 	struct mbuf *m;
   5692 	int i;
   5693 
   5694 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5695 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5696 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5697 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5698 		    m->m_data, m->m_len, m->m_flags);
   5699 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5700 	    i, i == 1 ? "" : "s");
   5701 }
   5702 
   5703 /*
   5704  * wm_82547_txfifo_stall:
   5705  *
   5706  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5707  *	reset the FIFO pointers, and restart packet transmission.
   5708  */
   5709 static void
   5710 wm_82547_txfifo_stall(void *arg)
   5711 {
   5712 	struct wm_softc *sc = arg;
   5713 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5714 
   5715 	mutex_enter(txq->txq_lock);
   5716 
   5717 	if (txq->txq_stopping)
   5718 		goto out;
   5719 
   5720 	if (txq->txq_fifo_stall) {
   5721 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5722 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5723 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5724 			/*
   5725 			 * Packets have drained.  Stop transmitter, reset
   5726 			 * FIFO pointers, restart transmitter, and kick
   5727 			 * the packet queue.
   5728 			 */
   5729 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5730 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5731 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5732 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5733 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5734 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5735 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5736 			CSR_WRITE_FLUSH(sc);
   5737 
   5738 			txq->txq_fifo_head = 0;
   5739 			txq->txq_fifo_stall = 0;
   5740 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5741 		} else {
   5742 			/*
   5743 			 * Still waiting for packets to drain; try again in
   5744 			 * another tick.
   5745 			 */
   5746 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5747 		}
   5748 	}
   5749 
   5750 out:
   5751 	mutex_exit(txq->txq_lock);
   5752 }
   5753 
   5754 /*
   5755  * wm_82547_txfifo_bugchk:
   5756  *
   5757  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5758  *	prevent enqueueing a packet that would wrap around the end
    5759  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5760  *
   5761  *	We do this by checking the amount of space before the end
   5762  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5763  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5764  *	the internal FIFO pointers to the beginning, and restart
   5765  *	transmission on the interface.
   5766  */
   5767 #define	WM_FIFO_HDR		0x10
   5768 #define	WM_82547_PAD_LEN	0x3e0
   5769 static int
   5770 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5771 {
   5772 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5773 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5774 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5775 
   5776 	/* Just return if already stalled. */
   5777 	if (txq->txq_fifo_stall)
   5778 		return 1;
   5779 
   5780 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5781 		/* Stall only occurs in half-duplex mode. */
   5782 		goto send_packet;
   5783 	}
   5784 
   5785 	if (len >= WM_82547_PAD_LEN + space) {
   5786 		txq->txq_fifo_stall = 1;
   5787 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5788 		return 1;
   5789 	}
   5790 
   5791  send_packet:
   5792 	txq->txq_fifo_head += len;
   5793 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5794 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5795 
   5796 	return 0;
   5797 }
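
/*
 * Worked example of the check above (hypothetical numbers): with
 * txq_fifo_size == 0x2000 and txq_fifo_head == 0x1e00, space is 0x200.
 * A 1514-byte packet rounds up to len == 0x600, and since
 * 0x600 >= WM_82547_PAD_LEN + 0x200 (== 0x5e0), the FIFO is stalled
 * until it drains.
 */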
   5798 
   5799 static int
   5800 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5801 {
   5802 	int error;
   5803 
   5804 	/*
   5805 	 * Allocate the control data structures, and create and load the
   5806 	 * DMA map for it.
   5807 	 *
   5808 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5809 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5810 	 * both sets within the same 4G segment.
   5811 	 */
   5812 	if (sc->sc_type < WM_T_82544)
   5813 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5814 	else
   5815 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5816 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5817 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5818 	else
   5819 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5820 
   5821 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5822 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5823 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5824 		aprint_error_dev(sc->sc_dev,
   5825 		    "unable to allocate TX control data, error = %d\n",
   5826 		    error);
   5827 		goto fail_0;
   5828 	}
   5829 
   5830 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5831 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5832 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5833 		aprint_error_dev(sc->sc_dev,
   5834 		    "unable to map TX control data, error = %d\n", error);
   5835 		goto fail_1;
   5836 	}
   5837 
   5838 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5839 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5840 		aprint_error_dev(sc->sc_dev,
   5841 		    "unable to create TX control data DMA map, error = %d\n",
   5842 		    error);
   5843 		goto fail_2;
   5844 	}
   5845 
   5846 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5847 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5848 		aprint_error_dev(sc->sc_dev,
   5849 		    "unable to load TX control data DMA map, error = %d\n",
   5850 		    error);
   5851 		goto fail_3;
   5852 	}
   5853 
   5854 	return 0;
   5855 
   5856  fail_3:
   5857 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5858  fail_2:
   5859 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5860 	    WM_TXDESCS_SIZE(txq));
   5861  fail_1:
   5862 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5863  fail_0:
   5864 	return error;
   5865 }
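
/*
 * Note on the 4G constraint: the (bus_size_t)0x100000000ULL boundary
 * argument to bus_dmamem_alloc() above is what enforces the NOTE in the
 * function; the single returned segment may not cross a 4 GB boundary,
 * so all descriptors land in one 4 GB segment.
 */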
   5866 
   5867 static void
   5868 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5869 {
   5870 
   5871 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5872 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5873 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5874 	    WM_TXDESCS_SIZE(txq));
   5875 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5876 }
   5877 
   5878 static int
   5879 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5880 {
   5881 	int error;
   5882 	size_t rxq_descs_size;
   5883 
   5884 	/*
   5885 	 * Allocate the control data structures, and create and load the
   5886 	 * DMA map for it.
   5887 	 *
   5888 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5889 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5890 	 * both sets within the same 4G segment.
   5891 	 */
   5892 	rxq->rxq_ndesc = WM_NRXDESC;
   5893 	if (sc->sc_type == WM_T_82574)
   5894 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5895 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5896 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5897 	else
   5898 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5899 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5900 
   5901 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5902 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5903 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5904 		aprint_error_dev(sc->sc_dev,
   5905 		    "unable to allocate RX control data, error = %d\n",
   5906 		    error);
   5907 		goto fail_0;
   5908 	}
   5909 
   5910 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5911 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5912 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5913 		aprint_error_dev(sc->sc_dev,
   5914 		    "unable to map RX control data, error = %d\n", error);
   5915 		goto fail_1;
   5916 	}
   5917 
   5918 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5919 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5920 		aprint_error_dev(sc->sc_dev,
   5921 		    "unable to create RX control data DMA map, error = %d\n",
   5922 		    error);
   5923 		goto fail_2;
   5924 	}
   5925 
   5926 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5927 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5928 		aprint_error_dev(sc->sc_dev,
   5929 		    "unable to load RX control data DMA map, error = %d\n",
   5930 		    error);
   5931 		goto fail_3;
   5932 	}
   5933 
   5934 	return 0;
   5935 
   5936  fail_3:
   5937 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5938  fail_2:
   5939 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5940 	    rxq_descs_size);
   5941  fail_1:
   5942 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5943  fail_0:
   5944 	return error;
   5945 }
   5946 
   5947 static void
   5948 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5949 {
   5950 
   5951 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5952 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5953 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5954 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5955 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5956 }
   5957 
   5958 
   5959 static int
   5960 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5961 {
   5962 	int i, error;
   5963 
   5964 	/* Create the transmit buffer DMA maps. */
   5965 	WM_TXQUEUELEN(txq) =
   5966 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5967 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5968 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5969 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5970 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5971 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5972 			aprint_error_dev(sc->sc_dev,
   5973 			    "unable to create Tx DMA map %d, error = %d\n",
   5974 			    i, error);
   5975 			goto fail;
   5976 		}
   5977 	}
   5978 
   5979 	return 0;
   5980 
   5981  fail:
   5982 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5983 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5984 			bus_dmamap_destroy(sc->sc_dmat,
   5985 			    txq->txq_soft[i].txs_dmamap);
   5986 	}
   5987 	return error;
   5988 }
   5989 
   5990 static void
   5991 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5992 {
   5993 	int i;
   5994 
   5995 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5996 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5997 			bus_dmamap_destroy(sc->sc_dmat,
   5998 			    txq->txq_soft[i].txs_dmamap);
   5999 	}
   6000 }
   6001 
   6002 static int
   6003 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6004 {
   6005 	int i, error;
   6006 
   6007 	/* Create the receive buffer DMA maps. */
   6008 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6009 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6010 			    MCLBYTES, 0, 0,
   6011 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6012 			aprint_error_dev(sc->sc_dev,
    6013 			    "unable to create Rx DMA map %d, error = %d\n",
   6014 			    i, error);
   6015 			goto fail;
   6016 		}
   6017 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6018 	}
   6019 
   6020 	return 0;
   6021 
   6022  fail:
   6023 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6024 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6025 			bus_dmamap_destroy(sc->sc_dmat,
   6026 			    rxq->rxq_soft[i].rxs_dmamap);
   6027 	}
   6028 	return error;
   6029 }
   6030 
   6031 static void
   6032 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6033 {
   6034 	int i;
   6035 
   6036 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6037 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6038 			bus_dmamap_destroy(sc->sc_dmat,
   6039 			    rxq->rxq_soft[i].rxs_dmamap);
   6040 	}
   6041 }
   6042 
   6043 /*
    6044  * wm_alloc_txrx_queues:
    6045  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6046  */
   6047 static int
   6048 wm_alloc_txrx_queues(struct wm_softc *sc)
   6049 {
   6050 	int i, error, tx_done, rx_done;
   6051 
   6052 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6053 	    KM_SLEEP);
   6054 	if (sc->sc_queue == NULL) {
   6055 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6056 		error = ENOMEM;
   6057 		goto fail_0;
   6058 	}
   6059 
   6060 	/*
   6061 	 * For transmission
   6062 	 */
   6063 	error = 0;
   6064 	tx_done = 0;
   6065 	for (i = 0; i < sc->sc_nqueues; i++) {
   6066 #ifdef WM_EVENT_COUNTERS
   6067 		int j;
   6068 		const char *xname;
   6069 #endif
   6070 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6071 		txq->txq_sc = sc;
   6072 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6073 
   6074 		error = wm_alloc_tx_descs(sc, txq);
   6075 		if (error)
   6076 			break;
   6077 		error = wm_alloc_tx_buffer(sc, txq);
   6078 		if (error) {
   6079 			wm_free_tx_descs(sc, txq);
   6080 			break;
   6081 		}
   6082 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6083 		if (txq->txq_interq == NULL) {
   6084 			wm_free_tx_descs(sc, txq);
   6085 			wm_free_tx_buffer(sc, txq);
   6086 			error = ENOMEM;
   6087 			break;
   6088 		}
   6089 
   6090 #ifdef WM_EVENT_COUNTERS
   6091 		xname = device_xname(sc->sc_dev);
   6092 
   6093 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6094 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6095 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6096 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6097 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6098 
   6099 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6100 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6101 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6102 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6103 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6104 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6105 
   6106 		for (j = 0; j < WM_NTXSEGS; j++) {
   6107 			snprintf(txq->txq_txseg_evcnt_names[j],
   6108 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6109 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6110 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6111 		}
   6112 
   6113 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6114 
   6115 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6116 #endif /* WM_EVENT_COUNTERS */
   6117 
   6118 		tx_done++;
   6119 	}
   6120 	if (error)
   6121 		goto fail_1;
   6122 
   6123 	/*
    6124 	 * For receive
   6125 	 */
   6126 	error = 0;
   6127 	rx_done = 0;
   6128 	for (i = 0; i < sc->sc_nqueues; i++) {
   6129 #ifdef WM_EVENT_COUNTERS
   6130 		const char *xname;
   6131 #endif
   6132 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6133 		rxq->rxq_sc = sc;
   6134 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6135 
   6136 		error = wm_alloc_rx_descs(sc, rxq);
   6137 		if (error)
   6138 			break;
   6139 
   6140 		error = wm_alloc_rx_buffer(sc, rxq);
   6141 		if (error) {
   6142 			wm_free_rx_descs(sc, rxq);
   6143 			break;
   6144 		}
   6145 
   6146 #ifdef WM_EVENT_COUNTERS
   6147 		xname = device_xname(sc->sc_dev);
   6148 
   6149 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6150 
   6151 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6152 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6153 #endif /* WM_EVENT_COUNTERS */
   6154 
   6155 		rx_done++;
   6156 	}
   6157 	if (error)
   6158 		goto fail_2;
   6159 
   6160 	return 0;
   6161 
   6162  fail_2:
   6163 	for (i = 0; i < rx_done; i++) {
   6164 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6165 		wm_free_rx_buffer(sc, rxq);
   6166 		wm_free_rx_descs(sc, rxq);
   6167 		if (rxq->rxq_lock)
   6168 			mutex_obj_free(rxq->rxq_lock);
   6169 	}
   6170  fail_1:
   6171 	for (i = 0; i < tx_done; i++) {
   6172 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6173 		pcq_destroy(txq->txq_interq);
   6174 		wm_free_tx_buffer(sc, txq);
   6175 		wm_free_tx_descs(sc, txq);
   6176 		if (txq->txq_lock)
   6177 			mutex_obj_free(txq->txq_lock);
   6178 	}
   6179 
   6180 	kmem_free(sc->sc_queue,
   6181 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6182  fail_0:
   6183 	return error;
   6184 }
   6185 
   6186 /*
    6187  * wm_free_txrx_queues:
    6188  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6189  */
   6190 static void
   6191 wm_free_txrx_queues(struct wm_softc *sc)
   6192 {
   6193 	int i;
   6194 
   6195 	for (i = 0; i < sc->sc_nqueues; i++) {
   6196 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6197 
   6198 #ifdef WM_EVENT_COUNTERS
   6199 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6200 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6201 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6202 #endif /* WM_EVENT_COUNTERS */
   6203 
   6204 		wm_free_rx_buffer(sc, rxq);
   6205 		wm_free_rx_descs(sc, rxq);
   6206 		if (rxq->rxq_lock)
   6207 			mutex_obj_free(rxq->rxq_lock);
   6208 	}
   6209 
   6210 	for (i = 0; i < sc->sc_nqueues; i++) {
   6211 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6212 		struct mbuf *m;
   6213 #ifdef WM_EVENT_COUNTERS
   6214 		int j;
   6215 
   6216 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6217 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6218 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6219 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6220 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6221 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6222 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6223 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6224 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6225 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6226 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6227 
   6228 		for (j = 0; j < WM_NTXSEGS; j++)
   6229 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6230 
   6231 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6232 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6233 #endif /* WM_EVENT_COUNTERS */
   6234 
   6235 		/* drain txq_interq */
   6236 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6237 			m_freem(m);
   6238 		pcq_destroy(txq->txq_interq);
   6239 
   6240 		wm_free_tx_buffer(sc, txq);
   6241 		wm_free_tx_descs(sc, txq);
   6242 		if (txq->txq_lock)
   6243 			mutex_obj_free(txq->txq_lock);
   6244 	}
   6245 
   6246 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6247 }
   6248 
   6249 static void
   6250 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6251 {
   6252 
   6253 	KASSERT(mutex_owned(txq->txq_lock));
   6254 
   6255 	/* Initialize the transmit descriptor ring. */
   6256 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6257 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6258 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6259 	txq->txq_free = WM_NTXDESC(txq);
   6260 	txq->txq_next = 0;
   6261 }
   6262 
   6263 static void
   6264 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6265     struct wm_txqueue *txq)
   6266 {
   6267 
   6268 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6269 		device_xname(sc->sc_dev), __func__));
   6270 	KASSERT(mutex_owned(txq->txq_lock));
   6271 
   6272 	if (sc->sc_type < WM_T_82543) {
   6273 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6274 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6275 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6276 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6277 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6278 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6279 	} else {
   6280 		int qid = wmq->wmq_id;
   6281 
   6282 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6283 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6284 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6285 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6286 
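		/*
		 * TXDCTL thresholds: PTHRESH is the prefetch threshold,
		 * HTHRESH the host threshold and WTHRESH the write-back
		 * threshold.
		 */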
   6287 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6288 			/*
   6289 			 * Don't write TDT before TCTL.EN is set.
			 * See the datasheet.
   6291 			 */
   6292 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6293 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6294 			    | TXDCTL_WTHRESH(0));
   6295 		else {
   6296 			/* XXX should update with AIM? */
   6297 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6298 			if (sc->sc_type >= WM_T_82540) {
				/* Should be the same value as TIDV */
   6300 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6301 			}
   6302 
   6303 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6304 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6305 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6306 		}
   6307 	}
   6308 }
   6309 
   6310 static void
   6311 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6312 {
   6313 	int i;
   6314 
   6315 	KASSERT(mutex_owned(txq->txq_lock));
   6316 
   6317 	/* Initialize the transmit job descriptors. */
   6318 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6319 		txq->txq_soft[i].txs_mbuf = NULL;
   6320 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6321 	txq->txq_snext = 0;
   6322 	txq->txq_sdirty = 0;
   6323 }
   6324 
   6325 static void
   6326 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6327     struct wm_txqueue *txq)
   6328 {
   6329 
   6330 	KASSERT(mutex_owned(txq->txq_lock));
   6331 
   6332 	/*
   6333 	 * Set up some register offsets that are different between
   6334 	 * the i82542 and the i82543 and later chips.
   6335 	 */
   6336 	if (sc->sc_type < WM_T_82543)
   6337 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6338 	else
   6339 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6340 
   6341 	wm_init_tx_descs(sc, txq);
   6342 	wm_init_tx_regs(sc, wmq, txq);
   6343 	wm_init_tx_buffer(sc, txq);
   6344 }
   6345 
   6346 static void
   6347 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6348     struct wm_rxqueue *rxq)
   6349 {
   6350 
   6351 	KASSERT(mutex_owned(rxq->rxq_lock));
   6352 
   6353 	/*
   6354 	 * Initialize the receive descriptor and receive job
   6355 	 * descriptor rings.
   6356 	 */
   6357 	if (sc->sc_type < WM_T_82543) {
   6358 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6359 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6360 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6361 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6362 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6363 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6364 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6365 
   6366 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6367 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6368 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6369 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6370 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6371 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6372 	} else {
   6373 		int qid = wmq->wmq_id;
   6374 
   6375 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6376 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
		CSR_WRITE(sc, WMREG_RDLEN(qid),
		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6378 
   6379 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
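			/*
			 * SRRCTL.BSIZEPKT is expressed in units of
			 * 1 << SRRCTL_BSIZEPKT_SHIFT bytes, so the cluster
			 * size must be a multiple of that unit.
			 */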
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);

			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6386 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6387 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6388 			    | RXDCTL_WTHRESH(1));
   6389 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6390 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6391 		} else {
   6392 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6393 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6394 			/* XXX should update with AIM? */
   6395 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same value as RDTR */
   6397 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6398 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6399 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6400 		}
   6401 	}
   6402 }
   6403 
   6404 static int
   6405 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6406 {
   6407 	struct wm_rxsoft *rxs;
   6408 	int error, i;
   6409 
   6410 	KASSERT(mutex_owned(rxq->rxq_lock));
   6411 
   6412 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6413 		rxs = &rxq->rxq_soft[i];
   6414 		if (rxs->rxs_mbuf == NULL) {
   6415 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6416 				log(LOG_ERR, "%s: unable to allocate or map "
   6417 				    "rx buffer %d, error = %d\n",
   6418 				    device_xname(sc->sc_dev), i, error);
   6419 				/*
   6420 				 * XXX Should attempt to run with fewer receive
   6421 				 * XXX buffers instead of just failing.
   6422 				 */
   6423 				wm_rxdrain(rxq);
   6424 				return ENOMEM;
   6425 			}
   6426 		} else {
   6427 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6428 				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
			 */
   6434 		}
   6435 	}
   6436 	rxq->rxq_ptr = 0;
   6437 	rxq->rxq_discard = 0;
   6438 	WM_RXCHAIN_RESET(rxq);
   6439 
   6440 	return 0;
   6441 }
   6442 
   6443 static int
   6444 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6445     struct wm_rxqueue *rxq)
   6446 {
   6447 
   6448 	KASSERT(mutex_owned(rxq->rxq_lock));
   6449 
   6450 	/*
   6451 	 * Set up some register offsets that are different between
   6452 	 * the i82542 and the i82543 and later chips.
   6453 	 */
   6454 	if (sc->sc_type < WM_T_82543)
   6455 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6456 	else
   6457 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6458 
   6459 	wm_init_rx_regs(sc, wmq, rxq);
   6460 	return wm_init_rx_buffer(sc, rxq);
   6461 }
   6462 
   6463 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descs and {tx,rx} buffers.
   6466  */
   6467 static int
   6468 wm_init_txrx_queues(struct wm_softc *sc)
   6469 {
   6470 	int i, error = 0;
   6471 
   6472 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6473 		device_xname(sc->sc_dev), __func__));
   6474 
   6475 	for (i = 0; i < sc->sc_nqueues; i++) {
   6476 		struct wm_queue *wmq = &sc->sc_queue[i];
   6477 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6478 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6479 
   6480 		/*
   6481 		 * TODO
		 * Currently, a constant value is used instead of AIM
		 * (adaptive interrupt moderation).  Furthermore, the
		 * interrupt interval for multiqueue, which uses polling
		 * mode, is less than the default value.
		 * More tuning and AIM support are required.
   6486 		 */
   6487 		if (wm_is_using_multiqueue(sc))
   6488 			wmq->wmq_itr = 50;
   6489 		else
   6490 			wmq->wmq_itr = sc->sc_itr_init;
   6491 		wmq->wmq_set_itr = true;
   6492 
   6493 		mutex_enter(txq->txq_lock);
   6494 		wm_init_tx_queue(sc, wmq, txq);
   6495 		mutex_exit(txq->txq_lock);
   6496 
   6497 		mutex_enter(rxq->rxq_lock);
   6498 		error = wm_init_rx_queue(sc, wmq, rxq);
   6499 		mutex_exit(rxq->rxq_lock);
   6500 		if (error)
   6501 			break;
   6502 	}
   6503 
   6504 	return error;
   6505 }
   6506 
   6507 /*
   6508  * wm_tx_offload:
   6509  *
   6510  *	Set up TCP/IP checksumming parameters for the
   6511  *	specified packet.
   6512  */
   6513 static int
   6514 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6515     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6516 {
   6517 	struct mbuf *m0 = txs->txs_mbuf;
   6518 	struct livengood_tcpip_ctxdesc *t;
   6519 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6520 	uint32_t ipcse;
   6521 	struct ether_header *eh;
   6522 	int offset, iphl;
   6523 	uint8_t fields;
   6524 
   6525 	/*
   6526 	 * XXX It would be nice if the mbuf pkthdr had offset
   6527 	 * fields for the protocol headers.
   6528 	 */
   6529 
   6530 	eh = mtod(m0, struct ether_header *);
   6531 	switch (htons(eh->ether_type)) {
   6532 	case ETHERTYPE_IP:
   6533 	case ETHERTYPE_IPV6:
   6534 		offset = ETHER_HDR_LEN;
   6535 		break;
   6536 
   6537 	case ETHERTYPE_VLAN:
   6538 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6539 		break;
   6540 
   6541 	default:
   6542 		/*
   6543 		 * Don't support this protocol or encapsulation.
   6544 		 */
   6545 		*fieldsp = 0;
   6546 		*cmdp = 0;
   6547 		return 0;
   6548 	}
   6549 
   6550 	if ((m0->m_pkthdr.csum_flags &
   6551 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6552 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6553 	} else {
   6554 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6555 	}
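	/* IPCSE is the offset of the last byte of the IP header, inclusive. */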
   6556 	ipcse = offset + iphl - 1;
   6557 
   6558 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6559 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6560 	seg = 0;
   6561 	fields = 0;
   6562 
   6563 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6564 		int hlen = offset + iphl;
   6565 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6566 
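		/*
		 * For both paths below, seed the TCP pseudo-header
		 * checksum into th_sum with a zero length and zero the
		 * IP length field, so that the chip can fill in the
		 * per-segment values during segmentation.
		 */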
   6567 		if (__predict_false(m0->m_len <
   6568 				    (hlen + sizeof(struct tcphdr)))) {
   6569 			/*
   6570 			 * TCP/IP headers are not in the first mbuf; we need
   6571 			 * to do this the slow and painful way.  Let's just
   6572 			 * hope this doesn't happen very often.
   6573 			 */
   6574 			struct tcphdr th;
   6575 
   6576 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6577 
   6578 			m_copydata(m0, hlen, sizeof(th), &th);
   6579 			if (v4) {
   6580 				struct ip ip;
   6581 
   6582 				m_copydata(m0, offset, sizeof(ip), &ip);
   6583 				ip.ip_len = 0;
   6584 				m_copyback(m0,
   6585 				    offset + offsetof(struct ip, ip_len),
   6586 				    sizeof(ip.ip_len), &ip.ip_len);
   6587 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6588 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6589 			} else {
   6590 				struct ip6_hdr ip6;
   6591 
   6592 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6593 				ip6.ip6_plen = 0;
   6594 				m_copyback(m0,
   6595 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6596 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6597 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6598 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6599 			}
   6600 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6601 			    sizeof(th.th_sum), &th.th_sum);
   6602 
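			/* th_off counts 32-bit words; convert to bytes. */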
   6603 			hlen += th.th_off << 2;
   6604 		} else {
   6605 			/*
   6606 			 * TCP/IP headers are in the first mbuf; we can do
   6607 			 * this the easy way.
   6608 			 */
   6609 			struct tcphdr *th;
   6610 
   6611 			if (v4) {
   6612 				struct ip *ip =
   6613 				    (void *)(mtod(m0, char *) + offset);
   6614 				th = (void *)(mtod(m0, char *) + hlen);
   6615 
   6616 				ip->ip_len = 0;
   6617 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6618 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6619 			} else {
   6620 				struct ip6_hdr *ip6 =
   6621 				    (void *)(mtod(m0, char *) + offset);
   6622 				th = (void *)(mtod(m0, char *) + hlen);
   6623 
   6624 				ip6->ip6_plen = 0;
   6625 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6626 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6627 			}
   6628 			hlen += th->th_off << 2;
   6629 		}
   6630 
   6631 		if (v4) {
   6632 			WM_Q_EVCNT_INCR(txq, txtso);
   6633 			cmdlen |= WTX_TCPIP_CMD_IP;
   6634 		} else {
   6635 			WM_Q_EVCNT_INCR(txq, txtso6);
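			/* IPv6 has no IP header checksum, so clear IPCSE. */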
   6636 			ipcse = 0;
   6637 		}
   6638 		cmd |= WTX_TCPIP_CMD_TSE;
   6639 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6640 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6641 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6642 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6643 	}
   6644 
   6645 	/*
   6646 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6647 	 * offload feature, if we load the context descriptor, we
   6648 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6649 	 */
   6650 
   6651 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6652 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6653 	    WTX_TCPIP_IPCSE(ipcse);
   6654 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6655 		WM_Q_EVCNT_INCR(txq, txipsum);
   6656 		fields |= WTX_IXSM;
   6657 	}
   6658 
   6659 	offset += iphl;
   6660 
   6661 	if (m0->m_pkthdr.csum_flags &
   6662 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6663 		WM_Q_EVCNT_INCR(txq, txtusum);
   6664 		fields |= WTX_TXSM;
   6665 		tucs = WTX_TCPIP_TUCSS(offset) |
   6666 		    WTX_TCPIP_TUCSO(offset +
   6667 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6668 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6669 	} else if ((m0->m_pkthdr.csum_flags &
   6670 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6671 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6672 		fields |= WTX_TXSM;
   6673 		tucs = WTX_TCPIP_TUCSS(offset) |
   6674 		    WTX_TCPIP_TUCSO(offset +
   6675 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6676 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6677 	} else {
   6678 		/* Just initialize it to a valid TCP context. */
   6679 		tucs = WTX_TCPIP_TUCSS(offset) |
   6680 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6681 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6682 	}
   6683 
   6684 	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: there, a context descriptor must be written
	 * for every packet when two descriptor queues are used.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
   6690 	 */
   6691 	/* Fill in the context descriptor. */
   6692 	t = (struct livengood_tcpip_ctxdesc *)
   6693 	    &txq->txq_descs[txq->txq_next];
   6694 	t->tcpip_ipcs = htole32(ipcs);
   6695 	t->tcpip_tucs = htole32(tucs);
   6696 	t->tcpip_cmdlen = htole32(cmdlen);
   6697 	t->tcpip_seg = htole32(seg);
   6698 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6699 
   6700 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6701 	txs->txs_ndesc++;
   6702 
   6703 	*cmdp = cmd;
   6704 	*fieldsp = fields;
   6705 
   6706 	return 0;
   6707 }
   6708 
   6709 static inline int
   6710 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6711 {
   6712 	struct wm_softc *sc = ifp->if_softc;
   6713 	u_int cpuid = cpu_index(curcpu());
   6714 
	/*
	 * Currently, a simple distribution strategy: adding ncpu keeps
	 * the dividend non-negative, so each CPU maps to a fixed queue.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
	 */
	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6721 }
   6722 
   6723 /*
   6724  * wm_start:		[ifnet interface function]
   6725  *
   6726  *	Start packet transmission on the interface.
   6727  */
   6728 static void
   6729 wm_start(struct ifnet *ifp)
   6730 {
   6731 	struct wm_softc *sc = ifp->if_softc;
   6732 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6733 
   6734 #ifdef WM_MPSAFE
   6735 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6736 #endif
   6737 	/*
   6738 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6739 	 */
   6740 
   6741 	mutex_enter(txq->txq_lock);
   6742 	if (!txq->txq_stopping)
   6743 		wm_start_locked(ifp);
   6744 	mutex_exit(txq->txq_lock);
   6745 }
   6746 
   6747 static void
   6748 wm_start_locked(struct ifnet *ifp)
   6749 {
   6750 	struct wm_softc *sc = ifp->if_softc;
   6751 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6752 
   6753 	wm_send_common_locked(ifp, txq, false);
   6754 }
   6755 
   6756 static int
   6757 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6758 {
   6759 	int qid;
   6760 	struct wm_softc *sc = ifp->if_softc;
   6761 	struct wm_txqueue *txq;
   6762 
   6763 	qid = wm_select_txqueue(ifp, m);
   6764 	txq = &sc->sc_queue[qid].wmq_txq;
   6765 
   6766 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6767 		m_freem(m);
   6768 		WM_Q_EVCNT_INCR(txq, txdrop);
   6769 		return ENOBUFS;
   6770 	}
   6771 
   6772 	/*
   6773 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6774 	 */
   6775 	ifp->if_obytes += m->m_pkthdr.len;
   6776 	if (m->m_flags & M_MCAST)
   6777 		ifp->if_omcasts++;
   6778 
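	/*
	 * See the comment in wm_nq_transmit() for why a failed
	 * mutex_tryenter() here is harmless.
	 */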
   6779 	if (mutex_tryenter(txq->txq_lock)) {
   6780 		if (!txq->txq_stopping)
   6781 			wm_transmit_locked(ifp, txq);
   6782 		mutex_exit(txq->txq_lock);
   6783 	}
   6784 
   6785 	return 0;
   6786 }
   6787 
   6788 static void
   6789 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6790 {
   6791 
   6792 	wm_send_common_locked(ifp, txq, true);
   6793 }
   6794 
   6795 static void
   6796 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6797     bool is_transmit)
   6798 {
   6799 	struct wm_softc *sc = ifp->if_softc;
   6800 	struct mbuf *m0;
   6801 	struct m_tag *mtag;
   6802 	struct wm_txsoft *txs;
   6803 	bus_dmamap_t dmamap;
   6804 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6805 	bus_addr_t curaddr;
   6806 	bus_size_t seglen, curlen;
   6807 	uint32_t cksumcmd;
   6808 	uint8_t cksumfields;
   6809 
   6810 	KASSERT(mutex_owned(txq->txq_lock));
   6811 
   6812 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6813 		return;
   6814 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6815 		return;
   6816 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6817 		return;
   6818 
   6819 	/* Remember the previous number of free descriptors. */
   6820 	ofree = txq->txq_free;
   6821 
   6822 	/*
   6823 	 * Loop through the send queue, setting up transmit descriptors
   6824 	 * until we drain the queue, or use up all available transmit
   6825 	 * descriptors.
   6826 	 */
   6827 	for (;;) {
   6828 		m0 = NULL;
   6829 
   6830 		/* Get a work queue entry. */
   6831 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6832 			wm_txeof(sc, txq);
   6833 			if (txq->txq_sfree == 0) {
   6834 				DPRINTF(WM_DEBUG_TX,
   6835 				    ("%s: TX: no free job descriptors\n",
   6836 					device_xname(sc->sc_dev)));
   6837 				WM_Q_EVCNT_INCR(txq, txsstall);
   6838 				break;
   6839 			}
   6840 		}
   6841 
   6842 		/* Grab a packet off the queue. */
   6843 		if (is_transmit)
   6844 			m0 = pcq_get(txq->txq_interq);
   6845 		else
   6846 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6847 		if (m0 == NULL)
   6848 			break;
   6849 
   6850 		DPRINTF(WM_DEBUG_TX,
   6851 		    ("%s: TX: have packet to transmit: %p\n",
   6852 		    device_xname(sc->sc_dev), m0));
   6853 
   6854 		txs = &txq->txq_soft[txq->txq_snext];
   6855 		dmamap = txs->txs_dmamap;
   6856 
   6857 		use_tso = (m0->m_pkthdr.csum_flags &
   6858 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6859 
   6860 		/*
   6861 		 * So says the Linux driver:
   6862 		 * The controller does a simple calculation to make sure
   6863 		 * there is enough room in the FIFO before initiating the
   6864 		 * DMA for each buffer.  The calc is:
   6865 		 *	4 = ceil(buffer len / MSS)
   6866 		 * To make sure we don't overrun the FIFO, adjust the max
   6867 		 * buffer len if the MSS drops.
   6868 		 */
   6869 		dmamap->dm_maxsegsz =
   6870 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6871 		    ? m0->m_pkthdr.segsz << 2
   6872 		    : WTX_MAX_LEN;
   6873 
   6874 		/*
   6875 		 * Load the DMA map.  If this fails, the packet either
   6876 		 * didn't fit in the allotted number of segments, or we
   6877 		 * were short on resources.  For the too-many-segments
   6878 		 * case, we simply report an error and drop the packet,
   6879 		 * since we can't sanely copy a jumbo packet to a single
   6880 		 * buffer.
   6881 		 */
   6882 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6883 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6884 		if (error) {
   6885 			if (error == EFBIG) {
   6886 				WM_Q_EVCNT_INCR(txq, txdrop);
   6887 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6888 				    "DMA segments, dropping...\n",
   6889 				    device_xname(sc->sc_dev));
   6890 				wm_dump_mbuf_chain(sc, m0);
   6891 				m_freem(m0);
   6892 				continue;
   6893 			}
			/* Short on resources, just stop for now. */
   6895 			DPRINTF(WM_DEBUG_TX,
   6896 			    ("%s: TX: dmamap load failed: %d\n",
   6897 			    device_xname(sc->sc_dev), error));
   6898 			break;
   6899 		}
   6900 
   6901 		segs_needed = dmamap->dm_nsegs;
   6902 		if (use_tso) {
   6903 			/* For sentinel descriptor; see below. */
   6904 			segs_needed++;
   6905 		}
   6906 
   6907 		/*
   6908 		 * Ensure we have enough descriptors free to describe
   6909 		 * the packet.  Note, we always reserve one descriptor
   6910 		 * at the end of the ring due to the semantics of the
   6911 		 * TDT register, plus one more in the event we need
   6912 		 * to load offload context.
   6913 		 */
   6914 		if (segs_needed > txq->txq_free - 2) {
   6915 			/*
   6916 			 * Not enough free descriptors to transmit this
   6917 			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6920 			 * layer that there are no more slots left.
   6921 			 */
   6922 			DPRINTF(WM_DEBUG_TX,
   6923 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6924 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6925 			    segs_needed, txq->txq_free - 1));
   6926 			if (!is_transmit)
   6927 				ifp->if_flags |= IFF_OACTIVE;
   6928 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6929 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6930 			WM_Q_EVCNT_INCR(txq, txdstall);
   6931 			break;
   6932 		}
   6933 
   6934 		/*
   6935 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6936 		 * once we know we can transmit the packet, since we
   6937 		 * do some internal FIFO space accounting here.
   6938 		 */
   6939 		if (sc->sc_type == WM_T_82547 &&
   6940 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6941 			DPRINTF(WM_DEBUG_TX,
   6942 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6943 			    device_xname(sc->sc_dev)));
   6944 			if (!is_transmit)
   6945 				ifp->if_flags |= IFF_OACTIVE;
   6946 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6947 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6948 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6949 			break;
   6950 		}
   6951 
   6952 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6953 
   6954 		DPRINTF(WM_DEBUG_TX,
   6955 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6956 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6957 
   6958 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6959 
   6960 		/*
   6961 		 * Store a pointer to the packet so that we can free it
   6962 		 * later.
   6963 		 *
   6964 		 * Initially, we consider the number of descriptors the
   6965 		 * packet uses the number of DMA segments.  This may be
   6966 		 * incremented by 1 if we do checksum offload (a descriptor
   6967 		 * is used to set the checksum context).
   6968 		 */
   6969 		txs->txs_mbuf = m0;
   6970 		txs->txs_firstdesc = txq->txq_next;
   6971 		txs->txs_ndesc = segs_needed;
   6972 
   6973 		/* Set up offload parameters for this packet. */
   6974 		if (m0->m_pkthdr.csum_flags &
   6975 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6976 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6977 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6978 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6979 					  &cksumfields) != 0) {
   6980 				/* Error message already displayed. */
   6981 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6982 				continue;
   6983 			}
   6984 		} else {
   6985 			cksumcmd = 0;
   6986 			cksumfields = 0;
   6987 		}
   6988 
   6989 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6990 
   6991 		/* Sync the DMA map. */
   6992 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6993 		    BUS_DMASYNC_PREWRITE);
   6994 
   6995 		/* Initialize the transmit descriptor. */
   6996 		for (nexttx = txq->txq_next, seg = 0;
   6997 		     seg < dmamap->dm_nsegs; seg++) {
   6998 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6999 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7000 			     seglen != 0;
   7001 			     curaddr += curlen, seglen -= curlen,
   7002 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7003 				curlen = seglen;
   7004 
   7005 				/*
   7006 				 * So says the Linux driver:
   7007 				 * Work around for premature descriptor
   7008 				 * write-backs in TSO mode.  Append a
   7009 				 * 4-byte sentinel descriptor.
   7010 				 */
   7011 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7012 				    curlen > 8)
   7013 					curlen -= 4;
   7014 
   7015 				wm_set_dma_addr(
   7016 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7017 				txq->txq_descs[nexttx].wtx_cmdlen
   7018 				    = htole32(cksumcmd | curlen);
   7019 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7020 				    = 0;
   7021 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7022 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7024 				lasttx = nexttx;
   7025 
   7026 				DPRINTF(WM_DEBUG_TX,
   7027 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7028 				     "len %#04zx\n",
   7029 				    device_xname(sc->sc_dev), nexttx,
   7030 				    (uint64_t)curaddr, curlen));
   7031 			}
   7032 		}
   7033 
   7034 		KASSERT(lasttx != -1);
   7035 
   7036 		/*
   7037 		 * Set up the command byte on the last descriptor of
   7038 		 * the packet.  If we're in the interrupt delay window,
   7039 		 * delay the interrupt.
   7040 		 */
   7041 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7042 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7043 
   7044 		/*
   7045 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7046 		 * up the descriptor to encapsulate the packet for us.
   7047 		 *
   7048 		 * This is only valid on the last descriptor of the packet.
   7049 		 */
   7050 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7051 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7052 			    htole32(WTX_CMD_VLE);
   7053 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7054 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7055 		}
   7056 
   7057 		txs->txs_lastdesc = lasttx;
   7058 
   7059 		DPRINTF(WM_DEBUG_TX,
   7060 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7061 		    device_xname(sc->sc_dev),
   7062 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7063 
   7064 		/* Sync the descriptors we're using. */
   7065 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7066 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7067 
   7068 		/* Give the packet to the chip. */
   7069 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7070 
   7071 		DPRINTF(WM_DEBUG_TX,
   7072 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7073 
   7074 		DPRINTF(WM_DEBUG_TX,
   7075 		    ("%s: TX: finished transmitting packet, job %d\n",
   7076 		    device_xname(sc->sc_dev), txq->txq_snext));
   7077 
   7078 		/* Advance the tx pointer. */
   7079 		txq->txq_free -= txs->txs_ndesc;
   7080 		txq->txq_next = nexttx;
   7081 
   7082 		txq->txq_sfree--;
   7083 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7084 
   7085 		/* Pass the packet to any BPF listeners. */
   7086 		bpf_mtap(ifp, m0);
   7087 	}
   7088 
   7089 	if (m0 != NULL) {
   7090 		if (!is_transmit)
   7091 			ifp->if_flags |= IFF_OACTIVE;
   7092 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7093 		WM_Q_EVCNT_INCR(txq, txdrop);
   7094 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7095 			__func__));
   7096 		m_freem(m0);
   7097 	}
   7098 
   7099 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7100 		/* No more slots; notify upper layer. */
   7101 		if (!is_transmit)
   7102 			ifp->if_flags |= IFF_OACTIVE;
   7103 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7104 	}
   7105 
   7106 	if (txq->txq_free != ofree) {
   7107 		/* Set a watchdog timer in case the chip flakes out. */
   7108 		ifp->if_timer = 5;
   7109 	}
   7110 }
   7111 
   7112 /*
   7113  * wm_nq_tx_offload:
   7114  *
   7115  *	Set up TCP/IP checksumming parameters for the
   7116  *	specified packet, for NEWQUEUE devices
   7117  */
   7118 static int
   7119 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7120     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7121 {
   7122 	struct mbuf *m0 = txs->txs_mbuf;
   7123 	struct m_tag *mtag;
   7124 	uint32_t vl_len, mssidx, cmdc;
   7125 	struct ether_header *eh;
   7126 	int offset, iphl;
   7127 
   7128 	/*
   7129 	 * XXX It would be nice if the mbuf pkthdr had offset
   7130 	 * fields for the protocol headers.
   7131 	 */
   7132 	*cmdlenp = 0;
   7133 	*fieldsp = 0;
   7134 
   7135 	eh = mtod(m0, struct ether_header *);
   7136 	switch (htons(eh->ether_type)) {
   7137 	case ETHERTYPE_IP:
   7138 	case ETHERTYPE_IPV6:
   7139 		offset = ETHER_HDR_LEN;
   7140 		break;
   7141 
   7142 	case ETHERTYPE_VLAN:
   7143 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7144 		break;
   7145 
   7146 	default:
   7147 		/* Don't support this protocol or encapsulation. */
   7148 		*do_csum = false;
   7149 		return 0;
   7150 	}
   7151 	*do_csum = true;
   7152 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7153 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7154 
   7155 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7156 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7157 
   7158 	if ((m0->m_pkthdr.csum_flags &
   7159 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7160 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7161 	} else {
   7162 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7163 	}
   7164 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7165 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7166 
   7167 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7168 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7169 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7170 		*cmdlenp |= NQTX_CMD_VLE;
   7171 	}
   7172 
   7173 	mssidx = 0;
   7174 
   7175 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7176 		int hlen = offset + iphl;
   7177 		int tcp_hlen;
   7178 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7179 
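		/*
		 * As in wm_tx_offload(), seed the TCP pseudo-header
		 * checksum into th_sum with a zero length and zero the
		 * IP length field, so that the chip can fill in the
		 * per-segment values during segmentation.
		 */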
   7180 		if (__predict_false(m0->m_len <
   7181 				    (hlen + sizeof(struct tcphdr)))) {
   7182 			/*
   7183 			 * TCP/IP headers are not in the first mbuf; we need
   7184 			 * to do this the slow and painful way.  Let's just
   7185 			 * hope this doesn't happen very often.
   7186 			 */
   7187 			struct tcphdr th;
   7188 
   7189 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7190 
   7191 			m_copydata(m0, hlen, sizeof(th), &th);
   7192 			if (v4) {
   7193 				struct ip ip;
   7194 
   7195 				m_copydata(m0, offset, sizeof(ip), &ip);
   7196 				ip.ip_len = 0;
   7197 				m_copyback(m0,
   7198 				    offset + offsetof(struct ip, ip_len),
   7199 				    sizeof(ip.ip_len), &ip.ip_len);
   7200 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7201 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7202 			} else {
   7203 				struct ip6_hdr ip6;
   7204 
   7205 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7206 				ip6.ip6_plen = 0;
   7207 				m_copyback(m0,
   7208 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7209 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7210 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7211 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7212 			}
   7213 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7214 			    sizeof(th.th_sum), &th.th_sum);
   7215 
   7216 			tcp_hlen = th.th_off << 2;
   7217 		} else {
   7218 			/*
   7219 			 * TCP/IP headers are in the first mbuf; we can do
   7220 			 * this the easy way.
   7221 			 */
   7222 			struct tcphdr *th;
   7223 
   7224 			if (v4) {
   7225 				struct ip *ip =
   7226 				    (void *)(mtod(m0, char *) + offset);
   7227 				th = (void *)(mtod(m0, char *) + hlen);
   7228 
   7229 				ip->ip_len = 0;
   7230 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7231 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7232 			} else {
   7233 				struct ip6_hdr *ip6 =
   7234 				    (void *)(mtod(m0, char *) + offset);
   7235 				th = (void *)(mtod(m0, char *) + hlen);
   7236 
   7237 				ip6->ip6_plen = 0;
   7238 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7239 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7240 			}
   7241 			tcp_hlen = th->th_off << 2;
   7242 		}
   7243 		hlen += tcp_hlen;
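		/* hlen now covers the MAC, IP and TCP headers. */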
   7244 		*cmdlenp |= NQTX_CMD_TSE;
   7245 
   7246 		if (v4) {
   7247 			WM_Q_EVCNT_INCR(txq, txtso);
   7248 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7249 		} else {
   7250 			WM_Q_EVCNT_INCR(txq, txtso6);
   7251 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7252 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
			& ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7255 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7256 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7257 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7258 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7259 	} else {
   7260 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7261 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7262 	}
   7263 
   7264 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7265 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7266 		cmdc |= NQTXC_CMD_IP4;
   7267 	}
   7268 
   7269 	if (m0->m_pkthdr.csum_flags &
   7270 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7271 		WM_Q_EVCNT_INCR(txq, txtusum);
   7272 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7273 			cmdc |= NQTXC_CMD_TCP;
   7274 		} else {
   7275 			cmdc |= NQTXC_CMD_UDP;
   7276 		}
   7277 		cmdc |= NQTXC_CMD_IP4;
   7278 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7279 	}
   7280 	if (m0->m_pkthdr.csum_flags &
   7281 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7282 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7283 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7284 			cmdc |= NQTXC_CMD_TCP;
   7285 		} else {
   7286 			cmdc |= NQTXC_CMD_UDP;
   7287 		}
   7288 		cmdc |= NQTXC_CMD_IP6;
   7289 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7290 	}
   7291 
   7292 	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
	 * I210 and I211.  Writing it once per Tx queue is enough for these
	 * controllers.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
   7299 	 */
   7300 	/* Fill in the context descriptor. */
   7301 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7302 	    htole32(vl_len);
   7303 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7304 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7305 	    htole32(cmdc);
   7306 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7307 	    htole32(mssidx);
   7308 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7309 	DPRINTF(WM_DEBUG_TX,
   7310 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7311 	    txq->txq_next, 0, vl_len));
   7312 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7313 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7314 	txs->txs_ndesc++;
   7315 	return 0;
   7316 }
   7317 
   7318 /*
   7319  * wm_nq_start:		[ifnet interface function]
   7320  *
   7321  *	Start packet transmission on the interface for NEWQUEUE devices
   7322  */
   7323 static void
   7324 wm_nq_start(struct ifnet *ifp)
   7325 {
   7326 	struct wm_softc *sc = ifp->if_softc;
   7327 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7328 
   7329 #ifdef WM_MPSAFE
   7330 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7331 #endif
   7332 	/*
   7333 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7334 	 */
   7335 
   7336 	mutex_enter(txq->txq_lock);
   7337 	if (!txq->txq_stopping)
   7338 		wm_nq_start_locked(ifp);
   7339 	mutex_exit(txq->txq_lock);
   7340 }
   7341 
   7342 static void
   7343 wm_nq_start_locked(struct ifnet *ifp)
   7344 {
   7345 	struct wm_softc *sc = ifp->if_softc;
   7346 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7347 
   7348 	wm_nq_send_common_locked(ifp, txq, false);
   7349 }
   7350 
   7351 static int
   7352 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7353 {
   7354 	int qid;
   7355 	struct wm_softc *sc = ifp->if_softc;
   7356 	struct wm_txqueue *txq;
   7357 
   7358 	qid = wm_select_txqueue(ifp, m);
   7359 	txq = &sc->sc_queue[qid].wmq_txq;
   7360 
   7361 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7362 		m_freem(m);
   7363 		WM_Q_EVCNT_INCR(txq, txdrop);
   7364 		return ENOBUFS;
   7365 	}
   7366 
   7367 	/*
   7368 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7369 	 */
   7370 	ifp->if_obytes += m->m_pkthdr.len;
   7371 	if (m->m_flags & M_MCAST)
   7372 		ifp->if_omcasts++;
   7373 
   7374 	/*
	 * This mutex_tryenter() can fail at run time in two situations:
	 *     (1) contention with the interrupt handler
	 *         (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In both cases, the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   7383 	 */
   7384 	if (mutex_tryenter(txq->txq_lock)) {
   7385 		if (!txq->txq_stopping)
   7386 			wm_nq_transmit_locked(ifp, txq);
   7387 		mutex_exit(txq->txq_lock);
   7388 	}
   7389 
   7390 	return 0;
   7391 }
   7392 
   7393 static void
   7394 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7395 {
   7396 
   7397 	wm_nq_send_common_locked(ifp, txq, true);
   7398 }
   7399 
   7400 static void
   7401 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7402     bool is_transmit)
   7403 {
   7404 	struct wm_softc *sc = ifp->if_softc;
   7405 	struct mbuf *m0;
   7406 	struct m_tag *mtag;
   7407 	struct wm_txsoft *txs;
   7408 	bus_dmamap_t dmamap;
   7409 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7410 	bool do_csum, sent;
   7411 
   7412 	KASSERT(mutex_owned(txq->txq_lock));
   7413 
   7414 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7415 		return;
   7416 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7417 		return;
   7418 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7419 		return;
   7420 
   7421 	sent = false;
   7422 
   7423 	/*
   7424 	 * Loop through the send queue, setting up transmit descriptors
   7425 	 * until we drain the queue, or use up all available transmit
   7426 	 * descriptors.
   7427 	 */
   7428 	for (;;) {
   7429 		m0 = NULL;
   7430 
   7431 		/* Get a work queue entry. */
   7432 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7433 			wm_txeof(sc, txq);
   7434 			if (txq->txq_sfree == 0) {
   7435 				DPRINTF(WM_DEBUG_TX,
   7436 				    ("%s: TX: no free job descriptors\n",
   7437 					device_xname(sc->sc_dev)));
   7438 				WM_Q_EVCNT_INCR(txq, txsstall);
   7439 				break;
   7440 			}
   7441 		}
   7442 
   7443 		/* Grab a packet off the queue. */
   7444 		if (is_transmit)
   7445 			m0 = pcq_get(txq->txq_interq);
   7446 		else
   7447 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7448 		if (m0 == NULL)
   7449 			break;
   7450 
   7451 		DPRINTF(WM_DEBUG_TX,
   7452 		    ("%s: TX: have packet to transmit: %p\n",
   7453 		    device_xname(sc->sc_dev), m0));
   7454 
   7455 		txs = &txq->txq_soft[txq->txq_snext];
   7456 		dmamap = txs->txs_dmamap;
   7457 
   7458 		/*
   7459 		 * Load the DMA map.  If this fails, the packet either
   7460 		 * didn't fit in the allotted number of segments, or we
   7461 		 * were short on resources.  For the too-many-segments
   7462 		 * case, we simply report an error and drop the packet,
   7463 		 * since we can't sanely copy a jumbo packet to a single
   7464 		 * buffer.
   7465 		 */
   7466 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7467 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7468 		if (error) {
   7469 			if (error == EFBIG) {
   7470 				WM_Q_EVCNT_INCR(txq, txdrop);
   7471 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7472 				    "DMA segments, dropping...\n",
   7473 				    device_xname(sc->sc_dev));
   7474 				wm_dump_mbuf_chain(sc, m0);
   7475 				m_freem(m0);
   7476 				continue;
   7477 			}
   7478 			/* Short on resources, just stop for now. */
   7479 			DPRINTF(WM_DEBUG_TX,
   7480 			    ("%s: TX: dmamap load failed: %d\n",
   7481 			    device_xname(sc->sc_dev), error));
   7482 			break;
   7483 		}
   7484 
   7485 		segs_needed = dmamap->dm_nsegs;
   7486 
   7487 		/*
   7488 		 * Ensure we have enough descriptors free to describe
   7489 		 * the packet.  Note, we always reserve one descriptor
   7490 		 * at the end of the ring due to the semantics of the
   7491 		 * TDT register, plus one more in the event we need
   7492 		 * to load offload context.
   7493 		 */
   7494 		if (segs_needed > txq->txq_free - 2) {
   7495 			/*
   7496 			 * Not enough free descriptors to transmit this
   7497 			 * packet.  We haven't committed anything yet,
   7498 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7500 			 * layer that there are no more slots left.
   7501 			 */
   7502 			DPRINTF(WM_DEBUG_TX,
   7503 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7504 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7505 			    segs_needed, txq->txq_free - 1));
   7506 			if (!is_transmit)
   7507 				ifp->if_flags |= IFF_OACTIVE;
   7508 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7509 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7510 			WM_Q_EVCNT_INCR(txq, txdstall);
   7511 			break;
   7512 		}
   7513 
   7514 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7515 
   7516 		DPRINTF(WM_DEBUG_TX,
   7517 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7518 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7519 
   7520 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7521 
   7522 		/*
   7523 		 * Store a pointer to the packet so that we can free it
   7524 		 * later.
   7525 		 *
   7526 		 * Initially, we consider the number of descriptors the
   7527 		 * packet uses the number of DMA segments.  This may be
   7528 		 * incremented by 1 if we do checksum offload (a descriptor
   7529 		 * is used to set the checksum context).
   7530 		 */
   7531 		txs->txs_mbuf = m0;
   7532 		txs->txs_firstdesc = txq->txq_next;
   7533 		txs->txs_ndesc = segs_needed;
   7534 
   7535 		/* Set up offload parameters for this packet. */
   7536 		uint32_t cmdlen, fields, dcmdlen;
   7537 		if (m0->m_pkthdr.csum_flags &
   7538 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7539 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7540 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7541 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7542 			    &do_csum) != 0) {
   7543 				/* Error message already displayed. */
   7544 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7545 				continue;
   7546 			}
   7547 		} else {
   7548 			do_csum = false;
   7549 			cmdlen = 0;
   7550 			fields = 0;
   7551 		}
   7552 
   7553 		/* Sync the DMA map. */
   7554 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7555 		    BUS_DMASYNC_PREWRITE);
   7556 
   7557 		/* Initialize the first transmit descriptor. */
   7558 		nexttx = txq->txq_next;
   7559 		if (!do_csum) {
   7560 			/* setup a legacy descriptor */
   7561 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7562 			    dmamap->dm_segs[0].ds_addr);
   7563 			txq->txq_descs[nexttx].wtx_cmdlen =
   7564 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7565 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7566 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7567 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7568 			    NULL) {
   7569 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7570 				    htole32(WTX_CMD_VLE);
   7571 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7572 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7573 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7575 			}
   7576 			dcmdlen = 0;
   7577 		} else {
   7578 			/* setup an advanced data descriptor */
   7579 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7580 			    htole64(dmamap->dm_segs[0].ds_addr);
   7581 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7582 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7584 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7585 			    htole32(fields);
   7586 			DPRINTF(WM_DEBUG_TX,
   7587 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7588 			    device_xname(sc->sc_dev), nexttx,
   7589 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7590 			DPRINTF(WM_DEBUG_TX,
   7591 			    ("\t 0x%08x%08x\n", fields,
   7592 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7593 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7594 		}
   7595 
   7596 		lasttx = nexttx;
   7597 		nexttx = WM_NEXTTX(txq, nexttx);
   7598 		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from here on.
   7601 		 */
   7602 		for (seg = 1; seg < dmamap->dm_nsegs;
   7603 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7604 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7605 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7606 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7607 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7608 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7609 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7610 			lasttx = nexttx;
   7611 
   7612 			DPRINTF(WM_DEBUG_TX,
   7613 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7614 			     "len %#04zx\n",
   7615 			    device_xname(sc->sc_dev), nexttx,
   7616 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7617 			    dmamap->dm_segs[seg].ds_len));
   7618 		}
   7619 
   7620 		KASSERT(lasttx != -1);
   7621 
   7622 		/*
   7623 		 * Set up the command byte on the last descriptor of
   7624 		 * the packet.  If we're in the interrupt delay window,
   7625 		 * delay the interrupt.
   7626 		 */
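		/*
		 * The EOP and RS bits are encoded identically in the
		 * legacy and NQ formats, so wtx_cmdlen can be used for
		 * both descriptor types here.
		 */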
   7627 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7628 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7629 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7630 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7631 
   7632 		txs->txs_lastdesc = lasttx;
   7633 
   7634 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7635 		    device_xname(sc->sc_dev),
   7636 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7637 
   7638 		/* Sync the descriptors we're using. */
   7639 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7640 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7641 
   7642 		/* Give the packet to the chip. */
   7643 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7644 		sent = true;
   7645 
   7646 		DPRINTF(WM_DEBUG_TX,
   7647 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7648 
   7649 		DPRINTF(WM_DEBUG_TX,
   7650 		    ("%s: TX: finished transmitting packet, job %d\n",
   7651 		    device_xname(sc->sc_dev), txq->txq_snext));
   7652 
   7653 		/* Advance the tx pointer. */
   7654 		txq->txq_free -= txs->txs_ndesc;
   7655 		txq->txq_next = nexttx;
   7656 
   7657 		txq->txq_sfree--;
   7658 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7659 
   7660 		/* Pass the packet to any BPF listeners. */
   7661 		bpf_mtap(ifp, m0);
   7662 	}
   7663 
   7664 	if (m0 != NULL) {
   7665 		if (!is_transmit)
   7666 			ifp->if_flags |= IFF_OACTIVE;
   7667 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7668 		WM_Q_EVCNT_INCR(txq, txdrop);
   7669 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7670 			__func__));
   7671 		m_freem(m0);
   7672 	}
   7673 
   7674 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7675 		/* No more slots; notify upper layer. */
   7676 		if (!is_transmit)
   7677 			ifp->if_flags |= IFF_OACTIVE;
   7678 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7679 	}
   7680 
   7681 	if (sent) {
   7682 		/* Set a watchdog timer in case the chip flakes out. */
   7683 		ifp->if_timer = 5;
   7684 	}
   7685 }
   7686 
   7687 static void
   7688 wm_deferred_start_locked(struct wm_txqueue *txq)
   7689 {
   7690 	struct wm_softc *sc = txq->txq_sc;
   7691 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7692 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7693 	int qid = wmq->wmq_id;
   7694 
   7695 	KASSERT(mutex_owned(txq->txq_lock));
   7696 
   7697 	if (txq->txq_stopping) {
   7698 		mutex_exit(txq->txq_lock);
   7699 		return;
   7700 	}
   7701 
   7702 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   7704 		if (qid == 0)
   7705 			wm_nq_start_locked(ifp);
   7706 		wm_nq_transmit_locked(ifp, txq);
   7707 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   7709 		if (qid == 0)
   7710 			wm_start_locked(ifp);
   7711 		wm_transmit_locked(ifp, txq);
   7712 	}
   7713 }
   7714 
   7715 /* Interrupt */
   7716 
   7717 /*
   7718  * wm_txeof:
   7719  *
   7720  *	Helper; handle transmit interrupts.
   7721  */
   7722 static int
   7723 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7724 {
   7725 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7726 	struct wm_txsoft *txs;
   7727 	bool processed = false;
   7728 	int count = 0;
   7729 	int i;
   7730 	uint8_t status;
   7731 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7732 
   7733 	KASSERT(mutex_owned(txq->txq_lock));
   7734 
   7735 	if (txq->txq_stopping)
   7736 		return 0;
   7737 
   7738 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7740 	if (wmq->wmq_id == 0)
   7741 		ifp->if_flags &= ~IFF_OACTIVE;
   7742 
   7743 	/*
   7744 	 * Go through the Tx list and free mbufs for those
   7745 	 * frames which have been transmitted.
   7746 	 */
   7747 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7748 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7749 		txs = &txq->txq_soft[i];
   7750 
   7751 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7752 			device_xname(sc->sc_dev), i));
   7753 
   7754 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7755 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7756 
   7757 		status =
   7758 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7759 		if ((status & WTX_ST_DD) == 0) {
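			/*
			 * The descriptor is not done yet; sync it back
			 * for the next poll and stop scanning.
			 */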
   7760 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7761 			    BUS_DMASYNC_PREREAD);
   7762 			break;
   7763 		}
   7764 
   7765 		processed = true;
   7766 		count++;
   7767 		DPRINTF(WM_DEBUG_TX,
   7768 		    ("%s: TX: job %d done: descs %d..%d\n",
   7769 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7770 		    txs->txs_lastdesc));
   7771 
   7772 		/*
   7773 		 * XXX We should probably be using the statistics
   7774 		 * XXX registers, but I don't know if they exist
   7775 		 * XXX on chips before the i82544.
   7776 		 */
   7777 
   7778 #ifdef WM_EVENT_COUNTERS
   7779 		if (status & WTX_ST_TU)
   7780 			WM_Q_EVCNT_INCR(txq, tu);
   7781 #endif /* WM_EVENT_COUNTERS */
   7782 
   7783 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7784 			ifp->if_oerrors++;
   7785 			if (status & WTX_ST_LC)
   7786 				log(LOG_WARNING, "%s: late collision\n",
   7787 				    device_xname(sc->sc_dev));
   7788 			else if (status & WTX_ST_EC) {
   7789 				ifp->if_collisions += 16;
   7790 				log(LOG_WARNING, "%s: excessive collisions\n",
   7791 				    device_xname(sc->sc_dev));
   7792 			}
   7793 		} else
   7794 			ifp->if_opackets++;
   7795 
   7796 		txq->txq_packets++;
   7797 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7798 
   7799 		txq->txq_free += txs->txs_ndesc;
   7800 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7801 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7802 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7803 		m_freem(txs->txs_mbuf);
   7804 		txs->txs_mbuf = NULL;
   7805 	}
   7806 
   7807 	/* Update the dirty transmit buffer pointer. */
   7808 	txq->txq_sdirty = i;
   7809 	DPRINTF(WM_DEBUG_TX,
   7810 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7811 
   7812 	if (count != 0)
   7813 		rnd_add_uint32(&sc->rnd_source, count);
   7814 
   7815 	/*
   7816 	 * If there are no more pending transmissions, cancel the watchdog
   7817 	 * timer.
   7818 	 */
   7819 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7820 		ifp->if_timer = 0;
   7821 
   7822 	return processed;
   7823 }
   7824 
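/*
 * Rx descriptor accessors.  The 82574 uses extended descriptors,
 * NEWQUEUE (82575 and newer) devices use advanced descriptors, and
 * older chips use legacy descriptors; these helpers hide the
 * per-format field layouts.
 */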
   7825 static inline uint32_t
   7826 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7827 {
   7828 	struct wm_softc *sc = rxq->rxq_sc;
   7829 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_STATUS(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_STATUS(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7834 	else
   7835 		return rxq->rxq_descs[idx].wrx_status;
   7836 }
   7837 
   7838 static inline uint32_t
   7839 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7840 {
   7841 	struct wm_softc *sc = rxq->rxq_sc;
   7842 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_ERROR(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_ERROR(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7847 	else
   7848 		return rxq->rxq_descs[idx].wrx_errors;
   7849 }
   7850 
   7851 static inline uint16_t
   7852 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7853 {
   7854 	struct wm_softc *sc = rxq->rxq_sc;
   7855 
   7856 	if (sc->sc_type == WM_T_82574)
   7857 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7858 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7859 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7860 	else
   7861 		return rxq->rxq_descs[idx].wrx_special;
   7862 }
   7863 
   7864 static inline int
   7865 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7866 {
   7867 	struct wm_softc *sc = rxq->rxq_sc;
   7868 
   7869 	if (sc->sc_type == WM_T_82574)
   7870 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7871 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7872 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7873 	else
   7874 		return rxq->rxq_descs[idx].wrx_len;
   7875 }
   7876 
   7877 #ifdef WM_DEBUG
   7878 static inline uint32_t
   7879 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7880 {
   7881 	struct wm_softc *sc = rxq->rxq_sc;
   7882 
   7883 	if (sc->sc_type == WM_T_82574)
   7884 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7885 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7886 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7887 	else
   7888 		return 0;
   7889 }
   7890 
   7891 static inline uint8_t
   7892 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7893 {
   7894 	struct wm_softc *sc = rxq->rxq_sc;
   7895 
   7896 	if (sc->sc_type == WM_T_82574)
   7897 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7898 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7899 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7900 	else
   7901 		return 0;
   7902 }
   7903 #endif /* WM_DEBUG */
   7904 
   7905 static inline bool
   7906 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7907     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7908 {
   7909 
   7910 	if (sc->sc_type == WM_T_82574)
   7911 		return (status & ext_bit) != 0;
   7912 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7913 		return (status & nq_bit) != 0;
   7914 	else
   7915 		return (status & legacy_bit) != 0;
   7916 }
   7917 
   7918 static inline bool
   7919 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7920     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7921 {
   7922 
   7923 	if (sc->sc_type == WM_T_82574)
   7924 		return (error & ext_bit) != 0;
   7925 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7926 		return (error & nq_bit) != 0;
   7927 	else
   7928 		return (error & legacy_bit) != 0;
   7929 }
   7930 
   7931 static inline bool
   7932 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7933 {
   7934 
   7935 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7936 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7937 		return true;
   7938 	else
   7939 		return false;
   7940 }
   7941 
   7942 static inline bool
   7943 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7944 {
   7945 	struct wm_softc *sc = rxq->rxq_sc;
   7946 
	/* XXX missing error bit for newqueue? */
   7948 	if (wm_rxdesc_is_set_error(sc, errors,
   7949 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7950 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7951 		NQRXC_ERROR_RXE)) {
   7952 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7953 			log(LOG_WARNING, "%s: symbol error\n",
   7954 			    device_xname(sc->sc_dev));
   7955 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7956 			log(LOG_WARNING, "%s: receive sequence error\n",
   7957 			    device_xname(sc->sc_dev));
   7958 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7959 			log(LOG_WARNING, "%s: CRC error\n",
   7960 			    device_xname(sc->sc_dev));
   7961 		return true;
   7962 	}
   7963 
   7964 	return false;
   7965 }
   7966 
   7967 static inline bool
   7968 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7969 {
   7970 	struct wm_softc *sc = rxq->rxq_sc;
   7971 
   7972 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7973 		NQRXC_STATUS_DD)) {
   7974 		/* We have processed all of the receive descriptors. */
   7975 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7976 		return false;
   7977 	}
   7978 
   7979 	return true;
   7980 }
   7981 
   7982 static inline bool
   7983 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7984     struct mbuf *m)
   7985 {
   7986 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7987 
   7988 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7989 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7990 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7991 	}
   7992 
   7993 	return true;
   7994 }
   7995 
   7996 static inline void
   7997 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7998     uint32_t errors, struct mbuf *m)
   7999 {
   8000 	struct wm_softc *sc = rxq->rxq_sc;
   8001 
   8002 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8003 		if (wm_rxdesc_is_set_status(sc, status,
   8004 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8005 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8006 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8007 			if (wm_rxdesc_is_set_error(sc, errors,
   8008 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8009 				m->m_pkthdr.csum_flags |=
   8010 					M_CSUM_IPv4_BAD;
   8011 		}
   8012 		if (wm_rxdesc_is_set_status(sc, status,
   8013 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8014 			/*
   8015 			 * Note: we don't know if this was TCP or UDP,
   8016 			 * so we just set both bits, and expect the
   8017 			 * upper layers to deal.
   8018 			 */
   8019 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8020 			m->m_pkthdr.csum_flags |=
   8021 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8022 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8023 			if (wm_rxdesc_is_set_error(sc, errors,
   8024 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8025 				m->m_pkthdr.csum_flags |=
   8026 					M_CSUM_TCP_UDP_BAD;
   8027 		}
   8028 	}
   8029 }
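
/*
 * The M_CSUM_* flags set above are consumed by the upper layers: the
 * stack looks at m_pkthdr.csum_flags and skips its own checksum
 * verification for packets the hardware has already checked, treating
 * the *_BAD variants as hardware-detected checksum failures.
 */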
   8030 
   8031 /*
   8032  * wm_rxeof:
   8033  *
   8034  *	Helper; handle receive interrupts.
   8035  */
   8036 static void
   8037 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8038 {
   8039 	struct wm_softc *sc = rxq->rxq_sc;
   8040 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8041 	struct wm_rxsoft *rxs;
   8042 	struct mbuf *m;
   8043 	int i, len;
   8044 	int count = 0;
   8045 	uint32_t status, errors;
   8046 	uint16_t vlantag;
   8047 
   8048 	KASSERT(mutex_owned(rxq->rxq_lock));
   8049 
   8050 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8051 		if (limit-- == 0) {
   8052 			rxq->rxq_ptr = i;
   8053 			break;
   8054 		}
   8055 
   8056 		rxs = &rxq->rxq_soft[i];
   8057 
   8058 		DPRINTF(WM_DEBUG_RX,
   8059 		    ("%s: RX: checking descriptor %d\n",
   8060 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8062 
   8063 		status = wm_rxdesc_get_status(rxq, i);
   8064 		errors = wm_rxdesc_get_errors(rxq, i);
   8065 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8066 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8067 #ifdef WM_DEBUG
   8068 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8069 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8070 #endif
   8071 
   8072 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while still holding
			 * rxq_lock, keeping it consistent with the counter
			 * updates.
			 */
   8077 			rxq->rxq_ptr = i;
   8078 			break;
   8079 		}
   8080 
   8081 		count++;
   8082 		if (__predict_false(rxq->rxq_discard)) {
   8083 			DPRINTF(WM_DEBUG_RX,
   8084 			    ("%s: RX: discarding contents of descriptor %d\n",
   8085 			    device_xname(sc->sc_dev), i));
   8086 			wm_init_rxdesc(rxq, i);
   8087 			if (wm_rxdesc_is_eop(rxq, status)) {
   8088 				/* Reset our state. */
   8089 				DPRINTF(WM_DEBUG_RX,
   8090 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8091 				    device_xname(sc->sc_dev)));
   8092 				rxq->rxq_discard = 0;
   8093 			}
   8094 			continue;
   8095 		}
   8096 
   8097 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8098 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8099 
   8100 		m = rxs->rxs_mbuf;
   8101 
   8102 		/*
   8103 		 * Add a new receive buffer to the ring, unless of
   8104 		 * course the length is zero. Treat the latter as a
   8105 		 * failed mapping.
   8106 		 */
   8107 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8108 			/*
   8109 			 * Failed, throw away what we've done so
   8110 			 * far, and discard the rest of the packet.
   8111 			 */
   8112 			ifp->if_ierrors++;
   8113 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8114 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8115 			wm_init_rxdesc(rxq, i);
   8116 			if (!wm_rxdesc_is_eop(rxq, status))
   8117 				rxq->rxq_discard = 1;
   8118 			if (rxq->rxq_head != NULL)
   8119 				m_freem(rxq->rxq_head);
   8120 			WM_RXCHAIN_RESET(rxq);
   8121 			DPRINTF(WM_DEBUG_RX,
   8122 			    ("%s: RX: Rx buffer allocation failed, "
   8123 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8124 			    rxq->rxq_discard ? " (discard)" : ""));
   8125 			continue;
   8126 		}
   8127 
   8128 		m->m_len = len;
   8129 		rxq->rxq_len += len;
   8130 		DPRINTF(WM_DEBUG_RX,
   8131 		    ("%s: RX: buffer at %p len %d\n",
   8132 		    device_xname(sc->sc_dev), m->m_data, len));
   8133 
   8134 		/* If this is not the end of the packet, keep looking. */
   8135 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8136 			WM_RXCHAIN_LINK(rxq, m);
   8137 			DPRINTF(WM_DEBUG_RX,
   8138 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8139 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8140 			continue;
   8141 		}
   8142 
		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on the I350, I354,
		 * I210 and I211 (not all chips can be configured to strip
		 * it), so we need to trim it here.  We may also need to
		 * adjust the length of the previous mbuf in the chain if
		 * the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so the FCS is never
		 * included there and we must not trim it.
		 */
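		/*
		 * For example, a 1515-byte frame (including the 4-byte
		 * FCS) received as a 1514-byte buffer plus a 1-byte tail
		 * mbuf: the tail is shorter than ETHER_CRC_LEN, so its
		 * length is zeroed and the remaining 3 FCS bytes come off
		 * the previous mbuf (1514 - 3 = 1511), leaving a packet
		 * length of rxq_len - ETHER_CRC_LEN = 1511.
		 */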
   8153 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8154 		    && (sc->sc_type != WM_T_I210)
   8155 		    && (sc->sc_type != WM_T_I211)) {
   8156 			if (m->m_len < ETHER_CRC_LEN) {
   8157 				rxq->rxq_tail->m_len
   8158 				    -= (ETHER_CRC_LEN - m->m_len);
   8159 				m->m_len = 0;
   8160 			} else
   8161 				m->m_len -= ETHER_CRC_LEN;
   8162 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8163 		} else
   8164 			len = rxq->rxq_len;
   8165 
   8166 		WM_RXCHAIN_LINK(rxq, m);
   8167 
   8168 		*rxq->rxq_tailp = NULL;
   8169 		m = rxq->rxq_head;
   8170 
   8171 		WM_RXCHAIN_RESET(rxq);
   8172 
   8173 		DPRINTF(WM_DEBUG_RX,
   8174 		    ("%s: RX: have entire packet, len -> %d\n",
   8175 		    device_xname(sc->sc_dev), len));
   8176 
   8177 		/* If an error occurred, update stats and drop the packet. */
   8178 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8179 			m_freem(m);
   8180 			continue;
   8181 		}
   8182 
   8183 		/* No errors.  Receive the packet. */
   8184 		m_set_rcvif(m, ifp);
   8185 		m->m_pkthdr.len = len;
		/*
		 * TODO: we should save rsshash and rsstype in this mbuf.
		 */
   8190 		DPRINTF(WM_DEBUG_RX,
   8191 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8192 			device_xname(sc->sc_dev), rsstype, rsshash));
   8193 
   8194 		/*
   8195 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8196 		 * for us.  Associate the tag with the packet.
   8197 		 */
   8198 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8199 			continue;
   8200 
   8201 		/* Set up checksum info for this packet. */
   8202 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while still holding rxq_lock,
		 * keeping it consistent with the counter updates.
		 */
   8207 		rxq->rxq_ptr = i;
   8208 		rxq->rxq_packets++;
   8209 		rxq->rxq_bytes += len;
   8210 		mutex_exit(rxq->rxq_lock);
   8211 
   8212 		/* Pass it on. */
   8213 		if_percpuq_enqueue(sc->sc_ipq, m);
   8214 
   8215 		mutex_enter(rxq->rxq_lock);
   8216 
   8217 		if (rxq->rxq_stopping)
   8218 			break;
   8219 	}
   8220 
   8221 	if (count != 0)
   8222 		rnd_add_uint32(&sc->rnd_source, count);
   8223 
   8224 	DPRINTF(WM_DEBUG_RX,
   8225 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8226 }
   8227 
   8228 /*
   8229  * wm_linkintr_gmii:
   8230  *
   8231  *	Helper; handle link interrupts for GMII.
   8232  */
   8233 static void
   8234 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8235 {
   8236 
   8237 	KASSERT(WM_CORE_LOCKED(sc));
   8238 
   8239 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8240 		__func__));
   8241 
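	/*
	 * On LSC (link status change) this handler refreshes the MII
	 * status and then applies a series of chip-specific link
	 * workarounds: the ICH8 gigabit downshift, forced MAC
	 * speed/duplex on the 82543, the IGP3 KMRN lock loss and PCH K1
	 * workarounds, the 82578 link stall fix, the I217 beacon
	 * duration, LTR values on PCH_LPT/PCH_SPT, and the PCH_SPT
	 * K1-off bit.
	 */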
   8242 	if (icr & ICR_LSC) {
   8243 		uint32_t reg;
   8244 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8245 
   8246 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8247 			wm_gig_downshift_workaround_ich8lan(sc);
   8248 
   8249 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8250 			device_xname(sc->sc_dev)));
   8251 		mii_pollstat(&sc->sc_mii);
   8252 		if (sc->sc_type == WM_T_82543) {
   8253 			int miistatus, active;
   8254 
   8255 			/*
   8256 			 * With 82543, we need to force speed and
   8257 			 * duplex on the MAC equal to what the PHY
   8258 			 * speed and duplex configuration is.
   8259 			 */
   8260 			miistatus = sc->sc_mii.mii_media_status;
   8261 
   8262 			if (miistatus & IFM_ACTIVE) {
   8263 				active = sc->sc_mii.mii_media_active;
   8264 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8265 				switch (IFM_SUBTYPE(active)) {
   8266 				case IFM_10_T:
   8267 					sc->sc_ctrl |= CTRL_SPEED_10;
   8268 					break;
   8269 				case IFM_100_TX:
   8270 					sc->sc_ctrl |= CTRL_SPEED_100;
   8271 					break;
   8272 				case IFM_1000_T:
   8273 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8274 					break;
   8275 				default:
   8276 					/*
   8277 					 * fiber?
					 * Should not enter here.
   8279 					 */
   8280 					printf("unknown media (%x)\n", active);
   8281 					break;
   8282 				}
   8283 				if (active & IFM_FDX)
   8284 					sc->sc_ctrl |= CTRL_FD;
   8285 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8286 			}
   8287 		} else if ((sc->sc_type == WM_T_ICH8)
   8288 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8289 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8290 		} else if (sc->sc_type == WM_T_PCH) {
   8291 			wm_k1_gig_workaround_hv(sc,
   8292 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8293 		}
   8294 
   8295 		if ((sc->sc_phytype == WMPHY_82578)
   8296 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8297 			== IFM_1000_T)) {
   8298 
   8299 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8300 				delay(200*1000); /* XXX too big */
   8301 
   8302 				/* Link stall fix for link up */
   8303 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8304 				    HV_MUX_DATA_CTRL,
   8305 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8306 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8307 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8308 				    HV_MUX_DATA_CTRL,
   8309 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8310 			}
   8311 		}
   8312 		/*
   8313 		 * I217 Packet Loss issue:
   8314 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8315 		 * on power up.
   8316 		 * Set the Beacon Duration for I217 to 8 usec
   8317 		 */
   8318 		if ((sc->sc_type == WM_T_PCH_LPT)
   8319 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8320 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8321 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8322 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8323 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8324 		}
   8325 
   8326 		/* XXX Work-around I218 hang issue */
   8327 		/* e1000_k1_workaround_lpt_lp() */
   8328 
   8329 		if ((sc->sc_type == WM_T_PCH_LPT)
   8330 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8331 			/*
   8332 			 * Set platform power management values for Latency
   8333 			 * Tolerance Reporting (LTR)
   8334 			 */
   8335 			wm_platform_pm_pch_lpt(sc,
   8336 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8337 				    != 0));
   8338 		}
   8339 
   8340 		/* FEXTNVM6 K1-off workaround */
   8341 		if (sc->sc_type == WM_T_PCH_SPT) {
   8342 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8343 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8344 			    & FEXTNVM6_K1_OFF_ENABLE)
   8345 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8346 			else
   8347 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8348 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8349 		}
   8350 	} else if (icr & ICR_RXSEQ) {
   8351 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8352 			device_xname(sc->sc_dev)));
   8353 	}
   8354 }
   8355 
   8356 /*
   8357  * wm_linkintr_tbi:
   8358  *
   8359  *	Helper; handle link interrupts for TBI mode.
   8360  */
   8361 static void
   8362 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8363 {
   8364 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8365 	uint32_t status;
   8366 
   8367 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8368 		__func__));
   8369 
   8370 	status = CSR_READ(sc, WMREG_STATUS);
   8371 	if (icr & ICR_LSC) {
   8372 		if (status & STATUS_LU) {
   8373 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8374 			    device_xname(sc->sc_dev),
   8375 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8376 			/*
   8377 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8378 			 * so we should update sc->sc_ctrl
   8379 			 */
   8380 
   8381 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8382 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8383 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8384 			if (status & STATUS_FD)
   8385 				sc->sc_tctl |=
   8386 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8387 			else
   8388 				sc->sc_tctl |=
   8389 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8390 			if (sc->sc_ctrl & CTRL_TFCE)
   8391 				sc->sc_fcrtl |= FCRTL_XONE;
   8392 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8393 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8394 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8395 				      sc->sc_fcrtl);
   8396 			sc->sc_tbi_linkup = 1;
   8397 			if_link_state_change(ifp, LINK_STATE_UP);
   8398 		} else {
   8399 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8400 			    device_xname(sc->sc_dev)));
   8401 			sc->sc_tbi_linkup = 0;
   8402 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8403 		}
   8404 		/* Update LED */
   8405 		wm_tbi_serdes_set_linkled(sc);
   8406 	} else if (icr & ICR_RXSEQ) {
   8407 		DPRINTF(WM_DEBUG_LINK,
   8408 		    ("%s: LINK: Receive sequence error\n",
   8409 		    device_xname(sc->sc_dev)));
   8410 	}
   8411 }
   8412 
   8413 /*
   8414  * wm_linkintr_serdes:
   8415  *
 *	Helper; handle link interrupts for SERDES mode.
   8417  */
   8418 static void
   8419 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8420 {
   8421 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8422 	struct mii_data *mii = &sc->sc_mii;
   8423 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8424 	uint32_t pcs_adv, pcs_lpab, reg;
   8425 
   8426 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8427 		__func__));
   8428 
   8429 	if (icr & ICR_LSC) {
   8430 		/* Check PCS */
   8431 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8432 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8433 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8434 				device_xname(sc->sc_dev)));
   8435 			mii->mii_media_status |= IFM_ACTIVE;
   8436 			sc->sc_tbi_linkup = 1;
   8437 			if_link_state_change(ifp, LINK_STATE_UP);
   8438 		} else {
   8439 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8440 				device_xname(sc->sc_dev)));
   8441 			mii->mii_media_status |= IFM_NONE;
   8442 			sc->sc_tbi_linkup = 0;
   8443 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8444 			wm_tbi_serdes_set_linkled(sc);
   8445 			return;
   8446 		}
   8447 		mii->mii_media_active |= IFM_1000_SX;
   8448 		if ((reg & PCS_LSTS_FDX) != 0)
   8449 			mii->mii_media_active |= IFM_FDX;
   8450 		else
   8451 			mii->mii_media_active |= IFM_HDX;
   8452 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8453 			/* Check flow */
   8454 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8455 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8456 				DPRINTF(WM_DEBUG_LINK,
   8457 				    ("XXX LINKOK but not ACOMP\n"));
   8458 				return;
   8459 			}
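			/*
			 * Resolve the pause ability as described in
			 * IEEE 802.3 Annex 28B: symmetric flow control
			 * when both sides advertise symmetric pause,
			 * otherwise TX-only or RX-only pause depending
			 * on how each side combines the symmetric and
			 * asymmetric pause bits, as checked below.
			 */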
   8460 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8461 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8462 			DPRINTF(WM_DEBUG_LINK,
   8463 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8464 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8465 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8466 				mii->mii_media_active |= IFM_FLOW
   8467 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8468 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8469 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8470 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8471 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8472 				mii->mii_media_active |= IFM_FLOW
   8473 				    | IFM_ETH_TXPAUSE;
   8474 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8475 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8476 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8477 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8478 				mii->mii_media_active |= IFM_FLOW
   8479 				    | IFM_ETH_RXPAUSE;
   8480 		}
   8481 		/* Update LED */
   8482 		wm_tbi_serdes_set_linkled(sc);
   8483 	} else {
   8484 		DPRINTF(WM_DEBUG_LINK,
   8485 		    ("%s: LINK: Receive sequence error\n",
   8486 		    device_xname(sc->sc_dev)));
   8487 	}
   8488 }
   8489 
   8490 /*
   8491  * wm_linkintr:
   8492  *
   8493  *	Helper; handle link interrupts.
   8494  */
   8495 static void
   8496 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8497 {
   8498 
   8499 	KASSERT(WM_CORE_LOCKED(sc));
   8500 
   8501 	if (sc->sc_flags & WM_F_HAS_MII)
   8502 		wm_linkintr_gmii(sc, icr);
   8503 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8504 	    && (sc->sc_type >= WM_T_82575))
   8505 		wm_linkintr_serdes(sc, icr);
   8506 	else
   8507 		wm_linkintr_tbi(sc, icr);
   8508 }
   8509 
   8510 /*
   8511  * wm_intr_legacy:
   8512  *
   8513  *	Interrupt service routine for INTx and MSI.
   8514  */
   8515 static int
   8516 wm_intr_legacy(void *arg)
   8517 {
   8518 	struct wm_softc *sc = arg;
   8519 	struct wm_queue *wmq = &sc->sc_queue[0];
   8520 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8521 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8522 	uint32_t icr, rndval = 0;
   8523 	int handled = 0;
   8524 
   8525 	while (1 /* CONSTCOND */) {
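		/*
		 * Reading ICR both returns and acknowledges (clears) the
		 * pending interrupt causes, so the loop simply re-reads
		 * it until no cause we registered for remains asserted.
		 */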
   8526 		icr = CSR_READ(sc, WMREG_ICR);
   8527 		if ((icr & sc->sc_icr) == 0)
   8528 			break;
   8529 		if (handled == 0) {
   8530 			DPRINTF(WM_DEBUG_TX,
   8531 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8532 		}
   8533 		if (rndval == 0)
   8534 			rndval = icr;
   8535 
   8536 		mutex_enter(rxq->rxq_lock);
   8537 
   8538 		if (rxq->rxq_stopping) {
   8539 			mutex_exit(rxq->rxq_lock);
   8540 			break;
   8541 		}
   8542 
   8543 		handled = 1;
   8544 
   8545 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8546 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8547 			DPRINTF(WM_DEBUG_RX,
   8548 			    ("%s: RX: got Rx intr 0x%08x\n",
   8549 			    device_xname(sc->sc_dev),
   8550 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8551 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8552 		}
   8553 #endif
   8554 		wm_rxeof(rxq, UINT_MAX);
   8555 
   8556 		mutex_exit(rxq->rxq_lock);
   8557 		mutex_enter(txq->txq_lock);
   8558 
   8559 		if (txq->txq_stopping) {
   8560 			mutex_exit(txq->txq_lock);
   8561 			break;
   8562 		}
   8563 
   8564 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8565 		if (icr & ICR_TXDW) {
   8566 			DPRINTF(WM_DEBUG_TX,
   8567 			    ("%s: TX: got TXDW interrupt\n",
   8568 			    device_xname(sc->sc_dev)));
   8569 			WM_Q_EVCNT_INCR(txq, txdw);
   8570 		}
   8571 #endif
   8572 		wm_txeof(sc, txq);
   8573 
   8574 		mutex_exit(txq->txq_lock);
   8575 		WM_CORE_LOCK(sc);
   8576 
   8577 		if (sc->sc_core_stopping) {
   8578 			WM_CORE_UNLOCK(sc);
   8579 			break;
   8580 		}
   8581 
   8582 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8583 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8584 			wm_linkintr(sc, icr);
   8585 		}
   8586 
   8587 		WM_CORE_UNLOCK(sc);
   8588 
   8589 		if (icr & ICR_RXO) {
   8590 #if defined(WM_DEBUG)
   8591 			log(LOG_WARNING, "%s: Receive overrun\n",
   8592 			    device_xname(sc->sc_dev));
   8593 #endif /* defined(WM_DEBUG) */
   8594 		}
   8595 	}
   8596 
   8597 	rnd_add_uint32(&sc->rnd_source, rndval);
   8598 
   8599 	if (handled) {
   8600 		/* Try to get more packets going. */
   8601 		softint_schedule(wmq->wmq_si);
   8602 	}
   8603 
   8604 	return handled;
   8605 }
   8606 
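/*
 * Per-queue interrupt masking differs by MAC type: the 82574 masks its
 * per-queue causes through the legacy IMS/IMC registers using the
 * ICR_TXQ()/ICR_RXQ() bits, the 82575 uses EIMS/EIMC with the
 * EITR_TX_QUEUE()/EITR_RX_QUEUE() bits, and the other multiqueue chips
 * use EIMS/EIMC with one bit per MSI-X vector.
 */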
   8607 static inline void
   8608 wm_txrxintr_disable(struct wm_queue *wmq)
   8609 {
   8610 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8611 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8618 }
   8619 
   8620 static inline void
   8621 wm_txrxintr_enable(struct wm_queue *wmq)
   8622 {
   8623 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8624 
   8625 	wm_itrs_calculate(sc, wmq);
   8626 
	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
	else
		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8633 }
   8634 
   8635 static int
   8636 wm_txrxintr_msix(void *arg)
   8637 {
   8638 	struct wm_queue *wmq = arg;
   8639 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8640 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8641 	struct wm_softc *sc = txq->txq_sc;
   8642 	u_int limit = sc->sc_rx_intr_process_limit;
   8643 
   8644 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8645 
   8646 	DPRINTF(WM_DEBUG_TX,
   8647 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8648 
   8649 	wm_txrxintr_disable(wmq);
   8650 
   8651 	mutex_enter(txq->txq_lock);
   8652 
   8653 	if (txq->txq_stopping) {
   8654 		mutex_exit(txq->txq_lock);
   8655 		return 0;
   8656 	}
   8657 
   8658 	WM_Q_EVCNT_INCR(txq, txdw);
   8659 	wm_txeof(sc, txq);
   8660 	/* wm_deferred start() is done in wm_handle_queue(). */
   8661 	mutex_exit(txq->txq_lock);
   8662 
   8663 	DPRINTF(WM_DEBUG_RX,
   8664 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8665 	mutex_enter(rxq->rxq_lock);
   8666 
   8667 	if (rxq->rxq_stopping) {
   8668 		mutex_exit(rxq->rxq_lock);
   8669 		return 0;
   8670 	}
   8671 
   8672 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8673 	wm_rxeof(rxq, limit);
   8674 	mutex_exit(rxq->rxq_lock);
   8675 
   8676 	wm_itrs_writereg(sc, wmq);
   8677 
   8678 	softint_schedule(wmq->wmq_si);
   8679 
   8680 	return 1;
   8681 }
   8682 
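/*
 * wm_handle_queue:
 *
 *	Softint handler for the deferred half of Tx/Rx processing,
 *	scheduled from wm_txrxintr_msix() and the legacy interrupt
 *	handler.  It completes Tx reclamation, restarts transmission,
 *	processes more Rx packets, and then re-enables the per-queue
 *	interrupts.
 */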
   8683 static void
   8684 wm_handle_queue(void *arg)
   8685 {
   8686 	struct wm_queue *wmq = arg;
   8687 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8688 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8689 	struct wm_softc *sc = txq->txq_sc;
   8690 	u_int limit = sc->sc_rx_process_limit;
   8691 
   8692 	mutex_enter(txq->txq_lock);
   8693 	if (txq->txq_stopping) {
   8694 		mutex_exit(txq->txq_lock);
   8695 		return;
   8696 	}
   8697 	wm_txeof(sc, txq);
   8698 	wm_deferred_start_locked(txq);
   8699 	mutex_exit(txq->txq_lock);
   8700 
   8701 	mutex_enter(rxq->rxq_lock);
   8702 	if (rxq->rxq_stopping) {
   8703 		mutex_exit(rxq->rxq_lock);
   8704 		return;
   8705 	}
   8706 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8707 	wm_rxeof(rxq, limit);
   8708 	mutex_exit(rxq->rxq_lock);
   8709 
   8710 	wm_txrxintr_enable(wmq);
   8711 }
   8712 
   8713 /*
   8714  * wm_linkintr_msix:
   8715  *
   8716  *	Interrupt service routine for link status change for MSI-X.
   8717  */
   8718 static int
   8719 wm_linkintr_msix(void *arg)
   8720 {
   8721 	struct wm_softc *sc = arg;
   8722 	uint32_t reg;
   8723 
   8724 	DPRINTF(WM_DEBUG_LINK,
   8725 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8726 
   8727 	reg = CSR_READ(sc, WMREG_ICR);
   8728 	WM_CORE_LOCK(sc);
   8729 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8730 		goto out;
   8731 
   8732 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8733 	wm_linkintr(sc, ICR_LSC);
   8734 
   8735 out:
   8736 	WM_CORE_UNLOCK(sc);
   8737 
   8738 	if (sc->sc_type == WM_T_82574)
   8739 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8740 	else if (sc->sc_type == WM_T_82575)
   8741 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8742 	else
   8743 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8744 
   8745 	return 1;
   8746 }
   8747 
   8748 /*
   8749  * Media related.
   8750  * GMII, SGMII, TBI (and SERDES)
   8751  */
   8752 
   8753 /* Common */
   8754 
   8755 /*
   8756  * wm_tbi_serdes_set_linkled:
   8757  *
   8758  *	Update the link LED on TBI and SERDES devices.
   8759  */
   8760 static void
   8761 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8762 {
   8763 
   8764 	if (sc->sc_tbi_linkup)
   8765 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8766 	else
   8767 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8768 
   8769 	/* 82540 or newer devices are active low */
   8770 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8771 
   8772 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8773 }
   8774 
   8775 /* GMII related */
   8776 
   8777 /*
   8778  * wm_gmii_reset:
   8779  *
   8780  *	Reset the PHY.
   8781  */
   8782 static void
   8783 wm_gmii_reset(struct wm_softc *sc)
   8784 {
   8785 	uint32_t reg;
   8786 	int rv;
   8787 
   8788 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8789 		device_xname(sc->sc_dev), __func__));
   8790 
   8791 	rv = sc->phy.acquire(sc);
   8792 	if (rv != 0) {
   8793 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8794 		    __func__);
   8795 		return;
   8796 	}
   8797 
   8798 	switch (sc->sc_type) {
   8799 	case WM_T_82542_2_0:
   8800 	case WM_T_82542_2_1:
   8801 		/* null */
   8802 		break;
   8803 	case WM_T_82543:
   8804 		/*
   8805 		 * With 82543, we need to force speed and duplex on the MAC
   8806 		 * equal to what the PHY speed and duplex configuration is.
   8807 		 * In addition, we need to perform a hardware reset on the PHY
   8808 		 * to take it out of reset.
   8809 		 */
   8810 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8811 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8812 
   8813 		/* The PHY reset pin is active-low. */
   8814 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8815 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8816 		    CTRL_EXT_SWDPIN(4));
   8817 		reg |= CTRL_EXT_SWDPIO(4);
   8818 
   8819 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8820 		CSR_WRITE_FLUSH(sc);
   8821 		delay(10*1000);
   8822 
   8823 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8824 		CSR_WRITE_FLUSH(sc);
   8825 		delay(150);
   8826 #if 0
   8827 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8828 #endif
   8829 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8830 		break;
   8831 	case WM_T_82544:	/* reset 10000us */
   8832 	case WM_T_82540:
   8833 	case WM_T_82545:
   8834 	case WM_T_82545_3:
   8835 	case WM_T_82546:
   8836 	case WM_T_82546_3:
   8837 	case WM_T_82541:
   8838 	case WM_T_82541_2:
   8839 	case WM_T_82547:
   8840 	case WM_T_82547_2:
   8841 	case WM_T_82571:	/* reset 100us */
   8842 	case WM_T_82572:
   8843 	case WM_T_82573:
   8844 	case WM_T_82574:
   8845 	case WM_T_82575:
   8846 	case WM_T_82576:
   8847 	case WM_T_82580:
   8848 	case WM_T_I350:
   8849 	case WM_T_I354:
   8850 	case WM_T_I210:
   8851 	case WM_T_I211:
   8852 	case WM_T_82583:
   8853 	case WM_T_80003:
   8854 		/* generic reset */
   8855 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8856 		CSR_WRITE_FLUSH(sc);
   8857 		delay(20000);
   8858 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8859 		CSR_WRITE_FLUSH(sc);
   8860 		delay(20000);
   8861 
   8862 		if ((sc->sc_type == WM_T_82541)
   8863 		    || (sc->sc_type == WM_T_82541_2)
   8864 		    || (sc->sc_type == WM_T_82547)
   8865 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset(). */
   8867 			/* XXX add code to set LED after phy reset */
   8868 		}
   8869 		break;
   8870 	case WM_T_ICH8:
   8871 	case WM_T_ICH9:
   8872 	case WM_T_ICH10:
   8873 	case WM_T_PCH:
   8874 	case WM_T_PCH2:
   8875 	case WM_T_PCH_LPT:
   8876 	case WM_T_PCH_SPT:
   8877 		/* generic reset */
   8878 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8879 		CSR_WRITE_FLUSH(sc);
   8880 		delay(100);
   8881 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8882 		CSR_WRITE_FLUSH(sc);
   8883 		delay(150);
   8884 		break;
   8885 	default:
   8886 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8887 		    __func__);
   8888 		break;
   8889 	}
   8890 
   8891 	sc->phy.release(sc);
   8892 
   8893 	/* get_cfg_done */
   8894 	wm_get_cfg_done(sc);
   8895 
   8896 	/* extra setup */
   8897 	switch (sc->sc_type) {
   8898 	case WM_T_82542_2_0:
   8899 	case WM_T_82542_2_1:
   8900 	case WM_T_82543:
   8901 	case WM_T_82544:
   8902 	case WM_T_82540:
   8903 	case WM_T_82545:
   8904 	case WM_T_82545_3:
   8905 	case WM_T_82546:
   8906 	case WM_T_82546_3:
   8907 	case WM_T_82541_2:
   8908 	case WM_T_82547_2:
   8909 	case WM_T_82571:
   8910 	case WM_T_82572:
   8911 	case WM_T_82573:
   8912 	case WM_T_82575:
   8913 	case WM_T_82576:
   8914 	case WM_T_82580:
   8915 	case WM_T_I350:
   8916 	case WM_T_I354:
   8917 	case WM_T_I210:
   8918 	case WM_T_I211:
   8919 	case WM_T_80003:
   8920 		/* null */
   8921 		break;
   8922 	case WM_T_82574:
   8923 	case WM_T_82583:
   8924 		wm_lplu_d0_disable(sc);
   8925 		break;
   8926 	case WM_T_82541:
   8927 	case WM_T_82547:
		/* XXX Configure activity LED after PHY reset */
   8929 		break;
   8930 	case WM_T_ICH8:
   8931 	case WM_T_ICH9:
   8932 	case WM_T_ICH10:
   8933 	case WM_T_PCH:
   8934 	case WM_T_PCH2:
   8935 	case WM_T_PCH_LPT:
   8936 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8938 		delay(10*1000);
   8939 
   8940 		if (sc->sc_type == WM_T_PCH)
   8941 			wm_hv_phy_workaround_ich8lan(sc);
   8942 
   8943 		if (sc->sc_type == WM_T_PCH2)
   8944 			wm_lv_phy_workaround_ich8lan(sc);
   8945 
   8946 		/* Clear the host wakeup bit after lcd reset */
   8947 		if (sc->sc_type >= WM_T_PCH) {
   8948 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8949 			    BM_PORT_GEN_CFG);
   8950 			reg &= ~BM_WUC_HOST_WU_BIT;
   8951 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8952 			    BM_PORT_GEN_CFG, reg);
   8953 		}
   8954 
		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM.
		 */
   8959 
   8960 		/* Disable D0 LPLU. */
   8961 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8962 			wm_lplu_d0_disable_pch(sc);
   8963 		else
   8964 			wm_lplu_d0_disable(sc);	/* ICH* */
   8965 		break;
   8966 	default:
   8967 		panic("%s: unknown type\n", __func__);
   8968 		break;
   8969 	}
   8970 }
   8971 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and they have to be selected from the PCI ID or MAC type
 * alone, without accessing any PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet.
 * Check the PCI ID or MAC type. The list of PCI IDs may not be
 * complete, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. This might still not be perfect because some entries are
 * missing from the comparison, but it is better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   8990 static void
   8991 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8992     uint16_t phy_model)
   8993 {
   8994 	device_t dev = sc->sc_dev;
   8995 	struct mii_data *mii = &sc->sc_mii;
   8996 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8997 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8998 	mii_readreg_t new_readreg;
   8999 	mii_writereg_t new_writereg;
   9000 
   9001 	if (mii->mii_readreg == NULL) {
   9002 		/*
   9003 		 *  This is the first call of this function. For ICH and PCH
   9004 		 * variants, it's difficult to determine the PHY access method
   9005 		 * by sc_type, so use the PCI product ID for some devices.
   9006 		 */
   9007 
   9008 		switch (sc->sc_pcidevid) {
   9009 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9010 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9011 			/* 82577 */
   9012 			new_phytype = WMPHY_82577;
   9013 			break;
   9014 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9015 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9016 			/* 82578 */
   9017 			new_phytype = WMPHY_82578;
   9018 			break;
   9019 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9020 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9021 			/* 82579 */
   9022 			new_phytype = WMPHY_82579;
   9023 			break;
   9024 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9025 		case PCI_PRODUCT_INTEL_82801I_BM:
   9026 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9027 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9028 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9029 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9030 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9031 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9032 			/* ICH8, 9, 10 with 82567 */
   9033 			new_phytype = WMPHY_BM;
   9034 			break;
   9035 		default:
   9036 			break;
   9037 		}
   9038 	} else {
   9039 		/* It's not the first call. Use PHY OUI and model */
   9040 		switch (phy_oui) {
   9041 		case MII_OUI_ATHEROS: /* XXX ??? */
   9042 			switch (phy_model) {
   9043 			case 0x0004: /* XXX */
   9044 				new_phytype = WMPHY_82578;
   9045 				break;
   9046 			default:
   9047 				break;
   9048 			}
   9049 			break;
   9050 		case MII_OUI_xxMARVELL:
   9051 			switch (phy_model) {
   9052 			case MII_MODEL_xxMARVELL_I210:
   9053 				new_phytype = WMPHY_I210;
   9054 				break;
   9055 			case MII_MODEL_xxMARVELL_E1011:
   9056 			case MII_MODEL_xxMARVELL_E1000_3:
   9057 			case MII_MODEL_xxMARVELL_E1000_5:
   9058 			case MII_MODEL_xxMARVELL_E1112:
   9059 				new_phytype = WMPHY_M88;
   9060 				break;
   9061 			case MII_MODEL_xxMARVELL_E1149:
   9062 				new_phytype = WMPHY_BM;
   9063 				break;
   9064 			case MII_MODEL_xxMARVELL_E1111:
   9065 			case MII_MODEL_xxMARVELL_I347:
   9066 			case MII_MODEL_xxMARVELL_E1512:
   9067 			case MII_MODEL_xxMARVELL_E1340M:
   9068 			case MII_MODEL_xxMARVELL_E1543:
   9069 				new_phytype = WMPHY_M88;
   9070 				break;
   9071 			case MII_MODEL_xxMARVELL_I82563:
   9072 				new_phytype = WMPHY_GG82563;
   9073 				break;
   9074 			default:
   9075 				break;
   9076 			}
   9077 			break;
   9078 		case MII_OUI_INTEL:
   9079 			switch (phy_model) {
   9080 			case MII_MODEL_INTEL_I82577:
   9081 				new_phytype = WMPHY_82577;
   9082 				break;
   9083 			case MII_MODEL_INTEL_I82579:
   9084 				new_phytype = WMPHY_82579;
   9085 				break;
   9086 			case MII_MODEL_INTEL_I217:
   9087 				new_phytype = WMPHY_I217;
   9088 				break;
   9089 			case MII_MODEL_INTEL_I82580:
   9090 			case MII_MODEL_INTEL_I350:
   9091 				new_phytype = WMPHY_82580;
   9092 				break;
   9093 			default:
   9094 				break;
   9095 			}
   9096 			break;
   9097 		case MII_OUI_yyINTEL:
   9098 			switch (phy_model) {
   9099 			case MII_MODEL_yyINTEL_I82562G:
   9100 			case MII_MODEL_yyINTEL_I82562EM:
   9101 			case MII_MODEL_yyINTEL_I82562ET:
   9102 				new_phytype = WMPHY_IFE;
   9103 				break;
   9104 			case MII_MODEL_yyINTEL_IGP01E1000:
   9105 				new_phytype = WMPHY_IGP;
   9106 				break;
   9107 			case MII_MODEL_yyINTEL_I82566:
   9108 				new_phytype = WMPHY_IGP_3;
   9109 				break;
   9110 			default:
   9111 				break;
   9112 			}
   9113 			break;
   9114 		default:
   9115 			break;
   9116 		}
   9117 		if (new_phytype == WMPHY_UNKNOWN)
   9118 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9119 			    __func__);
   9120 
		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev,
			    "Previously assumed PHY type (%u) was incorrect. "
			    "PHY type from PHY ID = %u\n",
			    sc->sc_phytype, new_phytype);
		}
   9127 	}
   9128 
   9129 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9130 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9131 		/* SGMII */
   9132 		new_readreg = wm_sgmii_readreg;
   9133 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   9135 		/* BM2 (phyaddr == 1) */
   9136 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9137 		    && (new_phytype != WMPHY_BM)
   9138 		    && (new_phytype != WMPHY_UNKNOWN))
   9139 			doubt_phytype = new_phytype;
   9140 		new_phytype = WMPHY_BM;
   9141 		new_readreg = wm_gmii_bm_readreg;
   9142 		new_writereg = wm_gmii_bm_writereg;
   9143 	} else if (sc->sc_type >= WM_T_PCH) {
   9144 		/* All PCH* use _hv_ */
   9145 		new_readreg = wm_gmii_hv_readreg;
   9146 		new_writereg = wm_gmii_hv_writereg;
   9147 	} else if (sc->sc_type >= WM_T_ICH8) {
   9148 		/* non-82567 ICH8, 9 and 10 */
   9149 		new_readreg = wm_gmii_i82544_readreg;
   9150 		new_writereg = wm_gmii_i82544_writereg;
   9151 	} else if (sc->sc_type >= WM_T_80003) {
   9152 		/* 80003 */
   9153 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9154 		    && (new_phytype != WMPHY_GG82563)
   9155 		    && (new_phytype != WMPHY_UNKNOWN))
   9156 			doubt_phytype = new_phytype;
   9157 		new_phytype = WMPHY_GG82563;
   9158 		new_readreg = wm_gmii_i80003_readreg;
   9159 		new_writereg = wm_gmii_i80003_writereg;
   9160 	} else if (sc->sc_type >= WM_T_I210) {
   9161 		/* I210 and I211 */
   9162 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9163 		    && (new_phytype != WMPHY_I210)
   9164 		    && (new_phytype != WMPHY_UNKNOWN))
   9165 			doubt_phytype = new_phytype;
   9166 		new_phytype = WMPHY_I210;
   9167 		new_readreg = wm_gmii_gs40g_readreg;
   9168 		new_writereg = wm_gmii_gs40g_writereg;
   9169 	} else if (sc->sc_type >= WM_T_82580) {
   9170 		/* 82580, I350 and I354 */
   9171 		new_readreg = wm_gmii_82580_readreg;
   9172 		new_writereg = wm_gmii_82580_writereg;
   9173 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9175 		new_readreg = wm_gmii_i82544_readreg;
   9176 		new_writereg = wm_gmii_i82544_writereg;
   9177 	} else {
   9178 		new_readreg = wm_gmii_i82543_readreg;
   9179 		new_writereg = wm_gmii_i82543_writereg;
   9180 	}
   9181 
   9182 	if (new_phytype == WMPHY_BM) {
   9183 		/* All BM use _bm_ */
   9184 		new_readreg = wm_gmii_bm_readreg;
   9185 		new_writereg = wm_gmii_bm_writereg;
   9186 	}
   9187 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9188 		/* All PCH* use _hv_ */
   9189 		new_readreg = wm_gmii_hv_readreg;
   9190 		new_writereg = wm_gmii_hv_writereg;
   9191 	}
   9192 
   9193 	/* Diag output */
   9194 	if (doubt_phytype != WMPHY_UNKNOWN)
   9195 		aprint_error_dev(dev, "Assumed new PHY type was "
   9196 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9197 		    new_phytype);
	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev,
		    "Previously assumed PHY type (%u) was incorrect. "
		    "New PHY type = %u\n", sc->sc_phytype, new_phytype);
   9203 
   9204 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9205 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9206 
   9207 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9208 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9209 		    "function was incorrect.\n");
   9210 
   9211 	/* Update now */
   9212 	sc->sc_phytype = new_phytype;
   9213 	mii->mii_readreg = new_readreg;
   9214 	mii->mii_writereg = new_writereg;
   9215 }
   9216 
   9217 /*
   9218  * wm_get_phy_id_82575:
   9219  *
   9220  * Return PHY ID. Return -1 if it failed.
   9221  */
   9222 static int
   9223 wm_get_phy_id_82575(struct wm_softc *sc)
   9224 {
   9225 	uint32_t reg;
   9226 	int phyid = -1;
   9227 
   9228 	/* XXX */
   9229 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9230 		return -1;
   9231 
   9232 	if (wm_sgmii_uses_mdio(sc)) {
   9233 		switch (sc->sc_type) {
   9234 		case WM_T_82575:
   9235 		case WM_T_82576:
   9236 			reg = CSR_READ(sc, WMREG_MDIC);
   9237 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9238 			break;
   9239 		case WM_T_82580:
   9240 		case WM_T_I350:
   9241 		case WM_T_I354:
   9242 		case WM_T_I210:
   9243 		case WM_T_I211:
   9244 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9245 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9246 			break;
   9247 		default:
   9248 			return -1;
   9249 		}
   9250 	}
   9251 
   9252 	return phyid;
   9253 }
   9254 
   9255 
   9256 /*
   9257  * wm_gmii_mediainit:
   9258  *
   9259  *	Initialize media for use on 1000BASE-T devices.
   9260  */
   9261 static void
   9262 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9263 {
   9264 	device_t dev = sc->sc_dev;
   9265 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9266 	struct mii_data *mii = &sc->sc_mii;
   9267 	uint32_t reg;
   9268 
   9269 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9270 		device_xname(sc->sc_dev), __func__));
   9271 
   9272 	/* We have GMII. */
   9273 	sc->sc_flags |= WM_F_HAS_MII;
   9274 
   9275 	if (sc->sc_type == WM_T_80003)
   9276 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9277 	else
   9278 		sc->sc_tipg = TIPG_1000T_DFLT;
   9279 
   9280 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9281 	if ((sc->sc_type == WM_T_82580)
   9282 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9283 	    || (sc->sc_type == WM_T_I211)) {
   9284 		reg = CSR_READ(sc, WMREG_PHPM);
   9285 		reg &= ~PHPM_GO_LINK_D;
   9286 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9287 	}
   9288 
   9289 	/*
   9290 	 * Let the chip set speed/duplex on its own based on
   9291 	 * signals from the PHY.
   9292 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9293 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9294 	 */
   9295 	sc->sc_ctrl |= CTRL_SLU;
   9296 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9297 
   9298 	/* Initialize our media structures and probe the GMII. */
   9299 	mii->mii_ifp = ifp;
   9300 
   9301 	/*
	 * The first call of wm_gmii_setup_phytype. The result might be
   9303 	 * incorrect.
   9304 	 */
   9305 	wm_gmii_setup_phytype(sc, 0, 0);
   9306 
   9307 	mii->mii_statchg = wm_gmii_statchg;
   9308 
   9309 	/* get PHY control from SMBus to PCIe */
   9310 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9311 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9312 		wm_smbustopci(sc);
   9313 
   9314 	wm_gmii_reset(sc);
   9315 
   9316 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9317 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9318 	    wm_gmii_mediastatus);
   9319 
   9320 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9321 	    || (sc->sc_type == WM_T_82580)
   9322 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9323 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9324 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9325 			/* Attach only one port */
   9326 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9327 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9328 		} else {
   9329 			int i, id;
   9330 			uint32_t ctrl_ext;
   9331 
   9332 			id = wm_get_phy_id_82575(sc);
   9333 			if (id != -1) {
   9334 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9335 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9336 			}
   9337 			if ((id == -1)
   9338 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   9340 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9341 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9342 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9343 				CSR_WRITE_FLUSH(sc);
   9344 				delay(300*1000); /* XXX too long */
   9345 
				/* Try PHY addresses 1 through 7 */
   9347 				for (i = 1; i < 8; i++)
   9348 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9349 					    0xffffffff, i, MII_OFFSET_ANY,
   9350 					    MIIF_DOPAUSE);
   9351 
   9352 				/* restore previous sfp cage power state */
   9353 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9354 			}
   9355 		}
   9356 	} else {
   9357 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9358 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9359 	}
   9360 
   9361 	/*
   9362 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9363 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9364 	 */
   9365 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9366 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9367 		wm_set_mdio_slow_mode_hv(sc);
   9368 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9369 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9370 	}
   9371 
   9372 	/*
   9373 	 * (For ICH8 variants)
   9374 	 * If PHY detection failed, use BM's r/w function and retry.
   9375 	 */
   9376 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9377 		/* if failed, retry with *_bm_* */
   9378 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9379 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9380 		    sc->sc_phytype);
   9381 		sc->sc_phytype = WMPHY_BM;
   9382 		mii->mii_readreg = wm_gmii_bm_readreg;
   9383 		mii->mii_writereg = wm_gmii_bm_writereg;
   9384 
   9385 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9386 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9387 	}
   9388 
   9389 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9391 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9392 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9393 		sc->sc_phytype = WMPHY_NONE;
   9394 	} else {
   9395 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9396 
   9397 		/*
		 * PHY found!  Check the PHY type again by the second call
		 * of wm_gmii_setup_phytype.
   9400 		 */
   9401 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9402 		    child->mii_mpd_model);
   9403 
   9404 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9405 	}
   9406 }
   9407 
   9408 /*
   9409  * wm_gmii_mediachange:	[ifmedia interface function]
   9410  *
   9411  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9412  */
   9413 static int
   9414 wm_gmii_mediachange(struct ifnet *ifp)
   9415 {
   9416 	struct wm_softc *sc = ifp->if_softc;
   9417 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9418 	int rc;
   9419 
   9420 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9421 		device_xname(sc->sc_dev), __func__));
   9422 	if ((ifp->if_flags & IFF_UP) == 0)
   9423 		return 0;
   9424 
   9425 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9426 	sc->sc_ctrl |= CTRL_SLU;
   9427 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9428 	    || (sc->sc_type > WM_T_82543)) {
   9429 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9430 	} else {
   9431 		sc->sc_ctrl &= ~CTRL_ASDE;
   9432 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9433 		if (ife->ifm_media & IFM_FDX)
   9434 			sc->sc_ctrl |= CTRL_FD;
   9435 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9436 		case IFM_10_T:
   9437 			sc->sc_ctrl |= CTRL_SPEED_10;
   9438 			break;
   9439 		case IFM_100_TX:
   9440 			sc->sc_ctrl |= CTRL_SPEED_100;
   9441 			break;
   9442 		case IFM_1000_T:
   9443 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9444 			break;
   9445 		default:
   9446 			panic("wm_gmii_mediachange: bad media 0x%x",
   9447 			    ife->ifm_media);
   9448 		}
   9449 	}
   9450 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9451 	CSR_WRITE_FLUSH(sc);
   9452 	if (sc->sc_type <= WM_T_82543)
   9453 		wm_gmii_reset(sc);
   9454 
   9455 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9456 		return 0;
   9457 	return rc;
   9458 }
   9459 
   9460 /*
   9461  * wm_gmii_mediastatus:	[ifmedia interface function]
   9462  *
   9463  *	Get the current interface media status on a 1000BASE-T device.
   9464  */
   9465 static void
   9466 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9467 {
   9468 	struct wm_softc *sc = ifp->if_softc;
   9469 
   9470 	ether_mediastatus(ifp, ifmr);
   9471 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9472 	    | sc->sc_flowflags;
   9473 }
   9474 
   9475 #define	MDI_IO		CTRL_SWDPIN(2)
   9476 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9477 #define	MDI_CLK		CTRL_SWDPIN(3)
   9478 
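/*
 * On the i82543 the MII management interface is bit-banged through
 * software-controlled pins in the CTRL register: MDI_IO carries the
 * data bit, MDI_CLK is the management clock, and MDI_DIR sets the data
 * pin direction (host -> PHY).  A management frame consists of a
 * 32-bit preamble of ones, start and opcode bits, the PHY and register
 * addresses, a turnaround, and 16 data bits, with each bit clocked by
 * a low-high-low pulse on MDI_CLK.
 */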
   9479 static void
   9480 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9481 {
   9482 	uint32_t i, v;
   9483 
   9484 	v = CSR_READ(sc, WMREG_CTRL);
   9485 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9486 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9487 
   9488 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9489 		if (data & i)
   9490 			v |= MDI_IO;
   9491 		else
   9492 			v &= ~MDI_IO;
   9493 		CSR_WRITE(sc, WMREG_CTRL, v);
   9494 		CSR_WRITE_FLUSH(sc);
   9495 		delay(10);
   9496 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9497 		CSR_WRITE_FLUSH(sc);
   9498 		delay(10);
   9499 		CSR_WRITE(sc, WMREG_CTRL, v);
   9500 		CSR_WRITE_FLUSH(sc);
   9501 		delay(10);
   9502 	}
   9503 }
   9504 
   9505 static uint32_t
   9506 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9507 {
   9508 	uint32_t v, i, data = 0;
   9509 
   9510 	v = CSR_READ(sc, WMREG_CTRL);
   9511 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9512 	v |= CTRL_SWDPIO(3);
   9513 
   9514 	CSR_WRITE(sc, WMREG_CTRL, v);
   9515 	CSR_WRITE_FLUSH(sc);
   9516 	delay(10);
   9517 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9518 	CSR_WRITE_FLUSH(sc);
   9519 	delay(10);
   9520 	CSR_WRITE(sc, WMREG_CTRL, v);
   9521 	CSR_WRITE_FLUSH(sc);
   9522 	delay(10);
   9523 
   9524 	for (i = 0; i < 16; i++) {
   9525 		data <<= 1;
   9526 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9527 		CSR_WRITE_FLUSH(sc);
   9528 		delay(10);
   9529 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9530 			data |= 1;
   9531 		CSR_WRITE(sc, WMREG_CTRL, v);
   9532 		CSR_WRITE_FLUSH(sc);
   9533 		delay(10);
   9534 	}
   9535 
   9536 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9537 	CSR_WRITE_FLUSH(sc);
   9538 	delay(10);
   9539 	CSR_WRITE(sc, WMREG_CTRL, v);
   9540 	CSR_WRITE_FLUSH(sc);
   9541 	delay(10);
   9542 
   9543 	return data;
   9544 }
   9545 
   9546 #undef MDI_IO
   9547 #undef MDI_DIR
   9548 #undef MDI_CLK
   9549 
   9550 /*
   9551  * wm_gmii_i82543_readreg:	[mii interface function]
   9552  *
   9553  *	Read a PHY register on the GMII (i82543 version).
   9554  */
   9555 static int
   9556 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9557 {
   9558 	struct wm_softc *sc = device_private(self);
   9559 	int rv;
   9560 
   9561 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9562 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9563 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9564 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9565 
   9566 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9567 	    device_xname(sc->sc_dev), phy, reg, rv));
   9568 
   9569 	return rv;
   9570 }
   9571 
   9572 /*
   9573  * wm_gmii_i82543_writereg:	[mii interface function]
   9574  *
   9575  *	Write a PHY register on the GMII (i82543 version).
   9576  */
   9577 static void
   9578 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9579 {
   9580 	struct wm_softc *sc = device_private(self);
   9581 
   9582 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9583 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9584 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9585 	    (MII_COMMAND_START << 30), 32);
   9586 }
   9587 
   9588 /*
   9589  * wm_gmii_mdic_readreg:	[mii interface function]
   9590  *
   9591  *	Read a PHY register on the GMII.
   9592  */
   9593 static int
   9594 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9595 {
   9596 	struct wm_softc *sc = device_private(self);
   9597 	uint32_t mdic = 0;
   9598 	int i, rv;
   9599 
   9600 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9601 	    MDIC_REGADD(reg));
   9602 
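         	/* Poll for the READY bit; each iteration waits 50us. */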
   9603 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9604 		mdic = CSR_READ(sc, WMREG_MDIC);
   9605 		if (mdic & MDIC_READY)
   9606 			break;
   9607 		delay(50);
   9608 	}
   9609 
   9610 	if ((mdic & MDIC_READY) == 0) {
   9611 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9612 		    device_xname(sc->sc_dev), phy, reg);
   9613 		rv = 0;
   9614 	} else if (mdic & MDIC_E) {
   9615 #if 0 /* This is normal if no PHY is present. */
   9616 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9617 		    device_xname(sc->sc_dev), phy, reg);
   9618 #endif
   9619 		rv = 0;
   9620 	} else {
   9621 		rv = MDIC_DATA(mdic);
   9622 		if (rv == 0xffff)
   9623 			rv = 0;
   9624 	}
   9625 
   9626 	return rv;
   9627 }
   9628 
   9629 /*
   9630  * wm_gmii_mdic_writereg:	[mii interface function]
   9631  *
   9632  *	Write a PHY register on the GMII.
   9633  */
   9634 static void
   9635 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9636 {
   9637 	struct wm_softc *sc = device_private(self);
   9638 	uint32_t mdic = 0;
   9639 	int i;
   9640 
   9641 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9642 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9643 
   9644 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9645 		mdic = CSR_READ(sc, WMREG_MDIC);
   9646 		if (mdic & MDIC_READY)
   9647 			break;
   9648 		delay(50);
   9649 	}
   9650 
   9651 	if ((mdic & MDIC_READY) == 0)
   9652 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9653 		    device_xname(sc->sc_dev), phy, reg);
   9654 	else if (mdic & MDIC_E)
   9655 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9656 		    device_xname(sc->sc_dev), phy, reg);
   9657 }
   9658 
   9659 /*
   9660  * wm_gmii_i82544_readreg:	[mii interface function]
   9661  *
   9662  *	Read a PHY register on the GMII.
   9663  */
   9664 static int
   9665 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9666 {
   9667 	struct wm_softc *sc = device_private(self);
   9668 	int rv;
   9669 
   9670 	if (sc->phy.acquire(sc)) {
   9671 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9672 		    __func__);
   9673 		return 0;
   9674 	}
   9675 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9676 	sc->phy.release(sc);
   9677 
   9678 	return rv;
   9679 }
   9680 
   9681 /*
   9682  * wm_gmii_i82544_writereg:	[mii interface function]
   9683  *
   9684  *	Write a PHY register on the GMII.
   9685  */
   9686 static void
   9687 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9688 {
   9689 	struct wm_softc *sc = device_private(self);
   9690 
   9691 	if (sc->phy.acquire(sc)) {
   9692 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9693 		    __func__);
         		return;
    9694 	}
   9695 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9696 	sc->phy.release(sc);
   9697 }
   9698 
   9699 /*
   9700  * wm_gmii_i80003_readreg:	[mii interface function]
   9701  *
    9702  *	Read a PHY register on the kumeran bus.
    9703  * This could be handled by the PHY layer if we didn't have to lock the
    9704  * resource ...
   9705  */
   9706 static int
   9707 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9708 {
   9709 	struct wm_softc *sc = device_private(self);
   9710 	int rv;
   9711 
   9712 	if (phy != 1) /* only one PHY on kumeran bus */
   9713 		return 0;
   9714 
   9715 	if (sc->phy.acquire(sc)) {
   9716 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9717 		    __func__);
   9718 		return 0;
   9719 	}
   9720 
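         	/*
         	 * Registers at or above GG82563_MIN_ALT_REG must be reached
         	 * through the alternate page select register.
         	 */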
   9721 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9722 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9723 		    reg >> GG82563_PAGE_SHIFT);
   9724 	} else {
   9725 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9726 		    reg >> GG82563_PAGE_SHIFT);
   9727 	}
    9728 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9729 	delay(200);
   9730 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9731 	delay(200);
   9732 	sc->phy.release(sc);
   9733 
   9734 	return rv;
   9735 }
   9736 
   9737 /*
   9738  * wm_gmii_i80003_writereg:	[mii interface function]
   9739  *
    9740  *	Write a PHY register on the kumeran bus.
    9741  * This could be handled by the PHY layer if we didn't have to lock the
    9742  * resource ...
   9743  */
   9744 static void
   9745 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9746 {
   9747 	struct wm_softc *sc = device_private(self);
   9748 
   9749 	if (phy != 1) /* only one PHY on kumeran bus */
   9750 		return;
   9751 
   9752 	if (sc->phy.acquire(sc)) {
   9753 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9754 		    __func__);
   9755 		return;
   9756 	}
   9757 
   9758 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9759 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9760 		    reg >> GG82563_PAGE_SHIFT);
   9761 	} else {
   9762 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9763 		    reg >> GG82563_PAGE_SHIFT);
   9764 	}
    9765 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9766 	delay(200);
   9767 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9768 	delay(200);
   9769 
   9770 	sc->phy.release(sc);
   9771 }
   9772 
   9773 /*
   9774  * wm_gmii_bm_readreg:	[mii interface function]
   9775  *
    9776  *	Read a PHY register on the BM PHY.
    9777  * This could be handled by the PHY layer if we didn't have to lock the
    9778  * resource ...
   9779  */
   9780 static int
   9781 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9782 {
   9783 	struct wm_softc *sc = device_private(self);
   9784 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9785 	uint16_t val;
   9786 	int rv;
   9787 
   9788 	if (sc->phy.acquire(sc)) {
   9789 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9790 		    __func__);
   9791 		return 0;
   9792 	}
   9793 
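         	/*
         	 * Registers on pages >= 768, page 0 register 25 and register
         	 * 31 are only reachable through PHY address 1 on these parts,
         	 * so redirect such accesses.
         	 */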
   9794 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9795 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9796 		    || (reg == 31)) ? 1 : phy;
   9797 	/* Page 800 works differently than the rest so it has its own func */
   9798 	if (page == BM_WUC_PAGE) {
   9799 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9800 		rv = val;
   9801 		goto release;
   9802 	}
   9803 
   9804 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9805 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9806 		    && (sc->sc_type != WM_T_82583))
   9807 			wm_gmii_mdic_writereg(self, phy,
   9808 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9809 		else
   9810 			wm_gmii_mdic_writereg(self, phy,
   9811 			    BME1000_PHY_PAGE_SELECT, page);
   9812 	}
   9813 
   9814 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9815 
   9816 release:
   9817 	sc->phy.release(sc);
   9818 	return rv;
   9819 }
   9820 
   9821 /*
   9822  * wm_gmii_bm_writereg:	[mii interface function]
   9823  *
    9824  *	Write a PHY register on the BM PHY.
    9825  * This could be handled by the PHY layer if we didn't have to lock the
    9826  * resource ...
   9827  */
   9828 static void
   9829 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9830 {
   9831 	struct wm_softc *sc = device_private(self);
   9832 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9833 
   9834 	if (sc->phy.acquire(sc)) {
   9835 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9836 		    __func__);
   9837 		return;
   9838 	}
   9839 
   9840 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9841 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9842 		    || (reg == 31)) ? 1 : phy;
   9843 	/* Page 800 works differently than the rest so it has its own func */
   9844 	if (page == BM_WUC_PAGE) {
   9845 		uint16_t tmp;
   9846 
   9847 		tmp = val;
   9848 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9849 		goto release;
   9850 	}
   9851 
   9852 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9853 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9854 		    && (sc->sc_type != WM_T_82583))
   9855 			wm_gmii_mdic_writereg(self, phy,
   9856 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9857 		else
   9858 			wm_gmii_mdic_writereg(self, phy,
   9859 			    BME1000_PHY_PAGE_SELECT, page);
   9860 	}
   9861 
   9862 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9863 
   9864 release:
   9865 	sc->phy.release(sc);
   9866 }
   9867 
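         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read (rd != 0) or write a BM PHY wakeup register on page 800.
          *	Page 800 can't be reached by a plain page select: access is
          *	enabled through the WUC enable register on page 769, the
          *	target register is addressed via the address/data opcode
          *	registers, and the saved WUC enable bits are restored
          *	afterwards.
          */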
   9868 static void
    9869 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9870 {
   9871 	struct wm_softc *sc = device_private(self);
   9872 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9873 	uint16_t wuce, reg;
   9874 
   9875 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9876 		device_xname(sc->sc_dev), __func__));
   9877 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9878 	if (sc->sc_type == WM_T_PCH) {
    9879 		/* XXX The e1000 driver does nothing here... why? */
   9880 	}
   9881 
   9882 	/*
   9883 	 * 1) Enable PHY wakeup register first.
   9884 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9885 	 */
   9886 
   9887 	/* Set page 769 */
   9888 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9889 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9890 
   9891 	/* Read WUCE and save it */
   9892 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9893 
   9894 	reg = wuce | BM_WUC_ENABLE_BIT;
   9895 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9896 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9897 
   9898 	/* Select page 800 */
   9899 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9900 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9901 
   9902 	/*
   9903 	 * 2) Access PHY wakeup register.
   9904 	 * See e1000_access_phy_wakeup_reg_bm.
   9905 	 */
   9906 
    9907 	/* Write the page 800 register number to the address opcode reg */
   9908 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9909 
   9910 	if (rd)
   9911 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9912 	else
   9913 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9914 
   9915 	/*
   9916 	 * 3) Disable PHY wakeup register.
   9917 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9918 	 */
   9919 	/* Set page 769 */
   9920 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9921 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9922 
   9923 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9924 }
   9925 
   9926 /*
   9927  * wm_gmii_hv_readreg:	[mii interface function]
   9928  *
    9929  *	Read a PHY register on the HV PHY (PCH).
    9930  * This could be handled by the PHY layer if we didn't have to lock the
    9931  * resource ...
   9932  */
   9933 static int
   9934 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9935 {
   9936 	struct wm_softc *sc = device_private(self);
   9937 	int rv;
   9938 
   9939 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9940 		device_xname(sc->sc_dev), __func__));
   9941 	if (sc->phy.acquire(sc)) {
   9942 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9943 		    __func__);
   9944 		return 0;
   9945 	}
   9946 
   9947 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9948 	sc->phy.release(sc);
   9949 	return rv;
   9950 }
   9951 
   9952 static int
   9953 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9954 {
   9955 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9956 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9957 	uint16_t val;
   9958 	int rv;
   9959 
   9960 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9961 
   9962 	/* Page 800 works differently than the rest so it has its own func */
   9963 	if (page == BM_WUC_PAGE) {
   9964 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9965 		return val;
   9966 	}
   9967 
   9968 	/*
   9969 	 * Lower than page 768 works differently than the rest so it has its
   9970 	 * own func
   9971 	 */
   9972 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9973 		printf("gmii_hv_readreg!!!\n");
   9974 		return 0;
   9975 	}
   9976 
   9977 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9978 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9979 		    page << BME1000_PAGE_SHIFT);
   9980 	}
   9981 
   9982 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9983 	return rv;
   9984 }
   9985 
   9986 /*
   9987  * wm_gmii_hv_writereg:	[mii interface function]
   9988  *
    9989  *	Write a PHY register on the HV PHY (PCH).
    9990  * This could be handled by the PHY layer if we didn't have to lock the
    9991  * resource ...
   9992  */
   9993 static void
   9994 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9995 {
   9996 	struct wm_softc *sc = device_private(self);
   9997 
   9998 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9999 		device_xname(sc->sc_dev), __func__));
   10000 
   10001 	if (sc->phy.acquire(sc)) {
   10002 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10003 		    __func__);
   10004 		return;
   10005 	}
   10006 
   10007 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   10008 	sc->phy.release(sc);
   10009 }
   10010 
   10011 static void
   10012 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   10013 {
   10014 	struct wm_softc *sc = device_private(self);
   10015 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10016 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10017 
   10018 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10019 
   10020 	/* Page 800 works differently than the rest so it has its own func */
   10021 	if (page == BM_WUC_PAGE) {
   10022 		uint16_t tmp;
   10023 
   10024 		tmp = val;
   10025 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   10026 		return;
   10027 	}
   10028 
   10029 	/*
   10030 	 * Lower than page 768 works differently than the rest so it has its
   10031 	 * own func
   10032 	 */
   10033 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10034 		printf("gmii_hv_writereg!!!\n");
   10035 		return;
   10036 	}
   10037 
   10038 	{
   10039 		/*
   10040 		 * XXX Workaround MDIO accesses being disabled after entering
   10041 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10042 		 * register is set)
   10043 		 */
   10044 		if (sc->sc_phytype == WMPHY_82578) {
   10045 			struct mii_softc *child;
   10046 
   10047 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10048 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10049 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10050 			    && ((val & (1 << 11)) != 0)) {
   10051 				printf("XXX need workaround\n");
   10052 			}
   10053 		}
   10054 
   10055 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10056 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   10057 			    page << BME1000_PAGE_SHIFT);
   10058 		}
   10059 	}
   10060 
   10061 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10062 }
   10063 
   10064 /*
   10065  * wm_gmii_82580_readreg:	[mii interface function]
   10066  *
   10067  *	Read a PHY register on the 82580 and I350.
   10068  * This could be handled by the PHY layer if we didn't have to lock the
    10069  * resource ...
   10070  */
   10071 static int
   10072 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10073 {
   10074 	struct wm_softc *sc = device_private(self);
   10075 	int rv;
   10076 
   10077 	if (sc->phy.acquire(sc) != 0) {
   10078 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10079 		    __func__);
   10080 		return 0;
   10081 	}
   10082 
   10083 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10084 
   10085 	sc->phy.release(sc);
   10086 	return rv;
   10087 }
   10088 
   10089 /*
   10090  * wm_gmii_82580_writereg:	[mii interface function]
   10091  *
   10092  *	Write a PHY register on the 82580 and I350.
   10093  * This could be handled by the PHY layer if we didn't have to lock the
    10094  * resource ...
   10095  */
   10096 static void
   10097 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10098 {
   10099 	struct wm_softc *sc = device_private(self);
   10100 
   10101 	if (sc->phy.acquire(sc) != 0) {
   10102 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10103 		    __func__);
   10104 		return;
   10105 	}
   10106 
   10107 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10108 
   10109 	sc->phy.release(sc);
   10110 }
   10111 
   10112 /*
   10113  * wm_gmii_gs40g_readreg:	[mii interface function]
   10114  *
    10115  *	Read a PHY register on the I210 and I211.
    10116  * This could be handled by the PHY layer if we didn't have to lock the
    10117  * resource ...
   10118  */
   10119 static int
   10120 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10121 {
   10122 	struct wm_softc *sc = device_private(self);
   10123 	int page, offset;
   10124 	int rv;
   10125 
   10126 	/* Acquire semaphore */
   10127 	if (sc->phy.acquire(sc)) {
   10128 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10129 		    __func__);
   10130 		return 0;
   10131 	}
   10132 
   10133 	/* Page select */
   10134 	page = reg >> GS40G_PAGE_SHIFT;
   10135 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10136 
   10137 	/* Read reg */
   10138 	offset = reg & GS40G_OFFSET_MASK;
   10139 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10140 
   10141 	sc->phy.release(sc);
   10142 	return rv;
   10143 }
   10144 
   10145 /*
   10146  * wm_gmii_gs40g_writereg:	[mii interface function]
   10147  *
   10148  *	Write a PHY register on the I210 and I211.
   10149  * This could be handled by the PHY layer if we didn't have to lock the
    10150  * resource ...
   10151  */
   10152 static void
   10153 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10154 {
   10155 	struct wm_softc *sc = device_private(self);
   10156 	int page, offset;
   10157 
   10158 	/* Acquire semaphore */
   10159 	if (sc->phy.acquire(sc)) {
   10160 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10161 		    __func__);
   10162 		return;
   10163 	}
   10164 
   10165 	/* Page select */
   10166 	page = reg >> GS40G_PAGE_SHIFT;
   10167 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10168 
   10169 	/* Write reg */
   10170 	offset = reg & GS40G_OFFSET_MASK;
   10171 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10172 
   10173 	/* Release semaphore */
   10174 	sc->phy.release(sc);
   10175 }
   10176 
   10177 /*
   10178  * wm_gmii_statchg:	[mii interface function]
   10179  *
   10180  *	Callback from MII layer when media changes.
   10181  */
   10182 static void
   10183 wm_gmii_statchg(struct ifnet *ifp)
   10184 {
   10185 	struct wm_softc *sc = ifp->if_softc;
   10186 	struct mii_data *mii = &sc->sc_mii;
   10187 
   10188 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10189 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10190 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10191 
   10192 	/*
   10193 	 * Get flow control negotiation result.
   10194 	 */
   10195 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10196 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10197 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10198 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10199 	}
   10200 
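          	/* Program the TX/RX flow control enables from the result. */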
   10201 	if (sc->sc_flowflags & IFM_FLOW) {
   10202 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10203 			sc->sc_ctrl |= CTRL_TFCE;
   10204 			sc->sc_fcrtl |= FCRTL_XONE;
   10205 		}
   10206 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10207 			sc->sc_ctrl |= CTRL_RFCE;
   10208 	}
   10209 
   10210 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10211 		DPRINTF(WM_DEBUG_LINK,
   10212 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10213 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10214 	} else {
   10215 		DPRINTF(WM_DEBUG_LINK,
   10216 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10217 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10218 	}
   10219 
   10220 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10221 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10222 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10223 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10224 	if (sc->sc_type == WM_T_80003) {
   10225 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10226 		case IFM_1000_T:
   10227 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10228 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10229 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10230 			break;
   10231 		default:
   10232 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10233 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10234 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10235 			break;
   10236 		}
   10237 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10238 	}
   10239 }
   10240 
   10241 /* kumeran related (80003, ICH* and PCH*) */
   10242 
   10243 /*
   10244  * wm_kmrn_readreg:
   10245  *
   10246  *	Read a kumeran register
   10247  */
   10248 static int
   10249 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10250 {
   10251 	int rv;
   10252 
   10253 	if (sc->sc_type == WM_T_80003)
   10254 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10255 	else
   10256 		rv = sc->phy.acquire(sc);
   10257 	if (rv != 0) {
   10258 		aprint_error_dev(sc->sc_dev,
   10259 		    "%s: failed to get semaphore\n", __func__);
   10260 		return 0;
   10261 	}
   10262 
   10263 	rv = wm_kmrn_readreg_locked(sc, reg);
   10264 
   10265 	if (sc->sc_type == WM_T_80003)
   10266 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10267 	else
   10268 		sc->phy.release(sc);
   10269 
   10270 	return rv;
   10271 }
   10272 
   10273 static int
   10274 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10275 {
   10276 	int rv;
   10277 
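          	/*
          	 * Write the register offset with REN (read enable) set, give
          	 * the hardware 2us, then pull the data out of KUMCTRLSTA.
          	 */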
   10278 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10279 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10280 	    KUMCTRLSTA_REN);
   10281 	CSR_WRITE_FLUSH(sc);
   10282 	delay(2);
   10283 
   10284 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10285 
   10286 	return rv;
   10287 }
   10288 
   10289 /*
   10290  * wm_kmrn_writereg:
   10291  *
   10292  *	Write a kumeran register
   10293  */
   10294 static void
   10295 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10296 {
   10297 	int rv;
   10298 
   10299 	if (sc->sc_type == WM_T_80003)
   10300 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10301 	else
   10302 		rv = sc->phy.acquire(sc);
   10303 	if (rv != 0) {
   10304 		aprint_error_dev(sc->sc_dev,
   10305 		    "%s: failed to get semaphore\n", __func__);
   10306 		return;
   10307 	}
   10308 
   10309 	wm_kmrn_writereg_locked(sc, reg, val);
   10310 
   10311 	if (sc->sc_type == WM_T_80003)
   10312 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10313 	else
   10314 		sc->phy.release(sc);
   10315 }
   10316 
   10317 static void
   10318 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10319 {
   10320 
   10321 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10322 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10323 	    (val & KUMCTRLSTA_MASK));
   10324 }
   10325 
   10326 /* SGMII related */
   10327 
   10328 /*
   10329  * wm_sgmii_uses_mdio
   10330  *
   10331  * Check whether the transaction is to the internal PHY or the external
   10332  * MDIO interface. Return true if it's MDIO.
   10333  */
   10334 static bool
   10335 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10336 {
   10337 	uint32_t reg;
   10338 	bool ismdio = false;
   10339 
   10340 	switch (sc->sc_type) {
   10341 	case WM_T_82575:
   10342 	case WM_T_82576:
   10343 		reg = CSR_READ(sc, WMREG_MDIC);
   10344 		ismdio = ((reg & MDIC_DEST) != 0);
   10345 		break;
   10346 	case WM_T_82580:
   10347 	case WM_T_I350:
   10348 	case WM_T_I354:
   10349 	case WM_T_I210:
   10350 	case WM_T_I211:
   10351 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10352 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10353 		break;
   10354 	default:
   10355 		break;
   10356 	}
   10357 
   10358 	return ismdio;
   10359 }
   10360 
   10361 /*
   10362  * wm_sgmii_readreg:	[mii interface function]
   10363  *
   10364  *	Read a PHY register on the SGMII
   10365  * This could be handled by the PHY layer if we didn't have to lock the
    10366  * resource ...
   10367  */
   10368 static int
   10369 wm_sgmii_readreg(device_t self, int phy, int reg)
   10370 {
   10371 	struct wm_softc *sc = device_private(self);
   10372 	uint32_t i2ccmd;
   10373 	int i, rv;
   10374 
   10375 	if (sc->phy.acquire(sc)) {
   10376 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10377 		    __func__);
   10378 		return 0;
   10379 	}
   10380 
   10381 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10382 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10383 	    | I2CCMD_OPCODE_READ;
   10384 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10385 
   10386 	/* Poll the ready bit */
   10387 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10388 		delay(50);
   10389 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10390 		if (i2ccmd & I2CCMD_READY)
   10391 			break;
   10392 	}
   10393 	if ((i2ccmd & I2CCMD_READY) == 0)
   10394 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10395 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10396 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10397 
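          	/* The data arrives byte-swapped over I2C; swap it back. */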
   10398 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10399 
   10400 	sc->phy.release(sc);
   10401 	return rv;
   10402 }
   10403 
   10404 /*
   10405  * wm_sgmii_writereg:	[mii interface function]
   10406  *
   10407  *	Write a PHY register on the SGMII.
   10408  * This could be handled by the PHY layer if we didn't have to lock the
    10409  * resource ...
   10410  */
   10411 static void
   10412 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10413 {
   10414 	struct wm_softc *sc = device_private(self);
   10415 	uint32_t i2ccmd;
   10416 	int i;
   10417 	int val_swapped;
   10418 
   10419 	if (sc->phy.acquire(sc) != 0) {
   10420 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10421 		    __func__);
   10422 		return;
   10423 	}
   10424 	/* Swap the data bytes for the I2C interface */
   10425 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10426 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10427 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10428 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10429 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10430 
   10431 	/* Poll the ready bit */
   10432 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10433 		delay(50);
   10434 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10435 		if (i2ccmd & I2CCMD_READY)
   10436 			break;
   10437 	}
   10438 	if ((i2ccmd & I2CCMD_READY) == 0)
   10439 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10440 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10441 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10442 
   10443 	sc->phy.release(sc);
   10444 }
   10445 
   10446 /* TBI related */
   10447 
   10448 /*
   10449  * wm_tbi_mediainit:
   10450  *
   10451  *	Initialize media for use on 1000BASE-X devices.
   10452  */
   10453 static void
   10454 wm_tbi_mediainit(struct wm_softc *sc)
   10455 {
   10456 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10457 	const char *sep = "";
   10458 
   10459 	if (sc->sc_type < WM_T_82543)
   10460 		sc->sc_tipg = TIPG_WM_DFLT;
   10461 	else
   10462 		sc->sc_tipg = TIPG_LG_DFLT;
   10463 
   10464 	sc->sc_tbi_serdes_anegticks = 5;
   10465 
   10466 	/* Initialize our media structures */
   10467 	sc->sc_mii.mii_ifp = ifp;
   10468 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10469 
   10470 	if ((sc->sc_type >= WM_T_82575)
   10471 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10472 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10473 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10474 	else
   10475 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10476 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10477 
   10478 	/*
   10479 	 * SWD Pins:
   10480 	 *
   10481 	 *	0 = Link LED (output)
   10482 	 *	1 = Loss Of Signal (input)
   10483 	 */
   10484 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10485 
   10486 	/* XXX Perhaps this is only for TBI */
   10487 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10488 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10489 
   10490 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10491 		sc->sc_ctrl &= ~CTRL_LRST;
   10492 
   10493 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10494 
   10495 #define	ADD(ss, mm, dd)							\
   10496 do {									\
   10497 	aprint_normal("%s%s", sep, ss);					\
   10498 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10499 	sep = ", ";							\
   10500 } while (/*CONSTCOND*/0)
   10501 
   10502 	aprint_normal_dev(sc->sc_dev, "");
   10503 
   10504 	if (sc->sc_type == WM_T_I354) {
   10505 		uint32_t status;
   10506 
   10507 		status = CSR_READ(sc, WMREG_STATUS);
   10508 		if (((status & STATUS_2P5_SKU) != 0)
   10509 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10510 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10511 		} else
   10512 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10513 	} else if (sc->sc_type == WM_T_82545) {
   10514 		/* Only 82545 is LX (XXX except SFP) */
   10515 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10516 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10517 	} else {
   10518 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10519 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10520 	}
   10521 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10522 	aprint_normal("\n");
   10523 
   10524 #undef ADD
   10525 
   10526 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10527 }
   10528 
   10529 /*
   10530  * wm_tbi_mediachange:	[ifmedia interface function]
   10531  *
   10532  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10533  */
   10534 static int
   10535 wm_tbi_mediachange(struct ifnet *ifp)
   10536 {
   10537 	struct wm_softc *sc = ifp->if_softc;
   10538 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10539 	uint32_t status;
   10540 	int i;
   10541 
   10542 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10543 		/* XXX need some work for >= 82571 and < 82575 */
   10544 		if (sc->sc_type < WM_T_82575)
   10545 			return 0;
   10546 	}
   10547 
   10548 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10549 	    || (sc->sc_type >= WM_T_82575))
   10550 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10551 
   10552 	sc->sc_ctrl &= ~CTRL_LRST;
   10553 	sc->sc_txcw = TXCW_ANE;
   10554 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10555 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10556 	else if (ife->ifm_media & IFM_FDX)
   10557 		sc->sc_txcw |= TXCW_FD;
   10558 	else
   10559 		sc->sc_txcw |= TXCW_HD;
   10560 
   10561 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10562 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10563 
   10564 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10565 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10566 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10567 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10568 	CSR_WRITE_FLUSH(sc);
   10569 	delay(1000);
   10570 
   10571 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10572 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10573 
    10574 	/*
    10575 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    10576 	 * optics detect a signal; on older chips a signal reads as 0.
    10577 	 */
   10578 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10579 		/* Have signal; wait for the link to come up. */
   10580 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10581 			delay(10000);
   10582 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10583 				break;
   10584 		}
   10585 
   10586 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10587 			    device_xname(sc->sc_dev),i));
   10588 
   10589 		status = CSR_READ(sc, WMREG_STATUS);
   10590 		DPRINTF(WM_DEBUG_LINK,
   10591 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10592 			device_xname(sc->sc_dev),status, STATUS_LU));
   10593 		if (status & STATUS_LU) {
   10594 			/* Link is up. */
   10595 			DPRINTF(WM_DEBUG_LINK,
   10596 			    ("%s: LINK: set media -> link up %s\n",
   10597 			    device_xname(sc->sc_dev),
   10598 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10599 
   10600 			/*
   10601 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10602 			 * so we should update sc->sc_ctrl
   10603 			 */
   10604 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10605 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10606 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10607 			if (status & STATUS_FD)
   10608 				sc->sc_tctl |=
   10609 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10610 			else
   10611 				sc->sc_tctl |=
   10612 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10613 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10614 				sc->sc_fcrtl |= FCRTL_XONE;
   10615 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10616 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10617 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10618 				      sc->sc_fcrtl);
   10619 			sc->sc_tbi_linkup = 1;
   10620 		} else {
   10621 			if (i == WM_LINKUP_TIMEOUT)
   10622 				wm_check_for_link(sc);
   10623 			/* Link is down. */
   10624 			DPRINTF(WM_DEBUG_LINK,
   10625 			    ("%s: LINK: set media -> link down\n",
   10626 			    device_xname(sc->sc_dev)));
   10627 			sc->sc_tbi_linkup = 0;
   10628 		}
   10629 	} else {
   10630 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10631 		    device_xname(sc->sc_dev)));
   10632 		sc->sc_tbi_linkup = 0;
   10633 	}
   10634 
   10635 	wm_tbi_serdes_set_linkled(sc);
   10636 
   10637 	return 0;
   10638 }
   10639 
   10640 /*
   10641  * wm_tbi_mediastatus:	[ifmedia interface function]
   10642  *
   10643  *	Get the current interface media status on a 1000BASE-X device.
   10644  */
   10645 static void
   10646 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10647 {
   10648 	struct wm_softc *sc = ifp->if_softc;
   10649 	uint32_t ctrl, status;
   10650 
   10651 	ifmr->ifm_status = IFM_AVALID;
   10652 	ifmr->ifm_active = IFM_ETHER;
   10653 
   10654 	status = CSR_READ(sc, WMREG_STATUS);
   10655 	if ((status & STATUS_LU) == 0) {
   10656 		ifmr->ifm_active |= IFM_NONE;
   10657 		return;
   10658 	}
   10659 
   10660 	ifmr->ifm_status |= IFM_ACTIVE;
   10661 	/* Only 82545 is LX */
   10662 	if (sc->sc_type == WM_T_82545)
   10663 		ifmr->ifm_active |= IFM_1000_LX;
   10664 	else
   10665 		ifmr->ifm_active |= IFM_1000_SX;
   10666 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10667 		ifmr->ifm_active |= IFM_FDX;
   10668 	else
   10669 		ifmr->ifm_active |= IFM_HDX;
   10670 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10671 	if (ctrl & CTRL_RFCE)
   10672 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10673 	if (ctrl & CTRL_TFCE)
   10674 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10675 }
   10676 
   10677 /* XXX TBI only */
   10678 static int
   10679 wm_check_for_link(struct wm_softc *sc)
   10680 {
   10681 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10682 	uint32_t rxcw;
   10683 	uint32_t ctrl;
   10684 	uint32_t status;
   10685 	uint32_t sig;
   10686 
   10687 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10688 		/* XXX need some work for >= 82571 */
   10689 		if (sc->sc_type >= WM_T_82571) {
   10690 			sc->sc_tbi_linkup = 1;
   10691 			return 0;
   10692 		}
   10693 	}
   10694 
   10695 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10696 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10697 	status = CSR_READ(sc, WMREG_STATUS);
   10698 
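          	/*
          	 * sig is the value CTRL_SWDPIN(1) takes when the optics see a
          	 * signal: the pin bit on chips newer than the 82544, 0 on
          	 * older chips.
          	 */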
   10699 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10700 
   10701 	DPRINTF(WM_DEBUG_LINK,
   10702 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10703 		device_xname(sc->sc_dev), __func__,
   10704 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10705 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10706 
   10707 	/*
   10708 	 * SWDPIN   LU RXCW
   10709 	 *      0    0    0
   10710 	 *      0    0    1	(should not happen)
   10711 	 *      0    1    0	(should not happen)
   10712 	 *      0    1    1	(should not happen)
   10713 	 *      1    0    0	Disable autonego and force linkup
   10714 	 *      1    0    1	got /C/ but not linkup yet
   10715 	 *      1    1    0	(linkup)
   10716 	 *      1    1    1	If IFM_AUTO, back to autonego
   10717 	 *
   10718 	 */
   10719 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10720 	    && ((status & STATUS_LU) == 0)
   10721 	    && ((rxcw & RXCW_C) == 0)) {
   10722 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10723 			__func__));
   10724 		sc->sc_tbi_linkup = 0;
   10725 		/* Disable auto-negotiation in the TXCW register */
   10726 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10727 
   10728 		/*
   10729 		 * Force link-up and also force full-duplex.
   10730 		 *
		 * NOTE: CTRL will update TFCE and RFCE automatically,
   10732 		 * so we should update sc->sc_ctrl
   10733 		 */
   10734 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10735 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10736 	} else if (((status & STATUS_LU) != 0)
   10737 	    && ((rxcw & RXCW_C) != 0)
   10738 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10739 		sc->sc_tbi_linkup = 1;
   10740 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10741 			__func__));
   10742 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10743 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10744 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10745 	    && ((rxcw & RXCW_C) != 0)) {
   10746 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10747 	} else {
   10748 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10749 			status));
   10750 	}
   10751 
   10752 	return 0;
   10753 }
   10754 
   10755 /*
   10756  * wm_tbi_tick:
   10757  *
   10758  *	Check the link on TBI devices.
   10759  *	This function acts as mii_tick().
   10760  */
   10761 static void
   10762 wm_tbi_tick(struct wm_softc *sc)
   10763 {
   10764 	struct mii_data *mii = &sc->sc_mii;
   10765 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10766 	uint32_t status;
   10767 
   10768 	KASSERT(WM_CORE_LOCKED(sc));
   10769 
   10770 	status = CSR_READ(sc, WMREG_STATUS);
   10771 
   10772 	/* XXX is this needed? */
   10773 	(void)CSR_READ(sc, WMREG_RXCW);
   10774 	(void)CSR_READ(sc, WMREG_CTRL);
   10775 
   10776 	/* set link status */
   10777 	if ((status & STATUS_LU) == 0) {
   10778 		DPRINTF(WM_DEBUG_LINK,
   10779 		    ("%s: LINK: checklink -> down\n",
   10780 			device_xname(sc->sc_dev)));
   10781 		sc->sc_tbi_linkup = 0;
   10782 	} else if (sc->sc_tbi_linkup == 0) {
   10783 		DPRINTF(WM_DEBUG_LINK,
   10784 		    ("%s: LINK: checklink -> up %s\n",
   10785 			device_xname(sc->sc_dev),
   10786 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10787 		sc->sc_tbi_linkup = 1;
   10788 		sc->sc_tbi_serdes_ticks = 0;
   10789 	}
   10790 
   10791 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10792 		goto setled;
   10793 
   10794 	if ((status & STATUS_LU) == 0) {
   10795 		sc->sc_tbi_linkup = 0;
   10796 		/* If the timer expired, retry autonegotiation */
   10797 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10798 		    && (++sc->sc_tbi_serdes_ticks
   10799 			>= sc->sc_tbi_serdes_anegticks)) {
   10800 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10801 			sc->sc_tbi_serdes_ticks = 0;
   10802 			/*
   10803 			 * Reset the link, and let autonegotiation do
   10804 			 * its thing
   10805 			 */
   10806 			sc->sc_ctrl |= CTRL_LRST;
   10807 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10808 			CSR_WRITE_FLUSH(sc);
   10809 			delay(1000);
   10810 			sc->sc_ctrl &= ~CTRL_LRST;
   10811 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10812 			CSR_WRITE_FLUSH(sc);
   10813 			delay(1000);
   10814 			CSR_WRITE(sc, WMREG_TXCW,
   10815 			    sc->sc_txcw & ~TXCW_ANE);
   10816 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10817 		}
   10818 	}
   10819 
   10820 setled:
   10821 	wm_tbi_serdes_set_linkled(sc);
   10822 }
   10823 
   10824 /* SERDES related */
   10825 static void
   10826 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10827 {
   10828 	uint32_t reg;
   10829 
   10830 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10831 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10832 		return;
   10833 
   10834 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10835 	reg |= PCS_CFG_PCS_EN;
   10836 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10837 
   10838 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10839 	reg &= ~CTRL_EXT_SWDPIN(3);
   10840 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10841 	CSR_WRITE_FLUSH(sc);
   10842 }
   10843 
   10844 static int
   10845 wm_serdes_mediachange(struct ifnet *ifp)
   10846 {
   10847 	struct wm_softc *sc = ifp->if_softc;
   10848 	bool pcs_autoneg = true; /* XXX */
   10849 	uint32_t ctrl_ext, pcs_lctl, reg;
   10850 
   10851 	/* XXX Currently, this function is not called on 8257[12] */
   10852 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10853 	    || (sc->sc_type >= WM_T_82575))
   10854 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10855 
   10856 	wm_serdes_power_up_link_82575(sc);
   10857 
   10858 	sc->sc_ctrl |= CTRL_SLU;
   10859 
   10860 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10861 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10862 
   10863 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10864 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10865 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10866 	case CTRL_EXT_LINK_MODE_SGMII:
   10867 		pcs_autoneg = true;
   10868 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10869 		break;
   10870 	case CTRL_EXT_LINK_MODE_1000KX:
   10871 		pcs_autoneg = false;
   10872 		/* FALLTHROUGH */
   10873 	default:
   10874 		if ((sc->sc_type == WM_T_82575)
   10875 		    || (sc->sc_type == WM_T_82576)) {
   10876 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10877 				pcs_autoneg = false;
   10878 		}
   10879 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10880 		    | CTRL_FRCFDX;
   10881 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10882 	}
   10883 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10884 
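          	/*
          	 * With PCS autonegotiation, advertise symmetric and asymmetric
          	 * pause and restart negotiation; otherwise force the speed,
          	 * duplex and flow control settings chosen above.
          	 */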
   10885 	if (pcs_autoneg) {
   10886 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10887 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10888 
   10889 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10890 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10891 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10892 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10893 	} else
   10894 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10895 
   10896 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10897 
   10899 	return 0;
   10900 }
   10901 
   10902 static void
   10903 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10904 {
   10905 	struct wm_softc *sc = ifp->if_softc;
   10906 	struct mii_data *mii = &sc->sc_mii;
   10907 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10908 	uint32_t pcs_adv, pcs_lpab, reg;
   10909 
   10910 	ifmr->ifm_status = IFM_AVALID;
   10911 	ifmr->ifm_active = IFM_ETHER;
   10912 
   10913 	/* Check PCS */
   10914 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10915 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10916 		ifmr->ifm_active |= IFM_NONE;
   10917 		sc->sc_tbi_linkup = 0;
   10918 		goto setled;
   10919 	}
   10920 
   10921 	sc->sc_tbi_linkup = 1;
   10922 	ifmr->ifm_status |= IFM_ACTIVE;
   10923 	if (sc->sc_type == WM_T_I354) {
   10924 		uint32_t status;
   10925 
   10926 		status = CSR_READ(sc, WMREG_STATUS);
   10927 		if (((status & STATUS_2P5_SKU) != 0)
   10928 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10929 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10930 		} else
   10931 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10932 	} else {
   10933 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10934 		case PCS_LSTS_SPEED_10:
   10935 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10936 			break;
   10937 		case PCS_LSTS_SPEED_100:
   10938 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10939 			break;
   10940 		case PCS_LSTS_SPEED_1000:
   10941 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10942 			break;
   10943 		default:
   10944 			device_printf(sc->sc_dev, "Unknown speed\n");
   10945 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10946 			break;
   10947 		}
   10948 	}
   10949 	if ((reg & PCS_LSTS_FDX) != 0)
   10950 		ifmr->ifm_active |= IFM_FDX;
   10951 	else
   10952 		ifmr->ifm_active |= IFM_HDX;
   10953 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10954 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10955 		/* Check flow */
   10956 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10957 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10958 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10959 			goto setled;
   10960 		}
   10961 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10962 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10963 		DPRINTF(WM_DEBUG_LINK,
   10964 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10965 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10966 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10967 			mii->mii_media_active |= IFM_FLOW
   10968 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10969 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10970 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10971 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10972 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10973 			mii->mii_media_active |= IFM_FLOW
   10974 			    | IFM_ETH_TXPAUSE;
   10975 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10976 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10977 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10978 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10979 			mii->mii_media_active |= IFM_FLOW
   10980 			    | IFM_ETH_RXPAUSE;
   10981 		}
   10982 	}
   10983 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10984 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10985 setled:
   10986 	wm_tbi_serdes_set_linkled(sc);
   10987 }
   10988 
   10989 /*
   10990  * wm_serdes_tick:
   10991  *
   10992  *	Check the link on serdes devices.
   10993  */
   10994 static void
   10995 wm_serdes_tick(struct wm_softc *sc)
   10996 {
   10997 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10998 	struct mii_data *mii = &sc->sc_mii;
   10999 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11000 	uint32_t reg;
   11001 
   11002 	KASSERT(WM_CORE_LOCKED(sc));
   11003 
   11004 	mii->mii_media_status = IFM_AVALID;
   11005 	mii->mii_media_active = IFM_ETHER;
   11006 
   11007 	/* Check PCS */
   11008 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11009 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11010 		mii->mii_media_status |= IFM_ACTIVE;
   11011 		sc->sc_tbi_linkup = 1;
   11012 		sc->sc_tbi_serdes_ticks = 0;
   11013 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11014 		if ((reg & PCS_LSTS_FDX) != 0)
   11015 			mii->mii_media_active |= IFM_FDX;
   11016 		else
   11017 			mii->mii_media_active |= IFM_HDX;
   11018 	} else {
   11019 		mii->mii_media_status |= IFM_NONE;
   11020 		sc->sc_tbi_linkup = 0;
   11021 		/* If the timer expired, retry autonegotiation */
   11022 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11023 		    && (++sc->sc_tbi_serdes_ticks
   11024 			>= sc->sc_tbi_serdes_anegticks)) {
   11025 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11026 			sc->sc_tbi_serdes_ticks = 0;
   11027 			/* XXX */
   11028 			wm_serdes_mediachange(ifp);
   11029 		}
   11030 	}
   11031 
   11032 	wm_tbi_serdes_set_linkled(sc);
   11033 }
   11034 
   11035 /* SFP related */
   11036 
   11037 static int
   11038 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11039 {
   11040 	uint32_t i2ccmd;
   11041 	int i;
   11042 
   11043 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11044 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11045 
   11046 	/* Poll the ready bit */
   11047 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11048 		delay(50);
   11049 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11050 		if (i2ccmd & I2CCMD_READY)
   11051 			break;
   11052 	}
   11053 	if ((i2ccmd & I2CCMD_READY) == 0)
   11054 		return -1;
   11055 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11056 		return -1;
   11057 
   11058 	*data = i2ccmd & 0x00ff;
   11059 
   11060 	return 0;
   11061 }
   11062 
   11063 static uint32_t
   11064 wm_sfp_get_media_type(struct wm_softc *sc)
   11065 {
   11066 	uint32_t ctrl_ext;
   11067 	uint8_t val = 0;
   11068 	int timeout = 3;
   11069 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11070 	int rv = -1;
   11071 
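          	/* Enable the I2C interface to reach the SFP module's EEPROM. */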
   11072 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11073 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11074 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11075 	CSR_WRITE_FLUSH(sc);
   11076 
   11077 	/* Read SFP module data */
   11078 	while (timeout) {
   11079 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11080 		if (rv == 0)
   11081 			break;
   11082 		delay(100*1000); /* XXX too big */
   11083 		timeout--;
   11084 	}
   11085 	if (rv != 0)
   11086 		goto out;
   11087 	switch (val) {
   11088 	case SFF_SFP_ID_SFF:
   11089 		aprint_normal_dev(sc->sc_dev,
   11090 		    "Module/Connector soldered to board\n");
   11091 		break;
   11092 	case SFF_SFP_ID_SFP:
   11093 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11094 		break;
   11095 	case SFF_SFP_ID_UNKNOWN:
   11096 		goto out;
   11097 	default:
   11098 		break;
   11099 	}
   11100 
   11101 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11102 	if (rv != 0) {
   11103 		goto out;
   11104 	}
   11105 
   11106 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11107 		mediatype = WM_MEDIATYPE_SERDES;
    11108 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11109 		sc->sc_flags |= WM_F_SGMII;
   11110 		mediatype = WM_MEDIATYPE_COPPER;
    11111 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11112 		sc->sc_flags |= WM_F_SGMII;
   11113 		mediatype = WM_MEDIATYPE_SERDES;
   11114 	}
   11115 
   11116 out:
   11117 	/* Restore I2C interface setting */
   11118 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11119 
   11120 	return mediatype;
   11121 }
   11122 
   11123 /*
   11124  * NVM related.
   11125  * Microwire, SPI (w/wo EERD) and Flash.
   11126  */
   11127 
   11128 /* Both spi and uwire */
   11129 
   11130 /*
   11131  * wm_eeprom_sendbits:
   11132  *
   11133  *	Send a series of bits to the EEPROM.
   11134  */
   11135 static void
   11136 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11137 {
   11138 	uint32_t reg;
   11139 	int x;
   11140 
   11141 	reg = CSR_READ(sc, WMREG_EECD);
   11142 
   11143 	for (x = nbits; x > 0; x--) {
   11144 		if (bits & (1U << (x - 1)))
   11145 			reg |= EECD_DI;
   11146 		else
   11147 			reg &= ~EECD_DI;
   11148 		CSR_WRITE(sc, WMREG_EECD, reg);
   11149 		CSR_WRITE_FLUSH(sc);
   11150 		delay(2);
   11151 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11152 		CSR_WRITE_FLUSH(sc);
   11153 		delay(2);
   11154 		CSR_WRITE(sc, WMREG_EECD, reg);
   11155 		CSR_WRITE_FLUSH(sc);
   11156 		delay(2);
   11157 	}
   11158 }
   11159 
   11160 /*
   11161  * wm_eeprom_recvbits:
   11162  *
   11163  *	Receive a series of bits from the EEPROM.
   11164  */
   11165 static void
   11166 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11167 {
   11168 	uint32_t reg, val;
   11169 	int x;
   11170 
   11171 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11172 
   11173 	val = 0;
   11174 	for (x = nbits; x > 0; x--) {
   11175 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11176 		CSR_WRITE_FLUSH(sc);
   11177 		delay(2);
   11178 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11179 			val |= (1U << (x - 1));
   11180 		CSR_WRITE(sc, WMREG_EECD, reg);
   11181 		CSR_WRITE_FLUSH(sc);
   11182 		delay(2);
   11183 	}
   11184 	*valp = val;
   11185 }
   11186 
   11187 /* Microwire */
   11188 
   11189 /*
   11190  * wm_nvm_read_uwire:
   11191  *
   11192  *	Read a word from the EEPROM using the MicroWire protocol.
   11193  */
   11194 static int
   11195 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11196 {
   11197 	uint32_t reg, val;
   11198 	int i;
   11199 
   11200 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11201 		device_xname(sc->sc_dev), __func__));
   11202 
   11203 	for (i = 0; i < wordcnt; i++) {
   11204 		/* Clear SK and DI. */
   11205 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11206 		CSR_WRITE(sc, WMREG_EECD, reg);
   11207 
   11208 		/*
   11209 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11210 		 * and Xen.
   11211 		 *
   11212 		 * We use this workaround only for 82540 because qemu's
    11213 		 * e1000 acts as an 82540.
   11214 		 */
   11215 		if (sc->sc_type == WM_T_82540) {
   11216 			reg |= EECD_SK;
   11217 			CSR_WRITE(sc, WMREG_EECD, reg);
   11218 			reg &= ~EECD_SK;
   11219 			CSR_WRITE(sc, WMREG_EECD, reg);
   11220 			CSR_WRITE_FLUSH(sc);
   11221 			delay(2);
   11222 		}
   11223 		/* XXX: end of workaround */
   11224 
   11225 		/* Set CHIP SELECT. */
   11226 		reg |= EECD_CS;
   11227 		CSR_WRITE(sc, WMREG_EECD, reg);
   11228 		CSR_WRITE_FLUSH(sc);
   11229 		delay(2);
   11230 
   11231 		/* Shift in the READ command. */
   11232 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11233 
   11234 		/* Shift in address. */
   11235 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11236 
   11237 		/* Shift out the data. */
   11238 		wm_eeprom_recvbits(sc, &val, 16);
   11239 		data[i] = val & 0xffff;
   11240 
   11241 		/* Clear CHIP SELECT. */
   11242 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11243 		CSR_WRITE(sc, WMREG_EECD, reg);
   11244 		CSR_WRITE_FLUSH(sc);
   11245 		delay(2);
   11246 	}
   11247 
   11248 	return 0;
   11249 }
   11250 
   11251 /* SPI */
   11252 
   11253 /*
   11254  * Set SPI and FLASH related information from the EECD register.
   11255  * For 82541 and 82547, the word size is taken from EEPROM.
   11256  */
   11257 static int
   11258 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11259 {
   11260 	int size;
   11261 	uint32_t reg;
   11262 	uint16_t data;
   11263 
   11264 	reg = CSR_READ(sc, WMREG_EECD);
   11265 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11266 
   11267 	/* Read the size of NVM from EECD by default */
   11268 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11269 	switch (sc->sc_type) {
   11270 	case WM_T_82541:
   11271 	case WM_T_82541_2:
   11272 	case WM_T_82547:
   11273 	case WM_T_82547_2:
   11274 		/* Set dummy value to access EEPROM */
   11275 		sc->sc_nvm_wordsize = 64;
   11276 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11277 		reg = data;
   11278 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11279 		if (size == 0)
   11280 			size = 6; /* 64 word size */
   11281 		else
   11282 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11283 		break;
   11284 	case WM_T_80003:
   11285 	case WM_T_82571:
   11286 	case WM_T_82572:
   11287 	case WM_T_82573: /* SPI case */
   11288 	case WM_T_82574: /* SPI case */
   11289 	case WM_T_82583: /* SPI case */
   11290 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11291 		if (size > 14)
   11292 			size = 14;
   11293 		break;
   11294 	case WM_T_82575:
   11295 	case WM_T_82576:
   11296 	case WM_T_82580:
   11297 	case WM_T_I350:
   11298 	case WM_T_I354:
   11299 	case WM_T_I210:
   11300 	case WM_T_I211:
   11301 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11302 		if (size > 15)
   11303 			size = 15;
   11304 		break;
   11305 	default:
   11306 		aprint_error_dev(sc->sc_dev,
   11307 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    11308 		return -1;
   11310 	}
   11311 
   11312 	sc->sc_nvm_wordsize = 1 << size;
   11313 
   11314 	return 0;
   11315 }
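
/*
 * Worked example of the sizing above (the field value is hypothetical;
 * it depends on the part): on an 82571 whose EECD size field reads 4,
 * size becomes 4 + NVM_WORD_SIZE_BASE_SHIFT (6) = 10, so sc_nvm_wordsize
 * ends up as 1 << 10 = 1024 16-bit words.
 */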
   11316 
   11317 /*
   11318  * wm_nvm_ready_spi:
   11319  *
   11320  *	Wait for a SPI EEPROM to be ready for commands.
   11321  */
   11322 static int
   11323 wm_nvm_ready_spi(struct wm_softc *sc)
   11324 {
   11325 	uint32_t val;
   11326 	int usec;
   11327 
   11328 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11329 		device_xname(sc->sc_dev), __func__));
   11330 
   11331 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11332 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11333 		wm_eeprom_recvbits(sc, &val, 8);
   11334 		if ((val & SPI_SR_RDY) == 0)
   11335 			break;
   11336 	}
   11337 	if (usec >= SPI_MAX_RETRIES) {
    11338 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11339 		return 1;
   11340 	}
   11341 	return 0;
   11342 }
   11343 
   11344 /*
   11345  * wm_nvm_read_spi:
   11346  *
    11347  *	Read a word from the EEPROM using the SPI protocol.
   11348  */
   11349 static int
   11350 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11351 {
   11352 	uint32_t reg, val;
   11353 	int i;
   11354 	uint8_t opc;
   11355 
   11356 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11357 		device_xname(sc->sc_dev), __func__));
   11358 
   11359 	/* Clear SK and CS. */
   11360 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11361 	CSR_WRITE(sc, WMREG_EECD, reg);
   11362 	CSR_WRITE_FLUSH(sc);
   11363 	delay(2);
   11364 
   11365 	if (wm_nvm_ready_spi(sc))
   11366 		return 1;
   11367 
   11368 	/* Toggle CS to flush commands. */
   11369 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11370 	CSR_WRITE_FLUSH(sc);
   11371 	delay(2);
   11372 	CSR_WRITE(sc, WMREG_EECD, reg);
   11373 	CSR_WRITE_FLUSH(sc);
   11374 	delay(2);
   11375 
   11376 	opc = SPI_OPC_READ;
   11377 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11378 		opc |= SPI_OPC_A8;
   11379 
   11380 	wm_eeprom_sendbits(sc, opc, 8);
   11381 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11382 
   11383 	for (i = 0; i < wordcnt; i++) {
   11384 		wm_eeprom_recvbits(sc, &val, 16);
   11385 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11386 	}
   11387 
   11388 	/* Raise CS and clear SK. */
   11389 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11390 	CSR_WRITE(sc, WMREG_EECD, reg);
   11391 	CSR_WRITE_FLUSH(sc);
   11392 	delay(2);
   11393 
   11394 	return 0;
   11395 }
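
/*
 * Worked example (illustrative): with 8 address bits, reading word 0x90
 * sets SPI_OPC_A8 because the byte address 0x120 (0x90 << 1) no longer
 * fits in 8 bits; the A8 opcode bit supplies the ninth address bit.  The
 * 16 bits clocked back arrive most significant byte first, hence the
 * byte swap when storing data[i].
 */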
   11396 
    11397 /* Reading with the EERD register */
   11398 
   11399 static int
   11400 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11401 {
   11402 	uint32_t attempts = 100000;
   11403 	uint32_t i, reg = 0;
   11404 	int32_t done = -1;
   11405 
   11406 	for (i = 0; i < attempts; i++) {
   11407 		reg = CSR_READ(sc, rw);
   11408 
   11409 		if (reg & EERD_DONE) {
   11410 			done = 0;
   11411 			break;
   11412 		}
   11413 		delay(5);
   11414 	}
   11415 
   11416 	return done;
   11417 }
   11418 
   11419 static int
   11420 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11421     uint16_t *data)
   11422 {
   11423 	int i, eerd = 0;
   11424 	int error = 0;
   11425 
   11426 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11427 		device_xname(sc->sc_dev), __func__));
   11428 
   11429 	for (i = 0; i < wordcnt; i++) {
   11430 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11431 
   11432 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11433 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11434 		if (error != 0)
   11435 			break;
   11436 
   11437 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11438 	}
   11439 
   11440 	return error;
   11441 }
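
/*
 * Minimal usage sketch (hypothetical caller; real callers go through the
 * wm_nvm_read() dispatcher below):
 *
 *	uint16_t buf[2];
 *	if (wm_nvm_read_eerd(sc, 0x10, 2, buf) != 0)
 *		return;		(the EERD DONE poll timed out)
 *
 * Each word costs one EERD write plus a wm_poll_eerd_eewr_done() poll.
 */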
   11442 
   11443 /* Flash */
   11444 
   11445 static int
   11446 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11447 {
   11448 	uint32_t eecd;
   11449 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11450 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11451 	uint8_t sig_byte = 0;
   11452 
   11453 	switch (sc->sc_type) {
   11454 	case WM_T_PCH_SPT:
   11455 		/*
   11456 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11457 		 * sector valid bits from the NVM.
   11458 		 */
   11459 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11460 		if ((*bank == 0) || (*bank == 1)) {
   11461 			aprint_error_dev(sc->sc_dev,
   11462 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11463 				*bank);
   11464 			return -1;
   11465 		} else {
   11466 			*bank = *bank - 2;
   11467 			return 0;
   11468 		}
   11469 	case WM_T_ICH8:
   11470 	case WM_T_ICH9:
   11471 		eecd = CSR_READ(sc, WMREG_EECD);
   11472 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11473 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11474 			return 0;
   11475 		}
   11476 		/* FALLTHROUGH */
   11477 	default:
   11478 		/* Default to 0 */
   11479 		*bank = 0;
   11480 
   11481 		/* Check bank 0 */
   11482 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11483 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11484 			*bank = 0;
   11485 			return 0;
   11486 		}
   11487 
   11488 		/* Check bank 1 */
   11489 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11490 		    &sig_byte);
   11491 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11492 			*bank = 1;
   11493 			return 0;
   11494 		}
   11495 	}
   11496 
   11497 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11498 		device_xname(sc->sc_dev)));
   11499 	return -1;
   11500 }
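
/*
 * Summary of the fallback path above (a restatement, not new logic):
 * bank 0's signature byte sits at byte offset ICH_NVM_SIG_WORD * 2 + 1,
 * and bank 1's sits one full flash bank (sc_ich8_flash_bank_size words,
 * hence * sizeof(uint16_t) bytes) later; whichever byte masks to
 * ICH_NVM_SIG_VALUE selects the bank.
 */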
   11501 
   11502 /******************************************************************************
   11503  * This function does initial flash setup so that a new read/write/erase cycle
   11504  * can be started.
   11505  *
   11506  * sc - The pointer to the hw structure
   11507  ****************************************************************************/
   11508 static int32_t
   11509 wm_ich8_cycle_init(struct wm_softc *sc)
   11510 {
   11511 	uint16_t hsfsts;
   11512 	int32_t error = 1;
   11513 	int32_t i     = 0;
   11514 
   11515 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11516 
    11517 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
   11518 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11519 		return error;
   11520 	}
   11521 
    11522 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   11524 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11525 
   11526 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11527 
    11528 	/*
    11529 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11530 	 * check against before starting a new cycle, or the FDONE bit
    11531 	 * should be changed in the hardware so that it reads as 1 after a
    11532 	 * hardware reset, which could then indicate whether a cycle is in
    11533 	 * progress or has completed.  We should also have some software
    11534 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
    11535 	 * so that accesses by two threads are serialized, or some way to
    11536 	 * keep two threads from starting a cycle at the same time.
    11537 	 */
   11538 
   11539 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11540 		/*
   11541 		 * There is no cycle running at present, so we can start a
   11542 		 * cycle
   11543 		 */
   11544 
   11545 		/* Begin by setting Flash Cycle Done. */
   11546 		hsfsts |= HSFSTS_DONE;
   11547 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11548 		error = 0;
   11549 	} else {
   11550 		/*
    11551 		 * Otherwise poll for some time so the current cycle has
    11552 		 * a chance to end before giving up.
   11553 		 */
   11554 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11555 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11556 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11557 				error = 0;
   11558 				break;
   11559 			}
   11560 			delay(1);
   11561 		}
   11562 		if (error == 0) {
   11563 			/*
    11564 			 * We successfully waited for the previous cycle
    11565 			 * to end; now set the Flash Cycle Done.
   11566 			 */
   11567 			hsfsts |= HSFSTS_DONE;
   11568 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11569 		}
   11570 	}
   11571 	return error;
   11572 }
   11573 
   11574 /******************************************************************************
   11575  * This function starts a flash cycle and waits for its completion
   11576  *
   11577  * sc - The pointer to the hw structure
   11578  ****************************************************************************/
   11579 static int32_t
   11580 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11581 {
   11582 	uint16_t hsflctl;
   11583 	uint16_t hsfsts;
   11584 	int32_t error = 1;
   11585 	uint32_t i = 0;
   11586 
   11587 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11588 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11589 	hsflctl |= HSFCTL_GO;
   11590 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11591 
   11592 	/* Wait till FDONE bit is set to 1 */
   11593 	do {
   11594 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11595 		if (hsfsts & HSFSTS_DONE)
   11596 			break;
   11597 		delay(1);
   11598 		i++;
   11599 	} while (i < timeout);
    11600 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11601 		error = 0;
   11602 
   11603 	return error;
   11604 }
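
/*
 * Sketch of how the two helpers above combine into one read cycle
 * (simplified from wm_read_ich8_data() below; not compiled):
 *
 *	wm_ich8_cycle_init(sc);			(clear stale FCERR/DAEL)
 *	hsflctl = bcount_bits | (ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT);
 *	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
 *	ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, addr);
 *	wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
 *	data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
 */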
   11605 
   11606 /******************************************************************************
   11607  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11608  *
   11609  * sc - The pointer to the hw structure
   11610  * index - The index of the byte or word to read.
   11611  * size - Size of data to read, 1=byte 2=word, 4=dword
   11612  * data - Pointer to the word to store the value read.
   11613  *****************************************************************************/
   11614 static int32_t
   11615 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11616     uint32_t size, uint32_t *data)
   11617 {
   11618 	uint16_t hsfsts;
   11619 	uint16_t hsflctl;
   11620 	uint32_t flash_linear_address;
   11621 	uint32_t flash_data = 0;
   11622 	int32_t error = 1;
   11623 	int32_t count = 0;
   11624 
    11625 	if (size < 1 || size > 4 || data == NULL ||
   11626 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11627 		return error;
   11628 
   11629 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11630 	    sc->sc_ich8_flash_base;
   11631 
   11632 	do {
   11633 		delay(1);
   11634 		/* Steps */
   11635 		error = wm_ich8_cycle_init(sc);
   11636 		if (error)
   11637 			break;
   11638 
   11639 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11640 		/* The bcount field holds size - 1: 0/1/3 select 1/2/4 bytes. */
   11641 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11642 		    & HSFCTL_BCOUNT_MASK;
   11643 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11644 		if (sc->sc_type == WM_T_PCH_SPT) {
   11645 			/*
    11646 			 * In SPT, this register is in LAN memory space,
    11647 			 * not flash.  Therefore, only 32 bit access is
    11648 			 * supported.
   11648 			 */
   11649 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11650 			    (uint32_t)hsflctl);
   11651 		} else
   11652 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11653 
   11654 		/*
   11655 		 * Write the last 24 bits of index into Flash Linear address
   11656 		 * field in Flash Address
   11657 		 */
   11658 		/* TODO: TBD maybe check the index against the size of flash */
   11659 
   11660 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11661 
   11662 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11663 
   11664 		/*
    11665 		 * Check whether FCERR is set to 1.  If it is, clear it and
    11666 		 * retry the whole sequence a few more times; otherwise read
    11667 		 * in (shift in) the Flash Data0, least significant byte
    11668 		 * first.
   11669 		 */
   11670 		if (error == 0) {
   11671 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11672 			if (size == 1)
   11673 				*data = (uint8_t)(flash_data & 0x000000FF);
   11674 			else if (size == 2)
   11675 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11676 			else if (size == 4)
   11677 				*data = (uint32_t)flash_data;
   11678 			break;
   11679 		} else {
   11680 			/*
   11681 			 * If we've gotten here, then things are probably
   11682 			 * completely hosed, but if the error condition is
   11683 			 * detected, it won't hurt to give it another try...
   11684 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11685 			 */
   11686 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11687 			if (hsfsts & HSFSTS_ERR) {
   11688 				/* Repeat for some time before giving up. */
   11689 				continue;
   11690 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11691 				break;
   11692 		}
   11693 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11694 
   11695 	return error;
   11696 }
   11697 
   11698 /******************************************************************************
   11699  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11700  *
   11701  * sc - pointer to wm_hw structure
   11702  * index - The index of the byte to read.
   11703  * data - Pointer to a byte to store the value read.
   11704  *****************************************************************************/
   11705 static int32_t
   11706 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11707 {
   11708 	int32_t status;
   11709 	uint32_t word = 0;
   11710 
   11711 	status = wm_read_ich8_data(sc, index, 1, &word);
   11712 	if (status == 0)
   11713 		*data = (uint8_t)word;
   11714 	else
   11715 		*data = 0;
   11716 
   11717 	return status;
   11718 }
   11719 
   11720 /******************************************************************************
   11721  * Reads a word from the NVM using the ICH8 flash access registers.
   11722  *
   11723  * sc - pointer to wm_hw structure
   11724  * index - The starting byte index of the word to read.
   11725  * data - Pointer to a word to store the value read.
   11726  *****************************************************************************/
   11727 static int32_t
   11728 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11729 {
   11730 	int32_t status;
   11731 	uint32_t word = 0;
   11732 
   11733 	status = wm_read_ich8_data(sc, index, 2, &word);
   11734 	if (status == 0)
   11735 		*data = (uint16_t)word;
   11736 	else
   11737 		*data = 0;
   11738 
   11739 	return status;
   11740 }
   11741 
   11742 /******************************************************************************
   11743  * Reads a dword from the NVM using the ICH8 flash access registers.
   11744  *
   11745  * sc - pointer to wm_hw structure
    11746  * index - The starting byte index of the dword to read.
    11747  * data - Pointer to a dword to store the value read.
   11748  *****************************************************************************/
   11749 static int32_t
   11750 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11751 {
   11752 	int32_t status;
   11753 
   11754 	status = wm_read_ich8_data(sc, index, 4, data);
   11755 	return status;
   11756 }
   11757 
   11758 /******************************************************************************
   11759  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11760  * register.
   11761  *
   11762  * sc - Struct containing variables accessed by shared code
   11763  * offset - offset of word in the EEPROM to read
   11764  * data - word read from the EEPROM
   11765  * words - number of words to read
   11766  *****************************************************************************/
   11767 static int
   11768 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11769 {
   11770 	int32_t  error = 0;
   11771 	uint32_t flash_bank = 0;
   11772 	uint32_t act_offset = 0;
   11773 	uint32_t bank_offset = 0;
   11774 	uint16_t word = 0;
   11775 	uint16_t i = 0;
   11776 
   11777 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11778 		device_xname(sc->sc_dev), __func__));
   11779 
   11780 	/*
   11781 	 * We need to know which is the valid flash bank.  In the event
   11782 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11783 	 * managing flash_bank.  So it cannot be trusted and needs
   11784 	 * to be updated with each read.
   11785 	 */
   11786 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11787 	if (error) {
   11788 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11789 			device_xname(sc->sc_dev)));
   11790 		flash_bank = 0;
   11791 	}
   11792 
   11793 	/*
   11794 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11795 	 * size
   11796 	 */
   11797 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11798 
   11799 	error = wm_get_swfwhw_semaphore(sc);
   11800 	if (error) {
   11801 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11802 		    __func__);
   11803 		return error;
   11804 	}
   11805 
   11806 	for (i = 0; i < words; i++) {
   11807 		/* The NVM part needs a byte offset, hence * 2 */
   11808 		act_offset = bank_offset + ((offset + i) * 2);
   11809 		error = wm_read_ich8_word(sc, act_offset, &word);
   11810 		if (error) {
   11811 			aprint_error_dev(sc->sc_dev,
   11812 			    "%s: failed to read NVM\n", __func__);
   11813 			break;
   11814 		}
   11815 		data[i] = word;
   11816 	}
   11817 
   11818 	wm_put_swfwhw_semaphore(sc);
   11819 	return error;
   11820 }
   11821 
   11822 /******************************************************************************
   11823  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11824  * register.
   11825  *
   11826  * sc - Struct containing variables accessed by shared code
   11827  * offset - offset of word in the EEPROM to read
   11828  * data - word read from the EEPROM
   11829  * words - number of words to read
   11830  *****************************************************************************/
   11831 static int
   11832 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11833 {
   11834 	int32_t  error = 0;
   11835 	uint32_t flash_bank = 0;
   11836 	uint32_t act_offset = 0;
   11837 	uint32_t bank_offset = 0;
   11838 	uint32_t dword = 0;
   11839 	uint16_t i = 0;
   11840 
   11841 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11842 		device_xname(sc->sc_dev), __func__));
   11843 
   11844 	/*
   11845 	 * We need to know which is the valid flash bank.  In the event
   11846 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11847 	 * managing flash_bank.  So it cannot be trusted and needs
   11848 	 * to be updated with each read.
   11849 	 */
   11850 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11851 	if (error) {
   11852 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11853 			device_xname(sc->sc_dev)));
   11854 		flash_bank = 0;
   11855 	}
   11856 
   11857 	/*
   11858 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11859 	 * size
   11860 	 */
   11861 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11862 
   11863 	error = wm_get_swfwhw_semaphore(sc);
   11864 	if (error) {
   11865 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11866 		    __func__);
   11867 		return error;
   11868 	}
   11869 
   11870 	for (i = 0; i < words; i++) {
   11871 		/* The NVM part needs a byte offset, hence * 2 */
   11872 		act_offset = bank_offset + ((offset + i) * 2);
   11873 		/* but we must read dword aligned, so mask ... */
   11874 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11875 		if (error) {
   11876 			aprint_error_dev(sc->sc_dev,
   11877 			    "%s: failed to read NVM\n", __func__);
   11878 			break;
   11879 		}
   11880 		/* ... and pick out low or high word */
   11881 		if ((act_offset & 0x2) == 0)
   11882 			data[i] = (uint16_t)(dword & 0xFFFF);
   11883 		else
   11884 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11885 	}
   11886 
   11887 	wm_put_swfwhw_semaphore(sc);
   11888 	return error;
   11889 }
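
/*
 * Worked example of the alignment dance above: with bank_offset 0 and
 * offset 3, act_offset is 6; the dword at byte 4 (6 & ~0x3) is read and,
 * since act_offset & 0x2 is set, its high 16 bits are returned.  A read
 * at offset 2 would take the low half of the same dword.
 */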
   11890 
   11891 /* iNVM */
   11892 
   11893 static int
   11894 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11895 {
    11896 	int32_t  rv = -1;	/* "not found" unless we match the word */
   11897 	uint32_t invm_dword;
   11898 	uint16_t i;
   11899 	uint8_t record_type, word_address;
   11900 
   11901 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11902 		device_xname(sc->sc_dev), __func__));
   11903 
   11904 	for (i = 0; i < INVM_SIZE; i++) {
   11905 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11906 		/* Get record type */
   11907 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11908 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11909 			break;
   11910 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11911 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11912 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11913 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11914 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11915 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11916 			if (word_address == address) {
   11917 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11918 				rv = 0;
   11919 				break;
   11920 			}
   11921 		}
   11922 	}
   11923 
   11924 	return rv;
   11925 }
   11926 
   11927 static int
   11928 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11929 {
   11930 	int rv = 0;
   11931 	int i;
   11932 
   11933 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11934 		device_xname(sc->sc_dev), __func__));
   11935 
   11936 	for (i = 0; i < words; i++) {
   11937 		switch (offset + i) {
   11938 		case NVM_OFF_MACADDR:
   11939 		case NVM_OFF_MACADDR1:
   11940 		case NVM_OFF_MACADDR2:
   11941 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11942 			if (rv != 0) {
   11943 				data[i] = 0xffff;
   11944 				rv = -1;
   11945 			}
   11946 			break;
   11947 		case NVM_OFF_CFG2:
   11948 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11949 			if (rv != 0) {
   11950 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11951 				rv = 0;
   11952 			}
   11953 			break;
   11954 		case NVM_OFF_CFG4:
   11955 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11956 			if (rv != 0) {
   11957 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11958 				rv = 0;
   11959 			}
   11960 			break;
   11961 		case NVM_OFF_LED_1_CFG:
   11962 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11963 			if (rv != 0) {
   11964 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11965 				rv = 0;
   11966 			}
   11967 			break;
   11968 		case NVM_OFF_LED_0_2_CFG:
   11969 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11970 			if (rv != 0) {
   11971 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11972 				rv = 0;
   11973 			}
   11974 			break;
   11975 		case NVM_OFF_ID_LED_SETTINGS:
   11976 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11977 			if (rv != 0) {
   11978 				*data = ID_LED_RESERVED_FFFF;
   11979 				rv = 0;
   11980 			}
   11981 			break;
   11982 		default:
   11983 			DPRINTF(WM_DEBUG_NVM,
   11984 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11985 			*data = NVM_RESERVED_WORD;
   11986 			break;
   11987 		}
   11988 	}
   11989 
   11990 	return rv;
   11991 }
   11992 
    11993 /* Locking, NVM type detection, checksum validation, version check and read */
   11994 
   11995 /*
   11996  * wm_nvm_acquire:
   11997  *
   11998  *	Perform the EEPROM handshake required on some chips.
   11999  */
   12000 static int
   12001 wm_nvm_acquire(struct wm_softc *sc)
   12002 {
   12003 	uint32_t reg;
   12004 	int x;
   12005 	int ret = 0;
   12006 
   12007 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12008 		device_xname(sc->sc_dev), __func__));
   12009 
   12010 	if (sc->sc_type >= WM_T_ICH8) {
   12011 		ret = wm_get_nvm_ich8lan(sc);
   12012 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12013 		ret = wm_get_swfwhw_semaphore(sc);
   12014 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12015 		/* This will also do wm_get_swsm_semaphore() if needed */
   12016 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12017 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12018 		ret = wm_get_swsm_semaphore(sc);
   12019 	}
   12020 
   12021 	if (ret) {
   12022 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12023 			__func__);
   12024 		return 1;
   12025 	}
   12026 
   12027 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12028 		reg = CSR_READ(sc, WMREG_EECD);
   12029 
   12030 		/* Request EEPROM access. */
   12031 		reg |= EECD_EE_REQ;
   12032 		CSR_WRITE(sc, WMREG_EECD, reg);
   12033 
   12034 		/* ..and wait for it to be granted. */
   12035 		for (x = 0; x < 1000; x++) {
   12036 			reg = CSR_READ(sc, WMREG_EECD);
   12037 			if (reg & EECD_EE_GNT)
   12038 				break;
   12039 			delay(5);
   12040 		}
   12041 		if ((reg & EECD_EE_GNT) == 0) {
   12042 			aprint_error_dev(sc->sc_dev,
   12043 			    "could not acquire EEPROM GNT\n");
   12044 			reg &= ~EECD_EE_REQ;
   12045 			CSR_WRITE(sc, WMREG_EECD, reg);
   12046 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12047 				wm_put_swfwhw_semaphore(sc);
   12048 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12049 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12050 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12051 				wm_put_swsm_semaphore(sc);
   12052 			return 1;
   12053 		}
   12054 	}
   12055 
   12056 	return 0;
   12057 }
   12058 
   12059 /*
   12060  * wm_nvm_release:
   12061  *
   12062  *	Release the EEPROM mutex.
   12063  */
   12064 static void
   12065 wm_nvm_release(struct wm_softc *sc)
   12066 {
   12067 	uint32_t reg;
   12068 
   12069 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12070 		device_xname(sc->sc_dev), __func__));
   12071 
   12072 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12073 		reg = CSR_READ(sc, WMREG_EECD);
   12074 		reg &= ~EECD_EE_REQ;
   12075 		CSR_WRITE(sc, WMREG_EECD, reg);
   12076 	}
   12077 
   12078 	if (sc->sc_type >= WM_T_ICH8) {
   12079 		wm_put_nvm_ich8lan(sc);
   12080 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12081 		wm_put_swfwhw_semaphore(sc);
   12082 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   12083 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12084 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12085 		wm_put_swsm_semaphore(sc);
   12086 }
   12087 
   12088 static int
   12089 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12090 {
   12091 	uint32_t eecd = 0;
   12092 
   12093 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12094 	    || sc->sc_type == WM_T_82583) {
   12095 		eecd = CSR_READ(sc, WMREG_EECD);
   12096 
   12097 		/* Isolate bits 15 & 16 */
   12098 		eecd = ((eecd >> 15) & 0x03);
   12099 
   12100 		/* If both bits are set, device is Flash type */
   12101 		if (eecd == 0x03)
   12102 			return 0;
   12103 	}
   12104 	return 1;
   12105 }
   12106 
   12107 static int
   12108 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12109 {
   12110 	uint32_t eec;
   12111 
   12112 	eec = CSR_READ(sc, WMREG_EEC);
   12113 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12114 		return 1;
   12115 
   12116 	return 0;
   12117 }
   12118 
   12119 /*
   12120  * wm_nvm_validate_checksum
   12121  *
   12122  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12123  */
   12124 static int
   12125 wm_nvm_validate_checksum(struct wm_softc *sc)
   12126 {
   12127 	uint16_t checksum;
   12128 	uint16_t eeprom_data;
   12129 #ifdef WM_DEBUG
   12130 	uint16_t csum_wordaddr, valid_checksum;
   12131 #endif
   12132 	int i;
   12133 
   12134 	checksum = 0;
   12135 
   12136 	/* Don't check for I211 */
   12137 	if (sc->sc_type == WM_T_I211)
   12138 		return 0;
   12139 
   12140 #ifdef WM_DEBUG
   12141 	if (sc->sc_type == WM_T_PCH_LPT) {
   12142 		csum_wordaddr = NVM_OFF_COMPAT;
   12143 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12144 	} else {
   12145 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12146 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12147 	}
   12148 
   12149 	/* Dump EEPROM image for debug */
   12150 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12151 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12152 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12153 		/* XXX PCH_SPT? */
   12154 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12155 		if ((eeprom_data & valid_checksum) == 0) {
   12156 			DPRINTF(WM_DEBUG_NVM,
   12157 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12158 				device_xname(sc->sc_dev), eeprom_data,
   12159 				    valid_checksum));
   12160 		}
   12161 	}
   12162 
   12163 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12164 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12165 		for (i = 0; i < NVM_SIZE; i++) {
   12166 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12167 				printf("XXXX ");
   12168 			else
   12169 				printf("%04hx ", eeprom_data);
   12170 			if (i % 8 == 7)
   12171 				printf("\n");
   12172 		}
   12173 	}
   12174 
   12175 #endif /* WM_DEBUG */
   12176 
   12177 	for (i = 0; i < NVM_SIZE; i++) {
   12178 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12179 			return 1;
   12180 		checksum += eeprom_data;
   12181 	}
   12182 
   12183 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12184 #ifdef WM_DEBUG
   12185 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12186 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12187 #endif
   12188 	}
   12189 
   12190 	return 0;
   12191 }
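
/*
 * Worked example of the checksum rule: NVM_CHECKSUM is 0xbaba, so if the
 * first NVM_SIZE - 1 words sum to 0x1234, the checksum word must hold
 * 0xbaba - 0x1234 = 0xa886 (mod 2^16).  Note that on a mismatch the
 * function above only warns (and only under WM_DEBUG); it still returns 0.
 */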
   12192 
   12193 static void
   12194 wm_nvm_version_invm(struct wm_softc *sc)
   12195 {
   12196 	uint32_t dword;
   12197 
   12198 	/*
    12199 	 * Linux's code to decode the version is very strange, so we
    12200 	 * don't follow that algorithm and just use word 61 as the
    12201 	 * document describes.  Perhaps it's not perfect though...
   12202 	 *
   12203 	 * Example:
   12204 	 *
   12205 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12206 	 */
   12207 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12208 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12209 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12210 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12211 }
   12212 
   12213 static void
   12214 wm_nvm_version(struct wm_softc *sc)
   12215 {
   12216 	uint16_t major, minor, build, patch;
   12217 	uint16_t uid0, uid1;
   12218 	uint16_t nvm_data;
   12219 	uint16_t off;
   12220 	bool check_version = false;
   12221 	bool check_optionrom = false;
   12222 	bool have_build = false;
   12223 	bool have_uid = true;
   12224 
   12225 	/*
   12226 	 * Version format:
   12227 	 *
   12228 	 * XYYZ
   12229 	 * X0YZ
   12230 	 * X0YY
   12231 	 *
   12232 	 * Example:
   12233 	 *
   12234 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12235 	 *	82571	0x50a6	5.10.6?
   12236 	 *	82572	0x506a	5.6.10?
   12237 	 *	82572EI	0x5069	5.6.9?
   12238 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12239 	 *		0x2013	2.1.3?
    12240 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12241 	 */
   12242 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12243 	switch (sc->sc_type) {
   12244 	case WM_T_82571:
   12245 	case WM_T_82572:
   12246 	case WM_T_82574:
   12247 	case WM_T_82583:
   12248 		check_version = true;
   12249 		check_optionrom = true;
   12250 		have_build = true;
   12251 		break;
   12252 	case WM_T_82575:
   12253 	case WM_T_82576:
   12254 	case WM_T_82580:
   12255 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12256 			check_version = true;
   12257 		break;
   12258 	case WM_T_I211:
   12259 		wm_nvm_version_invm(sc);
   12260 		have_uid = false;
   12261 		goto printver;
   12262 	case WM_T_I210:
   12263 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12264 			wm_nvm_version_invm(sc);
   12265 			have_uid = false;
   12266 			goto printver;
   12267 		}
   12268 		/* FALLTHROUGH */
   12269 	case WM_T_I350:
   12270 	case WM_T_I354:
   12271 		check_version = true;
   12272 		check_optionrom = true;
   12273 		break;
   12274 	default:
   12275 		return;
   12276 	}
   12277 	if (check_version) {
   12278 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12279 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12280 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12281 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12282 			build = nvm_data & NVM_BUILD_MASK;
   12283 			have_build = true;
   12284 		} else
   12285 			minor = nvm_data & 0x00ff;
   12286 
    12287 		/* Convert the BCD-coded minor nibbles to decimal */
   12288 		minor = (minor / 16) * 10 + (minor % 16);
   12289 		sc->sc_nvm_ver_major = major;
   12290 		sc->sc_nvm_ver_minor = minor;
   12291 
   12292 printver:
   12293 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12294 		    sc->sc_nvm_ver_minor);
   12295 		if (have_build) {
   12296 			sc->sc_nvm_ver_build = build;
   12297 			aprint_verbose(".%d", build);
   12298 		}
   12299 	}
   12300 	if (check_optionrom) {
   12301 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12302 		/* Option ROM Version */
   12303 		if ((off != 0x0000) && (off != 0xffff)) {
   12304 			off += NVM_COMBO_VER_OFF;
   12305 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12306 			wm_nvm_read(sc, off, 1, &uid0);
   12307 			if ((uid0 != 0) && (uid0 != 0xffff)
   12308 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12309 				/* 16bits */
   12310 				major = uid0 >> 8;
   12311 				build = (uid0 << 8) | (uid1 >> 8);
   12312 				patch = uid1 & 0x00ff;
   12313 				aprint_verbose(", option ROM Version %d.%d.%d",
   12314 				    major, build, patch);
   12315 			}
   12316 		}
   12317 	}
   12318 
    12319 	if (have_uid) {
          		/* Re-read UID1; the option ROM probe above reuses uid1 */
          		wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
    12320 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
    12321 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12322 	}
   12323 }
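
/*
 * Worked decode of the first 82571 example above (just tracing the code,
 * not new behaviour): for nvm_data 0x50a2, have_build is already true,
 * so major = 0x5, minor = 0x0a and build = 0x2; the nibble-to-decimal
 * conversion turns minor 0x0a into 10, and the version prints as 5.10.2,
 * matching the table's guess.
 */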
   12324 
   12325 /*
   12326  * wm_nvm_read:
   12327  *
   12328  *	Read data from the serial EEPROM.
   12329  */
   12330 static int
   12331 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12332 {
   12333 	int rv;
   12334 
   12335 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12336 		device_xname(sc->sc_dev), __func__));
   12337 
   12338 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12339 		return 1;
   12340 
   12341 	if (wm_nvm_acquire(sc))
   12342 		return 1;
   12343 
   12344 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12345 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12346 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12347 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12348 	else if (sc->sc_type == WM_T_PCH_SPT)
   12349 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12350 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12351 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12352 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12353 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12354 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12355 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12356 	else
   12357 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12358 
   12359 	wm_nvm_release(sc);
   12360 	return rv;
   12361 }
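
/*
 * Usage sketch (hypothetical caller): all of the access methods above
 * hide behind this dispatcher, so fetching the three Ethernet address
 * words is simply
 *
 *	uint16_t myea[3];
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea), myea))
 *		return;		(NVM invalid or the read failed)
 *
 * with acquire/release of the proper semaphore handled internally.
 */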
   12362 
   12363 /*
   12364  * Hardware semaphores.
    12365  * Very complex...
   12366  */
   12367 
   12368 static int
   12369 wm_get_null(struct wm_softc *sc)
   12370 {
   12371 
   12372 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12373 		device_xname(sc->sc_dev), __func__));
   12374 	return 0;
   12375 }
   12376 
   12377 static void
   12378 wm_put_null(struct wm_softc *sc)
   12379 {
   12380 
   12381 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12382 		device_xname(sc->sc_dev), __func__));
   12383 	return;
   12384 }
   12385 
   12386 /*
   12387  * Get hardware semaphore.
   12388  * Same as e1000_get_hw_semaphore_generic()
   12389  */
   12390 static int
   12391 wm_get_swsm_semaphore(struct wm_softc *sc)
   12392 {
   12393 	int32_t timeout;
   12394 	uint32_t swsm;
   12395 
   12396 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12397 		device_xname(sc->sc_dev), __func__));
   12398 	KASSERT(sc->sc_nvm_wordsize > 0);
   12399 
   12400 	/* Get the SW semaphore. */
   12401 	timeout = sc->sc_nvm_wordsize + 1;
   12402 	while (timeout) {
   12403 		swsm = CSR_READ(sc, WMREG_SWSM);
   12404 
   12405 		if ((swsm & SWSM_SMBI) == 0)
   12406 			break;
   12407 
   12408 		delay(50);
   12409 		timeout--;
   12410 	}
   12411 
   12412 	if (timeout == 0) {
   12413 		aprint_error_dev(sc->sc_dev,
   12414 		    "could not acquire SWSM SMBI\n");
   12415 		return 1;
   12416 	}
   12417 
   12418 	/* Get the FW semaphore. */
   12419 	timeout = sc->sc_nvm_wordsize + 1;
   12420 	while (timeout) {
   12421 		swsm = CSR_READ(sc, WMREG_SWSM);
   12422 		swsm |= SWSM_SWESMBI;
   12423 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12424 		/* If we managed to set the bit we got the semaphore. */
   12425 		swsm = CSR_READ(sc, WMREG_SWSM);
   12426 		if (swsm & SWSM_SWESMBI)
   12427 			break;
   12428 
   12429 		delay(50);
   12430 		timeout--;
   12431 	}
   12432 
   12433 	if (timeout == 0) {
   12434 		aprint_error_dev(sc->sc_dev,
   12435 		    "could not acquire SWSM SWESMBI\n");
   12436 		/* Release semaphores */
   12437 		wm_put_swsm_semaphore(sc);
   12438 		return 1;
   12439 	}
   12440 	return 0;
   12441 }
   12442 
   12443 /*
   12444  * Put hardware semaphore.
   12445  * Same as e1000_put_hw_semaphore_generic()
   12446  */
   12447 static void
   12448 wm_put_swsm_semaphore(struct wm_softc *sc)
   12449 {
   12450 	uint32_t swsm;
   12451 
   12452 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12453 		device_xname(sc->sc_dev), __func__));
   12454 
   12455 	swsm = CSR_READ(sc, WMREG_SWSM);
   12456 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12457 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12458 }
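
/*
 * Pairing sketch (hypothetical caller): every successful
 * wm_get_swsm_semaphore() must be matched by wm_put_swsm_semaphore(),
 * e.g.:
 *
 *	if (wm_get_swsm_semaphore(sc) != 0)
 *		return 1;
 *	(... touch the shared NVM/PHY resource ...)
 *	wm_put_swsm_semaphore(sc);
 */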
   12459 
   12460 /*
   12461  * Get SW/FW semaphore.
   12462  * Same as e1000_acquire_swfw_sync_82575().
   12463  */
   12464 static int
   12465 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12466 {
   12467 	uint32_t swfw_sync;
   12468 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12469 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12470 	int timeout = 200;
   12471 
   12472 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12473 		device_xname(sc->sc_dev), __func__));
   12474 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12475 
   12476 	for (timeout = 0; timeout < 200; timeout++) {
   12477 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12478 			if (wm_get_swsm_semaphore(sc)) {
   12479 				aprint_error_dev(sc->sc_dev,
   12480 				    "%s: failed to get semaphore\n",
   12481 				    __func__);
   12482 				return 1;
   12483 			}
   12484 		}
   12485 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12486 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12487 			swfw_sync |= swmask;
   12488 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12489 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12490 				wm_put_swsm_semaphore(sc);
   12491 			return 0;
   12492 		}
   12493 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12494 			wm_put_swsm_semaphore(sc);
   12495 		delay(5000);
   12496 	}
   12497 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12498 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12499 	return 1;
   12500 }
   12501 
   12502 static void
   12503 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12504 {
   12505 	uint32_t swfw_sync;
   12506 
   12507 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12508 		device_xname(sc->sc_dev), __func__));
   12509 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12510 
   12511 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12512 		while (wm_get_swsm_semaphore(sc) != 0)
   12513 			continue;
   12514 	}
   12515 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12516 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12517 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12518 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12519 		wm_put_swsm_semaphore(sc);
   12520 }
   12521 
   12522 static int
   12523 wm_get_phy_82575(struct wm_softc *sc)
   12524 {
   12525 
   12526 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12527 		device_xname(sc->sc_dev), __func__));
   12528 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12529 }
   12530 
   12531 static void
   12532 wm_put_phy_82575(struct wm_softc *sc)
   12533 {
   12534 
   12535 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12536 		device_xname(sc->sc_dev), __func__));
   12537 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12538 }
   12539 
   12540 static int
   12541 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12542 {
   12543 	uint32_t ext_ctrl;
   12544 	int timeout = 200;
   12545 
   12546 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12547 		device_xname(sc->sc_dev), __func__));
   12548 
   12549 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12550 	for (timeout = 0; timeout < 200; timeout++) {
   12551 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12552 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12553 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12554 
   12555 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12556 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12557 			return 0;
   12558 		delay(5000);
   12559 	}
   12560 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12561 	    device_xname(sc->sc_dev), ext_ctrl);
   12562 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12563 	return 1;
   12564 }
   12565 
   12566 static void
   12567 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12568 {
   12569 	uint32_t ext_ctrl;
   12570 
   12571 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12572 		device_xname(sc->sc_dev), __func__));
   12573 
   12574 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12575 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12576 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12577 
   12578 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12579 }
   12580 
   12581 static int
   12582 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12583 {
   12584 	uint32_t ext_ctrl;
   12585 	int timeout;
   12586 
   12587 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12588 		device_xname(sc->sc_dev), __func__));
   12589 	mutex_enter(sc->sc_ich_phymtx);
   12590 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12591 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12592 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12593 			break;
   12594 		delay(1000);
   12595 	}
   12596 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12597 		printf("%s: SW has already locked the resource\n",
   12598 		    device_xname(sc->sc_dev));
   12599 		goto out;
   12600 	}
   12601 
   12602 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12603 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12604 	for (timeout = 0; timeout < 1000; timeout++) {
   12605 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12606 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12607 			break;
   12608 		delay(1000);
   12609 	}
   12610 	if (timeout >= 1000) {
   12611 		printf("%s: failed to acquire semaphore\n",
   12612 		    device_xname(sc->sc_dev));
   12613 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12614 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12615 		goto out;
   12616 	}
   12617 	return 0;
   12618 
   12619 out:
   12620 	mutex_exit(sc->sc_ich_phymtx);
   12621 	return 1;
   12622 }
   12623 
   12624 static void
   12625 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12626 {
   12627 	uint32_t ext_ctrl;
   12628 
   12629 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12630 		device_xname(sc->sc_dev), __func__));
   12631 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12632 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12633 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12634 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12635 	} else {
   12636 		printf("%s: Semaphore unexpectedly released\n",
   12637 		    device_xname(sc->sc_dev));
   12638 	}
   12639 
   12640 	mutex_exit(sc->sc_ich_phymtx);
   12641 }
   12642 
   12643 static int
   12644 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12645 {
   12646 
   12647 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12648 		device_xname(sc->sc_dev), __func__));
   12649 	mutex_enter(sc->sc_ich_nvmmtx);
   12650 
   12651 	return 0;
   12652 }
   12653 
   12654 static void
   12655 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12656 {
   12657 
   12658 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12659 		device_xname(sc->sc_dev), __func__));
   12660 	mutex_exit(sc->sc_ich_nvmmtx);
   12661 }
   12662 
   12663 static int
   12664 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12665 {
   12666 	int i = 0;
   12667 	uint32_t reg;
   12668 
   12669 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12670 		device_xname(sc->sc_dev), __func__));
   12671 
   12672 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12673 	do {
   12674 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12675 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12676 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12677 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12678 			break;
   12679 		delay(2*1000);
   12680 		i++;
   12681 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12682 
   12683 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12684 		wm_put_hw_semaphore_82573(sc);
   12685 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12686 		    device_xname(sc->sc_dev));
   12687 		return -1;
   12688 	}
   12689 
   12690 	return 0;
   12691 }
   12692 
   12693 static void
   12694 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12695 {
   12696 	uint32_t reg;
   12697 
   12698 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12699 		device_xname(sc->sc_dev), __func__));
   12700 
   12701 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12702 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12703 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12704 }
   12705 
   12706 /*
   12707  * Management mode and power management related subroutines.
   12708  * BMC, AMT, suspend/resume and EEE.
   12709  */
   12710 
   12711 #ifdef WM_WOL
   12712 static int
   12713 wm_check_mng_mode(struct wm_softc *sc)
   12714 {
   12715 	int rv;
   12716 
   12717 	switch (sc->sc_type) {
   12718 	case WM_T_ICH8:
   12719 	case WM_T_ICH9:
   12720 	case WM_T_ICH10:
   12721 	case WM_T_PCH:
   12722 	case WM_T_PCH2:
   12723 	case WM_T_PCH_LPT:
   12724 	case WM_T_PCH_SPT:
   12725 		rv = wm_check_mng_mode_ich8lan(sc);
   12726 		break;
   12727 	case WM_T_82574:
   12728 	case WM_T_82583:
   12729 		rv = wm_check_mng_mode_82574(sc);
   12730 		break;
   12731 	case WM_T_82571:
   12732 	case WM_T_82572:
   12733 	case WM_T_82573:
   12734 	case WM_T_80003:
   12735 		rv = wm_check_mng_mode_generic(sc);
   12736 		break;
   12737 	default:
    12738 		/* nothing to do */
   12739 		rv = 0;
   12740 		break;
   12741 	}
   12742 
   12743 	return rv;
   12744 }
   12745 
   12746 static int
   12747 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12748 {
   12749 	uint32_t fwsm;
   12750 
   12751 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12752 
   12753 	if (((fwsm & FWSM_FW_VALID) != 0)
   12754 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12755 		return 1;
   12756 
   12757 	return 0;
   12758 }
   12759 
   12760 static int
   12761 wm_check_mng_mode_82574(struct wm_softc *sc)
   12762 {
   12763 	uint16_t data;
   12764 
   12765 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12766 
   12767 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12768 		return 1;
   12769 
   12770 	return 0;
   12771 }
   12772 
   12773 static int
   12774 wm_check_mng_mode_generic(struct wm_softc *sc)
   12775 {
   12776 	uint32_t fwsm;
   12777 
   12778 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12779 
   12780 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12781 		return 1;
   12782 
   12783 	return 0;
   12784 }
   12785 #endif /* WM_WOL */
   12786 
   12787 static int
   12788 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12789 {
   12790 	uint32_t manc, fwsm, factps;
   12791 
   12792 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12793 		return 0;
   12794 
   12795 	manc = CSR_READ(sc, WMREG_MANC);
   12796 
   12797 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12798 		device_xname(sc->sc_dev), manc));
   12799 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12800 		return 0;
   12801 
   12802 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12803 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12804 		factps = CSR_READ(sc, WMREG_FACTPS);
   12805 		if (((factps & FACTPS_MNGCG) == 0)
   12806 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12807 			return 1;
   12808 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12809 		uint16_t data;
   12810 
   12811 		factps = CSR_READ(sc, WMREG_FACTPS);
   12812 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12813 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12814 			device_xname(sc->sc_dev), factps, data));
   12815 		if (((factps & FACTPS_MNGCG) == 0)
   12816 		    && ((data & NVM_CFG2_MNGM_MASK)
   12817 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12818 			return 1;
   12819 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12820 	    && ((manc & MANC_ASF_EN) == 0))
   12821 		return 1;
   12822 
   12823 	return 0;
   12824 }
   12825 
   12826 static bool
   12827 wm_phy_resetisblocked(struct wm_softc *sc)
   12828 {
   12829 	bool blocked = false;
   12830 	uint32_t reg;
   12831 	int i = 0;
   12832 
   12833 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12834 		device_xname(sc->sc_dev), __func__));
   12835 
   12836 	switch (sc->sc_type) {
   12837 	case WM_T_ICH8:
   12838 	case WM_T_ICH9:
   12839 	case WM_T_ICH10:
   12840 	case WM_T_PCH:
   12841 	case WM_T_PCH2:
   12842 	case WM_T_PCH_LPT:
   12843 	case WM_T_PCH_SPT:
   12844 		do {
   12845 			reg = CSR_READ(sc, WMREG_FWSM);
   12846 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12847 				blocked = true;
   12848 				delay(10*1000);
   12849 				continue;
   12850 			}
   12851 			blocked = false;
   12852 		} while (blocked && (i++ < 30));
    12853 		return blocked;
   12855 	case WM_T_82571:
   12856 	case WM_T_82572:
   12857 	case WM_T_82573:
   12858 	case WM_T_82574:
   12859 	case WM_T_82583:
   12860 	case WM_T_80003:
   12861 		reg = CSR_READ(sc, WMREG_MANC);
    12862 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
    12863 			return true;
    12864 		else
    12865 			return false;
   12867 	default:
   12868 		/* no problem */
   12869 		break;
   12870 	}
   12871 
   12872 	return false;
   12873 }
   12874 
   12875 static void
   12876 wm_get_hw_control(struct wm_softc *sc)
   12877 {
   12878 	uint32_t reg;
   12879 
   12880 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12881 		device_xname(sc->sc_dev), __func__));
   12882 
   12883 	if (sc->sc_type == WM_T_82573) {
   12884 		reg = CSR_READ(sc, WMREG_SWSM);
   12885 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12886 	} else if (sc->sc_type >= WM_T_82571) {
   12887 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12888 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12889 	}
   12890 }
   12891 
   12892 static void
   12893 wm_release_hw_control(struct wm_softc *sc)
   12894 {
   12895 	uint32_t reg;
   12896 
   12897 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12898 		device_xname(sc->sc_dev), __func__));
   12899 
   12900 	if (sc->sc_type == WM_T_82573) {
   12901 		reg = CSR_READ(sc, WMREG_SWSM);
   12902 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12903 	} else if (sc->sc_type >= WM_T_82571) {
   12904 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12905 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12906 	}
   12907 }
   12908 
   12909 static void
   12910 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12911 {
   12912 	uint32_t reg;
   12913 
   12914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12915 		device_xname(sc->sc_dev), __func__));
   12916 
   12917 	if (sc->sc_type < WM_T_PCH2)
   12918 		return;
   12919 
   12920 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12921 
   12922 	if (gate)
   12923 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12924 	else
   12925 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12926 
   12927 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12928 }
   12929 
   12930 static void
   12931 wm_smbustopci(struct wm_softc *sc)
   12932 {
   12933 	uint32_t fwsm, reg;
   12934 	int rv = 0;
   12935 
   12936 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12937 		device_xname(sc->sc_dev), __func__));
   12938 
   12939 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12940 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12941 
   12942 	/* Disable ULP */
   12943 	wm_ulp_disable(sc);
   12944 
   12945 	/* Acquire PHY semaphore */
   12946 	sc->phy.acquire(sc);
   12947 
   12948 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12949 	switch (sc->sc_type) {
   12950 	case WM_T_PCH_LPT:
   12951 	case WM_T_PCH_SPT:
   12952 		if (wm_phy_is_accessible_pchlan(sc))
   12953 			break;
   12954 
   12955 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12956 		reg |= CTRL_EXT_FORCE_SMBUS;
   12957 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12958 #if 0
   12959 		/* XXX Isn't this required??? */
   12960 		CSR_WRITE_FLUSH(sc);
   12961 #endif
   12962 		delay(50 * 1000);
   12963 		/* FALLTHROUGH */
   12964 	case WM_T_PCH2:
   12965 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12966 			break;
   12967 		/* FALLTHROUGH */
   12968 	case WM_T_PCH:
   12969 		if (sc->sc_type == WM_T_PCH)
   12970 			if ((fwsm & FWSM_FW_VALID) != 0)
   12971 				break;
   12972 
   12973 		if (wm_phy_resetisblocked(sc) == true) {
   12974 			printf("XXX reset is blocked(3)\n");
   12975 			break;
   12976 		}
   12977 
   12978 		wm_toggle_lanphypc_pch_lpt(sc);
   12979 
   12980 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12981 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12982 				break;
   12983 
   12984 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12985 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12986 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12987 
   12988 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12989 				break;
   12990 			rv = -1;
   12991 		}
   12992 		break;
   12993 	default:
   12994 		break;
   12995 	}
   12996 
   12997 	/* Release semaphore */
   12998 	sc->phy.release(sc);
   12999 
   13000 	if (rv == 0) {
   13001 		if (wm_phy_resetisblocked(sc)) {
   13002 			printf("XXX reset is blocked(4)\n");
   13003 			goto out;
   13004 		}
   13005 		wm_reset_phy(sc);
   13006 		if (wm_phy_resetisblocked(sc))
   13007 			printf("XXX reset is blocked(4)\n");
   13008 	}
   13009 
   13010 out:
   13011 	/*
   13012 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13013 	 */
   13014 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13015 		delay(10*1000);
   13016 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13017 	}
   13018 }
   13019 
   13020 static void
   13021 wm_init_manageability(struct wm_softc *sc)
   13022 {
   13023 
   13024 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13025 		device_xname(sc->sc_dev), __func__));
   13026 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13027 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13028 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13029 
   13030 		/* Disable hardware interception of ARP */
   13031 		manc &= ~MANC_ARP_EN;
   13032 
   13033 		/* Enable receiving management packets to the host */
   13034 		if (sc->sc_type >= WM_T_82571) {
   13035 			manc |= MANC_EN_MNG2HOST;
    13036 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13037 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13038 		}
   13039 
   13040 		CSR_WRITE(sc, WMREG_MANC, manc);
   13041 	}
   13042 }
   13043 
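/* Undo wm_init_manageability(): hand ARP handling back to the hardware. */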
   13044 static void
   13045 wm_release_manageability(struct wm_softc *sc)
   13046 {
   13047 
   13048 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13049 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13050 
   13051 		manc |= MANC_ARP_EN;
   13052 		if (sc->sc_type >= WM_T_82571)
   13053 			manc &= ~MANC_EN_MNG2HOST;
   13054 
   13055 		CSR_WRITE(sc, WMREG_MANC, manc);
   13056 	}
   13057 }
   13058 
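/*
 * Record the chip's wakeup/manageability capabilities (AMT, ARC
 * subsystem, ASF firmware, management pass-through) in sc_flags.
 */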
   13059 static void
   13060 wm_get_wakeup(struct wm_softc *sc)
   13061 {
   13062 
    13063 	/* Step 0: set HAS_AMT, ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES */
   13064 	switch (sc->sc_type) {
   13065 	case WM_T_82573:
   13066 	case WM_T_82583:
   13067 		sc->sc_flags |= WM_F_HAS_AMT;
   13068 		/* FALLTHROUGH */
   13069 	case WM_T_80003:
   13070 	case WM_T_82575:
   13071 	case WM_T_82576:
   13072 	case WM_T_82580:
   13073 	case WM_T_I350:
   13074 	case WM_T_I354:
   13075 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13076 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13077 		/* FALLTHROUGH */
   13078 	case WM_T_82541:
   13079 	case WM_T_82541_2:
   13080 	case WM_T_82547:
   13081 	case WM_T_82547_2:
   13082 	case WM_T_82571:
   13083 	case WM_T_82572:
   13084 	case WM_T_82574:
   13085 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13086 		break;
   13087 	case WM_T_ICH8:
   13088 	case WM_T_ICH9:
   13089 	case WM_T_ICH10:
   13090 	case WM_T_PCH:
   13091 	case WM_T_PCH2:
   13092 	case WM_T_PCH_LPT:
   13093 	case WM_T_PCH_SPT:
   13094 		sc->sc_flags |= WM_F_HAS_AMT;
   13095 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13096 		break;
   13097 	default:
   13098 		break;
   13099 	}
   13100 
    13101 	/* Step 1: set HAS_MANAGE if management pass-through is enabled */
   13102 	if (wm_enable_mng_pass_thru(sc) != 0)
   13103 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13104 
   13105 	/*
    13106 	 * Note that the WOL flags are set after the EEPROM settings have
    13107 	 * been reset.
   13108 	 */
   13109 }
   13110 
   13111 /*
    13112  * Unconfigure Ultra Low Power (ULP) mode.  Only for PCH_LPT
    13113  * (I217/I218) and newer; variants without ULP are excluded below.
   13114  */
   13115 static void
   13116 wm_ulp_disable(struct wm_softc *sc)
   13117 {
   13118 	uint32_t reg;
   13119 	int i = 0;
   13120 
   13121 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13122 		device_xname(sc->sc_dev), __func__));
    13123 	/* Exclude devices without ULP support */
   13124 	if ((sc->sc_type < WM_T_PCH_LPT)
   13125 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13126 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13127 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13128 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13129 		return;
   13130 
   13131 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13132 		/* Request ME un-configure ULP mode in the PHY */
   13133 		reg = CSR_READ(sc, WMREG_H2ME);
   13134 		reg &= ~H2ME_ULP;
   13135 		reg |= H2ME_ENFORCE_SETTINGS;
   13136 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13137 
   13138 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13139 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13140 			if (i++ == 30) {
   13141 				printf("%s timed out\n", __func__);
   13142 				return;
   13143 			}
   13144 			delay(10 * 1000);
   13145 		}
   13146 		reg = CSR_READ(sc, WMREG_H2ME);
   13147 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13148 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13149 
   13150 		return;
   13151 	}
   13152 
   13153 	/* Acquire semaphore */
   13154 	sc->phy.acquire(sc);
   13155 
   13156 	/* Toggle LANPHYPC */
   13157 	wm_toggle_lanphypc_pch_lpt(sc);
   13158 
   13159 	/* Unforce SMBus mode in PHY */
   13160 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13161 	if (reg == 0x0000 || reg == 0xffff) {
   13162 		uint32_t reg2;
   13163 
   13164 		printf("%s: Force SMBus first.\n", __func__);
   13165 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13166 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13167 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13168 		delay(50 * 1000);
   13169 
   13170 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13171 	}
   13172 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13173 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13174 
   13175 	/* Unforce SMBus mode in MAC */
   13176 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13177 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13178 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13179 
   13180 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13181 	reg |= HV_PM_CTRL_K1_ENA;
   13182 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13183 
   13184 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13185 	reg &= ~(I218_ULP_CONFIG1_IND
   13186 	    | I218_ULP_CONFIG1_STICKY_ULP
   13187 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13188 	    | I218_ULP_CONFIG1_WOL_HOST
   13189 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13190 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13191 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13192 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13193 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13194 	reg |= I218_ULP_CONFIG1_START;
   13195 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13196 
   13197 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13198 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13199 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13200 
   13201 	/* Release semaphore */
   13202 	sc->phy.release(sc);
   13203 	wm_gmii_reset(sc);
   13204 	delay(50 * 1000);
   13205 }
   13206 
   13207 /* WOL in the newer chipset interfaces (pchlan) */
   13208 static void
   13209 wm_enable_phy_wakeup(struct wm_softc *sc)
   13210 {
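	/* XXX Not implemented yet; the outline below lists the required steps. */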
   13211 #if 0
   13212 	uint16_t preg;
   13213 
   13214 	/* Copy MAC RARs to PHY RARs */
   13215 
   13216 	/* Copy MAC MTA to PHY MTA */
   13217 
   13218 	/* Configure PHY Rx Control register */
   13219 
   13220 	/* Enable PHY wakeup in MAC register */
   13221 
   13222 	/* Configure and enable PHY wakeup in PHY registers */
   13223 
   13224 	/* Activate PHY wakeup */
   13225 
   13226 	/* XXX */
   13227 #endif
   13228 }
   13229 
    13230 /* Power down workaround on D3: put the IGP3 PHY regulator into shutdown */
   13231 static void
   13232 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13233 {
   13234 	uint32_t reg;
   13235 	int i;
   13236 
   13237 	for (i = 0; i < 2; i++) {
   13238 		/* Disable link */
   13239 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13240 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13241 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13242 
   13243 		/*
   13244 		 * Call gig speed drop workaround on Gig disable before
   13245 		 * accessing any PHY registers
   13246 		 */
   13247 		if (sc->sc_type == WM_T_ICH8)
   13248 			wm_gig_downshift_workaround_ich8lan(sc);
   13249 
   13250 		/* Write VR power-down enable */
   13251 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13252 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13253 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13254 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13255 
   13256 		/* Read it back and test */
   13257 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13258 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13259 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13260 			break;
   13261 
   13262 		/* Issue PHY reset and repeat at most one more time */
   13263 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13264 	}
   13265 }
   13266 
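/*
 * Arm the device for wake-on-LAN: advertise APM wakeup, apply the
 * ICH/PCH-specific quirks, enable magic packet reception and set
 * PME_EN in the PCI power management capability.
 */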
   13267 static void
   13268 wm_enable_wakeup(struct wm_softc *sc)
   13269 {
   13270 	uint32_t reg, pmreg;
   13271 	pcireg_t pmode;
   13272 
   13273 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13274 		device_xname(sc->sc_dev), __func__));
   13275 
   13276 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13277 		&pmreg, NULL) == 0)
   13278 		return;
   13279 
   13280 	/* Advertise the wakeup capability */
   13281 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13282 	    | CTRL_SWDPIN(3));
   13283 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13284 
   13285 	/* ICH workaround */
   13286 	switch (sc->sc_type) {
   13287 	case WM_T_ICH8:
   13288 	case WM_T_ICH9:
   13289 	case WM_T_ICH10:
   13290 	case WM_T_PCH:
   13291 	case WM_T_PCH2:
   13292 	case WM_T_PCH_LPT:
   13293 	case WM_T_PCH_SPT:
   13294 		/* Disable gig during WOL */
   13295 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13296 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13297 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13298 		if (sc->sc_type == WM_T_PCH)
   13299 			wm_gmii_reset(sc);
   13300 
   13301 		/* Power down workaround */
   13302 		if (sc->sc_phytype == WMPHY_82577) {
   13303 			struct mii_softc *child;
   13304 
   13305 			/* Assume that the PHY is copper */
   13306 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13307 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13308 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13309 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
   13310 		}
   13311 		break;
   13312 	default:
   13313 		break;
   13314 	}
   13315 
   13316 	/* Keep the laser running on fiber adapters */
   13317 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13318 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13319 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13320 		reg |= CTRL_EXT_SWDPIN(3);
   13321 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13322 	}
   13323 
   13324 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13325 #if 0	/* for the multicast packet */
   13326 	reg |= WUFC_MC;
   13327 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13328 #endif
   13329 
   13330 	if (sc->sc_type >= WM_T_PCH)
   13331 		wm_enable_phy_wakeup(sc);
   13332 	else {
   13333 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13334 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13335 	}
   13336 
   13337 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13338 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13339 		|| (sc->sc_type == WM_T_PCH2))
   13340 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13341 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13342 
   13343 	/* Request PME */
   13344 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13345 #if 0
   13346 	/* Disable WOL */
   13347 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13348 #else
   13349 	/* For WOL */
   13350 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13351 #endif
   13352 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13353 }
   13354 
    13355 /* LPLU (Low Power Link Up) */
   13356 
   13357 static void
   13358 wm_lplu_d0_disable(struct wm_softc *sc)
   13359 {
   13360 	uint32_t reg;
   13361 
   13362 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13363 		device_xname(sc->sc_dev), __func__));
   13364 
   13365 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13366 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13367 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13368 }
   13369 
   13370 static void
   13371 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13372 {
   13373 	uint32_t reg;
   13374 
   13375 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13376 		device_xname(sc->sc_dev), __func__));
   13377 
   13378 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13379 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13380 	reg |= HV_OEM_BITS_ANEGNOW;
   13381 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13382 }
   13383 
    13384 /* EEE (Energy Efficient Ethernet) */
   13385 
   13386 static void
   13387 wm_set_eee_i350(struct wm_softc *sc)
   13388 {
   13389 	uint32_t ipcnfg, eeer;
   13390 
   13391 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13392 	eeer = CSR_READ(sc, WMREG_EEER);
   13393 
   13394 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13395 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13396 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13397 		    | EEER_LPI_FC);
   13398 	} else {
   13399 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13400 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13401 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13402 		    | EEER_LPI_FC);
   13403 	}
   13404 
   13405 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13406 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13407 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13408 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13409 }
   13410 
   13411 /*
   13412  * Workarounds (mainly PHY related).
    13413  * Basically, PHY workarounds are in the PHY drivers.
   13414  */
   13415 
    13416 /* Workaround for 82566 Kumeran PCS lock loss */
   13417 static void
   13418 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13419 {
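	/* XXX Body compiled out via #if 0; this function is currently a no-op. */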
   13420 #if 0
   13421 	int miistatus, active, i;
   13422 	int reg;
   13423 
   13424 	miistatus = sc->sc_mii.mii_media_status;
   13425 
   13426 	/* If the link is not up, do nothing */
   13427 	if ((miistatus & IFM_ACTIVE) == 0)
   13428 		return;
   13429 
   13430 	active = sc->sc_mii.mii_media_active;
   13431 
   13432 	/* Nothing to do if the link is other than 1Gbps */
   13433 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13434 		return;
   13435 
   13436 	for (i = 0; i < 10; i++) {
   13437 		/* read twice */
   13438 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13439 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13440 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13441 			goto out;	/* GOOD! */
   13442 
   13443 		/* Reset the PHY */
   13444 		wm_gmii_reset(sc);
   13445 		delay(5*1000);
   13446 	}
   13447 
   13448 	/* Disable GigE link negotiation */
   13449 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13450 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13451 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13452 
   13453 	/*
   13454 	 * Call gig speed drop workaround on Gig disable before accessing
   13455 	 * any PHY registers.
   13456 	 */
   13457 	wm_gig_downshift_workaround_ich8lan(sc);
   13458 
   13459 out:
   13460 	return;
   13461 #endif
   13462 }
   13463 
    13464 /* Workaround for WOL from S5 not working (igp3 PHYs only) */
   13465 static void
   13466 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13467 {
   13468 	uint16_t kmrn_reg;
   13469 
   13470 	/* Only for igp3 */
   13471 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13472 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13473 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13474 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13475 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13476 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13477 	}
   13478 }
   13479 
   13480 /*
   13481  * Workaround for pch's PHYs
   13482  * XXX should be moved to new PHY driver?
   13483  */
   13484 static void
   13485 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13486 {
   13487 
   13488 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13489 		device_xname(sc->sc_dev), __func__));
   13490 	KASSERT(sc->sc_type == WM_T_PCH);
   13491 
   13492 	if (sc->sc_phytype == WMPHY_82577)
   13493 		wm_set_mdio_slow_mode_hv(sc);
   13494 
    13495 	/* XXX not yet implemented: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
    13496 
    13497 	/* XXX not yet implemented: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13498 
   13499 	/* 82578 */
   13500 	if (sc->sc_phytype == WMPHY_82578) {
   13501 		struct mii_softc *child;
   13502 
   13503 		/*
   13504 		 * Return registers to default by doing a soft reset then
   13505 		 * writing 0x3140 to the control register
   13506 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13507 		 */
   13508 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13509 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13510 			PHY_RESET(child);
   13511 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13512 			    0x3140);
   13513 		}
   13514 	}
   13515 
   13516 	/* Select page 0 */
   13517 	sc->phy.acquire(sc);
   13518 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13519 	sc->phy.release(sc);
   13520 
   13521 	/*
   13522 	 * Configure the K1 Si workaround during phy reset assuming there is
   13523 	 * link so that it disables K1 if link is in 1Gbps.
   13524 	 */
   13525 	wm_k1_gig_workaround_hv(sc, 1);
   13526 }
   13527 
   13528 static void
   13529 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13530 {
   13531 
   13532 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13533 		device_xname(sc->sc_dev), __func__));
   13534 	KASSERT(sc->sc_type == WM_T_PCH2);
   13535 
   13536 	wm_set_mdio_slow_mode_hv(sc);
   13537 }
   13538 
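/*
 * K1 is a power-save state of the Kumeran interconnect between the MAC
 * and the PHY.  As noted above, K1 must be off while a 1Gbps link is
 * up; this helper applies the link stall fixes and reprograms K1
 * accordingly.
 */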
   13539 static int
   13540 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13541 {
   13542 	int k1_enable = sc->sc_nvm_k1_enabled;
   13543 
   13544 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13545 		device_xname(sc->sc_dev), __func__));
   13546 
   13547 	if (sc->phy.acquire(sc) != 0)
   13548 		return -1;
   13549 
   13550 	if (link) {
   13551 		k1_enable = 0;
   13552 
   13553 		/* Link stall fix for link up */
   13554 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13555 	} else {
   13556 		/* Link stall fix for link down */
   13557 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13558 	}
   13559 
   13560 	wm_configure_k1_ich8lan(sc, k1_enable);
   13561 	sc->phy.release(sc);
   13562 
   13563 	return 0;
   13564 }
   13565 
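/* Put the PHY into slow MDIO mode; some PCH-era PHYs (e.g. 82577) need it. */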
   13566 static void
   13567 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13568 {
   13569 	uint32_t reg;
   13570 
   13571 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13572 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13573 	    reg | HV_KMRN_MDIO_SLOW);
   13574 }
   13575 
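/*
 * Write the K1 enable bit through the Kumeran K1_CONFIG register, then
 * briefly force the MAC speed (FRCSPD/SPD_BYPS), apparently so that the
 * new K1 setting takes effect, before restoring CTRL/CTRL_EXT.
 */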
   13576 static void
   13577 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13578 {
   13579 	uint32_t ctrl, ctrl_ext, tmp;
   13580 	uint16_t kmrn_reg;
   13581 
   13582 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13583 
   13584 	if (k1_enable)
   13585 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13586 	else
   13587 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13588 
   13589 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13590 
   13591 	delay(20);
   13592 
   13593 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13594 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13595 
   13596 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13597 	tmp |= CTRL_FRCSPD;
   13598 
   13599 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13600 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13601 	CSR_WRITE_FLUSH(sc);
   13602 	delay(20);
   13603 
   13604 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13605 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13606 	CSR_WRITE_FLUSH(sc);
   13607 	delay(20);
   13608 }
   13609 
    13610 /* Special case: the 82575 needs manual init after reset ... */
   13611 static void
   13612 wm_reset_init_script_82575(struct wm_softc *sc)
   13613 {
    13614 	/*
    13615 	 * Remark: this is untested code - we have no board without EEPROM.
    13616 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13617 	 */
   13618 
   13619 	/* SerDes configuration via SERDESCTRL */
   13620 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13621 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13622 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13623 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13624 
   13625 	/* CCM configuration via CCMCTL register */
   13626 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13627 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13628 
   13629 	/* PCIe lanes configuration */
   13630 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13631 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13632 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13633 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13634 
   13635 	/* PCIe PLL Configuration */
   13636 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13637 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13638 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13639 }
   13640 
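/*
 * Restore the MDICNFG destination and shared-MDIO bits from the NVM
 * CFG3 word after a reset; only relevant when the 82580 port runs in
 * SGMII mode.
 */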
   13641 static void
   13642 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13643 {
   13644 	uint32_t reg;
   13645 	uint16_t nvmword;
   13646 	int rv;
   13647 
   13648 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13649 		return;
   13650 
   13651 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13652 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13653 	if (rv != 0) {
   13654 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13655 		    __func__);
   13656 		return;
   13657 	}
   13658 
   13659 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13660 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13661 		reg |= MDICNFG_DEST;
   13662 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13663 		reg |= MDICNFG_COM_MDIO;
   13664 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13665 }
   13666 
   13667 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13668 
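/*
 * Probe whether the PHY answers over MDIO by reading its ID registers,
 * falling back to slow MDIO mode or unforcing SMBus where the chip
 * allows it.  Call this with the PHY semaphore held.
 */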
   13669 static bool
   13670 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13671 {
   13672 	int i;
   13673 	uint32_t reg;
   13674 	uint16_t id1, id2;
   13675 
   13676 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13677 		device_xname(sc->sc_dev), __func__));
   13678 	id1 = id2 = 0xffff;
   13679 	for (i = 0; i < 2; i++) {
   13680 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13681 		if (MII_INVALIDID(id1))
   13682 			continue;
   13683 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13684 		if (MII_INVALIDID(id2))
   13685 			continue;
   13686 		break;
   13687 	}
   13688 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13689 		goto out;
   13690 	}
   13691 
   13692 	if (sc->sc_type < WM_T_PCH_LPT) {
   13693 		sc->phy.release(sc);
   13694 		wm_set_mdio_slow_mode_hv(sc);
   13695 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13696 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13697 		sc->phy.acquire(sc);
   13698 	}
   13699 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    13700 		printf("XXX %s: PHY is not accessible\n", __func__);
   13701 		return false;
   13702 	}
   13703 out:
   13704 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13705 		/* Only unforce SMBus if ME is not active */
   13706 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13707 			/* Unforce SMBus mode in PHY */
   13708 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13709 			    CV_SMB_CTRL);
   13710 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13711 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13712 			    CV_SMB_CTRL, reg);
   13713 
   13714 			/* Unforce SMBus mode in MAC */
   13715 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13716 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13717 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13718 		}
   13719 	}
   13720 	return true;
   13721 }
   13722 
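/*
 * Toggle the LANPHYPC pin to force the PHY into a known power state,
 * then wait for the PHY configuration to finish (CTRL_EXT_LPCD on
 * PCH_LPT and newer).  (Behaviour inferred from the code below.)
 */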
   13723 static void
   13724 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13725 {
   13726 	uint32_t reg;
   13727 	int i;
   13728 
   13729 	/* Set PHY Config Counter to 50msec */
   13730 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13731 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13732 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13733 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13734 
   13735 	/* Toggle LANPHYPC */
   13736 	reg = CSR_READ(sc, WMREG_CTRL);
   13737 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13738 	reg &= ~CTRL_LANPHYPC_VALUE;
   13739 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13740 	CSR_WRITE_FLUSH(sc);
   13741 	delay(1000);
   13742 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13743 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13744 	CSR_WRITE_FLUSH(sc);
   13745 
   13746 	if (sc->sc_type < WM_T_PCH_LPT)
   13747 		delay(50 * 1000);
   13748 	else {
   13749 		i = 20;
   13750 
   13751 		do {
   13752 			delay(5 * 1000);
   13753 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13754 		    && i--);
   13755 
   13756 		delay(30 * 1000);
   13757 	}
   13758 }
   13759 
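/*
 * Program the PCIe LTR (Latency Tolerance Reporting) value.  With link
 * up, compute how long the Rx packet buffer can absorb traffic at the
 * current link speed, clamp that to what the platform advertises and
 * report it; with link down, a zero value is sent.
 */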
   13760 static int
   13761 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13762 {
   13763 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13764 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13765 	uint32_t rxa;
   13766 	uint16_t scale = 0, lat_enc = 0;
   13767 	int64_t lat_ns, value;
   13768 
   13769 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13770 		device_xname(sc->sc_dev), __func__));
   13771 
   13772 	if (link) {
   13773 		pcireg_t preg;
   13774 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13775 
   13776 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13777 
   13778 		/*
   13779 		 * Determine the maximum latency tolerated by the device.
   13780 		 *
   13781 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13782 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13783 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13784 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13785 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13786 		 */
   13787 		lat_ns = ((int64_t)rxa * 1024 -
   13788 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13789 		if (lat_ns < 0)
   13790 			lat_ns = 0;
   13791 		else {
   13792 			uint32_t status;
   13793 			uint16_t speed;
   13794 
   13795 			status = CSR_READ(sc, WMREG_STATUS);
   13796 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13797 			case STATUS_SPEED_10:
   13798 				speed = 10;
   13799 				break;
   13800 			case STATUS_SPEED_100:
   13801 				speed = 100;
   13802 				break;
   13803 			case STATUS_SPEED_1000:
   13804 				speed = 1000;
   13805 				break;
   13806 			default:
   13807 				printf("%s: Unknown speed (status = %08x)\n",
   13808 				    device_xname(sc->sc_dev), status);
   13809 				return -1;
   13810 			}
   13811 			lat_ns /= speed;
   13812 		}
   13813 		value = lat_ns;
   13814 
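		/* Encode lat_ns as value * 2^(5 * scale), per the LTR format. */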
   13815 		while (value > LTRV_VALUE) {
    13816 			scale++;
   13817 			value = howmany(value, __BIT(5));
   13818 		}
   13819 		if (scale > LTRV_SCALE_MAX) {
   13820 			printf("%s: Invalid LTR latency scale %d\n",
   13821 			    device_xname(sc->sc_dev), scale);
   13822 			return -1;
   13823 		}
   13824 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13825 
   13826 		/* Determine the maximum latency tolerated by the platform */
   13827 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13828 		    WM_PCI_LTR_CAP_LPT);
   13829 		max_snoop = preg & 0xffff;
   13830 		max_nosnoop = preg >> 16;
   13831 
   13832 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13833 
   13834 		if (lat_enc > max_ltr_enc) {
   13835 			lat_enc = max_ltr_enc;
   13836 		}
   13837 	}
   13838 	/* Snoop and No-Snoop latencies the same */
   13839 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13840 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13841 
   13842 	return 0;
   13843 }
   13844 
   13845 /*
   13846  * I210 Errata 25 and I211 Errata 10
   13847  * Slow System Clock.
   13848  */
   13849 static void
   13850 wm_pll_workaround_i210(struct wm_softc *sc)
   13851 {
   13852 	uint32_t mdicnfg, wuc;
   13853 	uint32_t reg;
   13854 	pcireg_t pcireg;
   13855 	uint32_t pmreg;
   13856 	uint16_t nvmword, tmp_nvmword;
   13857 	int phyval;
   13858 	bool wa_done = false;
   13859 	int i;
   13860 
   13861 	/* Save WUC and MDICNFG registers */
   13862 	wuc = CSR_READ(sc, WMREG_WUC);
   13863 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13864 
   13865 	reg = mdicnfg & ~MDICNFG_DEST;
   13866 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13867 
   13868 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13869 		nvmword = INVM_DEFAULT_AL;
   13870 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13871 
   13872 	/* Get Power Management cap offset */
   13873 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13874 		&pmreg, NULL) == 0)
   13875 		return;
   13876 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13877 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13878 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13879 
   13880 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13881 			break; /* OK */
   13882 		}
   13883 
   13884 		wa_done = true;
   13885 		/* Directly reset the internal PHY */
   13886 		reg = CSR_READ(sc, WMREG_CTRL);
   13887 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13888 
   13889 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13890 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13891 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13892 
   13893 		CSR_WRITE(sc, WMREG_WUC, 0);
   13894 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13895 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13896 
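		/*
		 * Bounce the device through D3hot and back to D0;
		 * presumably this is what makes the PLL relock with the
		 * override autoload value written above.
		 */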
   13897 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13898 		    pmreg + PCI_PMCSR);
   13899 		pcireg |= PCI_PMCSR_STATE_D3;
   13900 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13901 		    pmreg + PCI_PMCSR, pcireg);
   13902 		delay(1000);
   13903 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13904 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13905 		    pmreg + PCI_PMCSR, pcireg);
   13906 
   13907 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13908 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13909 
   13910 		/* Restore WUC register */
   13911 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13912 	}
   13913 
   13914 	/* Restore MDICNFG setting */
   13915 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13916 	if (wa_done)
   13917 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13918 }
   13919