Home | History | Annotate | Line # | Download | only in pci
if_wm.c revision 1.550
      1 /*	$NetBSD: if_wm.c,v 1.550 2017/12/28 06:13:50 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficiency Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.550 2017/12/28 06:13:50 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 #include <dev/mii/ihphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
    171  * This device driver's max interrupt numbers.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for normal
    218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    290 	do{								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
    297 	}while(0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
    320 	size_t txq_descsize;		/* a tx descriptor size */
    321 	txdescs_t *txq_descs_u;
    322         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    324 	int txq_desc_rseg;		/* real number of control segment */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    346 	 * CPUs. This queue intermediate them without block.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
    351 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
    352 	 * to manage Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    370 	WM_Q_EVCNT_DEFINE(txq,txtusum)		/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
    376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
    395 	size_t rxq_descsize;		/* a rx descriptor size */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    399 	int rxq_desc_rseg;		/* real number of control segment */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
    428 	int wmq_intr_idx;		/* index of MSI-X tables */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 struct wm_nvmop {
    446 	int (*acquire)(struct wm_softc *);
    447 	void (*release)(struct wm_softc *);
    448 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    449 };
    450 
    451 /*
    452  * Software state per device.
    453  */
    454 struct wm_softc {
    455 	device_t sc_dev;		/* generic device information */
    456 	bus_space_tag_t sc_st;		/* bus space tag */
    457 	bus_space_handle_t sc_sh;	/* bus space handle */
    458 	bus_size_t sc_ss;		/* bus space size */
    459 	bus_space_tag_t sc_iot;		/* I/O space tag */
    460 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    461 	bus_size_t sc_ios;		/* I/O space size */
    462 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    463 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    464 	bus_size_t sc_flashs;		/* flash registers space size */
    465 	off_t sc_flashreg_offset;	/*
    466 					 * offset to flash registers from
    467 					 * start of BAR
    468 					 */
    469 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    470 
    471 	struct ethercom sc_ethercom;	/* ethernet common data */
    472 	struct mii_data sc_mii;		/* MII/media information */
    473 
    474 	pci_chipset_tag_t sc_pc;
    475 	pcitag_t sc_pcitag;
    476 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    477 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    478 
    479 	uint16_t sc_pcidevid;		/* PCI device ID */
    480 	wm_chip_type sc_type;		/* MAC type */
    481 	int sc_rev;			/* MAC revision */
    482 	wm_phy_type sc_phytype;		/* PHY type */
    483 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    484 #define	WM_MEDIATYPE_UNKNOWN		0x00
    485 #define	WM_MEDIATYPE_FIBER		0x01
    486 #define	WM_MEDIATYPE_COPPER		0x02
    487 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    488 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    489 	int sc_flags;			/* flags; see below */
    490 	int sc_if_flags;		/* last if_flags */
    491 	int sc_flowflags;		/* 802.3x flow control flags */
    492 	int sc_align_tweak;
    493 
    494 	void *sc_ihs[WM_MAX_NINTR];	/*
    495 					 * interrupt cookie.
    496 					 * - legacy and msi use sc_ihs[0] only
    497 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    498 					 */
    499 	pci_intr_handle_t *sc_intrs;	/*
    500 					 * legacy and msi use sc_intrs[0] only
    501 					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
    502 					 */
    503 	int sc_nintrs;			/* number of interrupts */
    504 
    505 	int sc_link_intr_idx;		/* index of MSI-X tables */
    506 
    507 	callout_t sc_tick_ch;		/* tick callout */
    508 	bool sc_core_stopping;
    509 
    510 	int sc_nvm_ver_major;
    511 	int sc_nvm_ver_minor;
    512 	int sc_nvm_ver_build;
    513 	int sc_nvm_addrbits;		/* NVM address bits */
    514 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    515 	int sc_ich8_flash_base;
    516 	int sc_ich8_flash_bank_size;
    517 	int sc_nvm_k1_enabled;
    518 
    519 	int sc_nqueues;
    520 	struct wm_queue *sc_queue;
    521 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    522 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    523 
    524 	int sc_affinity_offset;
    525 
    526 #ifdef WM_EVENT_COUNTERS
    527 	/* Event counters. */
    528 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    529 
    530         /* WM_T_82542_2_1 only */
    531 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    532 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    533 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    534 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    535 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    536 #endif /* WM_EVENT_COUNTERS */
    537 
    538 	/* This variable are used only on the 82547. */
    539 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    540 
    541 	uint32_t sc_ctrl;		/* prototype CTRL register */
    542 #if 0
    543 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    544 #endif
    545 	uint32_t sc_icr;		/* prototype interrupt bits */
    546 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    547 	uint32_t sc_tctl;		/* prototype TCTL register */
    548 	uint32_t sc_rctl;		/* prototype RCTL register */
    549 	uint32_t sc_txcw;		/* prototype TXCW register */
    550 	uint32_t sc_tipg;		/* prototype TIPG register */
    551 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    552 	uint32_t sc_pba;		/* prototype PBA register */
    553 
    554 	int sc_tbi_linkup;		/* TBI link status */
    555 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    556 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    557 
    558 	int sc_mchash_type;		/* multicast filter offset */
    559 
    560 	krndsource_t rnd_source;	/* random source */
    561 
    562 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    563 
    564 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    565 	kmutex_t *sc_ich_phymtx;	/*
    566 					 * 82574/82583/ICH/PCH specific PHY
    567 					 * mutex. For 82574/82583, the mutex
    568 					 * is used for both PHY and NVM.
    569 					 */
    570 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    571 
    572 	struct wm_phyop phy;
    573 	struct wm_nvmop nvm;
    574 };
    575 
    576 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    577 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    578 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    579 
    580 #define	WM_RXCHAIN_RESET(rxq)						\
    581 do {									\
    582 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    583 	*(rxq)->rxq_tailp = NULL;					\
    584 	(rxq)->rxq_len = 0;						\
    585 } while (/*CONSTCOND*/0)
    586 
    587 #define	WM_RXCHAIN_LINK(rxq, m)						\
    588 do {									\
    589 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    590 	(rxq)->rxq_tailp = &(m)->m_next;				\
    591 } while (/*CONSTCOND*/0)
    592 
    593 #ifdef WM_EVENT_COUNTERS
    594 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    595 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)			\
    598 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    600 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    601 #else /* !WM_EVENT_COUNTERS */
    602 #define	WM_EVCNT_INCR(ev)	/* nothing */
    603 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    604 
    605 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    606 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    607 #endif /* !WM_EVENT_COUNTERS */
    608 
    609 #define	CSR_READ(sc, reg)						\
    610 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    611 #define	CSR_WRITE(sc, reg, val)						\
    612 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    613 #define	CSR_WRITE_FLUSH(sc)						\
    614 	(void) CSR_READ((sc), WMREG_STATUS)
    615 
    616 #define ICH8_FLASH_READ32(sc, reg)					\
    617 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    620 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define ICH8_FLASH_READ16(sc, reg)					\
    624 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    625 	    (reg) + sc->sc_flashreg_offset)
    626 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    627 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    628 	    (reg) + sc->sc_flashreg_offset, (data))
    629 
    630 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    631 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    632 
    633 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    634 #define	WM_CDTXADDR_HI(txq, x)						\
    635 	(sizeof(bus_addr_t) == 8 ?					\
    636 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    637 
    638 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    639 #define	WM_CDRXADDR_HI(rxq, x)						\
    640 	(sizeof(bus_addr_t) == 8 ?					\
    641 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
    642 
    643 /*
    644  * Register read/write functions.
    645  * Other than CSR_{READ|WRITE}().
    646  */
    647 #if 0
    648 static inline uint32_t wm_io_read(struct wm_softc *, int);
    649 #endif
    650 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    651 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    652 	uint32_t, uint32_t);
    653 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    654 
    655 /*
    656  * Descriptor sync/init functions.
    657  */
    658 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    659 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    660 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    661 
    662 /*
    663  * Device driver interface functions and commonly used functions.
    664  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    665  */
    666 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    667 static int	wm_match(device_t, cfdata_t, void *);
    668 static void	wm_attach(device_t, device_t, void *);
    669 static int	wm_detach(device_t, int);
    670 static bool	wm_suspend(device_t, const pmf_qual_t *);
    671 static bool	wm_resume(device_t, const pmf_qual_t *);
    672 static void	wm_watchdog(struct ifnet *);
    673 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    674 static void	wm_tick(void *);
    675 static int	wm_ifflags_cb(struct ethercom *);
    676 static int	wm_ioctl(struct ifnet *, u_long, void *);
    677 /* MAC address related */
    678 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    679 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    680 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    681 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    682 static void	wm_set_filter(struct wm_softc *);
    683 /* Reset and init related */
    684 static void	wm_set_vlan(struct wm_softc *);
    685 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    686 static void	wm_get_auto_rd_done(struct wm_softc *);
    687 static void	wm_lan_init_done(struct wm_softc *);
    688 static void	wm_get_cfg_done(struct wm_softc *);
    689 static void	wm_phy_post_reset(struct wm_softc *);
    690 static void	wm_write_smbus_addr(struct wm_softc *);
    691 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    692 static void	wm_initialize_hardware_bits(struct wm_softc *);
    693 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    694 static void	wm_reset_phy(struct wm_softc *);
    695 static void	wm_flush_desc_rings(struct wm_softc *);
    696 static void	wm_reset(struct wm_softc *);
    697 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    698 static void	wm_rxdrain(struct wm_rxqueue *);
    699 static void	wm_rss_getkey(uint8_t *);
    700 static void	wm_init_rss(struct wm_softc *);
    701 static void	wm_adjust_qnum(struct wm_softc *, int);
    702 static inline bool	wm_is_using_msix(struct wm_softc *);
    703 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    704 static int	wm_softint_establish(struct wm_softc *, int, int);
    705 static int	wm_setup_legacy(struct wm_softc *);
    706 static int	wm_setup_msix(struct wm_softc *);
    707 static int	wm_init(struct ifnet *);
    708 static int	wm_init_locked(struct ifnet *);
    709 static void	wm_unset_stopping_flags(struct wm_softc *);
    710 static void	wm_set_stopping_flags(struct wm_softc *);
    711 static void	wm_stop(struct ifnet *, int);
    712 static void	wm_stop_locked(struct ifnet *, int);
    713 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    714 static void	wm_82547_txfifo_stall(void *);
    715 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    716 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    717 /* DMA related */
    718 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    721 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    722     struct wm_txqueue *);
    723 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    725 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    728 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    729 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    730 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    731 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    732 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    733 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    734     struct wm_txqueue *);
    735 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    736     struct wm_rxqueue *);
    737 static int	wm_alloc_txrx_queues(struct wm_softc *);
    738 static void	wm_free_txrx_queues(struct wm_softc *);
    739 static int	wm_init_txrx_queues(struct wm_softc *);
    740 /* Start */
    741 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    742     struct wm_txsoft *, uint32_t *, uint8_t *);
    743 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    744 static void	wm_start(struct ifnet *);
    745 static void	wm_start_locked(struct ifnet *);
    746 static int	wm_transmit(struct ifnet *, struct mbuf *);
    747 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    748 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    749 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    750     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    751 static void	wm_nq_start(struct ifnet *);
    752 static void	wm_nq_start_locked(struct ifnet *);
    753 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    754 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    755 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    756 static void	wm_deferred_start_locked(struct wm_txqueue *);
    757 static void	wm_handle_queue(void *);
    758 /* Interrupt */
    759 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    761 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    762 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    763 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    764 static void	wm_linkintr(struct wm_softc *, uint32_t);
    765 static int	wm_intr_legacy(void *);
    766 static inline void	wm_txrxintr_disable(struct wm_queue *);
    767 static inline void	wm_txrxintr_enable(struct wm_queue *);
    768 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    769 static int	wm_txrxintr_msix(void *);
    770 static int	wm_linkintr_msix(void *);
    771 
    772 /*
    773  * Media related.
    774  * GMII, SGMII, TBI, SERDES and SFP.
    775  */
    776 /* Common */
    777 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    778 /* GMII related */
    779 static void	wm_gmii_reset(struct wm_softc *);
    780 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    781 static int	wm_get_phy_id_82575(struct wm_softc *);
    782 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    783 static int	wm_gmii_mediachange(struct ifnet *);
    784 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    785 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    786 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    787 static int	wm_gmii_i82543_readreg(device_t, int, int);
    788 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    789 static int	wm_gmii_mdic_readreg(device_t, int, int);
    790 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    791 static int	wm_gmii_i82544_readreg(device_t, int, int);
    792 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    793 static int	wm_gmii_i80003_readreg(device_t, int, int);
    794 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    795 static int	wm_gmii_bm_readreg(device_t, int, int);
    796 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    797 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    798 static int	wm_gmii_hv_readreg(device_t, int, int);
    799 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    800 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    801 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    802 static int	wm_gmii_82580_readreg(device_t, int, int);
    803 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    804 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    805 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    806 static void	wm_gmii_statchg(struct ifnet *);
    807 /*
    808  * kumeran related (80003, ICH* and PCH*).
    809  * These functions are not for accessing MII registers but for accessing
    810  * kumeran specific registers.
    811  */
    812 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    813 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    814 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    815 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    816 /* SGMII */
    817 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    818 static int	wm_sgmii_readreg(device_t, int, int);
    819 static void	wm_sgmii_writereg(device_t, int, int, int);
    820 /* TBI related */
    821 static void	wm_tbi_mediainit(struct wm_softc *);
    822 static int	wm_tbi_mediachange(struct ifnet *);
    823 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    824 static int	wm_check_for_link(struct wm_softc *);
    825 static void	wm_tbi_tick(struct wm_softc *);
    826 /* SERDES related */
    827 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    828 static int	wm_serdes_mediachange(struct ifnet *);
    829 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    830 static void	wm_serdes_tick(struct wm_softc *);
    831 /* SFP related */
    832 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    833 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    834 
    835 /*
    836  * NVM related.
    837  * Microwire, SPI (w/wo EERD) and Flash.
    838  */
    839 /* Misc functions */
    840 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    841 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    842 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    843 /* Microwire */
    844 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    845 /* SPI */
    846 static int	wm_nvm_ready_spi(struct wm_softc *);
    847 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    848 /* Using with EERD */
    849 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    850 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    851 /* Flash */
    852 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    853     unsigned int *);
    854 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    855 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    856 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    857 	uint32_t *);
    858 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    859 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    860 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    861 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    862 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    863 /* iNVM */
    864 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    865 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    866 /* Lock, detecting NVM type, validate checksum and read */
    867 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    868 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    869 static int	wm_nvm_validate_checksum(struct wm_softc *);
    870 static void	wm_nvm_version_invm(struct wm_softc *);
    871 static void	wm_nvm_version(struct wm_softc *);
    872 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    873 
    874 /*
    875  * Hardware semaphores.
    876  * Very complexed...
    877  */
    878 static int	wm_get_null(struct wm_softc *);
    879 static void	wm_put_null(struct wm_softc *);
    880 static int	wm_get_eecd(struct wm_softc *);
    881 static void	wm_put_eecd(struct wm_softc *);
    882 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    883 static void	wm_put_swsm_semaphore(struct wm_softc *);
    884 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    885 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    886 static int	wm_get_nvm_80003(struct wm_softc *);
    887 static void	wm_put_nvm_80003(struct wm_softc *);
    888 static int	wm_get_nvm_82571(struct wm_softc *);
    889 static void	wm_put_nvm_82571(struct wm_softc *);
    890 static int	wm_get_phy_82575(struct wm_softc *);
    891 static void	wm_put_phy_82575(struct wm_softc *);
    892 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    893 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    894 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    895 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    896 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    897 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    898 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    899 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    900 
    901 /*
    902  * Management mode and power management related subroutines.
    903  * BMC, AMT, suspend/resume and EEE.
    904  */
    905 #if 0
    906 static int	wm_check_mng_mode(struct wm_softc *);
    907 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    908 static int	wm_check_mng_mode_82574(struct wm_softc *);
    909 static int	wm_check_mng_mode_generic(struct wm_softc *);
    910 #endif
    911 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    912 static bool	wm_phy_resetisblocked(struct wm_softc *);
    913 static void	wm_get_hw_control(struct wm_softc *);
    914 static void	wm_release_hw_control(struct wm_softc *);
    915 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    916 static void	wm_smbustopci(struct wm_softc *);
    917 static void	wm_init_manageability(struct wm_softc *);
    918 static void	wm_release_manageability(struct wm_softc *);
    919 static void	wm_get_wakeup(struct wm_softc *);
    920 static void	wm_ulp_disable(struct wm_softc *);
    921 static void	wm_enable_phy_wakeup(struct wm_softc *);
    922 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_enable_wakeup(struct wm_softc *);
    924 /* LPLU (Low Power Link Up) */
    925 static void	wm_lplu_d0_disable(struct wm_softc *);
    926 /* EEE */
    927 static void	wm_set_eee_i350(struct wm_softc *);
    928 
    929 /*
    930  * Workarounds (mainly PHY related).
    931  * Basically, PHY's workarounds are in the PHY drivers.
    932  */
    933 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    934 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    935 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    936 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    937 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    938 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    939 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    940 static void	wm_reset_init_script_82575(struct wm_softc *);
    941 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    942 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    943 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    944 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    945 static void	wm_pll_workaround_i210(struct wm_softc *);
    946 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    947 
    948 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    949     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    950 
    951 /*
    952  * Devices supported by this driver.
    953  */
    954 static const struct wm_product {
    955 	pci_vendor_id_t		wmp_vendor;
    956 	pci_product_id_t	wmp_product;
    957 	const char		*wmp_name;
    958 	wm_chip_type		wmp_type;
    959 	uint32_t		wmp_flags;
    960 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    961 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    962 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    963 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    964 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    965 } wm_products[] = {
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    967 	  "Intel i82542 1000BASE-X Ethernet",
    968 	  WM_T_82542_2_1,	WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    971 	  "Intel i82543GC 1000BASE-X Ethernet",
    972 	  WM_T_82543,		WMP_F_FIBER },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    975 	  "Intel i82543GC 1000BASE-T Ethernet",
    976 	  WM_T_82543,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    979 	  "Intel i82544EI 1000BASE-T Ethernet",
    980 	  WM_T_82544,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    983 	  "Intel i82544EI 1000BASE-X Ethernet",
    984 	  WM_T_82544,		WMP_F_FIBER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    987 	  "Intel i82544GC 1000BASE-T Ethernet",
    988 	  WM_T_82544,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    991 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    992 	  WM_T_82544,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    995 	  "Intel i82540EM 1000BASE-T Ethernet",
    996 	  WM_T_82540,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    999 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1000 	  WM_T_82540,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1003 	  "Intel i82540EP 1000BASE-T Ethernet",
   1004 	  WM_T_82540,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1007 	  "Intel i82540EP 1000BASE-T Ethernet",
   1008 	  WM_T_82540,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1011 	  "Intel i82540EP 1000BASE-T Ethernet",
   1012 	  WM_T_82540,		WMP_F_COPPER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1015 	  "Intel i82545EM 1000BASE-T Ethernet",
   1016 	  WM_T_82545,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1019 	  "Intel i82545GM 1000BASE-T Ethernet",
   1020 	  WM_T_82545_3,		WMP_F_COPPER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1023 	  "Intel i82545GM 1000BASE-X Ethernet",
   1024 	  WM_T_82545_3,		WMP_F_FIBER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1027 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1028 	  WM_T_82545_3,		WMP_F_SERDES },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1031 	  "Intel i82546EB 1000BASE-T Ethernet",
   1032 	  WM_T_82546,		WMP_F_COPPER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1035 	  "Intel i82546EB 1000BASE-T Ethernet",
   1036 	  WM_T_82546,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1039 	  "Intel i82545EM 1000BASE-X Ethernet",
   1040 	  WM_T_82545,		WMP_F_FIBER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1043 	  "Intel i82546EB 1000BASE-X Ethernet",
   1044 	  WM_T_82546,		WMP_F_FIBER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1047 	  "Intel i82546GB 1000BASE-T Ethernet",
   1048 	  WM_T_82546_3,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1051 	  "Intel i82546GB 1000BASE-X Ethernet",
   1052 	  WM_T_82546_3,		WMP_F_FIBER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1055 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1056 	  WM_T_82546_3,		WMP_F_SERDES },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1059 	  "i82546GB quad-port Gigabit Ethernet",
   1060 	  WM_T_82546_3,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1063 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1064 	  WM_T_82546_3,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1067 	  "Intel PRO/1000MT (82546GB)",
   1068 	  WM_T_82546_3,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1071 	  "Intel i82541EI 1000BASE-T Ethernet",
   1072 	  WM_T_82541,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1075 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1076 	  WM_T_82541,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1079 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1080 	  WM_T_82541,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1083 	  "Intel i82541ER 1000BASE-T Ethernet",
   1084 	  WM_T_82541_2,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1087 	  "Intel i82541GI 1000BASE-T Ethernet",
   1088 	  WM_T_82541_2,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1091 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1092 	  WM_T_82541_2,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1095 	  "Intel i82541PI 1000BASE-T Ethernet",
   1096 	  WM_T_82541_2,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1099 	  "Intel i82547EI 1000BASE-T Ethernet",
   1100 	  WM_T_82547,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1103 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1104 	  WM_T_82547,		WMP_F_COPPER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1107 	  "Intel i82547GI 1000BASE-T Ethernet",
   1108 	  WM_T_82547_2,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1111 	  "Intel PRO/1000 PT (82571EB)",
   1112 	  WM_T_82571,		WMP_F_COPPER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1115 	  "Intel PRO/1000 PF (82571EB)",
   1116 	  WM_T_82571,		WMP_F_FIBER },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1119 	  "Intel PRO/1000 PB (82571EB)",
   1120 	  WM_T_82571,		WMP_F_SERDES },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1123 	  "Intel PRO/1000 QT (82571EB)",
   1124 	  WM_T_82571,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1127 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1128 	  WM_T_82571,		WMP_F_COPPER, },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1131 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1132 	  WM_T_82571,		WMP_F_COPPER, },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1135 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1136 	  WM_T_82571,		WMP_F_SERDES, },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1139 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1140 	  WM_T_82571,		WMP_F_SERDES, },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1143 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1144 	  WM_T_82571,		WMP_F_FIBER, },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1147 	  "Intel i82572EI 1000baseT Ethernet",
   1148 	  WM_T_82572,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1151 	  "Intel i82572EI 1000baseX Ethernet",
   1152 	  WM_T_82572,		WMP_F_FIBER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1155 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1156 	  WM_T_82572,		WMP_F_SERDES },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1159 	  "Intel i82572EI 1000baseT Ethernet",
   1160 	  WM_T_82572,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1163 	  "Intel i82573E",
   1164 	  WM_T_82573,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1167 	  "Intel i82573E IAMT",
   1168 	  WM_T_82573,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1171 	  "Intel i82573L Gigabit Ethernet",
   1172 	  WM_T_82573,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1175 	  "Intel i82574L",
   1176 	  WM_T_82574,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1179 	  "Intel i82574L",
   1180 	  WM_T_82574,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1183 	  "Intel i82583V",
   1184 	  WM_T_82583,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1187 	  "i80003 dual 1000baseT Ethernet",
   1188 	  WM_T_80003,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1191 	  "i80003 dual 1000baseX Ethernet",
   1192 	  WM_T_80003,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1195 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1196 	  WM_T_80003,		WMP_F_SERDES },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1199 	  "Intel i80003 1000baseT Ethernet",
   1200 	  WM_T_80003,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1203 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1204 	  WM_T_80003,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1207 	  "Intel i82801H (M_AMT) LAN Controller",
   1208 	  WM_T_ICH8,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1210 	  "Intel i82801H (AMT) LAN Controller",
   1211 	  WM_T_ICH8,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1213 	  "Intel i82801H LAN Controller",
   1214 	  WM_T_ICH8,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1216 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1217 	  WM_T_ICH8,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1219 	  "Intel i82801H (M) LAN Controller",
   1220 	  WM_T_ICH8,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1222 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1223 	  WM_T_ICH8,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1225 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1226 	  WM_T_ICH8,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1228 	  "82567V-3 LAN Controller",
   1229 	  WM_T_ICH8,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1231 	  "82801I (AMT) LAN Controller",
   1232 	  WM_T_ICH9,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1234 	  "82801I 10/100 LAN Controller",
   1235 	  WM_T_ICH9,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1237 	  "82801I (G) 10/100 LAN Controller",
   1238 	  WM_T_ICH9,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1240 	  "82801I (GT) 10/100 LAN Controller",
   1241 	  WM_T_ICH9,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1243 	  "82801I (C) LAN Controller",
   1244 	  WM_T_ICH9,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1246 	  "82801I mobile LAN Controller",
   1247 	  WM_T_ICH9,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1249 	  "82801I mobile (V) LAN Controller",
   1250 	  WM_T_ICH9,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1252 	  "82801I mobile (AMT) LAN Controller",
   1253 	  WM_T_ICH9,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1255 	  "82567LM-4 LAN Controller",
   1256 	  WM_T_ICH9,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1258 	  "82567LM-2 LAN Controller",
   1259 	  WM_T_ICH10,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1261 	  "82567LF-2 LAN Controller",
   1262 	  WM_T_ICH10,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1264 	  "82567LM-3 LAN Controller",
   1265 	  WM_T_ICH10,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1267 	  "82567LF-3 LAN Controller",
   1268 	  WM_T_ICH10,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1270 	  "82567V-2 LAN Controller",
   1271 	  WM_T_ICH10,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1273 	  "82567V-3? LAN Controller",
   1274 	  WM_T_ICH10,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1276 	  "HANKSVILLE LAN Controller",
   1277 	  WM_T_ICH10,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1279 	  "PCH LAN (82577LM) Controller",
   1280 	  WM_T_PCH,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1282 	  "PCH LAN (82577LC) Controller",
   1283 	  WM_T_PCH,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1285 	  "PCH LAN (82578DM) Controller",
   1286 	  WM_T_PCH,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1288 	  "PCH LAN (82578DC) Controller",
   1289 	  WM_T_PCH,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1291 	  "PCH2 LAN (82579LM) Controller",
   1292 	  WM_T_PCH2,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1294 	  "PCH2 LAN (82579V) Controller",
   1295 	  WM_T_PCH2,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1297 	  "82575EB dual-1000baseT Ethernet",
   1298 	  WM_T_82575,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1300 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1301 	  WM_T_82575,		WMP_F_SERDES },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1303 	  "82575GB quad-1000baseT Ethernet",
   1304 	  WM_T_82575,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1306 	  "82575GB quad-1000baseT Ethernet (PM)",
   1307 	  WM_T_82575,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1309 	  "82576 1000BaseT Ethernet",
   1310 	  WM_T_82576,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1312 	  "82576 1000BaseX Ethernet",
   1313 	  WM_T_82576,		WMP_F_FIBER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1316 	  "82576 gigabit Ethernet (SERDES)",
   1317 	  WM_T_82576,		WMP_F_SERDES },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1320 	  "82576 quad-1000BaseT Ethernet",
   1321 	  WM_T_82576,		WMP_F_COPPER },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1324 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1325 	  WM_T_82576,		WMP_F_COPPER },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1328 	  "82576 gigabit Ethernet",
   1329 	  WM_T_82576,		WMP_F_COPPER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
   1523 
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
   1546 
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
   1599 
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
   1644 /* Lookup supported device table */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
   1770 	 * Map the device.  All devices support memory-mapped acccess,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
   1813 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
   1814 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
   1815 			 * It's no problem because newer chips has no this
   1816 			 * bug.
   1817 			 *
   1818 			 * The i8254x doesn't apparently respond when the
   1819 			 * I/O BAR is 0, which looks somewhat like it's not
   1820 			 * been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 	/*
   1854 	 *  Don't use MSI-X if we can use only one queue to save interrupt
   1855 	 * resource.
   1856 	 */
   1857 	if (sc->sc_nqueues > 1) {
   1858 		max_type = PCI_INTR_TYPE_MSIX;
   1859 		/*
   1860 		 *  82583 has a MSI-X capability in the PCI configuration space
   1861 		 * but it doesn't support it. At least the document doesn't
   1862 		 * say anything about MSI-X.
   1863 		 */
   1864 		counts[PCI_INTR_TYPE_MSIX]
   1865 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1866 	} else {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
   1871 	/* Allocation settings */
   1872 	counts[PCI_INTR_TYPE_MSI] = 1;
   1873 	counts[PCI_INTR_TYPE_INTX] = 1;
   1874 	/* overridden by disable flags */
   1875 	if (wm_disable_msi != 0) {
   1876 		counts[PCI_INTR_TYPE_MSI] = 0;
   1877 		if (wm_disable_msix != 0) {
   1878 			max_type = PCI_INTR_TYPE_INTX;
   1879 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1880 		}
   1881 	} else if (wm_disable_msix != 0) {
   1882 		max_type = PCI_INTR_TYPE_MSI;
   1883 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1884 	}
   1885 
   1886 alloc_retry:
   1887 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1888 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1889 		return;
   1890 	}
   1891 
   1892 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1893 		error = wm_setup_msix(sc);
   1894 		if (error) {
   1895 			pci_intr_release(pc, sc->sc_intrs,
   1896 			    counts[PCI_INTR_TYPE_MSIX]);
   1897 
   1898 			/* Setup for MSI: Disable MSI-X */
   1899 			max_type = PCI_INTR_TYPE_MSI;
   1900 			counts[PCI_INTR_TYPE_MSI] = 1;
   1901 			counts[PCI_INTR_TYPE_INTX] = 1;
   1902 			goto alloc_retry;
   1903 		}
   1904 	} else 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1905 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1906 		error = wm_setup_legacy(sc);
   1907 		if (error) {
   1908 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1909 			    counts[PCI_INTR_TYPE_MSI]);
   1910 
   1911 			/* The next try is for INTx: Disable MSI */
   1912 			max_type = PCI_INTR_TYPE_INTX;
   1913 			counts[PCI_INTR_TYPE_INTX] = 1;
   1914 			goto alloc_retry;
   1915 		}
   1916 	} else {
   1917 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1918 		error = wm_setup_legacy(sc);
   1919 		if (error) {
   1920 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1921 			    counts[PCI_INTR_TYPE_INTX]);
   1922 			return;
   1923 		}
   1924 	}
   1925 
   1926 	/*
   1927 	 * Check the function ID (unit number of the chip).
   1928 	 */
   1929 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1930 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1931 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1932 	    || (sc->sc_type == WM_T_82580)
   1933 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1934 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1935 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1936 	else
   1937 		sc->sc_funcid = 0;
   1938 
   1939 	/*
   1940 	 * Determine a few things about the bus we're connected to.
   1941 	 */
   1942 	if (sc->sc_type < WM_T_82543) {
   1943 		/* We don't really know the bus characteristics here. */
   1944 		sc->sc_bus_speed = 33;
   1945 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1946 		/*
   1947 		 * CSA (Communication Streaming Architecture) is about as fast
   1948 		 * a 32-bit 66MHz PCI Bus.
   1949 		 */
   1950 		sc->sc_flags |= WM_F_CSA;
   1951 		sc->sc_bus_speed = 66;
   1952 		aprint_verbose_dev(sc->sc_dev,
   1953 		    "Communication Streaming Architecture\n");
   1954 		if (sc->sc_type == WM_T_82547) {
   1955 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1956 			callout_setfunc(&sc->sc_txfifo_ch,
   1957 					wm_82547_txfifo_stall, sc);
   1958 			aprint_verbose_dev(sc->sc_dev,
   1959 			    "using 82547 Tx FIFO stall work-around\n");
   1960 		}
   1961 	} else if (sc->sc_type >= WM_T_82571) {
   1962 		sc->sc_flags |= WM_F_PCIE;
   1963 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1964 		    && (sc->sc_type != WM_T_ICH10)
   1965 		    && (sc->sc_type != WM_T_PCH)
   1966 		    && (sc->sc_type != WM_T_PCH2)
   1967 		    && (sc->sc_type != WM_T_PCH_LPT)
   1968 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1969 			/* ICH* and PCH* have no PCIe capability registers */
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1972 				NULL) == 0)
   1973 				aprint_error_dev(sc->sc_dev,
   1974 				    "unable to find PCIe capability\n");
   1975 		}
   1976 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1977 	} else {
   1978 		reg = CSR_READ(sc, WMREG_STATUS);
   1979 		if (reg & STATUS_BUS64)
   1980 			sc->sc_flags |= WM_F_BUS64;
   1981 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1982 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1983 
   1984 			sc->sc_flags |= WM_F_PCIX;
   1985 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1986 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1987 				aprint_error_dev(sc->sc_dev,
   1988 				    "unable to find PCIX capability\n");
   1989 			else if (sc->sc_type != WM_T_82545_3 &&
   1990 				 sc->sc_type != WM_T_82546_3) {
   1991 				/*
   1992 				 * Work around a problem caused by the BIOS
   1993 				 * setting the max memory read byte count
   1994 				 * incorrectly.
   1995 				 */
   1996 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1997 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1998 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1999 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2000 
   2001 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2002 				    PCIX_CMD_BYTECNT_SHIFT;
   2003 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2004 				    PCIX_STATUS_MAXB_SHIFT;
   2005 				if (bytecnt > maxb) {
   2006 					aprint_verbose_dev(sc->sc_dev,
   2007 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2008 					    512 << bytecnt, 512 << maxb);
   2009 					pcix_cmd = (pcix_cmd &
   2010 					    ~PCIX_CMD_BYTECNT_MASK) |
   2011 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2012 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2013 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2014 					    pcix_cmd);
   2015 				}
   2016 			}
   2017 		}
   2018 		/*
   2019 		 * The quad port adapter is special; it has a PCIX-PCIX
   2020 		 * bridge on the board, and can run the secondary bus at
   2021 		 * a higher speed.
   2022 		 */
   2023 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2024 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2025 								      : 66;
   2026 		} else if (sc->sc_flags & WM_F_PCIX) {
   2027 			switch (reg & STATUS_PCIXSPD_MASK) {
   2028 			case STATUS_PCIXSPD_50_66:
   2029 				sc->sc_bus_speed = 66;
   2030 				break;
   2031 			case STATUS_PCIXSPD_66_100:
   2032 				sc->sc_bus_speed = 100;
   2033 				break;
   2034 			case STATUS_PCIXSPD_100_133:
   2035 				sc->sc_bus_speed = 133;
   2036 				break;
   2037 			default:
   2038 				aprint_error_dev(sc->sc_dev,
   2039 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2040 				    reg & STATUS_PCIXSPD_MASK);
   2041 				sc->sc_bus_speed = 66;
   2042 				break;
   2043 			}
   2044 		} else
   2045 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2046 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2047 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2048 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2049 	}
   2050 
   2051 	/* clear interesting stat counters */
   2052 	CSR_READ(sc, WMREG_COLC);
   2053 	CSR_READ(sc, WMREG_RXERRC);
   2054 
   2055 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2056 	    || (sc->sc_type >= WM_T_ICH8))
   2057 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2058 	if (sc->sc_type >= WM_T_ICH8)
   2059 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2060 
   2061 	/* Set PHY, NVM mutex related stuff */
   2062 	switch (sc->sc_type) {
   2063 	case WM_T_82542_2_0:
   2064 	case WM_T_82542_2_1:
   2065 	case WM_T_82543:
   2066 	case WM_T_82544:
   2067 		/* Microwire */
   2068 		sc->nvm.read = wm_nvm_read_uwire;
   2069 		sc->sc_nvm_wordsize = 64;
   2070 		sc->sc_nvm_addrbits = 6;
   2071 		break;
   2072 	case WM_T_82540:
   2073 	case WM_T_82545:
   2074 	case WM_T_82545_3:
   2075 	case WM_T_82546:
   2076 	case WM_T_82546_3:
   2077 		/* Microwire */
   2078 		sc->nvm.read = wm_nvm_read_uwire;
   2079 		reg = CSR_READ(sc, WMREG_EECD);
   2080 		if (reg & EECD_EE_SIZE) {
   2081 			sc->sc_nvm_wordsize = 256;
   2082 			sc->sc_nvm_addrbits = 8;
   2083 		} else {
   2084 			sc->sc_nvm_wordsize = 64;
   2085 			sc->sc_nvm_addrbits = 6;
   2086 		}
   2087 		sc->sc_flags |= WM_F_LOCK_EECD;
   2088 		sc->nvm.acquire = wm_get_eecd;
   2089 		sc->nvm.release = wm_put_eecd;
   2090 		break;
   2091 	case WM_T_82541:
   2092 	case WM_T_82541_2:
   2093 	case WM_T_82547:
   2094 	case WM_T_82547_2:
   2095 		reg = CSR_READ(sc, WMREG_EECD);
   2096 		/*
   2097 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI in it only
   2098 		 * on 8254[17], so set flags and functios before calling it.
   2099 		 */
   2100 		sc->sc_flags |= WM_F_LOCK_EECD;
   2101 		sc->nvm.acquire = wm_get_eecd;
   2102 		sc->nvm.release = wm_put_eecd;
   2103 		if (reg & EECD_EE_TYPE) {
   2104 			/* SPI */
   2105 			sc->nvm.read = wm_nvm_read_spi;
   2106 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2107 			wm_nvm_set_addrbits_size_eecd(sc);
   2108 		} else {
   2109 			/* Microwire */
   2110 			sc->nvm.read = wm_nvm_read_uwire;
   2111 			if ((reg & EECD_EE_ABITS) != 0) {
   2112 				sc->sc_nvm_wordsize = 256;
   2113 				sc->sc_nvm_addrbits = 8;
   2114 			} else {
   2115 				sc->sc_nvm_wordsize = 64;
   2116 				sc->sc_nvm_addrbits = 6;
   2117 			}
   2118 		}
   2119 		break;
   2120 	case WM_T_82571:
   2121 	case WM_T_82572:
   2122 		/* SPI */
   2123 		sc->nvm.read = wm_nvm_read_eerd;
   2124 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2125 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2126 		wm_nvm_set_addrbits_size_eecd(sc);
   2127 		sc->phy.acquire = wm_get_swsm_semaphore;
   2128 		sc->phy.release = wm_put_swsm_semaphore;
   2129 		sc->nvm.acquire = wm_get_nvm_82571;
   2130 		sc->nvm.release = wm_put_nvm_82571;
   2131 		break;
   2132 	case WM_T_82573:
   2133 	case WM_T_82574:
   2134 	case WM_T_82583:
   2135 		sc->nvm.read = wm_nvm_read_eerd;
   2136 		/* Not use WM_F_LOCK_EECD because we use EERD */
   2137 		if (sc->sc_type == WM_T_82573) {
   2138 			sc->phy.acquire = wm_get_swsm_semaphore;
   2139 			sc->phy.release = wm_put_swsm_semaphore;
   2140 			sc->nvm.acquire = wm_get_nvm_82571;
   2141 			sc->nvm.release = wm_put_nvm_82571;
   2142 		} else {
   2143 			/* Both PHY and NVM use the same semaphore. */
   2144 			sc->phy.acquire = sc->nvm.acquire
   2145 			    = wm_get_swfwhw_semaphore;
   2146 			sc->phy.release = sc->nvm.release
   2147 			    = wm_put_swfwhw_semaphore;
   2148 		}
   2149 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2150 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2151 			sc->sc_nvm_wordsize = 2048;
   2152 		} else {
   2153 			/* SPI */
   2154 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2155 			wm_nvm_set_addrbits_size_eecd(sc);
   2156 		}
   2157 		break;
   2158 	case WM_T_82575:
   2159 	case WM_T_82576:
   2160 	case WM_T_82580:
   2161 	case WM_T_I350:
   2162 	case WM_T_I354:
   2163 	case WM_T_80003:
   2164 		/* SPI */
   2165 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2166 		wm_nvm_set_addrbits_size_eecd(sc);
   2167 		if((sc->sc_type == WM_T_80003)
   2168 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2169 			sc->nvm.read = wm_nvm_read_eerd;
   2170 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2171 		} else {
   2172 			sc->nvm.read = wm_nvm_read_spi;
   2173 			sc->sc_flags |= WM_F_LOCK_EECD;
   2174 		}
   2175 		sc->phy.acquire = wm_get_phy_82575;
   2176 		sc->phy.release = wm_put_phy_82575;
   2177 		sc->nvm.acquire = wm_get_nvm_80003;
   2178 		sc->nvm.release = wm_put_nvm_80003;
   2179 		break;
   2180 	case WM_T_ICH8:
   2181 	case WM_T_ICH9:
   2182 	case WM_T_ICH10:
   2183 	case WM_T_PCH:
   2184 	case WM_T_PCH2:
   2185 	case WM_T_PCH_LPT:
   2186 		sc->nvm.read = wm_nvm_read_ich8;
   2187 		/* FLASH */
   2188 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2189 		sc->sc_nvm_wordsize = 2048;
   2190 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2191 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2192 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2193 			aprint_error_dev(sc->sc_dev,
   2194 			    "can't map FLASH registers\n");
   2195 			goto out;
   2196 		}
   2197 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2198 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2199 		    ICH_FLASH_SECTOR_SIZE;
   2200 		sc->sc_ich8_flash_bank_size =
   2201 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2202 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2203 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2204 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2205 		sc->sc_flashreg_offset = 0;
   2206 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2207 		sc->phy.release = wm_put_swflag_ich8lan;
   2208 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2209 		sc->nvm.release = wm_put_nvm_ich8lan;
   2210 		break;
   2211 	case WM_T_PCH_SPT:
   2212 		sc->nvm.read = wm_nvm_read_spt;
   2213 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2214 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2215 		sc->sc_flasht = sc->sc_st;
   2216 		sc->sc_flashh = sc->sc_sh;
   2217 		sc->sc_ich8_flash_base = 0;
   2218 		sc->sc_nvm_wordsize =
   2219 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2220 			* NVM_SIZE_MULTIPLIER;
   2221 		/* It is size in bytes, we want words */
   2222 		sc->sc_nvm_wordsize /= 2;
   2223 		/* assume 2 banks */
   2224 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2225 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2226 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2227 		sc->phy.release = wm_put_swflag_ich8lan;
   2228 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2229 		sc->nvm.release = wm_put_nvm_ich8lan;
   2230 		break;
   2231 	case WM_T_I210:
   2232 	case WM_T_I211:
   2233 		/* Allow a single clear of the SW semaphore on I210 and newer*/
   2234 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2235 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2236 			sc->nvm.read = wm_nvm_read_eerd;
   2237 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2238 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2239 			wm_nvm_set_addrbits_size_eecd(sc);
   2240 		} else {
   2241 			sc->nvm.read = wm_nvm_read_invm;
   2242 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2243 			sc->sc_nvm_wordsize = INVM_SIZE;
   2244 		}
   2245 		sc->phy.acquire = wm_get_phy_82575;
   2246 		sc->phy.release = wm_put_phy_82575;
   2247 		sc->nvm.acquire = wm_get_nvm_80003;
   2248 		sc->nvm.release = wm_put_nvm_80003;
   2249 		break;
   2250 	default:
   2251 		break;
   2252 	}
   2253 
   2254 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2255 	switch (sc->sc_type) {
   2256 	case WM_T_82571:
   2257 	case WM_T_82572:
   2258 		reg = CSR_READ(sc, WMREG_SWSM2);
   2259 		if ((reg & SWSM2_LOCK) == 0) {
   2260 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2261 			force_clear_smbi = true;
   2262 		} else
   2263 			force_clear_smbi = false;
   2264 		break;
   2265 	case WM_T_82573:
   2266 	case WM_T_82574:
   2267 	case WM_T_82583:
   2268 		force_clear_smbi = true;
   2269 		break;
   2270 	default:
   2271 		force_clear_smbi = false;
   2272 		break;
   2273 	}
   2274 	if (force_clear_smbi) {
   2275 		reg = CSR_READ(sc, WMREG_SWSM);
   2276 		if ((reg & SWSM_SMBI) != 0)
   2277 			aprint_error_dev(sc->sc_dev,
   2278 			    "Please update the Bootagent\n");
   2279 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2280 	}
   2281 
   2282 	/*
   2283 	 * Defer printing the EEPROM type until after verifying the checksum
   2284 	 * This allows the EEPROM type to be printed correctly in the case
   2285 	 * that no EEPROM is attached.
   2286 	 */
   2287 	/*
   2288 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2289 	 * this for later, so we can fail future reads from the EEPROM.
   2290 	 */
   2291 	if (wm_nvm_validate_checksum(sc)) {
   2292 		/*
   2293 		 * Read twice again because some PCI-e parts fail the
   2294 		 * first check due to the link being in sleep state.
   2295 		 */
   2296 		if (wm_nvm_validate_checksum(sc))
   2297 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2298 	}
   2299 
   2300 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2301 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2302 	else {
   2303 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2304 		    sc->sc_nvm_wordsize);
   2305 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2306 			aprint_verbose("iNVM");
   2307 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2308 			aprint_verbose("FLASH(HW)");
   2309 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2310 			aprint_verbose("FLASH");
   2311 		else {
   2312 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2313 				eetype = "SPI";
   2314 			else
   2315 				eetype = "MicroWire";
   2316 			aprint_verbose("(%d address bits) %s EEPROM",
   2317 			    sc->sc_nvm_addrbits, eetype);
   2318 		}
   2319 	}
   2320 	wm_nvm_version(sc);
   2321 	aprint_verbose("\n");
   2322 
   2323 	/*
   2324 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2325 	 * incorrect.
   2326 	 */
   2327 	wm_gmii_setup_phytype(sc, 0, 0);
   2328 
   2329 	/* Reset the chip to a known state. */
   2330 	wm_reset(sc);
   2331 
   2332 	/* Check for I21[01] PLL workaround */
   2333 	if (sc->sc_type == WM_T_I210)
   2334 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2335 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2336 		/* NVM image release 3.25 has a workaround */
   2337 		if ((sc->sc_nvm_ver_major < 3)
   2338 		    || ((sc->sc_nvm_ver_major == 3)
   2339 			&& (sc->sc_nvm_ver_minor < 25))) {
   2340 			aprint_verbose_dev(sc->sc_dev,
   2341 			    "ROM image version %d.%d is older than 3.25\n",
   2342 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2343 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2344 		}
   2345 	}
   2346 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2347 		wm_pll_workaround_i210(sc);
   2348 
   2349 	wm_get_wakeup(sc);
   2350 
   2351 	/* Non-AMT based hardware can now take control from firmware */
   2352 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2353 		wm_get_hw_control(sc);
   2354 
   2355 	/*
   2356 	 * Read the Ethernet address from the EEPROM, if not first found
   2357 	 * in device properties.
   2358 	 */
   2359 	ea = prop_dictionary_get(dict, "mac-address");
   2360 	if (ea != NULL) {
   2361 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2362 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2363 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2364 	} else {
   2365 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2366 			aprint_error_dev(sc->sc_dev,
   2367 			    "unable to read Ethernet address\n");
   2368 			goto out;
   2369 		}
   2370 	}
   2371 
   2372 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2373 	    ether_sprintf(enaddr));
   2374 
   2375 	/*
   2376 	 * Read the config info from the EEPROM, and set up various
   2377 	 * bits in the control registers based on their contents.
   2378 	 */
   2379 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2380 	if (pn != NULL) {
   2381 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2382 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2383 	} else {
   2384 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2385 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2386 			goto out;
   2387 		}
   2388 	}
   2389 
   2390 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2391 	if (pn != NULL) {
   2392 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2393 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2394 	} else {
   2395 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2396 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2397 			goto out;
   2398 		}
   2399 	}
   2400 
   2401 	/* check for WM_F_WOL */
   2402 	switch (sc->sc_type) {
   2403 	case WM_T_82542_2_0:
   2404 	case WM_T_82542_2_1:
   2405 	case WM_T_82543:
   2406 		/* dummy? */
   2407 		eeprom_data = 0;
   2408 		apme_mask = NVM_CFG3_APME;
   2409 		break;
   2410 	case WM_T_82544:
   2411 		apme_mask = NVM_CFG2_82544_APM_EN;
   2412 		eeprom_data = cfg2;
   2413 		break;
   2414 	case WM_T_82546:
   2415 	case WM_T_82546_3:
   2416 	case WM_T_82571:
   2417 	case WM_T_82572:
   2418 	case WM_T_82573:
   2419 	case WM_T_82574:
   2420 	case WM_T_82583:
   2421 	case WM_T_80003:
   2422 	default:
   2423 		apme_mask = NVM_CFG3_APME;
   2424 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2425 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2426 		break;
   2427 	case WM_T_82575:
   2428 	case WM_T_82576:
   2429 	case WM_T_82580:
   2430 	case WM_T_I350:
   2431 	case WM_T_I354: /* XXX ok? */
   2432 	case WM_T_ICH8:
   2433 	case WM_T_ICH9:
   2434 	case WM_T_ICH10:
   2435 	case WM_T_PCH:
   2436 	case WM_T_PCH2:
   2437 	case WM_T_PCH_LPT:
   2438 	case WM_T_PCH_SPT:
   2439 		/* XXX The funcid should be checked on some devices */
   2440 		apme_mask = WUC_APME;
   2441 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2442 		break;
   2443 	}
   2444 
   2445 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2446 	if ((eeprom_data & apme_mask) != 0)
   2447 		sc->sc_flags |= WM_F_WOL;
   2448 
   2449 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2450 		/* Check NVM for autonegotiation */
   2451 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2452 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2453 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2454 		}
   2455 	}
   2456 
   2457 	/*
   2458 	 * XXX need special handling for some multiple port cards
   2459 	 * to disable a paticular port.
   2460 	 */
   2461 
   2462 	if (sc->sc_type >= WM_T_82544) {
   2463 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2464 		if (pn != NULL) {
   2465 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2466 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2467 		} else {
   2468 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2469 				aprint_error_dev(sc->sc_dev,
   2470 				    "unable to read SWDPIN\n");
   2471 				goto out;
   2472 			}
   2473 		}
   2474 	}
   2475 
   2476 	if (cfg1 & NVM_CFG1_ILOS)
   2477 		sc->sc_ctrl |= CTRL_ILOS;
   2478 
   2479 	/*
   2480 	 * XXX
   2481 	 * This code isn't correct because pin 2 and 3 are located
   2482 	 * in different position on newer chips. Check all datasheet.
   2483 	 *
   2484 	 * Until resolve this problem, check if a chip < 82580
   2485 	 */
   2486 	if (sc->sc_type <= WM_T_82580) {
   2487 		if (sc->sc_type >= WM_T_82544) {
   2488 			sc->sc_ctrl |=
   2489 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2490 			    CTRL_SWDPIO_SHIFT;
   2491 			sc->sc_ctrl |=
   2492 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2493 			    CTRL_SWDPINS_SHIFT;
   2494 		} else {
   2495 			sc->sc_ctrl |=
   2496 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2497 			    CTRL_SWDPIO_SHIFT;
   2498 		}
   2499 	}
   2500 
   2501 	/* XXX For other than 82580? */
   2502 	if (sc->sc_type == WM_T_82580) {
   2503 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2504 		if (nvmword & __BIT(13))
   2505 			sc->sc_ctrl |= CTRL_ILOS;
   2506 	}
   2507 
   2508 #if 0
   2509 	if (sc->sc_type >= WM_T_82544) {
   2510 		if (cfg1 & NVM_CFG1_IPS0)
   2511 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2512 		if (cfg1 & NVM_CFG1_IPS1)
   2513 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2514 		sc->sc_ctrl_ext |=
   2515 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2516 		    CTRL_EXT_SWDPIO_SHIFT;
   2517 		sc->sc_ctrl_ext |=
   2518 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2519 		    CTRL_EXT_SWDPINS_SHIFT;
   2520 	} else {
   2521 		sc->sc_ctrl_ext |=
   2522 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2523 		    CTRL_EXT_SWDPIO_SHIFT;
   2524 	}
   2525 #endif
   2526 
   2527 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2528 #if 0
   2529 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2530 #endif
   2531 
   2532 	if (sc->sc_type == WM_T_PCH) {
   2533 		uint16_t val;
   2534 
   2535 		/* Save the NVM K1 bit setting */
   2536 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2537 
   2538 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2539 			sc->sc_nvm_k1_enabled = 1;
   2540 		else
   2541 			sc->sc_nvm_k1_enabled = 0;
   2542 	}
   2543 
   2544 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2545 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2546 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2547 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2548 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2549 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2550 		/* Copper only */
   2551 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2552 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
   2553 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
   2554 	    || (sc->sc_type ==WM_T_I211)) {
   2555 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2556 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2557 		switch (link_mode) {
   2558 		case CTRL_EXT_LINK_MODE_1000KX:
   2559 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2560 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2561 			break;
   2562 		case CTRL_EXT_LINK_MODE_SGMII:
   2563 			if (wm_sgmii_uses_mdio(sc)) {
   2564 				aprint_verbose_dev(sc->sc_dev,
   2565 				    "SGMII(MDIO)\n");
   2566 				sc->sc_flags |= WM_F_SGMII;
   2567 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2568 				break;
   2569 			}
   2570 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2571 			/*FALLTHROUGH*/
   2572 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2573 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2574 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2575 				if (link_mode
   2576 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2577 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2578 					sc->sc_flags |= WM_F_SGMII;
   2579 				} else {
   2580 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2581 					aprint_verbose_dev(sc->sc_dev,
   2582 					    "SERDES\n");
   2583 				}
   2584 				break;
   2585 			}
   2586 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2587 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2588 
   2589 			/* Change current link mode setting */
   2590 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2591 			switch (sc->sc_mediatype) {
   2592 			case WM_MEDIATYPE_COPPER:
   2593 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2594 				break;
   2595 			case WM_MEDIATYPE_SERDES:
   2596 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2597 				break;
   2598 			default:
   2599 				break;
   2600 			}
   2601 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2602 			break;
   2603 		case CTRL_EXT_LINK_MODE_GMII:
   2604 		default:
   2605 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2606 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2607 			break;
   2608 		}
   2609 
   2610 		reg &= ~CTRL_EXT_I2C_ENA;
   2611 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2612 			reg |= CTRL_EXT_I2C_ENA;
   2613 		else
   2614 			reg &= ~CTRL_EXT_I2C_ENA;
   2615 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2616 	} else if (sc->sc_type < WM_T_82543 ||
   2617 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2618 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2619 			aprint_error_dev(sc->sc_dev,
   2620 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2621 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2622 		}
   2623 	} else {
   2624 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2625 			aprint_error_dev(sc->sc_dev,
   2626 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2627 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2628 		}
   2629 	}
   2630 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2631 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2632 
   2633 	/* Set device properties (macflags) */
   2634 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2635 
   2636 	/* Initialize the media structures accordingly. */
   2637 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2638 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2639 	else
   2640 		wm_tbi_mediainit(sc); /* All others */
   2641 
   2642 	ifp = &sc->sc_ethercom.ec_if;
   2643 	xname = device_xname(sc->sc_dev);
   2644 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2645 	ifp->if_softc = sc;
   2646 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2647 #ifdef WM_MPSAFE
   2648 	ifp->if_extflags = IFEF_MPSAFE;
   2649 #endif
   2650 	ifp->if_ioctl = wm_ioctl;
   2651 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2652 		ifp->if_start = wm_nq_start;
   2653 		/*
   2654 		 * When the number of CPUs is one and the controller can use
   2655 		 * MSI-X, wm(4) use MSI-X but *does not* use multiqueue.
   2656 		 * That is, wm(4) use two interrupts, one is used for Tx/Rx
   2657 		 * and the other is used for link status changing.
   2658 		 * In this situation, wm_nq_transmit() is disadvantageous
   2659 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2660 		 */
   2661 		if (wm_is_using_multiqueue(sc))
   2662 			ifp->if_transmit = wm_nq_transmit;
   2663 	} else {
   2664 		ifp->if_start = wm_start;
   2665 		/*
   2666 		 * wm_transmit() has the same disadvantage as wm_transmit().
   2667 		 */
   2668 		if (wm_is_using_multiqueue(sc))
   2669 			ifp->if_transmit = wm_transmit;
   2670 	}
   2671 	ifp->if_watchdog = wm_watchdog;
   2672 	ifp->if_init = wm_init;
   2673 	ifp->if_stop = wm_stop;
   2674 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2675 	IFQ_SET_READY(&ifp->if_snd);
   2676 
   2677 	/* Check for jumbo frame */
   2678 	switch (sc->sc_type) {
   2679 	case WM_T_82573:
   2680 		/* XXX limited to 9234 if ASPM is disabled */
   2681 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2682 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2683 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2684 		break;
   2685 	case WM_T_82571:
   2686 	case WM_T_82572:
   2687 	case WM_T_82574:
   2688 	case WM_T_82583:
   2689 	case WM_T_82575:
   2690 	case WM_T_82576:
   2691 	case WM_T_82580:
   2692 	case WM_T_I350:
   2693 	case WM_T_I354:
   2694 	case WM_T_I210:
   2695 	case WM_T_I211:
   2696 	case WM_T_80003:
   2697 	case WM_T_ICH9:
   2698 	case WM_T_ICH10:
   2699 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2700 	case WM_T_PCH_LPT:
   2701 	case WM_T_PCH_SPT:
   2702 		/* XXX limited to 9234 */
   2703 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2704 		break;
   2705 	case WM_T_PCH:
   2706 		/* XXX limited to 4096 */
   2707 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2708 		break;
   2709 	case WM_T_82542_2_0:
   2710 	case WM_T_82542_2_1:
   2711 	case WM_T_ICH8:
   2712 		/* No support for jumbo frame */
   2713 		break;
   2714 	default:
   2715 		/* ETHER_MAX_LEN_JUMBO */
   2716 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2717 		break;
   2718 	}
   2719 
   2720 	/* If we're a i82543 or greater, we can support VLANs. */
   2721 	if (sc->sc_type >= WM_T_82543)
   2722 		sc->sc_ethercom.ec_capabilities |=
   2723 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2724 
   2725 	/*
   2726 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
   2727 	 * on i82543 and later.
   2728 	 */
   2729 	if (sc->sc_type >= WM_T_82543) {
   2730 		ifp->if_capabilities |=
   2731 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2732 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2733 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2734 		    IFCAP_CSUM_TCPv6_Tx |
   2735 		    IFCAP_CSUM_UDPv6_Tx;
   2736 	}
   2737 
   2738 	/*
   2739 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2740 	 *
   2741 	 *	82541GI (8086:1076) ... no
   2742 	 *	82572EI (8086:10b9) ... yes
   2743 	 */
   2744 	if (sc->sc_type >= WM_T_82571) {
   2745 		ifp->if_capabilities |=
   2746 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2747 	}
   2748 
   2749 	/*
   2750 	 * If we're a i82544 or greater (except i82547), we can do
   2751 	 * TCP segmentation offload.
   2752 	 */
   2753 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2754 		ifp->if_capabilities |= IFCAP_TSOv4;
   2755 	}
   2756 
   2757 	if (sc->sc_type >= WM_T_82571) {
   2758 		ifp->if_capabilities |= IFCAP_TSOv6;
   2759 	}
   2760 
   2761 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2762 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2763 
   2764 #ifdef WM_MPSAFE
   2765 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2766 #else
   2767 	sc->sc_core_lock = NULL;
   2768 #endif
   2769 
   2770 	/* Attach the interface. */
   2771 	error = if_initialize(ifp);
   2772 	if (error != 0) {
   2773 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2774 		    error);
   2775 		return; /* Error */
   2776 	}
   2777 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2778 	ether_ifattach(ifp, enaddr);
   2779 	if_register(ifp);
   2780 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2781 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2782 			  RND_FLAG_DEFAULT);
   2783 
   2784 #ifdef WM_EVENT_COUNTERS
   2785 	/* Attach event counters. */
   2786 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2787 	    NULL, xname, "linkintr");
   2788 
   2789 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2790 	    NULL, xname, "tx_xoff");
   2791 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2792 	    NULL, xname, "tx_xon");
   2793 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2794 	    NULL, xname, "rx_xoff");
   2795 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2796 	    NULL, xname, "rx_xon");
   2797 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2798 	    NULL, xname, "rx_macctl");
   2799 #endif /* WM_EVENT_COUNTERS */
   2800 
   2801 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2802 		pmf_class_network_register(self, ifp);
   2803 	else
   2804 		aprint_error_dev(self, "couldn't establish power handler\n");
   2805 
   2806 	sc->sc_flags |= WM_F_ATTACHED;
   2807  out:
   2808 	return;
   2809 }
   2810 
   2811 /* The detach function (ca_detach) */
   2812 static int
   2813 wm_detach(device_t self, int flags __unused)
   2814 {
   2815 	struct wm_softc *sc = device_private(self);
   2816 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2817 	int i;
   2818 
   2819 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2820 		return 0;
   2821 
   2822 	/* Stop the interface. Callouts are stopped in it. */
   2823 	wm_stop(ifp, 1);
   2824 
   2825 	pmf_device_deregister(self);
   2826 
   2827 #ifdef WM_EVENT_COUNTERS
   2828 	evcnt_detach(&sc->sc_ev_linkintr);
   2829 
   2830 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2831 	evcnt_detach(&sc->sc_ev_tx_xon);
   2832 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2833 	evcnt_detach(&sc->sc_ev_rx_xon);
   2834 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2835 #endif /* WM_EVENT_COUNTERS */
   2836 
   2837 	/* Tell the firmware about the release */
   2838 	WM_CORE_LOCK(sc);
   2839 	wm_release_manageability(sc);
   2840 	wm_release_hw_control(sc);
   2841 	wm_enable_wakeup(sc);
   2842 	WM_CORE_UNLOCK(sc);
   2843 
   2844 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2845 
   2846 	/* Delete all remaining media. */
   2847 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2848 
   2849 	ether_ifdetach(ifp);
   2850 	if_detach(ifp);
   2851 	if_percpuq_destroy(sc->sc_ipq);
   2852 
   2853 	/* Unload RX dmamaps and free mbufs */
   2854 	for (i = 0; i < sc->sc_nqueues; i++) {
   2855 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2856 		mutex_enter(rxq->rxq_lock);
   2857 		wm_rxdrain(rxq);
   2858 		mutex_exit(rxq->rxq_lock);
   2859 	}
   2860 	/* Must unlock here */
   2861 
   2862 	/* Disestablish the interrupt handler */
   2863 	for (i = 0; i < sc->sc_nintrs; i++) {
   2864 		if (sc->sc_ihs[i] != NULL) {
   2865 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2866 			sc->sc_ihs[i] = NULL;
   2867 		}
   2868 	}
   2869 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2870 
   2871 	wm_free_txrx_queues(sc);
   2872 
   2873 	/* Unmap the registers */
   2874 	if (sc->sc_ss) {
   2875 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2876 		sc->sc_ss = 0;
   2877 	}
   2878 	if (sc->sc_ios) {
   2879 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2880 		sc->sc_ios = 0;
   2881 	}
   2882 	if (sc->sc_flashs) {
   2883 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2884 		sc->sc_flashs = 0;
   2885 	}
   2886 
   2887 	if (sc->sc_core_lock)
   2888 		mutex_obj_free(sc->sc_core_lock);
   2889 	if (sc->sc_ich_phymtx)
   2890 		mutex_obj_free(sc->sc_ich_phymtx);
   2891 	if (sc->sc_ich_nvmmtx)
   2892 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2893 
   2894 	return 0;
   2895 }
   2896 
   2897 static bool
   2898 wm_suspend(device_t self, const pmf_qual_t *qual)
   2899 {
   2900 	struct wm_softc *sc = device_private(self);
   2901 
   2902 	wm_release_manageability(sc);
   2903 	wm_release_hw_control(sc);
   2904 	wm_enable_wakeup(sc);
   2905 
   2906 	return true;
   2907 }
   2908 
   2909 static bool
   2910 wm_resume(device_t self, const pmf_qual_t *qual)
   2911 {
   2912 	struct wm_softc *sc = device_private(self);
   2913 
   2914 	wm_init_manageability(sc);
   2915 
   2916 	return true;
   2917 }
   2918 
   2919 /*
   2920  * wm_watchdog:		[ifnet interface function]
   2921  *
   2922  *	Watchdog timer handler.
   2923  */
   2924 static void
   2925 wm_watchdog(struct ifnet *ifp)
   2926 {
   2927 	int qid;
   2928 	struct wm_softc *sc = ifp->if_softc;
   2929 
   2930 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2931 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2932 
   2933 		wm_watchdog_txq(ifp, txq);
   2934 	}
   2935 
   2936 	/* Reset the interface. */
   2937 	(void) wm_init(ifp);
   2938 
   2939 	/*
   2940 	 * There are still some upper layer processing which call
   2941 	 * ifp->if_start(). e.g. ALTQ or one CPU system
   2942 	 */
   2943 	/* Try to get more packets going. */
   2944 	ifp->if_start(ifp);
   2945 }
   2946 
   2947 static void
   2948 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2949 {
   2950 	struct wm_softc *sc = ifp->if_softc;
   2951 
   2952 	/*
   2953 	 * Since we're using delayed interrupts, sweep up
   2954 	 * before we report an error.
   2955 	 */
   2956 	mutex_enter(txq->txq_lock);
   2957 	wm_txeof(sc, txq);
   2958 	mutex_exit(txq->txq_lock);
   2959 
   2960 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2961 #ifdef WM_DEBUG
   2962 		int i, j;
   2963 		struct wm_txsoft *txs;
   2964 #endif
   2965 		log(LOG_ERR,
   2966 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2967 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2968 		    txq->txq_next);
   2969 		ifp->if_oerrors++;
   2970 #ifdef WM_DEBUG
   2971 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2972 		    i = WM_NEXTTXS(txq, i)) {
   2973 		    txs = &txq->txq_soft[i];
   2974 		    printf("txs %d tx %d -> %d\n",
   2975 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2976 		    for (j = txs->txs_firstdesc; ;
   2977 			j = WM_NEXTTX(txq, j)) {
   2978 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2979 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2980 			printf("\t %#08x%08x\n",
   2981 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2982 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2983 			if (j == txs->txs_lastdesc)
   2984 				break;
   2985 			}
   2986 		}
   2987 #endif
   2988 	}
   2989 }
   2990 
   2991 /*
   2992  * wm_tick:
   2993  *
   2994  *	One second timer, used to check link status, sweep up
   2995  *	completed transmit jobs, etc.
   2996  */
   2997 static void
   2998 wm_tick(void *arg)
   2999 {
   3000 	struct wm_softc *sc = arg;
   3001 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3002 #ifndef WM_MPSAFE
   3003 	int s = splnet();
   3004 #endif
   3005 
   3006 	WM_CORE_LOCK(sc);
   3007 
   3008 	if (sc->sc_core_stopping)
   3009 		goto out;
   3010 
   3011 	if (sc->sc_type >= WM_T_82542_2_1) {
   3012 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3013 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3014 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3015 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3016 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3017 	}
   3018 
   3019 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3020 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3021 	    + CSR_READ(sc, WMREG_CRCERRS)
   3022 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3023 	    + CSR_READ(sc, WMREG_SYMERRC)
   3024 	    + CSR_READ(sc, WMREG_RXERRC)
   3025 	    + CSR_READ(sc, WMREG_SEC)
   3026 	    + CSR_READ(sc, WMREG_CEXTERR)
   3027 	    + CSR_READ(sc, WMREG_RLEC);
   3028 	/*
   3029 	 * WMREG_RNBC is incremented when there is no available buffers in host
   3030 	 * memory. It does not mean the number of dropped packet. Because
   3031 	 * ethernet controller can receive packets in such case if there is
   3032 	 * space in phy's FIFO.
   3033 	 *
   3034 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
   3035 	 * own EVCNT instead of if_iqdrops.
   3036 	 */
   3037 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3038 
   3039 	if (sc->sc_flags & WM_F_HAS_MII)
   3040 		mii_tick(&sc->sc_mii);
   3041 	else if ((sc->sc_type >= WM_T_82575)
   3042 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3043 		wm_serdes_tick(sc);
   3044 	else
   3045 		wm_tbi_tick(sc);
   3046 
   3047 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3048 out:
   3049 	WM_CORE_UNLOCK(sc);
   3050 #ifndef WM_MPSAFE
   3051 	splx(s);
   3052 #endif
   3053 }
   3054 
   3055 static int
   3056 wm_ifflags_cb(struct ethercom *ec)
   3057 {
   3058 	struct ifnet *ifp = &ec->ec_if;
   3059 	struct wm_softc *sc = ifp->if_softc;
   3060 	int rc = 0;
   3061 
   3062 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3063 		device_xname(sc->sc_dev), __func__));
   3064 
   3065 	WM_CORE_LOCK(sc);
   3066 
   3067 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3068 	sc->sc_if_flags = ifp->if_flags;
   3069 
   3070 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3071 		rc = ENETRESET;
   3072 		goto out;
   3073 	}
   3074 
   3075 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3076 		wm_set_filter(sc);
   3077 
   3078 	wm_set_vlan(sc);
   3079 
   3080 out:
   3081 	WM_CORE_UNLOCK(sc);
   3082 
   3083 	return rc;
   3084 }
   3085 
   3086 /*
   3087  * wm_ioctl:		[ifnet interface function]
   3088  *
   3089  *	Handle control requests from the operator.
   3090  */
   3091 static int
   3092 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3093 {
   3094 	struct wm_softc *sc = ifp->if_softc;
   3095 	struct ifreq *ifr = (struct ifreq *) data;
   3096 	struct ifaddr *ifa = (struct ifaddr *)data;
   3097 	struct sockaddr_dl *sdl;
   3098 	int s, error;
   3099 
   3100 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3101 		device_xname(sc->sc_dev), __func__));
   3102 
   3103 #ifndef WM_MPSAFE
   3104 	s = splnet();
   3105 #endif
   3106 	switch (cmd) {
   3107 	case SIOCSIFMEDIA:
   3108 	case SIOCGIFMEDIA:
   3109 		WM_CORE_LOCK(sc);
   3110 		/* Flow control requires full-duplex mode. */
   3111 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3112 		    (ifr->ifr_media & IFM_FDX) == 0)
   3113 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3114 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3115 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3116 				/* We can do both TXPAUSE and RXPAUSE. */
   3117 				ifr->ifr_media |=
   3118 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3119 			}
   3120 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3121 		}
   3122 		WM_CORE_UNLOCK(sc);
   3123 #ifdef WM_MPSAFE
   3124 		s = splnet();
   3125 #endif
   3126 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3127 #ifdef WM_MPSAFE
   3128 		splx(s);
   3129 #endif
   3130 		break;
   3131 	case SIOCINITIFADDR:
   3132 		WM_CORE_LOCK(sc);
   3133 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3134 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3135 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3136 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3137 			/* unicast address is first multicast entry */
   3138 			wm_set_filter(sc);
   3139 			error = 0;
   3140 			WM_CORE_UNLOCK(sc);
   3141 			break;
   3142 		}
   3143 		WM_CORE_UNLOCK(sc);
   3144 		/*FALLTHROUGH*/
   3145 	default:
   3146 #ifdef WM_MPSAFE
   3147 		s = splnet();
   3148 #endif
   3149 		/* It may call wm_start, so unlock here */
   3150 		error = ether_ioctl(ifp, cmd, data);
   3151 #ifdef WM_MPSAFE
   3152 		splx(s);
   3153 #endif
   3154 		if (error != ENETRESET)
   3155 			break;
   3156 
   3157 		error = 0;
   3158 
   3159 		if (cmd == SIOCSIFCAP) {
   3160 			error = (*ifp->if_init)(ifp);
   3161 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3162 			;
   3163 		else if (ifp->if_flags & IFF_RUNNING) {
   3164 			/*
   3165 			 * Multicast list has changed; set the hardware filter
   3166 			 * accordingly.
   3167 			 */
   3168 			WM_CORE_LOCK(sc);
   3169 			wm_set_filter(sc);
   3170 			WM_CORE_UNLOCK(sc);
   3171 		}
   3172 		break;
   3173 	}
   3174 
   3175 #ifndef WM_MPSAFE
   3176 	splx(s);
   3177 #endif
   3178 	return error;
   3179 }
   3180 
   3181 /* MAC address related */
   3182 
   3183 /*
   3184  * Get the offset of MAC address and return it.
   3185  * If error occured, use offset 0.
   3186  */
   3187 static uint16_t
   3188 wm_check_alt_mac_addr(struct wm_softc *sc)
   3189 {
   3190 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3191 	uint16_t offset = NVM_OFF_MACADDR;
   3192 
   3193 	/* Try to read alternative MAC address pointer */
   3194 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3195 		return 0;
   3196 
   3197 	/* Check pointer if it's valid or not. */
   3198 	if ((offset == 0x0000) || (offset == 0xffff))
   3199 		return 0;
   3200 
   3201 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3202 	/*
   3203 	 * Check whether alternative MAC address is valid or not.
   3204 	 * Some cards have non 0xffff pointer but those don't use
   3205 	 * alternative MAC address in reality.
   3206 	 *
   3207 	 * Check whether the broadcast bit is set or not.
   3208 	 */
   3209 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3210 		if (((myea[0] & 0xff) & 0x01) == 0)
   3211 			return offset; /* Found */
   3212 
   3213 	/* Not found */
   3214 	return 0;
   3215 }
   3216 
   3217 static int
   3218 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3219 {
   3220 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3221 	uint16_t offset = NVM_OFF_MACADDR;
   3222 	int do_invert = 0;
   3223 
   3224 	switch (sc->sc_type) {
   3225 	case WM_T_82580:
   3226 	case WM_T_I350:
   3227 	case WM_T_I354:
   3228 		/* EEPROM Top Level Partitioning */
   3229 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3230 		break;
   3231 	case WM_T_82571:
   3232 	case WM_T_82575:
   3233 	case WM_T_82576:
   3234 	case WM_T_80003:
   3235 	case WM_T_I210:
   3236 	case WM_T_I211:
   3237 		offset = wm_check_alt_mac_addr(sc);
   3238 		if (offset == 0)
   3239 			if ((sc->sc_funcid & 0x01) == 1)
   3240 				do_invert = 1;
   3241 		break;
   3242 	default:
   3243 		if ((sc->sc_funcid & 0x01) == 1)
   3244 			do_invert = 1;
   3245 		break;
   3246 	}
   3247 
   3248 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3249 		goto bad;
   3250 
   3251 	enaddr[0] = myea[0] & 0xff;
   3252 	enaddr[1] = myea[0] >> 8;
   3253 	enaddr[2] = myea[1] & 0xff;
   3254 	enaddr[3] = myea[1] >> 8;
   3255 	enaddr[4] = myea[2] & 0xff;
   3256 	enaddr[5] = myea[2] >> 8;
   3257 
   3258 	/*
   3259 	 * Toggle the LSB of the MAC address on the second port
   3260 	 * of some dual port cards.
   3261 	 */
   3262 	if (do_invert != 0)
   3263 		enaddr[5] ^= 1;
   3264 
   3265 	return 0;
   3266 
   3267  bad:
   3268 	return -1;
   3269 }
   3270 
   3271 /*
   3272  * wm_set_ral:
   3273  *
   3274  *	Set an entery in the receive address list.
   3275  */
   3276 static void
   3277 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3278 {
   3279 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3280 	uint32_t wlock_mac;
   3281 	int rv;
   3282 
   3283 	if (enaddr != NULL) {
   3284 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3285 		    (enaddr[3] << 24);
   3286 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3287 		ral_hi |= RAL_AV;
   3288 	} else {
   3289 		ral_lo = 0;
   3290 		ral_hi = 0;
   3291 	}
   3292 
   3293 	switch (sc->sc_type) {
   3294 	case WM_T_82542_2_0:
   3295 	case WM_T_82542_2_1:
   3296 	case WM_T_82543:
   3297 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3298 		CSR_WRITE_FLUSH(sc);
   3299 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3300 		CSR_WRITE_FLUSH(sc);
   3301 		break;
   3302 	case WM_T_PCH2:
   3303 	case WM_T_PCH_LPT:
   3304 	case WM_T_PCH_SPT:
   3305 		if (idx == 0) {
   3306 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3307 			CSR_WRITE_FLUSH(sc);
   3308 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3309 			CSR_WRITE_FLUSH(sc);
   3310 			return;
   3311 		}
   3312 		if (sc->sc_type != WM_T_PCH2) {
   3313 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3314 			    FWSM_WLOCK_MAC);
   3315 			addrl = WMREG_SHRAL(idx - 1);
   3316 			addrh = WMREG_SHRAH(idx - 1);
   3317 		} else {
   3318 			wlock_mac = 0;
   3319 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3320 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3321 		}
   3322 
   3323 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3324 			rv = wm_get_swflag_ich8lan(sc);
   3325 			if (rv != 0)
   3326 				return;
   3327 			CSR_WRITE(sc, addrl, ral_lo);
   3328 			CSR_WRITE_FLUSH(sc);
   3329 			CSR_WRITE(sc, addrh, ral_hi);
   3330 			CSR_WRITE_FLUSH(sc);
   3331 			wm_put_swflag_ich8lan(sc);
   3332 		}
   3333 
   3334 		break;
   3335 	default:
   3336 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3337 		CSR_WRITE_FLUSH(sc);
   3338 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3339 		CSR_WRITE_FLUSH(sc);
   3340 		break;
   3341 	}
   3342 }
   3343 
   3344 /*
   3345  * wm_mchash:
   3346  *
   3347  *	Compute the hash of the multicast address for the 4096-bit
   3348  *	multicast filter.
   3349  */
   3350 static uint32_t
   3351 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3352 {
   3353 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3354 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3355 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3356 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3357 	uint32_t hash;
   3358 
   3359 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3360 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3361 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3362 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3363 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3364 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3365 		return (hash & 0x3ff);
   3366 	}
   3367 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3368 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3369 
   3370 	return (hash & 0xfff);
   3371 }
   3372 
   3373 /*
   3374  * wm_set_filter:
   3375  *
   3376  *	Set up the receive filter.
   3377  */
   3378 static void
   3379 wm_set_filter(struct wm_softc *sc)
   3380 {
   3381 	struct ethercom *ec = &sc->sc_ethercom;
   3382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3383 	struct ether_multi *enm;
   3384 	struct ether_multistep step;
   3385 	bus_addr_t mta_reg;
   3386 	uint32_t hash, reg, bit;
   3387 	int i, size, ralmax;
   3388 
   3389 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3390 		device_xname(sc->sc_dev), __func__));
   3391 
   3392 	if (sc->sc_type >= WM_T_82544)
   3393 		mta_reg = WMREG_CORDOVA_MTA;
   3394 	else
   3395 		mta_reg = WMREG_MTA;
   3396 
   3397 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3398 
   3399 	if (ifp->if_flags & IFF_BROADCAST)
   3400 		sc->sc_rctl |= RCTL_BAM;
   3401 	if (ifp->if_flags & IFF_PROMISC) {
   3402 		sc->sc_rctl |= RCTL_UPE;
   3403 		goto allmulti;
   3404 	}
   3405 
   3406 	/*
   3407 	 * Set the station address in the first RAL slot, and
   3408 	 * clear the remaining slots.
   3409 	 */
   3410 	if (sc->sc_type == WM_T_ICH8)
   3411 		size = WM_RAL_TABSIZE_ICH8 -1;
   3412 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3413 	    || (sc->sc_type == WM_T_PCH))
   3414 		size = WM_RAL_TABSIZE_ICH8;
   3415 	else if (sc->sc_type == WM_T_PCH2)
   3416 		size = WM_RAL_TABSIZE_PCH2;
   3417 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3418 		size = WM_RAL_TABSIZE_PCH_LPT;
   3419 	else if (sc->sc_type == WM_T_82575)
   3420 		size = WM_RAL_TABSIZE_82575;
   3421 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3422 		size = WM_RAL_TABSIZE_82576;
   3423 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3424 		size = WM_RAL_TABSIZE_I350;
   3425 	else
   3426 		size = WM_RAL_TABSIZE;
   3427 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3428 
   3429 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3430 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3431 		switch (i) {
   3432 		case 0:
   3433 			/* We can use all entries */
   3434 			ralmax = size;
   3435 			break;
   3436 		case 1:
   3437 			/* Only RAR[0] */
   3438 			ralmax = 1;
   3439 			break;
   3440 		default:
   3441 			/* available SHRA + RAR[0] */
   3442 			ralmax = i + 1;
   3443 		}
   3444 	} else
   3445 		ralmax = size;
   3446 	for (i = 1; i < size; i++) {
   3447 		if (i < ralmax)
   3448 			wm_set_ral(sc, NULL, i);
   3449 	}
   3450 
   3451 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3452 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3453 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3454 	    || (sc->sc_type == WM_T_PCH_SPT))
   3455 		size = WM_ICH8_MC_TABSIZE;
   3456 	else
   3457 		size = WM_MC_TABSIZE;
   3458 	/* Clear out the multicast table. */
   3459 	for (i = 0; i < size; i++) {
   3460 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3461 		CSR_WRITE_FLUSH(sc);
   3462 	}
   3463 
   3464 	ETHER_LOCK(ec);
   3465 	ETHER_FIRST_MULTI(step, ec, enm);
   3466 	while (enm != NULL) {
   3467 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3468 			ETHER_UNLOCK(ec);
   3469 			/*
   3470 			 * We must listen to a range of multicast addresses.
   3471 			 * For now, just accept all multicasts, rather than
   3472 			 * trying to set only those filter bits needed to match
   3473 			 * the range.  (At this time, the only use of address
   3474 			 * ranges is for IP multicast routing, for which the
   3475 			 * range is big enough to require all bits set.)
   3476 			 */
   3477 			goto allmulti;
   3478 		}
   3479 
   3480 		hash = wm_mchash(sc, enm->enm_addrlo);
   3481 
   3482 		reg = (hash >> 5);
   3483 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3484 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3485 		    || (sc->sc_type == WM_T_PCH2)
   3486 		    || (sc->sc_type == WM_T_PCH_LPT)
   3487 		    || (sc->sc_type == WM_T_PCH_SPT))
   3488 			reg &= 0x1f;
   3489 		else
   3490 			reg &= 0x7f;
   3491 		bit = hash & 0x1f;
   3492 
   3493 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3494 		hash |= 1U << bit;
   3495 
   3496 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3497 			/*
   3498 			 * 82544 Errata 9: Certain register cannot be written
   3499 			 * with particular alignments in PCI-X bus operation
   3500 			 * (FCAH, MTA and VFTA).
   3501 			 */
   3502 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3503 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3504 			CSR_WRITE_FLUSH(sc);
   3505 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3506 			CSR_WRITE_FLUSH(sc);
   3507 		} else {
   3508 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3509 			CSR_WRITE_FLUSH(sc);
   3510 		}
   3511 
   3512 		ETHER_NEXT_MULTI(step, enm);
   3513 	}
   3514 	ETHER_UNLOCK(ec);
   3515 
   3516 	ifp->if_flags &= ~IFF_ALLMULTI;
   3517 	goto setit;
   3518 
   3519  allmulti:
   3520 	ifp->if_flags |= IFF_ALLMULTI;
   3521 	sc->sc_rctl |= RCTL_MPE;
   3522 
   3523  setit:
   3524 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3525 }
   3526 
   3527 /* Reset and init related */
   3528 
   3529 static void
   3530 wm_set_vlan(struct wm_softc *sc)
   3531 {
   3532 
   3533 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3534 		device_xname(sc->sc_dev), __func__));
   3535 
   3536 	/* Deal with VLAN enables. */
   3537 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3538 		sc->sc_ctrl |= CTRL_VME;
   3539 	else
   3540 		sc->sc_ctrl &= ~CTRL_VME;
   3541 
   3542 	/* Write the control registers. */
   3543 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3544 }
   3545 
   3546 static void
   3547 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3548 {
   3549 	uint32_t gcr;
   3550 	pcireg_t ctrl2;
   3551 
   3552 	gcr = CSR_READ(sc, WMREG_GCR);
   3553 
   3554 	/* Only take action if timeout value is defaulted to 0 */
   3555 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3556 		goto out;
   3557 
   3558 	if ((gcr & GCR_CAP_VER2) == 0) {
   3559 		gcr |= GCR_CMPL_TMOUT_10MS;
   3560 		goto out;
   3561 	}
   3562 
   3563 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3564 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3565 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3566 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3567 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3568 
   3569 out:
   3570 	/* Disable completion timeout resend */
   3571 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3572 
   3573 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3574 }
   3575 
   3576 void
   3577 wm_get_auto_rd_done(struct wm_softc *sc)
   3578 {
   3579 	int i;
   3580 
   3581 	/* wait for eeprom to reload */
   3582 	switch (sc->sc_type) {
   3583 	case WM_T_82571:
   3584 	case WM_T_82572:
   3585 	case WM_T_82573:
   3586 	case WM_T_82574:
   3587 	case WM_T_82583:
   3588 	case WM_T_82575:
   3589 	case WM_T_82576:
   3590 	case WM_T_82580:
   3591 	case WM_T_I350:
   3592 	case WM_T_I354:
   3593 	case WM_T_I210:
   3594 	case WM_T_I211:
   3595 	case WM_T_80003:
   3596 	case WM_T_ICH8:
   3597 	case WM_T_ICH9:
   3598 		for (i = 0; i < 10; i++) {
   3599 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3600 				break;
   3601 			delay(1000);
   3602 		}
   3603 		if (i == 10) {
   3604 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3605 			    "complete\n", device_xname(sc->sc_dev));
   3606 		}
   3607 		break;
   3608 	default:
   3609 		break;
   3610 	}
   3611 }
   3612 
   3613 void
   3614 wm_lan_init_done(struct wm_softc *sc)
   3615 {
   3616 	uint32_t reg = 0;
   3617 	int i;
   3618 
   3619 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3620 		device_xname(sc->sc_dev), __func__));
   3621 
   3622 	/* Wait for eeprom to reload */
   3623 	switch (sc->sc_type) {
   3624 	case WM_T_ICH10:
   3625 	case WM_T_PCH:
   3626 	case WM_T_PCH2:
   3627 	case WM_T_PCH_LPT:
   3628 	case WM_T_PCH_SPT:
   3629 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3630 			reg = CSR_READ(sc, WMREG_STATUS);
   3631 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3632 				break;
   3633 			delay(100);
   3634 		}
   3635 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3636 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3637 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3638 		}
   3639 		break;
   3640 	default:
   3641 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3642 		    __func__);
   3643 		break;
   3644 	}
   3645 
   3646 	reg &= ~STATUS_LAN_INIT_DONE;
   3647 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3648 }
   3649 
   3650 void
   3651 wm_get_cfg_done(struct wm_softc *sc)
   3652 {
   3653 	int mask;
   3654 	uint32_t reg;
   3655 	int i;
   3656 
   3657 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3658 		device_xname(sc->sc_dev), __func__));
   3659 
   3660 	/* Wait for eeprom to reload */
   3661 	switch (sc->sc_type) {
   3662 	case WM_T_82542_2_0:
   3663 	case WM_T_82542_2_1:
   3664 		/* null */
   3665 		break;
   3666 	case WM_T_82543:
   3667 	case WM_T_82544:
   3668 	case WM_T_82540:
   3669 	case WM_T_82545:
   3670 	case WM_T_82545_3:
   3671 	case WM_T_82546:
   3672 	case WM_T_82546_3:
   3673 	case WM_T_82541:
   3674 	case WM_T_82541_2:
   3675 	case WM_T_82547:
   3676 	case WM_T_82547_2:
   3677 	case WM_T_82573:
   3678 	case WM_T_82574:
   3679 	case WM_T_82583:
   3680 		/* generic */
   3681 		delay(10*1000);
   3682 		break;
   3683 	case WM_T_80003:
   3684 	case WM_T_82571:
   3685 	case WM_T_82572:
   3686 	case WM_T_82575:
   3687 	case WM_T_82576:
   3688 	case WM_T_82580:
   3689 	case WM_T_I350:
   3690 	case WM_T_I354:
   3691 	case WM_T_I210:
   3692 	case WM_T_I211:
   3693 		if (sc->sc_type == WM_T_82571) {
   3694 			/* Only 82571 shares port 0 */
   3695 			mask = EEMNGCTL_CFGDONE_0;
   3696 		} else
   3697 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3698 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3699 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3700 				break;
   3701 			delay(1000);
   3702 		}
   3703 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3704 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3705 				device_xname(sc->sc_dev), __func__));
   3706 		}
   3707 		break;
   3708 	case WM_T_ICH8:
   3709 	case WM_T_ICH9:
   3710 	case WM_T_ICH10:
   3711 	case WM_T_PCH:
   3712 	case WM_T_PCH2:
   3713 	case WM_T_PCH_LPT:
   3714 	case WM_T_PCH_SPT:
   3715 		delay(10*1000);
   3716 		if (sc->sc_type >= WM_T_ICH10)
   3717 			wm_lan_init_done(sc);
   3718 		else
   3719 			wm_get_auto_rd_done(sc);
   3720 
   3721 		reg = CSR_READ(sc, WMREG_STATUS);
   3722 		if ((reg & STATUS_PHYRA) != 0)
   3723 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3724 		break;
   3725 	default:
   3726 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3727 		    __func__);
   3728 		break;
   3729 	}
   3730 }
   3731 
   3732 void
   3733 wm_phy_post_reset(struct wm_softc *sc)
   3734 {
   3735 	uint32_t reg;
   3736 
   3737 	/* This function is only for ICH8 and newer. */
   3738 	if (sc->sc_type < WM_T_ICH8)
   3739 		return;
   3740 
   3741 	if (wm_phy_resetisblocked(sc)) {
   3742 		/* XXX */
   3743 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3744 		return;
   3745 	}
   3746 
   3747 	/* Allow time for h/w to get to quiescent state after reset */
   3748 	delay(10*1000);
   3749 
   3750 	/* Perform any necessary post-reset workarounds */
   3751 	if (sc->sc_type == WM_T_PCH)
   3752 		wm_hv_phy_workaround_ich8lan(sc);
   3753 	if (sc->sc_type == WM_T_PCH2)
   3754 		wm_lv_phy_workaround_ich8lan(sc);
   3755 
   3756 	/* Clear the host wakeup bit after lcd reset */
   3757 	if (sc->sc_type >= WM_T_PCH) {
   3758 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3759 		    BM_PORT_GEN_CFG);
   3760 		reg &= ~BM_WUC_HOST_WU_BIT;
   3761 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3762 		    BM_PORT_GEN_CFG, reg);
   3763 	}
   3764 
   3765 	/* Configure the LCD with the extended configuration region in NVM */
   3766 	wm_init_lcd_from_nvm(sc);
   3767 
   3768 	/* Configure the LCD with the OEM bits in NVM */
   3769 }
   3770 
   3771 /* Only for PCH and newer */
   3772 static void
   3773 wm_write_smbus_addr(struct wm_softc *sc)
   3774 {
   3775 	uint32_t strap, freq;
   3776 	uint32_t phy_data;
   3777 
   3778 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3779 		device_xname(sc->sc_dev), __func__));
   3780 
   3781 	strap = CSR_READ(sc, WMREG_STRAP);
   3782 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3783 
   3784 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3785 
   3786 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3787 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3788 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3789 
   3790 	if (sc->sc_phytype == WMPHY_I217) {
   3791 		/* Restore SMBus frequency */
   3792 		if (freq --) {
   3793 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3794 			    | HV_SMB_ADDR_FREQ_HIGH);
   3795 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3796 			    HV_SMB_ADDR_FREQ_LOW);
   3797 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3798 			    HV_SMB_ADDR_FREQ_HIGH);
   3799 		} else {
   3800 			DPRINTF(WM_DEBUG_INIT,
   3801 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3802 				device_xname(sc->sc_dev), __func__));
   3803 		}
   3804 	}
   3805 
   3806 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3807 }
   3808 
   3809 void
   3810 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3811 {
   3812 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3813 	uint16_t phy_page = 0;
   3814 
   3815 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3816 		device_xname(sc->sc_dev), __func__));
   3817 
   3818 	switch (sc->sc_type) {
   3819 	case WM_T_ICH8:
   3820 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3821 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3822 			return;
   3823 
   3824 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3825 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3826 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3827 			break;
   3828 		}
   3829 		/* FALLTHROUGH */
   3830 	case WM_T_PCH:
   3831 	case WM_T_PCH2:
   3832 	case WM_T_PCH_LPT:
   3833 	case WM_T_PCH_SPT:
   3834 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3835 		break;
   3836 	default:
   3837 		return;
   3838 	}
   3839 
   3840 	sc->phy.acquire(sc);
   3841 
   3842 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3843 	if ((reg & sw_cfg_mask) == 0)
   3844 		goto release;
   3845 
   3846 	/*
   3847 	 * Make sure HW does not configure LCD from PHY extended configuration
   3848 	 * before SW configuration
   3849 	 */
   3850 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3851 	if ((sc->sc_type < WM_T_PCH2)
   3852 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3853 		goto release;
   3854 
   3855 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3856 		device_xname(sc->sc_dev), __func__));
   3857 	/* word_addr is in DWORD */
   3858 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3859 
   3860 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3861 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3862 
   3863 	if (((sc->sc_type == WM_T_PCH)
   3864 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3865 	    || (sc->sc_type > WM_T_PCH)) {
   3866 		/*
   3867 		 * HW configures the SMBus address and LEDs when the OEM and
   3868 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3869 		 * are cleared, SW will configure them instead.
   3870 		 */
   3871 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3872 			device_xname(sc->sc_dev), __func__));
   3873 		wm_write_smbus_addr(sc);
   3874 
   3875 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3876 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3877 	}
   3878 
   3879 	/* Configure LCD from extended configuration region. */
   3880 	for (i = 0; i < cnf_size; i++) {
   3881 		uint16_t reg_data, reg_addr;
   3882 
   3883 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3884 			goto release;
   3885 
   3886 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3887 			goto release;
   3888 
   3889 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3890 			phy_page = reg_data;
   3891 
   3892 		reg_addr &= IGPHY_MAXREGADDR;
   3893 		reg_addr |= phy_page;
   3894 
   3895 		sc->phy.release(sc); /* XXX */
   3896 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3897 		sc->phy.acquire(sc); /* XXX */
   3898 	}
   3899 
   3900 release:
   3901 	sc->phy.release(sc);
   3902 	return;
   3903 }
   3904 
   3905 
   3906 /* Init hardware bits */
   3907 void
   3908 wm_initialize_hardware_bits(struct wm_softc *sc)
   3909 {
   3910 	uint32_t tarc0, tarc1, reg;
   3911 
   3912 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3913 		device_xname(sc->sc_dev), __func__));
   3914 
   3915 	/* For 82571 variant, 80003 and ICHs */
   3916 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3917 	    || (sc->sc_type >= WM_T_80003)) {
   3918 
   3919 		/* Transmit Descriptor Control 0 */
   3920 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3921 		reg |= TXDCTL_COUNT_DESC;
   3922 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3923 
   3924 		/* Transmit Descriptor Control 1 */
   3925 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3926 		reg |= TXDCTL_COUNT_DESC;
   3927 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3928 
   3929 		/* TARC0 */
   3930 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3931 		switch (sc->sc_type) {
   3932 		case WM_T_82571:
   3933 		case WM_T_82572:
   3934 		case WM_T_82573:
   3935 		case WM_T_82574:
   3936 		case WM_T_82583:
   3937 		case WM_T_80003:
   3938 			/* Clear bits 30..27 */
   3939 			tarc0 &= ~__BITS(30, 27);
   3940 			break;
   3941 		default:
   3942 			break;
   3943 		}
   3944 
   3945 		switch (sc->sc_type) {
   3946 		case WM_T_82571:
   3947 		case WM_T_82572:
   3948 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3949 
   3950 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3951 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3952 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3953 			/* 8257[12] Errata No.7 */
   3954 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   3955 
   3956 			/* TARC1 bit 28 */
   3957 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3958 				tarc1 &= ~__BIT(28);
   3959 			else
   3960 				tarc1 |= __BIT(28);
   3961 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3962 
   3963 			/*
   3964 			 * 8257[12] Errata No.13
   3965 			 * Disable Dyamic Clock Gating.
   3966 			 */
   3967 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3968 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3969 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3970 			break;
   3971 		case WM_T_82573:
   3972 		case WM_T_82574:
   3973 		case WM_T_82583:
   3974 			if ((sc->sc_type == WM_T_82574)
   3975 			    || (sc->sc_type == WM_T_82583))
   3976 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3977 
   3978 			/* Extended Device Control */
   3979 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3980 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3981 			reg |= __BIT(22);	/* Set bit 22 */
   3982 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3983 
   3984 			/* Device Control */
   3985 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3986 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3987 
   3988 			/* PCIe Control Register */
   3989 			/*
   3990 			 * 82573 Errata (unknown).
   3991 			 *
   3992 			 * 82574 Errata 25 and 82583 Errata 12
   3993 			 * "Dropped Rx Packets":
   3994 			 *   NVM Image Version 2.1.4 and newer has no this bug.
   3995 			 */
   3996 			reg = CSR_READ(sc, WMREG_GCR);
   3997 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3998 			CSR_WRITE(sc, WMREG_GCR, reg);
   3999 
   4000 			if ((sc->sc_type == WM_T_82574)
   4001 			    || (sc->sc_type == WM_T_82583)) {
   4002 				/*
   4003 				 * Document says this bit must be set for
   4004 				 * proper operation.
   4005 				 */
   4006 				reg = CSR_READ(sc, WMREG_GCR);
   4007 				reg |= __BIT(22);
   4008 				CSR_WRITE(sc, WMREG_GCR, reg);
   4009 
   4010 				/*
   4011 				 * Apply workaround for hardware errata
   4012 				 * documented in errata docs Fixes issue where
   4013 				 * some error prone or unreliable PCIe
   4014 				 * completions are occurring, particularly
   4015 				 * with ASPM enabled. Without fix, issue can
   4016 				 * cause Tx timeouts.
   4017 				 */
   4018 				reg = CSR_READ(sc, WMREG_GCR2);
   4019 				reg |= __BIT(0);
   4020 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4021 			}
   4022 			break;
   4023 		case WM_T_80003:
   4024 			/* TARC0 */
   4025 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4026 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4027 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4028 
   4029 			/* TARC1 bit 28 */
   4030 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4031 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4032 				tarc1 &= ~__BIT(28);
   4033 			else
   4034 				tarc1 |= __BIT(28);
   4035 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4036 			break;
   4037 		case WM_T_ICH8:
   4038 		case WM_T_ICH9:
   4039 		case WM_T_ICH10:
   4040 		case WM_T_PCH:
   4041 		case WM_T_PCH2:
   4042 		case WM_T_PCH_LPT:
   4043 		case WM_T_PCH_SPT:
   4044 			/* TARC0 */
   4045 			if (sc->sc_type == WM_T_ICH8) {
   4046 				/* Set TARC0 bits 29 and 28 */
   4047 				tarc0 |= __BITS(29, 28);
   4048 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4049 				tarc0 |= __BIT(29);
   4050 				/*
   4051 				 *  Drop bit 28. From Linux.
   4052 				 * See I218/I219 spec update
   4053 				 * "5. Buffer Overrun While the I219 is
   4054 				 * Processing DMA Transactions"
   4055 				 */
   4056 				tarc0 &= ~__BIT(28);
   4057 			}
   4058 			/* Set TARC0 bits 23,24,26,27 */
   4059 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4060 
   4061 			/* CTRL_EXT */
   4062 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4063 			reg |= __BIT(22);	/* Set bit 22 */
   4064 			/*
   4065 			 * Enable PHY low-power state when MAC is at D3
   4066 			 * w/o WoL
   4067 			 */
   4068 			if (sc->sc_type >= WM_T_PCH)
   4069 				reg |= CTRL_EXT_PHYPDEN;
   4070 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4071 
   4072 			/* TARC1 */
   4073 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4074 			/* bit 28 */
   4075 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4076 				tarc1 &= ~__BIT(28);
   4077 			else
   4078 				tarc1 |= __BIT(28);
   4079 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4080 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4081 
   4082 			/* Device Status */
   4083 			if (sc->sc_type == WM_T_ICH8) {
   4084 				reg = CSR_READ(sc, WMREG_STATUS);
   4085 				reg &= ~__BIT(31);
   4086 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4087 
   4088 			}
   4089 
   4090 			/* IOSFPC */
   4091 			if (sc->sc_type == WM_T_PCH_SPT) {
   4092 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4093 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4094 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4095 			}
   4096 			/*
   4097 			 * Work-around descriptor data corruption issue during
   4098 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4099 			 * capability.
   4100 			 */
   4101 			reg = CSR_READ(sc, WMREG_RFCTL);
   4102 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4103 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4104 			break;
   4105 		default:
   4106 			break;
   4107 		}
   4108 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4109 
   4110 		switch (sc->sc_type) {
   4111 		/*
   4112 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4113 		 * Avoid RSS Hash Value bug.
   4114 		 */
   4115 		case WM_T_82571:
   4116 		case WM_T_82572:
   4117 		case WM_T_82573:
   4118 		case WM_T_80003:
   4119 		case WM_T_ICH8:
   4120 			reg = CSR_READ(sc, WMREG_RFCTL);
   4121 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4122 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4123 			break;
   4124 		case WM_T_82574:
   4125 			/* use extened Rx descriptor. */
   4126 			reg = CSR_READ(sc, WMREG_RFCTL);
   4127 			reg |= WMREG_RFCTL_EXSTEN;
   4128 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4129 			break;
   4130 		default:
   4131 			break;
   4132 		}
   4133 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4134 		/*
   4135 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4136 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4137 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4138 		 * Correctly by the Device"
   4139 		 *
   4140 		 * I354(C2000) Errata AVR53:
   4141 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4142 		 * Hang"
   4143 		 */
   4144 		reg = CSR_READ(sc, WMREG_RFCTL);
   4145 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4146 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4147 	}
   4148 }
   4149 
   4150 static uint32_t
   4151 wm_rxpbs_adjust_82580(uint32_t val)
   4152 {
   4153 	uint32_t rv = 0;
   4154 
   4155 	if (val < __arraycount(wm_82580_rxpbs_table))
   4156 		rv = wm_82580_rxpbs_table[val];
   4157 
   4158 	return rv;
   4159 }
   4160 
   4161 /*
   4162  * wm_reset_phy:
   4163  *
   4164  *	generic PHY reset function.
   4165  *	Same as e1000_phy_hw_reset_generic()
   4166  */
   4167 static void
   4168 wm_reset_phy(struct wm_softc *sc)
   4169 {
   4170 	uint32_t reg;
   4171 
   4172 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4173 		device_xname(sc->sc_dev), __func__));
   4174 	if (wm_phy_resetisblocked(sc))
   4175 		return;
   4176 
   4177 	sc->phy.acquire(sc);
   4178 
   4179 	reg = CSR_READ(sc, WMREG_CTRL);
   4180 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4181 	CSR_WRITE_FLUSH(sc);
   4182 
   4183 	delay(sc->phy.reset_delay_us);
   4184 
   4185 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4186 	CSR_WRITE_FLUSH(sc);
   4187 
   4188 	delay(150);
   4189 
   4190 	sc->phy.release(sc);
   4191 
   4192 	wm_get_cfg_done(sc);
   4193 	wm_phy_post_reset(sc);
   4194 }
   4195 
   4196 static void
   4197 wm_flush_desc_rings(struct wm_softc *sc)
   4198 {
   4199 	pcireg_t preg;
   4200 	uint32_t reg;
   4201 	struct wm_txqueue *txq;
   4202 	wiseman_txdesc_t *txd;
   4203 	int nexttx;
   4204 	uint32_t rctl;
   4205 
   4206 	/* First, disable MULR fix in FEXTNVM11 */
   4207 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4208 	reg |= FEXTNVM11_DIS_MULRFIX;
   4209 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4210 
   4211 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4212 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4213 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4214 		return;
   4215 
   4216 	/* TX */
   4217 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4218 	    device_xname(sc->sc_dev), preg, reg);
   4219 	reg = CSR_READ(sc, WMREG_TCTL);
   4220 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4221 
   4222 	txq = &sc->sc_queue[0].wmq_txq;
   4223 	nexttx = txq->txq_next;
   4224 	txd = &txq->txq_descs[nexttx];
   4225 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4226 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
   4227 	txd->wtx_fields.wtxu_status = 0;
   4228 	txd->wtx_fields.wtxu_options = 0;
   4229 	txd->wtx_fields.wtxu_vlan = 0;
   4230 
   4231 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4232 	    BUS_SPACE_BARRIER_WRITE);
   4233 
   4234 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4235 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4236 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4237 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4238 	delay(250);
   4239 
   4240 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4241 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4242 		return;
   4243 
   4244 	/* RX */
   4245 	printf("%s: Need RX flush (reg = %08x)\n",
   4246 	    device_xname(sc->sc_dev), preg);
   4247 	rctl = CSR_READ(sc, WMREG_RCTL);
   4248 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4249 	CSR_WRITE_FLUSH(sc);
   4250 	delay(150);
   4251 
   4252 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4253 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4254 	reg &= 0xffffc000;
   4255 	/*
   4256 	 * update thresholds: prefetch threshold to 31, host threshold
   4257 	 * to 1 and make sure the granularity is "descriptors" and not
   4258 	 * "cache lines"
   4259 	 */
   4260 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4261 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4262 
   4263 	/*
   4264 	 * momentarily enable the RX ring for the changes to take
   4265 	 * effect
   4266 	 */
   4267 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4268 	CSR_WRITE_FLUSH(sc);
   4269 	delay(150);
   4270 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4271 }
   4272 
   4273 /*
   4274  * wm_reset:
   4275  *
   4276  *	Reset the i82542 chip.
   4277  */
   4278 static void
   4279 wm_reset(struct wm_softc *sc)
   4280 {
   4281 	int phy_reset = 0;
   4282 	int i, error = 0;
   4283 	uint32_t reg;
   4284 	uint16_t kmreg;
   4285 	int rv;
   4286 
   4287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4288 		device_xname(sc->sc_dev), __func__));
   4289 	KASSERT(sc->sc_type != 0);
   4290 
   4291 	/*
   4292 	 * Allocate on-chip memory according to the MTU size.
   4293 	 * The Packet Buffer Allocation register must be written
   4294 	 * before the chip is reset.
   4295 	 */
   4296 	switch (sc->sc_type) {
   4297 	case WM_T_82547:
   4298 	case WM_T_82547_2:
   4299 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4300 		    PBA_22K : PBA_30K;
   4301 		for (i = 0; i < sc->sc_nqueues; i++) {
   4302 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4303 			txq->txq_fifo_head = 0;
   4304 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4305 			txq->txq_fifo_size =
   4306 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4307 			txq->txq_fifo_stall = 0;
   4308 		}
   4309 		break;
   4310 	case WM_T_82571:
   4311 	case WM_T_82572:
   4312 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4313 	case WM_T_80003:
   4314 		sc->sc_pba = PBA_32K;
   4315 		break;
   4316 	case WM_T_82573:
   4317 		sc->sc_pba = PBA_12K;
   4318 		break;
   4319 	case WM_T_82574:
   4320 	case WM_T_82583:
   4321 		sc->sc_pba = PBA_20K;
   4322 		break;
   4323 	case WM_T_82576:
   4324 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4325 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4326 		break;
   4327 	case WM_T_82580:
   4328 	case WM_T_I350:
   4329 	case WM_T_I354:
   4330 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4331 		break;
   4332 	case WM_T_I210:
   4333 	case WM_T_I211:
   4334 		sc->sc_pba = PBA_34K;
   4335 		break;
   4336 	case WM_T_ICH8:
   4337 		/* Workaround for a bit corruption issue in FIFO memory */
   4338 		sc->sc_pba = PBA_8K;
   4339 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4340 		break;
   4341 	case WM_T_ICH9:
   4342 	case WM_T_ICH10:
   4343 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4344 		    PBA_14K : PBA_10K;
   4345 		break;
   4346 	case WM_T_PCH:
   4347 	case WM_T_PCH2:
   4348 	case WM_T_PCH_LPT:
   4349 	case WM_T_PCH_SPT:
   4350 		sc->sc_pba = PBA_26K;
   4351 		break;
   4352 	default:
   4353 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4354 		    PBA_40K : PBA_48K;
   4355 		break;
   4356 	}
   4357 	/*
   4358 	 * Only old or non-multiqueue devices have the PBA register
   4359 	 * XXX Need special handling for 82575.
   4360 	 */
   4361 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4362 	    || (sc->sc_type == WM_T_82575))
   4363 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4364 
   4365 	/* Prevent the PCI-E bus from sticking */
   4366 	if (sc->sc_flags & WM_F_PCIE) {
   4367 		int timeout = 800;
   4368 
   4369 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4370 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4371 
   4372 		while (timeout--) {
   4373 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4374 			    == 0)
   4375 				break;
   4376 			delay(100);
   4377 		}
   4378 		if (timeout == 0)
   4379 			device_printf(sc->sc_dev,
   4380 			    "failed to disable busmastering\n");
   4381 	}
   4382 
   4383 	/* Set the completion timeout for interface */
   4384 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4385 	    || (sc->sc_type == WM_T_82580)
   4386 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4387 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4388 		wm_set_pcie_completion_timeout(sc);
   4389 
   4390 	/* Clear interrupt */
   4391 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4392 	if (wm_is_using_msix(sc)) {
   4393 		if (sc->sc_type != WM_T_82574) {
   4394 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4395 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4396 		} else {
   4397 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4398 		}
   4399 	}
   4400 
   4401 	/* Stop the transmit and receive processes. */
   4402 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4403 	sc->sc_rctl &= ~RCTL_EN;
   4404 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4405 	CSR_WRITE_FLUSH(sc);
   4406 
   4407 	/* XXX set_tbi_sbp_82543() */
   4408 
   4409 	delay(10*1000);
   4410 
   4411 	/* Must acquire the MDIO ownership before MAC reset */
   4412 	switch (sc->sc_type) {
   4413 	case WM_T_82573:
   4414 	case WM_T_82574:
   4415 	case WM_T_82583:
   4416 		error = wm_get_hw_semaphore_82573(sc);
   4417 		break;
   4418 	default:
   4419 		break;
   4420 	}
   4421 
   4422 	/*
   4423 	 * 82541 Errata 29? & 82547 Errata 28?
   4424 	 * See also the description about PHY_RST bit in CTRL register
   4425 	 * in 8254x_GBe_SDM.pdf.
   4426 	 */
   4427 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4428 		CSR_WRITE(sc, WMREG_CTRL,
   4429 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4430 		CSR_WRITE_FLUSH(sc);
   4431 		delay(5000);
   4432 	}
   4433 
   4434 	switch (sc->sc_type) {
   4435 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4436 	case WM_T_82541:
   4437 	case WM_T_82541_2:
   4438 	case WM_T_82547:
   4439 	case WM_T_82547_2:
   4440 		/*
   4441 		 * On some chipsets, a reset through a memory-mapped write
   4442 		 * cycle can cause the chip to reset before completing the
   4443 		 * write cycle.  This causes major headache that can be
   4444 		 * avoided by issuing the reset via indirect register writes
   4445 		 * through I/O space.
   4446 		 *
   4447 		 * So, if we successfully mapped the I/O BAR at attach time,
   4448 		 * use that.  Otherwise, try our luck with a memory-mapped
   4449 		 * reset.
   4450 		 */
   4451 		if (sc->sc_flags & WM_F_IOH_VALID)
   4452 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4453 		else
   4454 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4455 		break;
   4456 	case WM_T_82545_3:
   4457 	case WM_T_82546_3:
   4458 		/* Use the shadow control register on these chips. */
   4459 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4460 		break;
   4461 	case WM_T_80003:
   4462 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4463 		sc->phy.acquire(sc);
   4464 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4465 		sc->phy.release(sc);
   4466 		break;
   4467 	case WM_T_ICH8:
   4468 	case WM_T_ICH9:
   4469 	case WM_T_ICH10:
   4470 	case WM_T_PCH:
   4471 	case WM_T_PCH2:
   4472 	case WM_T_PCH_LPT:
   4473 	case WM_T_PCH_SPT:
   4474 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4475 		if (wm_phy_resetisblocked(sc) == false) {
   4476 			/*
   4477 			 * Gate automatic PHY configuration by hardware on
   4478 			 * non-managed 82579
   4479 			 */
   4480 			if ((sc->sc_type == WM_T_PCH2)
   4481 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4482 				== 0))
   4483 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4484 
   4485 			reg |= CTRL_PHY_RESET;
   4486 			phy_reset = 1;
   4487 		} else
   4488 			printf("XXX reset is blocked!!!\n");
   4489 		sc->phy.acquire(sc);
   4490 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4491 		/* Don't insert a completion barrier when reset */
   4492 		delay(20*1000);
   4493 		mutex_exit(sc->sc_ich_phymtx);
   4494 		break;
   4495 	case WM_T_82580:
   4496 	case WM_T_I350:
   4497 	case WM_T_I354:
   4498 	case WM_T_I210:
   4499 	case WM_T_I211:
   4500 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4501 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4502 			CSR_WRITE_FLUSH(sc);
   4503 		delay(5000);
   4504 		break;
   4505 	case WM_T_82542_2_0:
   4506 	case WM_T_82542_2_1:
   4507 	case WM_T_82543:
   4508 	case WM_T_82540:
   4509 	case WM_T_82545:
   4510 	case WM_T_82546:
   4511 	case WM_T_82571:
   4512 	case WM_T_82572:
   4513 	case WM_T_82573:
   4514 	case WM_T_82574:
   4515 	case WM_T_82575:
   4516 	case WM_T_82576:
   4517 	case WM_T_82583:
   4518 	default:
   4519 		/* Everything else can safely use the documented method. */
   4520 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4521 		break;
   4522 	}
   4523 
   4524 	/* Must release the MDIO ownership after MAC reset */
   4525 	switch (sc->sc_type) {
   4526 	case WM_T_82573:
   4527 	case WM_T_82574:
   4528 	case WM_T_82583:
   4529 		if (error == 0)
   4530 			wm_put_hw_semaphore_82573(sc);
   4531 		break;
   4532 	default:
   4533 		break;
   4534 	}
   4535 
   4536 	if (phy_reset != 0)
   4537 		wm_get_cfg_done(sc);
   4538 
   4539 	/* reload EEPROM */
   4540 	switch (sc->sc_type) {
   4541 	case WM_T_82542_2_0:
   4542 	case WM_T_82542_2_1:
   4543 	case WM_T_82543:
   4544 	case WM_T_82544:
   4545 		delay(10);
   4546 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4547 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4548 		CSR_WRITE_FLUSH(sc);
   4549 		delay(2000);
   4550 		break;
   4551 	case WM_T_82540:
   4552 	case WM_T_82545:
   4553 	case WM_T_82545_3:
   4554 	case WM_T_82546:
   4555 	case WM_T_82546_3:
   4556 		delay(5*1000);
   4557 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4558 		break;
   4559 	case WM_T_82541:
   4560 	case WM_T_82541_2:
   4561 	case WM_T_82547:
   4562 	case WM_T_82547_2:
   4563 		delay(20000);
   4564 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4565 		break;
   4566 	case WM_T_82571:
   4567 	case WM_T_82572:
   4568 	case WM_T_82573:
   4569 	case WM_T_82574:
   4570 	case WM_T_82583:
   4571 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4572 			delay(10);
   4573 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4574 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4575 			CSR_WRITE_FLUSH(sc);
   4576 		}
   4577 		/* check EECD_EE_AUTORD */
   4578 		wm_get_auto_rd_done(sc);
   4579 		/*
   4580 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4581 		 * is set.
   4582 		 */
   4583 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4584 		    || (sc->sc_type == WM_T_82583))
   4585 			delay(25*1000);
   4586 		break;
   4587 	case WM_T_82575:
   4588 	case WM_T_82576:
   4589 	case WM_T_82580:
   4590 	case WM_T_I350:
   4591 	case WM_T_I354:
   4592 	case WM_T_I210:
   4593 	case WM_T_I211:
   4594 	case WM_T_80003:
   4595 		/* check EECD_EE_AUTORD */
   4596 		wm_get_auto_rd_done(sc);
   4597 		break;
   4598 	case WM_T_ICH8:
   4599 	case WM_T_ICH9:
   4600 	case WM_T_ICH10:
   4601 	case WM_T_PCH:
   4602 	case WM_T_PCH2:
   4603 	case WM_T_PCH_LPT:
   4604 	case WM_T_PCH_SPT:
   4605 		break;
   4606 	default:
   4607 		panic("%s: unknown type\n", __func__);
   4608 	}
   4609 
   4610 	/* Check whether EEPROM is present or not */
   4611 	switch (sc->sc_type) {
   4612 	case WM_T_82575:
   4613 	case WM_T_82576:
   4614 	case WM_T_82580:
   4615 	case WM_T_I350:
   4616 	case WM_T_I354:
   4617 	case WM_T_ICH8:
   4618 	case WM_T_ICH9:
   4619 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4620 			/* Not found */
   4621 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4622 			if (sc->sc_type == WM_T_82575)
   4623 				wm_reset_init_script_82575(sc);
   4624 		}
   4625 		break;
   4626 	default:
   4627 		break;
   4628 	}
   4629 
   4630 	if (phy_reset != 0)
   4631 		wm_phy_post_reset(sc);
   4632 
   4633 	if ((sc->sc_type == WM_T_82580)
   4634 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4635 		/* clear global device reset status bit */
   4636 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4637 	}
   4638 
   4639 	/* Clear any pending interrupt events. */
   4640 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4641 	reg = CSR_READ(sc, WMREG_ICR);
   4642 	if (wm_is_using_msix(sc)) {
   4643 		if (sc->sc_type != WM_T_82574) {
   4644 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4645 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4646 		} else
   4647 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4648 	}
   4649 
   4650 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4651 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4652 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4653 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4654 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4655 		reg |= KABGTXD_BGSQLBIAS;
   4656 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4657 	}
   4658 
   4659 	/* reload sc_ctrl */
   4660 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4661 
   4662 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4663 		wm_set_eee_i350(sc);
   4664 
   4665 	/*
   4666 	 * For PCH, this write will make sure that any noise will be detected
   4667 	 * as a CRC error and be dropped rather than show up as a bad packet
   4668 	 * to the DMA engine
   4669 	 */
   4670 	if (sc->sc_type == WM_T_PCH)
   4671 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4672 
   4673 	if (sc->sc_type >= WM_T_82544)
   4674 		CSR_WRITE(sc, WMREG_WUC, 0);
   4675 
   4676 	wm_reset_mdicnfg_82580(sc);
   4677 
   4678 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4679 		wm_pll_workaround_i210(sc);
   4680 
   4681 	if (sc->sc_type == WM_T_80003) {
   4682 		/* default to TRUE to enable the MDIC W/A */
   4683 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4684 
   4685 		rv = wm_kmrn_readreg(sc,
   4686 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4687 		if (rv == 0) {
   4688 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4689 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4690 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4691 			else
   4692 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4693 		}
   4694 	}
   4695 }
   4696 
   4697 /*
   4698  * wm_add_rxbuf:
   4699  *
   4700  *	Add a receive buffer to the indiciated descriptor.
   4701  */
   4702 static int
   4703 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4704 {
   4705 	struct wm_softc *sc = rxq->rxq_sc;
   4706 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4707 	struct mbuf *m;
   4708 	int error;
   4709 
   4710 	KASSERT(mutex_owned(rxq->rxq_lock));
   4711 
   4712 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4713 	if (m == NULL)
   4714 		return ENOBUFS;
   4715 
   4716 	MCLGET(m, M_DONTWAIT);
   4717 	if ((m->m_flags & M_EXT) == 0) {
   4718 		m_freem(m);
   4719 		return ENOBUFS;
   4720 	}
   4721 
   4722 	if (rxs->rxs_mbuf != NULL)
   4723 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4724 
   4725 	rxs->rxs_mbuf = m;
   4726 
   4727 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4728 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4729 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4730 	if (error) {
   4731 		/* XXX XXX XXX */
   4732 		aprint_error_dev(sc->sc_dev,
   4733 		    "unable to load rx DMA map %d, error = %d\n",
   4734 		    idx, error);
   4735 		panic("wm_add_rxbuf");
   4736 	}
   4737 
   4738 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4739 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4740 
   4741 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4742 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4743 			wm_init_rxdesc(rxq, idx);
   4744 	} else
   4745 		wm_init_rxdesc(rxq, idx);
   4746 
   4747 	return 0;
   4748 }
   4749 
   4750 /*
   4751  * wm_rxdrain:
   4752  *
   4753  *	Drain the receive queue.
   4754  */
   4755 static void
   4756 wm_rxdrain(struct wm_rxqueue *rxq)
   4757 {
   4758 	struct wm_softc *sc = rxq->rxq_sc;
   4759 	struct wm_rxsoft *rxs;
   4760 	int i;
   4761 
   4762 	KASSERT(mutex_owned(rxq->rxq_lock));
   4763 
   4764 	for (i = 0; i < WM_NRXDESC; i++) {
   4765 		rxs = &rxq->rxq_soft[i];
   4766 		if (rxs->rxs_mbuf != NULL) {
   4767 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4768 			m_freem(rxs->rxs_mbuf);
   4769 			rxs->rxs_mbuf = NULL;
   4770 		}
   4771 	}
   4772 }
   4773 
   4774 
   4775 /*
   4776  * XXX copy from FreeBSD's sys/net/rss_config.c
   4777  */
   4778 /*
   4779  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4780  * effectiveness may be limited by algorithm choice and available entropy
   4781  * during the boot.
   4782  *
   4783  * XXXRW: And that we don't randomize it yet!
   4784  *
   4785  * This is the default Microsoft RSS specification key which is also
   4786  * the Chelsio T5 firmware default key.
   4787  */
   4788 #define RSS_KEYSIZE 40
   4789 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4790 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4791 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4792 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4793 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4794 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4795 };
   4796 
   4797 /*
   4798  * Caller must pass an array of size sizeof(rss_key).
   4799  *
   4800  * XXX
   4801  * As if_ixgbe may use this function, this function should not be
   4802  * if_wm specific function.
   4803  */
   4804 static void
   4805 wm_rss_getkey(uint8_t *key)
   4806 {
   4807 
   4808 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4809 }
   4810 
   4811 /*
   4812  * Setup registers for RSS.
   4813  *
   4814  * XXX not yet VMDq support
   4815  */
   4816 static void
   4817 wm_init_rss(struct wm_softc *sc)
   4818 {
   4819 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4820 	int i;
   4821 
   4822 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4823 
   4824 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4825 		int qid, reta_ent;
   4826 
   4827 		qid  = i % sc->sc_nqueues;
   4828 		switch(sc->sc_type) {
   4829 		case WM_T_82574:
   4830 			reta_ent = __SHIFTIN(qid,
   4831 			    RETA_ENT_QINDEX_MASK_82574);
   4832 			break;
   4833 		case WM_T_82575:
   4834 			reta_ent = __SHIFTIN(qid,
   4835 			    RETA_ENT_QINDEX1_MASK_82575);
   4836 			break;
   4837 		default:
   4838 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4839 			break;
   4840 		}
   4841 
   4842 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4843 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4844 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4845 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4846 	}
   4847 
   4848 	wm_rss_getkey((uint8_t *)rss_key);
   4849 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4850 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4851 
   4852 	if (sc->sc_type == WM_T_82574)
   4853 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4854 	else
   4855 		mrqc = MRQC_ENABLE_RSS_MQ;
   4856 
   4857 	/*
   4858 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4859 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4860 	 */
   4861 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4862 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4863 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4864 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4865 
   4866 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4867 }
   4868 
   4869 /*
   4870  * Adjust TX and RX queue numbers which the system actulally uses.
   4871  *
   4872  * The numbers are affected by below parameters.
   4873  *     - The nubmer of hardware queues
   4874  *     - The number of MSI-X vectors (= "nvectors" argument)
   4875  *     - ncpu
   4876  */
   4877 static void
   4878 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4879 {
   4880 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4881 
   4882 	if (nvectors < 2) {
   4883 		sc->sc_nqueues = 1;
   4884 		return;
   4885 	}
   4886 
   4887 	switch(sc->sc_type) {
   4888 	case WM_T_82572:
   4889 		hw_ntxqueues = 2;
   4890 		hw_nrxqueues = 2;
   4891 		break;
   4892 	case WM_T_82574:
   4893 		hw_ntxqueues = 2;
   4894 		hw_nrxqueues = 2;
   4895 		break;
   4896 	case WM_T_82575:
   4897 		hw_ntxqueues = 4;
   4898 		hw_nrxqueues = 4;
   4899 		break;
   4900 	case WM_T_82576:
   4901 		hw_ntxqueues = 16;
   4902 		hw_nrxqueues = 16;
   4903 		break;
   4904 	case WM_T_82580:
   4905 	case WM_T_I350:
   4906 	case WM_T_I354:
   4907 		hw_ntxqueues = 8;
   4908 		hw_nrxqueues = 8;
   4909 		break;
   4910 	case WM_T_I210:
   4911 		hw_ntxqueues = 4;
   4912 		hw_nrxqueues = 4;
   4913 		break;
   4914 	case WM_T_I211:
   4915 		hw_ntxqueues = 2;
   4916 		hw_nrxqueues = 2;
   4917 		break;
   4918 		/*
   4919 		 * As below ethernet controllers does not support MSI-X,
   4920 		 * this driver let them not use multiqueue.
   4921 		 *     - WM_T_80003
   4922 		 *     - WM_T_ICH8
   4923 		 *     - WM_T_ICH9
   4924 		 *     - WM_T_ICH10
   4925 		 *     - WM_T_PCH
   4926 		 *     - WM_T_PCH2
   4927 		 *     - WM_T_PCH_LPT
   4928 		 */
   4929 	default:
   4930 		hw_ntxqueues = 1;
   4931 		hw_nrxqueues = 1;
   4932 		break;
   4933 	}
   4934 
   4935 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4936 
   4937 	/*
   4938 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
   4939 	 * the number of queues used actually.
   4940 	 */
   4941 	if (nvectors < hw_nqueues + 1) {
   4942 		sc->sc_nqueues = nvectors - 1;
   4943 	} else {
   4944 		sc->sc_nqueues = hw_nqueues;
   4945 	}
   4946 
   4947 	/*
   4948 	 * As queues more then cpus cannot improve scaling, we limit
   4949 	 * the number of queues used actually.
   4950 	 */
   4951 	if (ncpu < sc->sc_nqueues)
   4952 		sc->sc_nqueues = ncpu;
   4953 }
   4954 
   4955 static inline bool
   4956 wm_is_using_msix(struct wm_softc *sc)
   4957 {
   4958 
   4959 	return (sc->sc_nintrs > 1);
   4960 }
   4961 
   4962 static inline bool
   4963 wm_is_using_multiqueue(struct wm_softc *sc)
   4964 {
   4965 
   4966 	return (sc->sc_nqueues > 1);
   4967 }
   4968 
   4969 static int
   4970 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4971 {
   4972 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4973 	wmq->wmq_id = qidx;
   4974 	wmq->wmq_intr_idx = intr_idx;
   4975 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4976 #ifdef WM_MPSAFE
   4977 	    | SOFTINT_MPSAFE
   4978 #endif
   4979 	    , wm_handle_queue, wmq);
   4980 	if (wmq->wmq_si != NULL)
   4981 		return 0;
   4982 
   4983 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4984 	    wmq->wmq_id);
   4985 
   4986 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4987 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4988 	return ENOMEM;
   4989 }
   4990 
   4991 /*
   4992  * Both single interrupt MSI and INTx can use this function.
   4993  */
   4994 static int
   4995 wm_setup_legacy(struct wm_softc *sc)
   4996 {
   4997 	pci_chipset_tag_t pc = sc->sc_pc;
   4998 	const char *intrstr = NULL;
   4999 	char intrbuf[PCI_INTRSTR_LEN];
   5000 	int error;
   5001 
   5002 	error = wm_alloc_txrx_queues(sc);
   5003 	if (error) {
   5004 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5005 		    error);
   5006 		return ENOMEM;
   5007 	}
   5008 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5009 	    sizeof(intrbuf));
   5010 #ifdef WM_MPSAFE
   5011 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5012 #endif
   5013 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5014 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5015 	if (sc->sc_ihs[0] == NULL) {
   5016 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5017 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5018 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5019 		return ENOMEM;
   5020 	}
   5021 
   5022 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5023 	sc->sc_nintrs = 1;
   5024 
   5025 	return wm_softint_establish(sc, 0, 0);
   5026 }
   5027 
   5028 static int
   5029 wm_setup_msix(struct wm_softc *sc)
   5030 {
   5031 	void *vih;
   5032 	kcpuset_t *affinity;
   5033 	int qidx, error, intr_idx, txrx_established;
   5034 	pci_chipset_tag_t pc = sc->sc_pc;
   5035 	const char *intrstr = NULL;
   5036 	char intrbuf[PCI_INTRSTR_LEN];
   5037 	char intr_xname[INTRDEVNAMEBUF];
   5038 
   5039 	if (sc->sc_nqueues < ncpu) {
   5040 		/*
   5041 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5042 		 * interrupts start from CPU#1.
   5043 		 */
   5044 		sc->sc_affinity_offset = 1;
   5045 	} else {
   5046 		/*
   5047 		 * In this case, this device use all CPUs. So, we unify
   5048 		 * affinitied cpu_index to msix vector number for readability.
   5049 		 */
   5050 		sc->sc_affinity_offset = 0;
   5051 	}
   5052 
   5053 	error = wm_alloc_txrx_queues(sc);
   5054 	if (error) {
   5055 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5056 		    error);
   5057 		return ENOMEM;
   5058 	}
   5059 
   5060 	kcpuset_create(&affinity, false);
   5061 	intr_idx = 0;
   5062 
   5063 	/*
   5064 	 * TX and RX
   5065 	 */
   5066 	txrx_established = 0;
   5067 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5068 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5069 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5070 
   5071 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5072 		    sizeof(intrbuf));
   5073 #ifdef WM_MPSAFE
   5074 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5075 		    PCI_INTR_MPSAFE, true);
   5076 #endif
   5077 		memset(intr_xname, 0, sizeof(intr_xname));
   5078 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5079 		    device_xname(sc->sc_dev), qidx);
   5080 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5081 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5082 		if (vih == NULL) {
   5083 			aprint_error_dev(sc->sc_dev,
   5084 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5085 			    intrstr ? " at " : "",
   5086 			    intrstr ? intrstr : "");
   5087 
   5088 			goto fail;
   5089 		}
   5090 		kcpuset_zero(affinity);
   5091 		/* Round-robin affinity */
   5092 		kcpuset_set(affinity, affinity_to);
   5093 		error = interrupt_distribute(vih, affinity, NULL);
   5094 		if (error == 0) {
   5095 			aprint_normal_dev(sc->sc_dev,
   5096 			    "for TX and RX interrupting at %s affinity to %u\n",
   5097 			    intrstr, affinity_to);
   5098 		} else {
   5099 			aprint_normal_dev(sc->sc_dev,
   5100 			    "for TX and RX interrupting at %s\n", intrstr);
   5101 		}
   5102 		sc->sc_ihs[intr_idx] = vih;
   5103 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5104 			goto fail;
   5105 		txrx_established++;
   5106 		intr_idx++;
   5107 	}
   5108 
   5109 	/*
   5110 	 * LINK
   5111 	 */
   5112 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5113 	    sizeof(intrbuf));
   5114 #ifdef WM_MPSAFE
   5115 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5116 #endif
   5117 	memset(intr_xname, 0, sizeof(intr_xname));
   5118 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5119 	    device_xname(sc->sc_dev));
   5120 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5121 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5122 	if (vih == NULL) {
   5123 		aprint_error_dev(sc->sc_dev,
   5124 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5125 		    intrstr ? " at " : "",
   5126 		    intrstr ? intrstr : "");
   5127 
   5128 		goto fail;
   5129 	}
   5130 	/* keep default affinity to LINK interrupt */
   5131 	aprint_normal_dev(sc->sc_dev,
   5132 	    "for LINK interrupting at %s\n", intrstr);
   5133 	sc->sc_ihs[intr_idx] = vih;
   5134 	sc->sc_link_intr_idx = intr_idx;
   5135 
   5136 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5137 	kcpuset_destroy(affinity);
   5138 	return 0;
   5139 
   5140  fail:
   5141 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5142 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5143 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5144 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5145 	}
   5146 
   5147 	kcpuset_destroy(affinity);
   5148 	return ENOMEM;
   5149 }
   5150 
   5151 static void
   5152 wm_unset_stopping_flags(struct wm_softc *sc)
   5153 {
   5154 	int i;
   5155 
   5156 	KASSERT(WM_CORE_LOCKED(sc));
   5157 
   5158 	/*
   5159 	 * must unset stopping flags in ascending order.
   5160 	 */
   5161 	for(i = 0; i < sc->sc_nqueues; i++) {
   5162 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5163 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5164 
   5165 		mutex_enter(txq->txq_lock);
   5166 		txq->txq_stopping = false;
   5167 		mutex_exit(txq->txq_lock);
   5168 
   5169 		mutex_enter(rxq->rxq_lock);
   5170 		rxq->rxq_stopping = false;
   5171 		mutex_exit(rxq->rxq_lock);
   5172 	}
   5173 
   5174 	sc->sc_core_stopping = false;
   5175 }
   5176 
   5177 static void
   5178 wm_set_stopping_flags(struct wm_softc *sc)
   5179 {
   5180 	int i;
   5181 
   5182 	KASSERT(WM_CORE_LOCKED(sc));
   5183 
   5184 	sc->sc_core_stopping = true;
   5185 
   5186 	/*
   5187 	 * must set stopping flags in ascending order.
   5188 	 */
   5189 	for(i = 0; i < sc->sc_nqueues; i++) {
   5190 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5191 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5192 
   5193 		mutex_enter(rxq->rxq_lock);
   5194 		rxq->rxq_stopping = true;
   5195 		mutex_exit(rxq->rxq_lock);
   5196 
   5197 		mutex_enter(txq->txq_lock);
   5198 		txq->txq_stopping = true;
   5199 		mutex_exit(txq->txq_lock);
   5200 	}
   5201 }
   5202 
   5203 /*
   5204  * write interrupt interval value to ITR or EITR
   5205  */
   5206 static void
   5207 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5208 {
   5209 
   5210 	if (!wmq->wmq_set_itr)
   5211 		return;
   5212 
   5213 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5214 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5215 
   5216 		/*
   5217 		 * 82575 doesn't have CNT_INGR field.
   5218 		 * So, overwrite counter field by software.
   5219 		 */
   5220 		if (sc->sc_type == WM_T_82575)
   5221 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5222 		else
   5223 			eitr |= EITR_CNT_INGR;
   5224 
   5225 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5226 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5227 		/*
   5228 		 * 82574 has both ITR and EITR. SET EITR when we use
   5229 		 * the multi queue function with MSI-X.
   5230 		 */
   5231 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5232 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5233 	} else {
   5234 		KASSERT(wmq->wmq_id == 0);
   5235 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5236 	}
   5237 
   5238 	wmq->wmq_set_itr = false;
   5239 }
   5240 
   5241 /*
   5242  * TODO
   5243  * Below dynamic calculation of itr is almost the same as linux igb,
   5244  * however it does not fit to wm(4). So, we will have been disable AIM
   5245  * until we will find appropriate calculation of itr.
   5246  */
   5247 /*
   5248  * calculate interrupt interval value to be going to write register in
   5249  * wm_itrs_writereg(). This function does not write ITR/EITR register.
   5250  */
   5251 static void
   5252 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5253 {
   5254 #ifdef NOTYET
   5255 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5256 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5257 	uint32_t avg_size = 0;
   5258 	uint32_t new_itr;
   5259 
   5260 	if (rxq->rxq_packets)
   5261 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5262 	if (txq->txq_packets)
   5263 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5264 
   5265 	if (avg_size == 0) {
   5266 		new_itr = 450; /* restore default value */
   5267 		goto out;
   5268 	}
   5269 
   5270 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5271 	avg_size += 24;
   5272 
   5273 	/* Don't starve jumbo frames */
   5274 	avg_size = min(avg_size, 3000);
   5275 
   5276 	/* Give a little boost to mid-size frames */
   5277 	if ((avg_size > 300) && (avg_size < 1200))
   5278 		new_itr = avg_size / 3;
   5279 	else
   5280 		new_itr = avg_size / 2;
   5281 
   5282 out:
   5283 	/*
   5284 	 * The usage of 82574 and 82575 EITR is different from otther NEWQUEUE
   5285 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5286 	 */
   5287 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5288 		new_itr *= 4;
   5289 
   5290 	if (new_itr != wmq->wmq_itr) {
   5291 		wmq->wmq_itr = new_itr;
   5292 		wmq->wmq_set_itr = true;
   5293 	} else
   5294 		wmq->wmq_set_itr = false;
   5295 
   5296 	rxq->rxq_packets = 0;
   5297 	rxq->rxq_bytes = 0;
   5298 	txq->txq_packets = 0;
   5299 	txq->txq_bytes = 0;
   5300 #endif
   5301 }
   5302 
   5303 /*
   5304  * wm_init:		[ifnet interface function]
   5305  *
   5306  *	Initialize the interface.
   5307  */
   5308 static int
   5309 wm_init(struct ifnet *ifp)
   5310 {
   5311 	struct wm_softc *sc = ifp->if_softc;
   5312 	int ret;
   5313 
   5314 	WM_CORE_LOCK(sc);
   5315 	ret = wm_init_locked(ifp);
   5316 	WM_CORE_UNLOCK(sc);
   5317 
   5318 	return ret;
   5319 }
   5320 
   5321 static int
   5322 wm_init_locked(struct ifnet *ifp)
   5323 {
   5324 	struct wm_softc *sc = ifp->if_softc;
   5325 	int i, j, trynum, error = 0;
   5326 	uint32_t reg;
   5327 
   5328 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5329 		device_xname(sc->sc_dev), __func__));
   5330 	KASSERT(WM_CORE_LOCKED(sc));
   5331 
   5332 	/*
   5333 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
   5334 	 * There is a small but measurable benefit to avoiding the adjusment
   5335 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5336 	 * on such platforms.  One possibility is that the DMA itself is
   5337 	 * slightly more efficient if the front of the entire packet (instead
   5338 	 * of the front of the headers) is aligned.
   5339 	 *
   5340 	 * Note we must always set align_tweak to 0 if we are using
   5341 	 * jumbo frames.
   5342 	 */
   5343 #ifdef __NO_STRICT_ALIGNMENT
   5344 	sc->sc_align_tweak = 0;
   5345 #else
   5346 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5347 		sc->sc_align_tweak = 0;
   5348 	else
   5349 		sc->sc_align_tweak = 2;
   5350 #endif /* __NO_STRICT_ALIGNMENT */
   5351 
   5352 	/* Cancel any pending I/O. */
   5353 	wm_stop_locked(ifp, 0);
   5354 
   5355 	/* update statistics before reset */
   5356 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5357 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5358 
   5359 	/* PCH_SPT hardware workaround */
   5360 	if (sc->sc_type == WM_T_PCH_SPT)
   5361 		wm_flush_desc_rings(sc);
   5362 
   5363 	/* Reset the chip to a known state. */
   5364 	wm_reset(sc);
   5365 
   5366 	/*
   5367 	 * AMT based hardware can now take control from firmware
   5368 	 * Do this after reset.
   5369 	 */
   5370 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5371 		wm_get_hw_control(sc);
   5372 
   5373 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5374 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5375 		wm_legacy_irq_quirk_spt(sc);
   5376 
   5377 	/* Init hardware bits */
   5378 	wm_initialize_hardware_bits(sc);
   5379 
   5380 	/* Reset the PHY. */
   5381 	if (sc->sc_flags & WM_F_HAS_MII)
   5382 		wm_gmii_reset(sc);
   5383 
   5384 	/* Calculate (E)ITR value */
   5385 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5386 		/*
   5387 		 * For NEWQUEUE's EITR (except for 82575).
   5388 		 * 82575's EITR should be set same throttling value as other
   5389 		 * old controllers' ITR because the interrupt/sec calculation
   5390 		 * is the same, that is, 1,000,000,000 / (N * 256).
   5391 		 *
   5392 		 * 82574's EITR should be set same throttling value as ITR.
   5393 		 *
   5394 		 * For N interrupts/sec, set this value to:
   5395 		 * 1,000,000 / N in contrast to ITR throttoling value.
   5396 		 */
   5397 		sc->sc_itr_init = 450;
   5398 	} else if (sc->sc_type >= WM_T_82543) {
   5399 		/*
   5400 		 * Set up the interrupt throttling register (units of 256ns)
   5401 		 * Note that a footnote in Intel's documentation says this
   5402 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5403 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5404 		 * that that is also true for the 1024ns units of the other
   5405 		 * interrupt-related timer registers -- so, really, we ought
   5406 		 * to divide this value by 4 when the link speed is low.
   5407 		 *
   5408 		 * XXX implement this division at link speed change!
   5409 		 */
   5410 
   5411 		/*
   5412 		 * For N interrupts/sec, set this value to:
   5413 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5414 		 * absolute and packet timer values to this value
   5415 		 * divided by 4 to get "simple timer" behavior.
   5416 		 */
   5417 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5418 	}
   5419 
   5420 	error = wm_init_txrx_queues(sc);
   5421 	if (error)
   5422 		goto out;
   5423 
   5424 	/*
   5425 	 * Clear out the VLAN table -- we don't use it (yet).
   5426 	 */
   5427 	CSR_WRITE(sc, WMREG_VET, 0);
   5428 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5429 		trynum = 10; /* Due to hw errata */
   5430 	else
   5431 		trynum = 1;
   5432 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5433 		for (j = 0; j < trynum; j++)
   5434 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5435 
   5436 	/*
   5437 	 * Set up flow-control parameters.
   5438 	 *
   5439 	 * XXX Values could probably stand some tuning.
   5440 	 */
   5441 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5442 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5443 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5444 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5445 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5446 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5447 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5448 	}
   5449 
   5450 	sc->sc_fcrtl = FCRTL_DFLT;
   5451 	if (sc->sc_type < WM_T_82543) {
   5452 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5453 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5454 	} else {
   5455 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5456 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5457 	}
   5458 
   5459 	if (sc->sc_type == WM_T_80003)
   5460 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5461 	else
   5462 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5463 
   5464 	/* Writes the control register. */
   5465 	wm_set_vlan(sc);
   5466 
   5467 	if (sc->sc_flags & WM_F_HAS_MII) {
   5468 		uint16_t kmreg;
   5469 
   5470 		switch (sc->sc_type) {
   5471 		case WM_T_80003:
   5472 		case WM_T_ICH8:
   5473 		case WM_T_ICH9:
   5474 		case WM_T_ICH10:
   5475 		case WM_T_PCH:
   5476 		case WM_T_PCH2:
   5477 		case WM_T_PCH_LPT:
   5478 		case WM_T_PCH_SPT:
   5479 			/*
   5480 			 * Set the mac to wait the maximum time between each
   5481 			 * iteration and increase the max iterations when
   5482 			 * polling the phy; this fixes erroneous timeouts at
   5483 			 * 10Mbps.
   5484 			 */
   5485 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5486 			    0xFFFF);
   5487 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5488 			    &kmreg);
   5489 			kmreg |= 0x3F;
   5490 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5491 			    kmreg);
   5492 			break;
   5493 		default:
   5494 			break;
   5495 		}
   5496 
   5497 		if (sc->sc_type == WM_T_80003) {
   5498 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5499 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5500 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5501 
   5502 			/* Bypass RX and TX FIFO's */
   5503 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5504 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5505 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5506 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5507 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5508 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5509 		}
   5510 	}
   5511 #if 0
   5512 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5513 #endif
   5514 
   5515 	/* Set up checksum offload parameters. */
   5516 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5517 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5518 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5519 		reg |= RXCSUM_IPOFL;
   5520 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5521 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5522 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5523 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5524 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5525 
   5526 	/* Set registers about MSI-X */
   5527 	if (wm_is_using_msix(sc)) {
   5528 		uint32_t ivar;
   5529 		struct wm_queue *wmq;
   5530 		int qid, qintr_idx;
   5531 
   5532 		if (sc->sc_type == WM_T_82575) {
   5533 			/* Interrupt control */
   5534 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5535 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5536 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5537 
   5538 			/* TX and RX */
   5539 			for (i = 0; i < sc->sc_nqueues; i++) {
   5540 				wmq = &sc->sc_queue[i];
   5541 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5542 				    EITR_TX_QUEUE(wmq->wmq_id)
   5543 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5544 			}
   5545 			/* Link status */
   5546 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5547 			    EITR_OTHER);
   5548 		} else if (sc->sc_type == WM_T_82574) {
   5549 			/* Interrupt control */
   5550 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5551 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5552 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5553 
   5554 			/*
   5555 			 * workaround issue with spurious interrupts
   5556 			 * in MSI-X mode.
   5557 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
   5558 			 * initialized yet. So re-initialize WMREG_RFCTL here.
   5559 			 */
   5560 			reg = CSR_READ(sc, WMREG_RFCTL);
   5561 			reg |= WMREG_RFCTL_ACKDIS;
   5562 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5563 
   5564 			ivar = 0;
   5565 			/* TX and RX */
   5566 			for (i = 0; i < sc->sc_nqueues; i++) {
   5567 				wmq = &sc->sc_queue[i];
   5568 				qid = wmq->wmq_id;
   5569 				qintr_idx = wmq->wmq_intr_idx;
   5570 
   5571 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5572 				    IVAR_TX_MASK_Q_82574(qid));
   5573 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5574 				    IVAR_RX_MASK_Q_82574(qid));
   5575 			}
   5576 			/* Link status */
   5577 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5578 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5579 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5580 		} else {
   5581 			/* Interrupt control */
   5582 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5583 			    | GPIE_EIAME | GPIE_PBA);
   5584 
   5585 			switch (sc->sc_type) {
   5586 			case WM_T_82580:
   5587 			case WM_T_I350:
   5588 			case WM_T_I354:
   5589 			case WM_T_I210:
   5590 			case WM_T_I211:
   5591 				/* TX and RX */
   5592 				for (i = 0; i < sc->sc_nqueues; i++) {
   5593 					wmq = &sc->sc_queue[i];
   5594 					qid = wmq->wmq_id;
   5595 					qintr_idx = wmq->wmq_intr_idx;
   5596 
   5597 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5598 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5599 					ivar |= __SHIFTIN((qintr_idx
   5600 						| IVAR_VALID),
   5601 					    IVAR_TX_MASK_Q(qid));
   5602 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5603 					ivar |= __SHIFTIN((qintr_idx
   5604 						| IVAR_VALID),
   5605 					    IVAR_RX_MASK_Q(qid));
   5606 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5607 				}
   5608 				break;
   5609 			case WM_T_82576:
   5610 				/* TX and RX */
   5611 				for (i = 0; i < sc->sc_nqueues; i++) {
   5612 					wmq = &sc->sc_queue[i];
   5613 					qid = wmq->wmq_id;
   5614 					qintr_idx = wmq->wmq_intr_idx;
   5615 
   5616 					ivar = CSR_READ(sc,
   5617 					    WMREG_IVAR_Q_82576(qid));
   5618 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5619 					ivar |= __SHIFTIN((qintr_idx
   5620 						| IVAR_VALID),
   5621 					    IVAR_TX_MASK_Q_82576(qid));
   5622 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5623 					ivar |= __SHIFTIN((qintr_idx
   5624 						| IVAR_VALID),
   5625 					    IVAR_RX_MASK_Q_82576(qid));
   5626 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5627 					    ivar);
   5628 				}
   5629 				break;
   5630 			default:
   5631 				break;
   5632 			}
   5633 
   5634 			/* Link status */
   5635 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5636 			    IVAR_MISC_OTHER);
   5637 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5638 		}
   5639 
   5640 		if (wm_is_using_multiqueue(sc)) {
   5641 			wm_init_rss(sc);
   5642 
   5643 			/*
   5644 			** NOTE: Receive Full-Packet Checksum Offload
   5645 			** is mutually exclusive with Multiqueue. However
   5646 			** this is not the same as TCP/IP checksums which
   5647 			** still work.
   5648 			*/
   5649 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5650 			reg |= RXCSUM_PCSD;
   5651 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5652 		}
   5653 	}
   5654 
   5655 	/* Set up the interrupt registers. */
   5656 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5657 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5658 	    ICR_RXO | ICR_RXT0;
   5659 	if (wm_is_using_msix(sc)) {
   5660 		uint32_t mask;
   5661 		struct wm_queue *wmq;
   5662 
   5663 		switch (sc->sc_type) {
   5664 		case WM_T_82574:
   5665 			mask = 0;
   5666 			for (i = 0; i < sc->sc_nqueues; i++) {
   5667 				wmq = &sc->sc_queue[i];
   5668 				mask |= ICR_TXQ(wmq->wmq_id);
   5669 				mask |= ICR_RXQ(wmq->wmq_id);
   5670 			}
   5671 			mask |= ICR_OTHER;
   5672 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5673 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5674 			break;
   5675 		default:
   5676 			if (sc->sc_type == WM_T_82575) {
   5677 				mask = 0;
   5678 				for (i = 0; i < sc->sc_nqueues; i++) {
   5679 					wmq = &sc->sc_queue[i];
   5680 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5681 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5682 				}
   5683 				mask |= EITR_OTHER;
   5684 			} else {
   5685 				mask = 0;
   5686 				for (i = 0; i < sc->sc_nqueues; i++) {
   5687 					wmq = &sc->sc_queue[i];
   5688 					mask |= 1 << wmq->wmq_intr_idx;
   5689 				}
   5690 				mask |= 1 << sc->sc_link_intr_idx;
   5691 			}
   5692 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5693 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5694 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5695 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5696 			break;
   5697 		}
   5698 	} else
   5699 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5700 
   5701 	/* Set up the inter-packet gap. */
   5702 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5703 
   5704 	if (sc->sc_type >= WM_T_82543) {
   5705 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5706 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5707 			wm_itrs_writereg(sc, wmq);
   5708 		}
   5709 		/*
   5710 		 * Link interrupts occur much less than TX
   5711 		 * interrupts and RX interrupts. So, we don't
   5712 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5713 		 * FreeBSD's if_igb.
   5714 		 */
   5715 	}
   5716 
   5717 	/* Set the VLAN ethernetype. */
   5718 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5719 
   5720 	/*
   5721 	 * Set up the transmit control register; we start out with
   5722 	 * a collision distance suitable for FDX, but update it whe
   5723 	 * we resolve the media type.
   5724 	 */
   5725 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5726 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5727 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5728 	if (sc->sc_type >= WM_T_82571)
   5729 		sc->sc_tctl |= TCTL_MULR;
   5730 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5731 
   5732 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5733 		/* Write TDT after TCTL.EN is set. See the document. */
   5734 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5735 	}
   5736 
   5737 	if (sc->sc_type == WM_T_80003) {
   5738 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5739 		reg &= ~TCTL_EXT_GCEX_MASK;
   5740 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5741 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5742 	}
   5743 
   5744 	/* Set the media. */
   5745 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5746 		goto out;
   5747 
   5748 	/* Configure for OS presence */
   5749 	wm_init_manageability(sc);
   5750 
   5751 	/*
   5752 	 * Set up the receive control register; we actually program
   5753 	 * the register when we set the receive filter.  Use multicast
   5754 	 * address offset type 0.
   5755 	 *
   5756 	 * Only the i82544 has the ability to strip the incoming
   5757 	 * CRC, so we don't enable that feature.
   5758 	 */
   5759 	sc->sc_mchash_type = 0;
   5760 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5761 	    | RCTL_MO(sc->sc_mchash_type);
   5762 
   5763 	/*
   5764 	 * 82574 use one buffer extended Rx descriptor.
   5765 	 */
   5766 	if (sc->sc_type == WM_T_82574)
   5767 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5768 
   5769 	/*
   5770 	 * The I350 has a bug where it always strips the CRC whether
   5771 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
   5772 	 */
   5773 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5774 	    || (sc->sc_type == WM_T_I210))
   5775 		sc->sc_rctl |= RCTL_SECRC;
   5776 
   5777 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5778 	    && (ifp->if_mtu > ETHERMTU)) {
   5779 		sc->sc_rctl |= RCTL_LPE;
   5780 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5781 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5782 	}
   5783 
   5784 	if (MCLBYTES == 2048) {
   5785 		sc->sc_rctl |= RCTL_2k;
   5786 	} else {
   5787 		if (sc->sc_type >= WM_T_82543) {
   5788 			switch (MCLBYTES) {
   5789 			case 4096:
   5790 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5791 				break;
   5792 			case 8192:
   5793 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5794 				break;
   5795 			case 16384:
   5796 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5797 				break;
   5798 			default:
   5799 				panic("wm_init: MCLBYTES %d unsupported",
   5800 				    MCLBYTES);
   5801 				break;
   5802 			}
   5803 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5804 	}
   5805 
   5806 	/* Enable ECC */
   5807 	switch (sc->sc_type) {
   5808 	case WM_T_82571:
   5809 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5810 		reg |= PBA_ECC_CORR_EN;
   5811 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5812 		break;
   5813 	case WM_T_PCH_LPT:
   5814 	case WM_T_PCH_SPT:
   5815 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5816 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5817 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5818 
   5819 		sc->sc_ctrl |= CTRL_MEHE;
   5820 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5821 		break;
   5822 	default:
   5823 		break;
   5824 	}
   5825 
   5826 	/*
   5827 	 * Set the receive filter.
   5828 	 *
   5829 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5830 	 * the setting of RCTL.EN in wm_set_filter()
   5831 	 */
   5832 	wm_set_filter(sc);
   5833 
   5834 	/* On 575 and later set RDT only if RX enabled */
   5835 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5836 		int qidx;
   5837 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5838 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5839 			for (i = 0; i < WM_NRXDESC; i++) {
   5840 				mutex_enter(rxq->rxq_lock);
   5841 				wm_init_rxdesc(rxq, i);
   5842 				mutex_exit(rxq->rxq_lock);
   5843 
   5844 			}
   5845 		}
   5846 	}
   5847 
   5848 	wm_unset_stopping_flags(sc);
   5849 
   5850 	/* Start the one second link check clock. */
   5851 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5852 
   5853 	/* ...all done! */
   5854 	ifp->if_flags |= IFF_RUNNING;
   5855 	ifp->if_flags &= ~IFF_OACTIVE;
   5856 
   5857  out:
   5858 	sc->sc_if_flags = ifp->if_flags;
   5859 	if (error)
   5860 		log(LOG_ERR, "%s: interface not running\n",
   5861 		    device_xname(sc->sc_dev));
   5862 	return error;
   5863 }
   5864 
   5865 /*
   5866  * wm_stop:		[ifnet interface function]
   5867  *
   5868  *	Stop transmission on the interface.
   5869  */
   5870 static void
   5871 wm_stop(struct ifnet *ifp, int disable)
   5872 {
   5873 	struct wm_softc *sc = ifp->if_softc;
   5874 
   5875 	WM_CORE_LOCK(sc);
   5876 	wm_stop_locked(ifp, disable);
   5877 	WM_CORE_UNLOCK(sc);
   5878 }
   5879 
   5880 static void
   5881 wm_stop_locked(struct ifnet *ifp, int disable)
   5882 {
   5883 	struct wm_softc *sc = ifp->if_softc;
   5884 	struct wm_txsoft *txs;
   5885 	int i, qidx;
   5886 
   5887 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5888 		device_xname(sc->sc_dev), __func__));
   5889 	KASSERT(WM_CORE_LOCKED(sc));
   5890 
   5891 	wm_set_stopping_flags(sc);
   5892 
   5893 	/* Stop the one second clock. */
   5894 	callout_stop(&sc->sc_tick_ch);
   5895 
   5896 	/* Stop the 82547 Tx FIFO stall check timer. */
   5897 	if (sc->sc_type == WM_T_82547)
   5898 		callout_stop(&sc->sc_txfifo_ch);
   5899 
   5900 	if (sc->sc_flags & WM_F_HAS_MII) {
   5901 		/* Down the MII. */
   5902 		mii_down(&sc->sc_mii);
   5903 	} else {
   5904 #if 0
   5905 		/* Should we clear PHY's status properly? */
   5906 		wm_reset(sc);
   5907 #endif
   5908 	}
   5909 
   5910 	/* Stop the transmit and receive processes. */
   5911 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5912 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5913 	sc->sc_rctl &= ~RCTL_EN;
   5914 
   5915 	/*
   5916 	 * Clear the interrupt mask to ensure the device cannot assert its
   5917 	 * interrupt line.
   5918 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5919 	 * service any currently pending or shared interrupt.
   5920 	 */
   5921 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5922 	sc->sc_icr = 0;
   5923 	if (wm_is_using_msix(sc)) {
   5924 		if (sc->sc_type != WM_T_82574) {
   5925 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5926 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5927 		} else
   5928 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5929 	}
   5930 
   5931 	/* Release any queued transmit buffers. */
   5932 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5933 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5934 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5935 		mutex_enter(txq->txq_lock);
   5936 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5937 			txs = &txq->txq_soft[i];
   5938 			if (txs->txs_mbuf != NULL) {
   5939 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5940 				m_freem(txs->txs_mbuf);
   5941 				txs->txs_mbuf = NULL;
   5942 			}
   5943 		}
   5944 		mutex_exit(txq->txq_lock);
   5945 	}
   5946 
   5947 	/* Mark the interface as down and cancel the watchdog timer. */
   5948 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5949 	ifp->if_timer = 0;
   5950 
   5951 	if (disable) {
   5952 		for (i = 0; i < sc->sc_nqueues; i++) {
   5953 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5954 			mutex_enter(rxq->rxq_lock);
   5955 			wm_rxdrain(rxq);
   5956 			mutex_exit(rxq->rxq_lock);
   5957 		}
   5958 	}
   5959 
   5960 #if 0 /* notyet */
   5961 	if (sc->sc_type >= WM_T_82544)
   5962 		CSR_WRITE(sc, WMREG_WUC, 0);
   5963 #endif
   5964 }
   5965 
   5966 static void
   5967 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5968 {
   5969 	struct mbuf *m;
   5970 	int i;
   5971 
   5972 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5973 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5974 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5975 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5976 		    m->m_data, m->m_len, m->m_flags);
   5977 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5978 	    i, i == 1 ? "" : "s");
   5979 }
   5980 
   5981 /*
   5982  * wm_82547_txfifo_stall:
   5983  *
   5984  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5985  *	reset the FIFO pointers, and restart packet transmission.
   5986  */
   5987 static void
   5988 wm_82547_txfifo_stall(void *arg)
   5989 {
   5990 	struct wm_softc *sc = arg;
   5991 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5992 
   5993 	mutex_enter(txq->txq_lock);
   5994 
   5995 	if (txq->txq_stopping)
   5996 		goto out;
   5997 
   5998 	if (txq->txq_fifo_stall) {
   5999 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6000 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6001 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6002 			/*
   6003 			 * Packets have drained.  Stop transmitter, reset
   6004 			 * FIFO pointers, restart transmitter, and kick
   6005 			 * the packet queue.
   6006 			 */
   6007 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6008 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6009 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6010 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6011 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6012 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6013 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6014 			CSR_WRITE_FLUSH(sc);
   6015 
   6016 			txq->txq_fifo_head = 0;
   6017 			txq->txq_fifo_stall = 0;
   6018 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6019 		} else {
   6020 			/*
   6021 			 * Still waiting for packets to drain; try again in
   6022 			 * another tick.
   6023 			 */
   6024 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6025 		}
   6026 	}
   6027 
   6028 out:
   6029 	mutex_exit(txq->txq_lock);
   6030 }
   6031 
   6032 /*
   6033  * wm_82547_txfifo_bugchk:
   6034  *
   6035  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6036  *	prevent enqueueing a packet that would wrap around the end
   6037  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
   6038  *
   6039  *	We do this by checking the amount of space before the end
   6040  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6041  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6042  *	the internal FIFO pointers to the beginning, and restart
   6043  *	transmission on the interface.
   6044  */
   6045 #define	WM_FIFO_HDR		0x10
   6046 #define	WM_82547_PAD_LEN	0x3e0
   6047 static int
   6048 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6049 {
   6050 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6051 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6052 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6053 
   6054 	/* Just return if already stalled. */
   6055 	if (txq->txq_fifo_stall)
   6056 		return 1;
   6057 
   6058 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6059 		/* Stall only occurs in half-duplex mode. */
   6060 		goto send_packet;
   6061 	}
   6062 
   6063 	if (len >= WM_82547_PAD_LEN + space) {
   6064 		txq->txq_fifo_stall = 1;
   6065 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6066 		return 1;
   6067 	}
   6068 
   6069  send_packet:
   6070 	txq->txq_fifo_head += len;
   6071 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6072 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6073 
   6074 	return 0;
   6075 }
   6076 
   6077 static int
   6078 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6079 {
   6080 	int error;
   6081 
   6082 	/*
   6083 	 * Allocate the control data structures, and create and load the
   6084 	 * DMA map for it.
   6085 	 *
   6086 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6087 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6088 	 * both sets within the same 4G segment.
   6089 	 */
   6090 	if (sc->sc_type < WM_T_82544)
   6091 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6092 	else
   6093 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6094 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6095 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6096 	else
   6097 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6098 
   6099 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6100 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6101 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6102 		aprint_error_dev(sc->sc_dev,
   6103 		    "unable to allocate TX control data, error = %d\n",
   6104 		    error);
   6105 		goto fail_0;
   6106 	}
   6107 
   6108 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6109 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6110 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6111 		aprint_error_dev(sc->sc_dev,
   6112 		    "unable to map TX control data, error = %d\n", error);
   6113 		goto fail_1;
   6114 	}
   6115 
   6116 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6117 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6118 		aprint_error_dev(sc->sc_dev,
   6119 		    "unable to create TX control data DMA map, error = %d\n",
   6120 		    error);
   6121 		goto fail_2;
   6122 	}
   6123 
   6124 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6125 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6126 		aprint_error_dev(sc->sc_dev,
   6127 		    "unable to load TX control data DMA map, error = %d\n",
   6128 		    error);
   6129 		goto fail_3;
   6130 	}
   6131 
   6132 	return 0;
   6133 
   6134  fail_3:
   6135 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6136  fail_2:
   6137 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6138 	    WM_TXDESCS_SIZE(txq));
   6139  fail_1:
   6140 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6141  fail_0:
   6142 	return error;
   6143 }
   6144 
   6145 static void
   6146 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6147 {
   6148 
   6149 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6150 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6151 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6152 	    WM_TXDESCS_SIZE(txq));
   6153 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6154 }
   6155 
   6156 static int
   6157 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6158 {
   6159 	int error;
   6160 	size_t rxq_descs_size;
   6161 
   6162 	/*
   6163 	 * Allocate the control data structures, and create and load the
   6164 	 * DMA map for it.
   6165 	 *
   6166 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6167 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6168 	 * both sets within the same 4G segment.
   6169 	 */
   6170 	rxq->rxq_ndesc = WM_NRXDESC;
   6171 	if (sc->sc_type == WM_T_82574)
   6172 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6173 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6174 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6175 	else
   6176 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6177 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6178 
   6179 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6180 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6181 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6182 		aprint_error_dev(sc->sc_dev,
   6183 		    "unable to allocate RX control data, error = %d\n",
   6184 		    error);
   6185 		goto fail_0;
   6186 	}
   6187 
   6188 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6189 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6190 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6191 		aprint_error_dev(sc->sc_dev,
   6192 		    "unable to map RX control data, error = %d\n", error);
   6193 		goto fail_1;
   6194 	}
   6195 
   6196 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6197 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6198 		aprint_error_dev(sc->sc_dev,
   6199 		    "unable to create RX control data DMA map, error = %d\n",
   6200 		    error);
   6201 		goto fail_2;
   6202 	}
   6203 
   6204 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6205 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6206 		aprint_error_dev(sc->sc_dev,
   6207 		    "unable to load RX control data DMA map, error = %d\n",
   6208 		    error);
   6209 		goto fail_3;
   6210 	}
   6211 
   6212 	return 0;
   6213 
   6214  fail_3:
   6215 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6216  fail_2:
   6217 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6218 	    rxq_descs_size);
   6219  fail_1:
   6220 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6221  fail_0:
   6222 	return error;
   6223 }
   6224 
   6225 static void
   6226 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6227 {
   6228 
   6229 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6230 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6231 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6232 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6233 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6234 }
   6235 
   6236 
   6237 static int
   6238 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6239 {
   6240 	int i, error;
   6241 
   6242 	/* Create the transmit buffer DMA maps. */
   6243 	WM_TXQUEUELEN(txq) =
   6244 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6245 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6246 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6247 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6248 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6249 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6250 			aprint_error_dev(sc->sc_dev,
   6251 			    "unable to create Tx DMA map %d, error = %d\n",
   6252 			    i, error);
   6253 			goto fail;
   6254 		}
   6255 	}
   6256 
   6257 	return 0;
   6258 
   6259  fail:
   6260 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6261 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6262 			bus_dmamap_destroy(sc->sc_dmat,
   6263 			    txq->txq_soft[i].txs_dmamap);
   6264 	}
   6265 	return error;
   6266 }
   6267 
   6268 static void
   6269 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6270 {
   6271 	int i;
   6272 
   6273 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6274 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6275 			bus_dmamap_destroy(sc->sc_dmat,
   6276 			    txq->txq_soft[i].txs_dmamap);
   6277 	}
   6278 }
   6279 
   6280 static int
   6281 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6282 {
   6283 	int i, error;
   6284 
   6285 	/* Create the receive buffer DMA maps. */
   6286 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6287 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6288 			    MCLBYTES, 0, 0,
   6289 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6290 			aprint_error_dev(sc->sc_dev,
   6291 			    "unable to create Rx DMA map %d error = %d\n",
   6292 			    i, error);
   6293 			goto fail;
   6294 		}
   6295 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6296 	}
   6297 
   6298 	return 0;
   6299 
   6300  fail:
   6301 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6302 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6303 			bus_dmamap_destroy(sc->sc_dmat,
   6304 			    rxq->rxq_soft[i].rxs_dmamap);
   6305 	}
   6306 	return error;
   6307 }
   6308 
   6309 static void
   6310 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6311 {
   6312 	int i;
   6313 
   6314 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6315 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6316 			bus_dmamap_destroy(sc->sc_dmat,
   6317 			    rxq->rxq_soft[i].rxs_dmamap);
   6318 	}
   6319 }
   6320 
   6321 /*
   6322  * wm_alloc_quques:
   6323  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6324  */
   6325 static int
   6326 wm_alloc_txrx_queues(struct wm_softc *sc)
   6327 {
   6328 	int i, error, tx_done, rx_done;
   6329 
   6330 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6331 	    KM_SLEEP);
   6332 	if (sc->sc_queue == NULL) {
   6333 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6334 		error = ENOMEM;
   6335 		goto fail_0;
   6336 	}
   6337 
   6338 	/*
   6339 	 * For transmission
   6340 	 */
   6341 	error = 0;
   6342 	tx_done = 0;
   6343 	for (i = 0; i < sc->sc_nqueues; i++) {
   6344 #ifdef WM_EVENT_COUNTERS
   6345 		int j;
   6346 		const char *xname;
   6347 #endif
   6348 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6349 		txq->txq_sc = sc;
   6350 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6351 
   6352 		error = wm_alloc_tx_descs(sc, txq);
   6353 		if (error)
   6354 			break;
   6355 		error = wm_alloc_tx_buffer(sc, txq);
   6356 		if (error) {
   6357 			wm_free_tx_descs(sc, txq);
   6358 			break;
   6359 		}
   6360 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6361 		if (txq->txq_interq == NULL) {
   6362 			wm_free_tx_descs(sc, txq);
   6363 			wm_free_tx_buffer(sc, txq);
   6364 			error = ENOMEM;
   6365 			break;
   6366 		}
   6367 
   6368 #ifdef WM_EVENT_COUNTERS
   6369 		xname = device_xname(sc->sc_dev);
   6370 
   6371 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6372 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6373 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6374 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6375 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6376 
   6377 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6378 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6379 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6380 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6381 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6382 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6383 
   6384 		for (j = 0; j < WM_NTXSEGS; j++) {
   6385 			snprintf(txq->txq_txseg_evcnt_names[j],
   6386 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6387 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6388 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6389 		}
   6390 
   6391 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6392 
   6393 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6394 #endif /* WM_EVENT_COUNTERS */
   6395 
   6396 		tx_done++;
   6397 	}
   6398 	if (error)
   6399 		goto fail_1;
   6400 
   6401 	/*
   6402 	 * For recieve
   6403 	 */
   6404 	error = 0;
   6405 	rx_done = 0;
   6406 	for (i = 0; i < sc->sc_nqueues; i++) {
   6407 #ifdef WM_EVENT_COUNTERS
   6408 		const char *xname;
   6409 #endif
   6410 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6411 		rxq->rxq_sc = sc;
   6412 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6413 
   6414 		error = wm_alloc_rx_descs(sc, rxq);
   6415 		if (error)
   6416 			break;
   6417 
   6418 		error = wm_alloc_rx_buffer(sc, rxq);
   6419 		if (error) {
   6420 			wm_free_rx_descs(sc, rxq);
   6421 			break;
   6422 		}
   6423 
   6424 #ifdef WM_EVENT_COUNTERS
   6425 		xname = device_xname(sc->sc_dev);
   6426 
   6427 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6428 
   6429 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6430 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6431 #endif /* WM_EVENT_COUNTERS */
   6432 
   6433 		rx_done++;
   6434 	}
   6435 	if (error)
   6436 		goto fail_2;
   6437 
   6438 	return 0;
   6439 
   6440  fail_2:
   6441 	for (i = 0; i < rx_done; i++) {
   6442 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6443 		wm_free_rx_buffer(sc, rxq);
   6444 		wm_free_rx_descs(sc, rxq);
   6445 		if (rxq->rxq_lock)
   6446 			mutex_obj_free(rxq->rxq_lock);
   6447 	}
   6448  fail_1:
   6449 	for (i = 0; i < tx_done; i++) {
   6450 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6451 		pcq_destroy(txq->txq_interq);
   6452 		wm_free_tx_buffer(sc, txq);
   6453 		wm_free_tx_descs(sc, txq);
   6454 		if (txq->txq_lock)
   6455 			mutex_obj_free(txq->txq_lock);
   6456 	}
   6457 
   6458 	kmem_free(sc->sc_queue,
   6459 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6460  fail_0:
   6461 	return error;
   6462 }
   6463 
   6464 /*
   6465  * wm_free_quques:
   6466  *	Free {tx,rx}descs and {tx,rx} buffers
   6467  */
   6468 static void
   6469 wm_free_txrx_queues(struct wm_softc *sc)
   6470 {
   6471 	int i;
   6472 
   6473 	for (i = 0; i < sc->sc_nqueues; i++) {
   6474 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6475 
   6476 #ifdef WM_EVENT_COUNTERS
   6477 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6478 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6479 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6480 #endif /* WM_EVENT_COUNTERS */
   6481 
   6482 		wm_free_rx_buffer(sc, rxq);
   6483 		wm_free_rx_descs(sc, rxq);
   6484 		if (rxq->rxq_lock)
   6485 			mutex_obj_free(rxq->rxq_lock);
   6486 	}
   6487 
   6488 	for (i = 0; i < sc->sc_nqueues; i++) {
   6489 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6490 		struct mbuf *m;
   6491 #ifdef WM_EVENT_COUNTERS
   6492 		int j;
   6493 
   6494 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6495 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6496 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6497 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6498 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6499 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6500 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6501 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6502 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6503 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6504 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6505 
   6506 		for (j = 0; j < WM_NTXSEGS; j++)
   6507 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6508 
   6509 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6510 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6511 #endif /* WM_EVENT_COUNTERS */
   6512 
   6513 		/* drain txq_interq */
   6514 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6515 			m_freem(m);
   6516 		pcq_destroy(txq->txq_interq);
   6517 
   6518 		wm_free_tx_buffer(sc, txq);
   6519 		wm_free_tx_descs(sc, txq);
   6520 		if (txq->txq_lock)
   6521 			mutex_obj_free(txq->txq_lock);
   6522 	}
   6523 
   6524 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6525 }
   6526 
   6527 static void
   6528 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6529 {
   6530 
   6531 	KASSERT(mutex_owned(txq->txq_lock));
   6532 
   6533 	/* Initialize the transmit descriptor ring. */
   6534 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6535 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6536 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6537 	txq->txq_free = WM_NTXDESC(txq);
   6538 	txq->txq_next = 0;
   6539 }
   6540 
   6541 static void
   6542 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6543     struct wm_txqueue *txq)
   6544 {
   6545 
   6546 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6547 		device_xname(sc->sc_dev), __func__));
   6548 	KASSERT(mutex_owned(txq->txq_lock));
   6549 
   6550 	if (sc->sc_type < WM_T_82543) {
   6551 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6552 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6553 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6554 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6555 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6556 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6557 	} else {
   6558 		int qid = wmq->wmq_id;
   6559 
   6560 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6561 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6562 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6563 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6564 
   6565 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6566 			/*
   6567 			 * Don't write TDT before TCTL.EN is set.
   6568 			 * See the document.
   6569 			 */
   6570 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6571 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6572 			    | TXDCTL_WTHRESH(0));
   6573 		else {
   6574 			/* XXX should update with AIM? */
   6575 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6576 			if (sc->sc_type >= WM_T_82540) {
   6577 				/* should be same */
   6578 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6579 			}
   6580 
   6581 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6582 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6583 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6584 		}
   6585 	}
   6586 }
   6587 
   6588 static void
   6589 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6590 {
   6591 	int i;
   6592 
   6593 	KASSERT(mutex_owned(txq->txq_lock));
   6594 
   6595 	/* Initialize the transmit job descriptors. */
   6596 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6597 		txq->txq_soft[i].txs_mbuf = NULL;
   6598 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6599 	txq->txq_snext = 0;
   6600 	txq->txq_sdirty = 0;
   6601 }
   6602 
   6603 static void
   6604 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6605     struct wm_txqueue *txq)
   6606 {
   6607 
   6608 	KASSERT(mutex_owned(txq->txq_lock));
   6609 
   6610 	/*
   6611 	 * Set up some register offsets that are different between
   6612 	 * the i82542 and the i82543 and later chips.
   6613 	 */
   6614 	if (sc->sc_type < WM_T_82543)
   6615 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6616 	else
   6617 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6618 
   6619 	wm_init_tx_descs(sc, txq);
   6620 	wm_init_tx_regs(sc, wmq, txq);
   6621 	wm_init_tx_buffer(sc, txq);
   6622 }
   6623 
   6624 static void
   6625 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6626     struct wm_rxqueue *rxq)
   6627 {
   6628 
   6629 	KASSERT(mutex_owned(rxq->rxq_lock));
   6630 
   6631 	/*
   6632 	 * Initialize the receive descriptor and receive job
   6633 	 * descriptor rings.
   6634 	 */
   6635 	if (sc->sc_type < WM_T_82543) {
   6636 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6637 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6638 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6639 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6640 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6641 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6642 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6643 
   6644 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6645 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6646 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6647 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6648 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6649 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6650 	} else {
   6651 		int qid = wmq->wmq_id;
   6652 
   6653 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6654 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6655 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6656 
   6657 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6658 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6659 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6660 
   6661 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6662 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6663 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6664 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6665 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6666 			    | RXDCTL_WTHRESH(1));
   6667 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6668 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6669 		} else {
   6670 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6671 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6672 			/* XXX should update with AIM? */
   6673 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6674 			/* MUST be same */
   6675 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6676 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6677 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6678 		}
   6679 	}
   6680 }
   6681 
   6682 static int
   6683 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6684 {
   6685 	struct wm_rxsoft *rxs;
   6686 	int error, i;
   6687 
   6688 	KASSERT(mutex_owned(rxq->rxq_lock));
   6689 
   6690 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6691 		rxs = &rxq->rxq_soft[i];
   6692 		if (rxs->rxs_mbuf == NULL) {
   6693 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6694 				log(LOG_ERR, "%s: unable to allocate or map "
   6695 				    "rx buffer %d, error = %d\n",
   6696 				    device_xname(sc->sc_dev), i, error);
   6697 				/*
   6698 				 * XXX Should attempt to run with fewer receive
   6699 				 * XXX buffers instead of just failing.
   6700 				 */
   6701 				wm_rxdrain(rxq);
   6702 				return ENOMEM;
   6703 			}
   6704 		} else {
   6705 			/*
   6706 			 * For 82575 and 82576, the RX descriptors must be
   6707 			 * initialized after the setting of RCTL.EN in
   6708 			 * wm_set_filter()
   6709 			 */
   6710 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6711 				wm_init_rxdesc(rxq, i);
   6712 		}
   6713 	}
   6714 	rxq->rxq_ptr = 0;
   6715 	rxq->rxq_discard = 0;
   6716 	WM_RXCHAIN_RESET(rxq);
   6717 
   6718 	return 0;
   6719 }
   6720 
   6721 static int
   6722 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6723     struct wm_rxqueue *rxq)
   6724 {
   6725 
   6726 	KASSERT(mutex_owned(rxq->rxq_lock));
   6727 
   6728 	/*
   6729 	 * Set up some register offsets that are different between
   6730 	 * the i82542 and the i82543 and later chips.
   6731 	 */
   6732 	if (sc->sc_type < WM_T_82543)
   6733 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6734 	else
   6735 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6736 
   6737 	wm_init_rx_regs(sc, wmq, rxq);
   6738 	return wm_init_rx_buffer(sc, rxq);
   6739 }
   6740 
   6741 /*
   6742  * wm_init_quques:
   6743  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6744  */
   6745 static int
   6746 wm_init_txrx_queues(struct wm_softc *sc)
   6747 {
   6748 	int i, error = 0;
   6749 
   6750 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6751 		device_xname(sc->sc_dev), __func__));
   6752 
   6753 	for (i = 0; i < sc->sc_nqueues; i++) {
   6754 		struct wm_queue *wmq = &sc->sc_queue[i];
   6755 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6756 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6757 
   6758 		/*
   6759 		 * TODO
   6760 		 * Currently, use constant variable instead of AIM.
   6761 		 * Furthermore, the interrupt interval of multiqueue which use
   6762 		 * polling mode is less than default value.
   6763 		 * More tuning and AIM are required.
   6764 		 */
   6765 		if (wm_is_using_multiqueue(sc))
   6766 			wmq->wmq_itr = 50;
   6767 		else
   6768 			wmq->wmq_itr = sc->sc_itr_init;
   6769 		wmq->wmq_set_itr = true;
   6770 
   6771 		mutex_enter(txq->txq_lock);
   6772 		wm_init_tx_queue(sc, wmq, txq);
   6773 		mutex_exit(txq->txq_lock);
   6774 
   6775 		mutex_enter(rxq->rxq_lock);
   6776 		error = wm_init_rx_queue(sc, wmq, rxq);
   6777 		mutex_exit(rxq->rxq_lock);
   6778 		if (error)
   6779 			break;
   6780 	}
   6781 
   6782 	return error;
   6783 }
   6784 
   6785 /*
   6786  * wm_tx_offload:
   6787  *
   6788  *	Set up TCP/IP checksumming parameters for the
   6789  *	specified packet.
   6790  */
   6791 static int
   6792 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6793     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6794 {
   6795 	struct mbuf *m0 = txs->txs_mbuf;
   6796 	struct livengood_tcpip_ctxdesc *t;
   6797 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6798 	uint32_t ipcse;
   6799 	struct ether_header *eh;
   6800 	int offset, iphl;
   6801 	uint8_t fields;
   6802 
   6803 	/*
   6804 	 * XXX It would be nice if the mbuf pkthdr had offset
   6805 	 * fields for the protocol headers.
   6806 	 */
   6807 
   6808 	eh = mtod(m0, struct ether_header *);
   6809 	switch (htons(eh->ether_type)) {
   6810 	case ETHERTYPE_IP:
   6811 	case ETHERTYPE_IPV6:
   6812 		offset = ETHER_HDR_LEN;
   6813 		break;
   6814 
   6815 	case ETHERTYPE_VLAN:
   6816 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6817 		break;
   6818 
   6819 	default:
   6820 		/*
   6821 		 * Don't support this protocol or encapsulation.
   6822 		 */
   6823 		*fieldsp = 0;
   6824 		*cmdp = 0;
   6825 		return 0;
   6826 	}
   6827 
   6828 	if ((m0->m_pkthdr.csum_flags &
   6829 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6830 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6831 	} else {
   6832 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6833 	}
   6834 	ipcse = offset + iphl - 1;
   6835 
   6836 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6837 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6838 	seg = 0;
   6839 	fields = 0;
   6840 
   6841 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6842 		int hlen = offset + iphl;
   6843 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6844 
   6845 		if (__predict_false(m0->m_len <
   6846 				    (hlen + sizeof(struct tcphdr)))) {
   6847 			/*
   6848 			 * TCP/IP headers are not in the first mbuf; we need
   6849 			 * to do this the slow and painful way.  Let's just
   6850 			 * hope this doesn't happen very often.
   6851 			 */
   6852 			struct tcphdr th;
   6853 
   6854 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6855 
   6856 			m_copydata(m0, hlen, sizeof(th), &th);
   6857 			if (v4) {
   6858 				struct ip ip;
   6859 
   6860 				m_copydata(m0, offset, sizeof(ip), &ip);
   6861 				ip.ip_len = 0;
   6862 				m_copyback(m0,
   6863 				    offset + offsetof(struct ip, ip_len),
   6864 				    sizeof(ip.ip_len), &ip.ip_len);
   6865 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6866 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6867 			} else {
   6868 				struct ip6_hdr ip6;
   6869 
   6870 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6871 				ip6.ip6_plen = 0;
   6872 				m_copyback(m0,
   6873 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6874 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6875 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6876 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6877 			}
   6878 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6879 			    sizeof(th.th_sum), &th.th_sum);
   6880 
   6881 			hlen += th.th_off << 2;
   6882 		} else {
   6883 			/*
   6884 			 * TCP/IP headers are in the first mbuf; we can do
   6885 			 * this the easy way.
   6886 			 */
   6887 			struct tcphdr *th;
   6888 
   6889 			if (v4) {
   6890 				struct ip *ip =
   6891 				    (void *)(mtod(m0, char *) + offset);
   6892 				th = (void *)(mtod(m0, char *) + hlen);
   6893 
   6894 				ip->ip_len = 0;
   6895 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6896 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6897 			} else {
   6898 				struct ip6_hdr *ip6 =
   6899 				    (void *)(mtod(m0, char *) + offset);
   6900 				th = (void *)(mtod(m0, char *) + hlen);
   6901 
   6902 				ip6->ip6_plen = 0;
   6903 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6904 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6905 			}
   6906 			hlen += th->th_off << 2;
   6907 		}
   6908 
   6909 		if (v4) {
   6910 			WM_Q_EVCNT_INCR(txq, txtso);
   6911 			cmdlen |= WTX_TCPIP_CMD_IP;
   6912 		} else {
   6913 			WM_Q_EVCNT_INCR(txq, txtso6);
   6914 			ipcse = 0;
   6915 		}
   6916 		cmd |= WTX_TCPIP_CMD_TSE;
   6917 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6918 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6919 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6920 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6921 	}
   6922 
   6923 	/*
   6924 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6925 	 * offload feature, if we load the context descriptor, we
   6926 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6927 	 */
   6928 
   6929 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6930 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6931 	    WTX_TCPIP_IPCSE(ipcse);
   6932 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6933 		WM_Q_EVCNT_INCR(txq, txipsum);
   6934 		fields |= WTX_IXSM;
   6935 	}
   6936 
   6937 	offset += iphl;
   6938 
   6939 	if (m0->m_pkthdr.csum_flags &
   6940 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6941 		WM_Q_EVCNT_INCR(txq, txtusum);
   6942 		fields |= WTX_TXSM;
   6943 		tucs = WTX_TCPIP_TUCSS(offset) |
   6944 		    WTX_TCPIP_TUCSO(offset +
   6945 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6946 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6947 	} else if ((m0->m_pkthdr.csum_flags &
   6948 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6949 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6950 		fields |= WTX_TXSM;
   6951 		tucs = WTX_TCPIP_TUCSS(offset) |
   6952 		    WTX_TCPIP_TUCSO(offset +
   6953 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6954 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6955 	} else {
   6956 		/* Just initialize it to a valid TCP context. */
   6957 		tucs = WTX_TCPIP_TUCSS(offset) |
   6958 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6959 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6960 	}
   6961 
   6962 	/*
   6963 	 * We don't have to write context descriptor for every packet
   6964 	 * except for 82574. For 82574, we must write context descriptor
   6965 	 * for every packet when we use two descriptor queues.
   6966 	 * It would be overhead to write context descriptor for every packet,
   6967 	 * however it does not cause problems.
   6968 	 */
   6969 	/* Fill in the context descriptor. */
   6970 	t = (struct livengood_tcpip_ctxdesc *)
   6971 	    &txq->txq_descs[txq->txq_next];
   6972 	t->tcpip_ipcs = htole32(ipcs);
   6973 	t->tcpip_tucs = htole32(tucs);
   6974 	t->tcpip_cmdlen = htole32(cmdlen);
   6975 	t->tcpip_seg = htole32(seg);
   6976 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6977 
   6978 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6979 	txs->txs_ndesc++;
   6980 
   6981 	*cmdp = cmd;
   6982 	*fieldsp = fields;
   6983 
   6984 	return 0;
   6985 }
   6986 
   6987 static inline int
   6988 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6989 {
   6990 	struct wm_softc *sc = ifp->if_softc;
   6991 	u_int cpuid = cpu_index(curcpu());
   6992 
   6993 	/*
   6994 	 * Currently, simple distribute strategy.
   6995 	 * TODO:
   6996 	 * distribute by flowid(RSS has value).
   6997 	 */
   6998         return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6999 }
   7000 
   7001 /*
   7002  * wm_start:		[ifnet interface function]
   7003  *
   7004  *	Start packet transmission on the interface.
   7005  */
   7006 static void
   7007 wm_start(struct ifnet *ifp)
   7008 {
   7009 	struct wm_softc *sc = ifp->if_softc;
   7010 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7011 
   7012 #ifdef WM_MPSAFE
   7013 	KASSERT(if_is_mpsafe(ifp));
   7014 #endif
   7015 	/*
   7016 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7017 	 */
   7018 
   7019 	mutex_enter(txq->txq_lock);
   7020 	if (!txq->txq_stopping)
   7021 		wm_start_locked(ifp);
   7022 	mutex_exit(txq->txq_lock);
   7023 }
   7024 
   7025 static void
   7026 wm_start_locked(struct ifnet *ifp)
   7027 {
   7028 	struct wm_softc *sc = ifp->if_softc;
   7029 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7030 
   7031 	wm_send_common_locked(ifp, txq, false);
   7032 }
   7033 
   7034 static int
   7035 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7036 {
   7037 	int qid;
   7038 	struct wm_softc *sc = ifp->if_softc;
   7039 	struct wm_txqueue *txq;
   7040 
   7041 	qid = wm_select_txqueue(ifp, m);
   7042 	txq = &sc->sc_queue[qid].wmq_txq;
   7043 
   7044 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7045 		m_freem(m);
   7046 		WM_Q_EVCNT_INCR(txq, txdrop);
   7047 		return ENOBUFS;
   7048 	}
   7049 
   7050 	/*
   7051 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7052 	 */
   7053 	ifp->if_obytes += m->m_pkthdr.len;
   7054 	if (m->m_flags & M_MCAST)
   7055 		ifp->if_omcasts++;
   7056 
   7057 	if (mutex_tryenter(txq->txq_lock)) {
   7058 		if (!txq->txq_stopping)
   7059 			wm_transmit_locked(ifp, txq);
   7060 		mutex_exit(txq->txq_lock);
   7061 	}
   7062 
   7063 	return 0;
   7064 }
   7065 
   7066 static void
   7067 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7068 {
   7069 
   7070 	wm_send_common_locked(ifp, txq, true);
   7071 }
   7072 
   7073 static void
   7074 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7075     bool is_transmit)
   7076 {
   7077 	struct wm_softc *sc = ifp->if_softc;
   7078 	struct mbuf *m0;
   7079 	struct wm_txsoft *txs;
   7080 	bus_dmamap_t dmamap;
   7081 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7082 	bus_addr_t curaddr;
   7083 	bus_size_t seglen, curlen;
   7084 	uint32_t cksumcmd;
   7085 	uint8_t cksumfields;
   7086 
   7087 	KASSERT(mutex_owned(txq->txq_lock));
   7088 
   7089 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7090 		return;
   7091 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7092 		return;
   7093 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7094 		return;
   7095 
   7096 	/* Remember the previous number of free descriptors. */
   7097 	ofree = txq->txq_free;
   7098 
   7099 	/*
   7100 	 * Loop through the send queue, setting up transmit descriptors
   7101 	 * until we drain the queue, or use up all available transmit
   7102 	 * descriptors.
   7103 	 */
   7104 	for (;;) {
   7105 		m0 = NULL;
   7106 
   7107 		/* Get a work queue entry. */
   7108 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7109 			wm_txeof(sc, txq);
   7110 			if (txq->txq_sfree == 0) {
   7111 				DPRINTF(WM_DEBUG_TX,
   7112 				    ("%s: TX: no free job descriptors\n",
   7113 					device_xname(sc->sc_dev)));
   7114 				WM_Q_EVCNT_INCR(txq, txsstall);
   7115 				break;
   7116 			}
   7117 		}
   7118 
   7119 		/* Grab a packet off the queue. */
   7120 		if (is_transmit)
   7121 			m0 = pcq_get(txq->txq_interq);
   7122 		else
   7123 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7124 		if (m0 == NULL)
   7125 			break;
   7126 
   7127 		DPRINTF(WM_DEBUG_TX,
   7128 		    ("%s: TX: have packet to transmit: %p\n",
   7129 		    device_xname(sc->sc_dev), m0));
   7130 
   7131 		txs = &txq->txq_soft[txq->txq_snext];
   7132 		dmamap = txs->txs_dmamap;
   7133 
   7134 		use_tso = (m0->m_pkthdr.csum_flags &
   7135 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7136 
   7137 		/*
   7138 		 * So says the Linux driver:
   7139 		 * The controller does a simple calculation to make sure
   7140 		 * there is enough room in the FIFO before initiating the
   7141 		 * DMA for each buffer.  The calc is:
   7142 		 *	4 = ceil(buffer len / MSS)
   7143 		 * To make sure we don't overrun the FIFO, adjust the max
   7144 		 * buffer len if the MSS drops.
   7145 		 */
   7146 		dmamap->dm_maxsegsz =
   7147 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7148 		    ? m0->m_pkthdr.segsz << 2
   7149 		    : WTX_MAX_LEN;
   7150 
   7151 		/*
   7152 		 * Load the DMA map.  If this fails, the packet either
   7153 		 * didn't fit in the allotted number of segments, or we
   7154 		 * were short on resources.  For the too-many-segments
   7155 		 * case, we simply report an error and drop the packet,
   7156 		 * since we can't sanely copy a jumbo packet to a single
   7157 		 * buffer.
   7158 		 */
   7159 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7160 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7161 		if (error) {
   7162 			if (error == EFBIG) {
   7163 				WM_Q_EVCNT_INCR(txq, txdrop);
   7164 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7165 				    "DMA segments, dropping...\n",
   7166 				    device_xname(sc->sc_dev));
   7167 				wm_dump_mbuf_chain(sc, m0);
   7168 				m_freem(m0);
   7169 				continue;
   7170 			}
   7171 			/*  Short on resources, just stop for now. */
   7172 			DPRINTF(WM_DEBUG_TX,
   7173 			    ("%s: TX: dmamap load failed: %d\n",
   7174 			    device_xname(sc->sc_dev), error));
   7175 			break;
   7176 		}
   7177 
   7178 		segs_needed = dmamap->dm_nsegs;
   7179 		if (use_tso) {
   7180 			/* For sentinel descriptor; see below. */
   7181 			segs_needed++;
   7182 		}
   7183 
   7184 		/*
   7185 		 * Ensure we have enough descriptors free to describe
   7186 		 * the packet.  Note, we always reserve one descriptor
   7187 		 * at the end of the ring due to the semantics of the
   7188 		 * TDT register, plus one more in the event we need
   7189 		 * to load offload context.
   7190 		 */
   7191 		if (segs_needed > txq->txq_free - 2) {
   7192 			/*
   7193 			 * Not enough free descriptors to transmit this
   7194 			 * packet.  We haven't committed anything yet,
   7195 			 * so just unload the DMA map, put the packet
   7196 			 * pack on the queue, and punt.  Notify the upper
   7197 			 * layer that there are no more slots left.
   7198 			 */
   7199 			DPRINTF(WM_DEBUG_TX,
   7200 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7201 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7202 			    segs_needed, txq->txq_free - 1));
   7203 			if (!is_transmit)
   7204 				ifp->if_flags |= IFF_OACTIVE;
   7205 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7206 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7207 			WM_Q_EVCNT_INCR(txq, txdstall);
   7208 			break;
   7209 		}
   7210 
   7211 		/*
   7212 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7213 		 * once we know we can transmit the packet, since we
   7214 		 * do some internal FIFO space accounting here.
   7215 		 */
   7216 		if (sc->sc_type == WM_T_82547 &&
   7217 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7218 			DPRINTF(WM_DEBUG_TX,
   7219 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7220 			    device_xname(sc->sc_dev)));
   7221 			if (!is_transmit)
   7222 				ifp->if_flags |= IFF_OACTIVE;
   7223 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7224 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7225 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7226 			break;
   7227 		}
   7228 
   7229 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7230 
   7231 		DPRINTF(WM_DEBUG_TX,
   7232 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7233 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7234 
   7235 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7236 
   7237 		/*
   7238 		 * Store a pointer to the packet so that we can free it
   7239 		 * later.
   7240 		 *
   7241 		 * Initially, we consider the number of descriptors the
   7242 		 * packet uses the number of DMA segments.  This may be
   7243 		 * incremented by 1 if we do checksum offload (a descriptor
   7244 		 * is used to set the checksum context).
   7245 		 */
   7246 		txs->txs_mbuf = m0;
   7247 		txs->txs_firstdesc = txq->txq_next;
   7248 		txs->txs_ndesc = segs_needed;
   7249 
   7250 		/* Set up offload parameters for this packet. */
   7251 		if (m0->m_pkthdr.csum_flags &
   7252 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7253 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7254 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7255 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7256 					  &cksumfields) != 0) {
   7257 				/* Error message already displayed. */
   7258 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7259 				continue;
   7260 			}
   7261 		} else {
   7262 			cksumcmd = 0;
   7263 			cksumfields = 0;
   7264 		}
   7265 
   7266 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7267 
   7268 		/* Sync the DMA map. */
   7269 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7270 		    BUS_DMASYNC_PREWRITE);
   7271 
   7272 		/* Initialize the transmit descriptor. */
   7273 		for (nexttx = txq->txq_next, seg = 0;
   7274 		     seg < dmamap->dm_nsegs; seg++) {
   7275 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7276 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7277 			     seglen != 0;
   7278 			     curaddr += curlen, seglen -= curlen,
   7279 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7280 				curlen = seglen;
   7281 
   7282 				/*
   7283 				 * So says the Linux driver:
   7284 				 * Work around for premature descriptor
   7285 				 * write-backs in TSO mode.  Append a
   7286 				 * 4-byte sentinel descriptor.
   7287 				 */
   7288 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7289 				    curlen > 8)
   7290 					curlen -= 4;
   7291 
   7292 				wm_set_dma_addr(
   7293 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7294 				txq->txq_descs[nexttx].wtx_cmdlen
   7295 				    = htole32(cksumcmd | curlen);
   7296 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7297 				    = 0;
   7298 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7299 				    = cksumfields;
   7300 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7301 				lasttx = nexttx;
   7302 
   7303 				DPRINTF(WM_DEBUG_TX,
   7304 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7305 				     "len %#04zx\n",
   7306 				    device_xname(sc->sc_dev), nexttx,
   7307 				    (uint64_t)curaddr, curlen));
   7308 			}
   7309 		}
   7310 
   7311 		KASSERT(lasttx != -1);
   7312 
   7313 		/*
   7314 		 * Set up the command byte on the last descriptor of
   7315 		 * the packet.  If we're in the interrupt delay window,
   7316 		 * delay the interrupt.
   7317 		 */
   7318 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7319 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7320 
   7321 		/*
   7322 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7323 		 * up the descriptor to encapsulate the packet for us.
   7324 		 *
   7325 		 * This is only valid on the last descriptor of the packet.
   7326 		 */
   7327 		if (vlan_has_tag(m0)) {
   7328 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7329 			    htole32(WTX_CMD_VLE);
   7330 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7331 			    = htole16(vlan_get_tag(m0));
   7332 		}
   7333 
   7334 		txs->txs_lastdesc = lasttx;
   7335 
   7336 		DPRINTF(WM_DEBUG_TX,
   7337 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7338 		    device_xname(sc->sc_dev),
   7339 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7340 
   7341 		/* Sync the descriptors we're using. */
   7342 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7343 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7344 
   7345 		/* Give the packet to the chip. */
   7346 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7347 
   7348 		DPRINTF(WM_DEBUG_TX,
   7349 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7350 
   7351 		DPRINTF(WM_DEBUG_TX,
   7352 		    ("%s: TX: finished transmitting packet, job %d\n",
   7353 		    device_xname(sc->sc_dev), txq->txq_snext));
   7354 
   7355 		/* Advance the tx pointer. */
   7356 		txq->txq_free -= txs->txs_ndesc;
   7357 		txq->txq_next = nexttx;
   7358 
   7359 		txq->txq_sfree--;
   7360 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7361 
   7362 		/* Pass the packet to any BPF listeners. */
   7363 		bpf_mtap(ifp, m0);
   7364 	}
   7365 
   7366 	if (m0 != NULL) {
   7367 		if (!is_transmit)
   7368 			ifp->if_flags |= IFF_OACTIVE;
   7369 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7370 		WM_Q_EVCNT_INCR(txq, txdrop);
   7371 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7372 			__func__));
   7373 		m_freem(m0);
   7374 	}
   7375 
   7376 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7377 		/* No more slots; notify upper layer. */
   7378 		if (!is_transmit)
   7379 			ifp->if_flags |= IFF_OACTIVE;
   7380 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7381 	}
   7382 
   7383 	if (txq->txq_free != ofree) {
   7384 		/* Set a watchdog timer in case the chip flakes out. */
   7385 		ifp->if_timer = 5;
   7386 	}
   7387 }
   7388 
   7389 /*
   7390  * wm_nq_tx_offload:
   7391  *
   7392  *	Set up TCP/IP checksumming parameters for the
   7393  *	specified packet, for NEWQUEUE devices
   7394  */
   7395 static int
   7396 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7397     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7398 {
   7399 	struct mbuf *m0 = txs->txs_mbuf;
   7400 	uint32_t vl_len, mssidx, cmdc;
   7401 	struct ether_header *eh;
   7402 	int offset, iphl;
   7403 
   7404 	/*
   7405 	 * XXX It would be nice if the mbuf pkthdr had offset
   7406 	 * fields for the protocol headers.
   7407 	 */
   7408 	*cmdlenp = 0;
   7409 	*fieldsp = 0;
   7410 
   7411 	eh = mtod(m0, struct ether_header *);
   7412 	switch (htons(eh->ether_type)) {
   7413 	case ETHERTYPE_IP:
   7414 	case ETHERTYPE_IPV6:
   7415 		offset = ETHER_HDR_LEN;
   7416 		break;
   7417 
   7418 	case ETHERTYPE_VLAN:
   7419 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7420 		break;
   7421 
   7422 	default:
   7423 		/* Don't support this protocol or encapsulation. */
   7424 		*do_csum = false;
   7425 		return 0;
   7426 	}
   7427 	*do_csum = true;
   7428 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7429 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7430 
   7431 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7432 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7433 
   7434 	if ((m0->m_pkthdr.csum_flags &
   7435 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7436 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7437 	} else {
   7438 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7439 	}
   7440 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7441 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7442 
   7443 	if (vlan_has_tag(m0)) {
   7444 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7445 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7446 		*cmdlenp |= NQTX_CMD_VLE;
   7447 	}
   7448 
   7449 	mssidx = 0;
   7450 
   7451 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7452 		int hlen = offset + iphl;
   7453 		int tcp_hlen;
   7454 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7455 
   7456 		if (__predict_false(m0->m_len <
   7457 				    (hlen + sizeof(struct tcphdr)))) {
   7458 			/*
   7459 			 * TCP/IP headers are not in the first mbuf; we need
   7460 			 * to do this the slow and painful way.  Let's just
   7461 			 * hope this doesn't happen very often.
   7462 			 */
   7463 			struct tcphdr th;
   7464 
   7465 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7466 
   7467 			m_copydata(m0, hlen, sizeof(th), &th);
   7468 			if (v4) {
   7469 				struct ip ip;
   7470 
   7471 				m_copydata(m0, offset, sizeof(ip), &ip);
   7472 				ip.ip_len = 0;
   7473 				m_copyback(m0,
   7474 				    offset + offsetof(struct ip, ip_len),
   7475 				    sizeof(ip.ip_len), &ip.ip_len);
   7476 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7477 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7478 			} else {
   7479 				struct ip6_hdr ip6;
   7480 
   7481 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7482 				ip6.ip6_plen = 0;
   7483 				m_copyback(m0,
   7484 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7485 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7486 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7487 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7488 			}
   7489 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7490 			    sizeof(th.th_sum), &th.th_sum);
   7491 
   7492 			tcp_hlen = th.th_off << 2;
   7493 		} else {
   7494 			/*
   7495 			 * TCP/IP headers are in the first mbuf; we can do
   7496 			 * this the easy way.
   7497 			 */
   7498 			struct tcphdr *th;
   7499 
   7500 			if (v4) {
   7501 				struct ip *ip =
   7502 				    (void *)(mtod(m0, char *) + offset);
   7503 				th = (void *)(mtod(m0, char *) + hlen);
   7504 
   7505 				ip->ip_len = 0;
   7506 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7507 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7508 			} else {
   7509 				struct ip6_hdr *ip6 =
   7510 				    (void *)(mtod(m0, char *) + offset);
   7511 				th = (void *)(mtod(m0, char *) + hlen);
   7512 
   7513 				ip6->ip6_plen = 0;
   7514 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7515 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7516 			}
   7517 			tcp_hlen = th->th_off << 2;
   7518 		}
   7519 		hlen += tcp_hlen;
   7520 		*cmdlenp |= NQTX_CMD_TSE;
   7521 
   7522 		if (v4) {
   7523 			WM_Q_EVCNT_INCR(txq, txtso);
   7524 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7525 		} else {
   7526 			WM_Q_EVCNT_INCR(txq, txtso6);
   7527 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7528 		}
   7529 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7530 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7531 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7532 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7533 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7534 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7535 	} else {
   7536 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7537 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7538 	}
   7539 
   7540 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7541 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7542 		cmdc |= NQTXC_CMD_IP4;
   7543 	}
   7544 
   7545 	if (m0->m_pkthdr.csum_flags &
   7546 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7547 		WM_Q_EVCNT_INCR(txq, txtusum);
   7548 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7549 			cmdc |= NQTXC_CMD_TCP;
   7550 		} else {
   7551 			cmdc |= NQTXC_CMD_UDP;
   7552 		}
   7553 		cmdc |= NQTXC_CMD_IP4;
   7554 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7555 	}
   7556 	if (m0->m_pkthdr.csum_flags &
   7557 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7558 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7559 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7560 			cmdc |= NQTXC_CMD_TCP;
   7561 		} else {
   7562 			cmdc |= NQTXC_CMD_UDP;
   7563 		}
   7564 		cmdc |= NQTXC_CMD_IP6;
   7565 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7566 	}
   7567 
   7568 	/*
   7569 	 * We don't have to write context descriptor for every packet to
   7570 	 * NEWQUEUE controllers, that is 82575, 82576, 82580, I350, I354,
   7571 	 * I210 and I211. It is enough to write once per a Tx queue for these
   7572 	 * controllers.
   7573 	 * It would be overhead to write context descriptor for every packet,
   7574 	 * however it does not cause problems.
   7575 	 */
   7576 	/* Fill in the context descriptor. */
   7577 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7578 	    htole32(vl_len);
   7579 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7580 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7581 	    htole32(cmdc);
   7582 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7583 	    htole32(mssidx);
   7584 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7585 	DPRINTF(WM_DEBUG_TX,
   7586 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7587 	    txq->txq_next, 0, vl_len));
   7588 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7589 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7590 	txs->txs_ndesc++;
   7591 	return 0;
   7592 }
   7593 
   7594 /*
   7595  * wm_nq_start:		[ifnet interface function]
   7596  *
   7597  *	Start packet transmission on the interface for NEWQUEUE devices
   7598  */
   7599 static void
   7600 wm_nq_start(struct ifnet *ifp)
   7601 {
   7602 	struct wm_softc *sc = ifp->if_softc;
   7603 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7604 
   7605 #ifdef WM_MPSAFE
   7606 	KASSERT(if_is_mpsafe(ifp));
   7607 #endif
   7608 	/*
   7609 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7610 	 */
   7611 
   7612 	mutex_enter(txq->txq_lock);
   7613 	if (!txq->txq_stopping)
   7614 		wm_nq_start_locked(ifp);
   7615 	mutex_exit(txq->txq_lock);
   7616 }
   7617 
   7618 static void
   7619 wm_nq_start_locked(struct ifnet *ifp)
   7620 {
   7621 	struct wm_softc *sc = ifp->if_softc;
   7622 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7623 
   7624 	wm_nq_send_common_locked(ifp, txq, false);
   7625 }
   7626 
   7627 static int
   7628 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7629 {
   7630 	int qid;
   7631 	struct wm_softc *sc = ifp->if_softc;
   7632 	struct wm_txqueue *txq;
   7633 
   7634 	qid = wm_select_txqueue(ifp, m);
   7635 	txq = &sc->sc_queue[qid].wmq_txq;
   7636 
   7637 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7638 		m_freem(m);
   7639 		WM_Q_EVCNT_INCR(txq, txdrop);
   7640 		return ENOBUFS;
   7641 	}
   7642 
   7643 	/*
   7644 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7645 	 */
   7646 	ifp->if_obytes += m->m_pkthdr.len;
   7647 	if (m->m_flags & M_MCAST)
   7648 		ifp->if_omcasts++;
   7649 
   7650 	/*
   7651 	 * The situations which this mutex_tryenter() fails at running time
   7652 	 * are below two patterns.
   7653 	 *     (1) contention with interrupt handler(wm_txrxintr_msix())
   7654 	 *     (2) contention with deferred if_start softint(wm_handle_queue())
   7655 	 * In the case of (1), the last packet enqueued to txq->txq_interq is
   7656 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck.
   7657 	 * In the case of (2), the last packet enqueued to txq->txq_interq is also
   7658 	 * dequeued by wm_deferred_start_locked(). So, it does not get stuck, either.
   7659 	 */
   7660 	if (mutex_tryenter(txq->txq_lock)) {
   7661 		if (!txq->txq_stopping)
   7662 			wm_nq_transmit_locked(ifp, txq);
   7663 		mutex_exit(txq->txq_lock);
   7664 	}
   7665 
   7666 	return 0;
   7667 }
   7668 
   7669 static void
   7670 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7671 {
   7672 
   7673 	wm_nq_send_common_locked(ifp, txq, true);
   7674 }
   7675 
   7676 static void
   7677 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7678     bool is_transmit)
   7679 {
   7680 	struct wm_softc *sc = ifp->if_softc;
   7681 	struct mbuf *m0;
   7682 	struct wm_txsoft *txs;
   7683 	bus_dmamap_t dmamap;
   7684 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7685 	bool do_csum, sent;
   7686 
   7687 	KASSERT(mutex_owned(txq->txq_lock));
   7688 
   7689 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7690 		return;
   7691 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7692 		return;
   7693 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7694 		return;
   7695 
   7696 	sent = false;
   7697 
   7698 	/*
   7699 	 * Loop through the send queue, setting up transmit descriptors
   7700 	 * until we drain the queue, or use up all available transmit
   7701 	 * descriptors.
   7702 	 */
   7703 	for (;;) {
   7704 		m0 = NULL;
   7705 
   7706 		/* Get a work queue entry. */
   7707 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7708 			wm_txeof(sc, txq);
   7709 			if (txq->txq_sfree == 0) {
   7710 				DPRINTF(WM_DEBUG_TX,
   7711 				    ("%s: TX: no free job descriptors\n",
   7712 					device_xname(sc->sc_dev)));
   7713 				WM_Q_EVCNT_INCR(txq, txsstall);
   7714 				break;
   7715 			}
   7716 		}
   7717 
   7718 		/* Grab a packet off the queue. */
   7719 		if (is_transmit)
   7720 			m0 = pcq_get(txq->txq_interq);
   7721 		else
   7722 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7723 		if (m0 == NULL)
   7724 			break;
   7725 
   7726 		DPRINTF(WM_DEBUG_TX,
   7727 		    ("%s: TX: have packet to transmit: %p\n",
   7728 		    device_xname(sc->sc_dev), m0));
   7729 
   7730 		txs = &txq->txq_soft[txq->txq_snext];
   7731 		dmamap = txs->txs_dmamap;
   7732 
   7733 		/*
   7734 		 * Load the DMA map.  If this fails, the packet either
   7735 		 * didn't fit in the allotted number of segments, or we
   7736 		 * were short on resources.  For the too-many-segments
   7737 		 * case, we simply report an error and drop the packet,
   7738 		 * since we can't sanely copy a jumbo packet to a single
   7739 		 * buffer.
   7740 		 */
   7741 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7742 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7743 		if (error) {
   7744 			if (error == EFBIG) {
   7745 				WM_Q_EVCNT_INCR(txq, txdrop);
   7746 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7747 				    "DMA segments, dropping...\n",
   7748 				    device_xname(sc->sc_dev));
   7749 				wm_dump_mbuf_chain(sc, m0);
   7750 				m_freem(m0);
   7751 				continue;
   7752 			}
   7753 			/* Short on resources, just stop for now. */
   7754 			DPRINTF(WM_DEBUG_TX,
   7755 			    ("%s: TX: dmamap load failed: %d\n",
   7756 			    device_xname(sc->sc_dev), error));
   7757 			break;
   7758 		}
   7759 
   7760 		segs_needed = dmamap->dm_nsegs;
   7761 
   7762 		/*
   7763 		 * Ensure we have enough descriptors free to describe
   7764 		 * the packet.  Note, we always reserve one descriptor
   7765 		 * at the end of the ring due to the semantics of the
   7766 		 * TDT register, plus one more in the event we need
   7767 		 * to load offload context.
   7768 		 */
   7769 		if (segs_needed > txq->txq_free - 2) {
   7770 			/*
   7771 			 * Not enough free descriptors to transmit this
   7772 			 * packet.  We haven't committed anything yet,
   7773 			 * so just unload the DMA map, put the packet
   7774 			 * pack on the queue, and punt.  Notify the upper
   7775 			 * layer that there are no more slots left.
   7776 			 */
   7777 			DPRINTF(WM_DEBUG_TX,
   7778 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7779 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7780 			    segs_needed, txq->txq_free - 1));
   7781 			if (!is_transmit)
   7782 				ifp->if_flags |= IFF_OACTIVE;
   7783 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7784 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7785 			WM_Q_EVCNT_INCR(txq, txdstall);
   7786 			break;
   7787 		}
   7788 
   7789 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7790 
   7791 		DPRINTF(WM_DEBUG_TX,
   7792 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7793 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7794 
   7795 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7796 
   7797 		/*
   7798 		 * Store a pointer to the packet so that we can free it
   7799 		 * later.
   7800 		 *
   7801 		 * Initially, we consider the number of descriptors the
   7802 		 * packet uses the number of DMA segments.  This may be
   7803 		 * incremented by 1 if we do checksum offload (a descriptor
   7804 		 * is used to set the checksum context).
   7805 		 */
   7806 		txs->txs_mbuf = m0;
   7807 		txs->txs_firstdesc = txq->txq_next;
   7808 		txs->txs_ndesc = segs_needed;
   7809 
   7810 		/* Set up offload parameters for this packet. */
   7811 		uint32_t cmdlen, fields, dcmdlen;
   7812 		if (m0->m_pkthdr.csum_flags &
   7813 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7814 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7815 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7816 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7817 			    &do_csum) != 0) {
   7818 				/* Error message already displayed. */
   7819 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7820 				continue;
   7821 			}
   7822 		} else {
   7823 			do_csum = false;
   7824 			cmdlen = 0;
   7825 			fields = 0;
   7826 		}
   7827 
   7828 		/* Sync the DMA map. */
   7829 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7830 		    BUS_DMASYNC_PREWRITE);
   7831 
   7832 		/* Initialize the first transmit descriptor. */
   7833 		nexttx = txq->txq_next;
   7834 		if (!do_csum) {
   7835 			/* setup a legacy descriptor */
   7836 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7837 			    dmamap->dm_segs[0].ds_addr);
   7838 			txq->txq_descs[nexttx].wtx_cmdlen =
   7839 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7840 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7841 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7842 			if (vlan_has_tag(m0)) {
   7843 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7844 				    htole32(WTX_CMD_VLE);
   7845 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7846 				    htole16(vlan_get_tag(m0));
   7847 			} else {
   7848 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7849 			}
   7850 			dcmdlen = 0;
   7851 		} else {
   7852 			/* setup an advanced data descriptor */
   7853 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7854 			    htole64(dmamap->dm_segs[0].ds_addr);
   7855 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7856 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7857 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   7858 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7859 			    htole32(fields);
   7860 			DPRINTF(WM_DEBUG_TX,
   7861 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7862 			    device_xname(sc->sc_dev), nexttx,
   7863 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7864 			DPRINTF(WM_DEBUG_TX,
   7865 			    ("\t 0x%08x%08x\n", fields,
   7866 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7867 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7868 		}
   7869 
   7870 		lasttx = nexttx;
   7871 		nexttx = WM_NEXTTX(txq, nexttx);
   7872 		/*
   7873 		 * fill in the next descriptors. legacy or adcanced format
   7874 		 * is the same here
   7875 		 */
   7876 		for (seg = 1; seg < dmamap->dm_nsegs;
   7877 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7878 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7879 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7880 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7881 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7882 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7883 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7884 			lasttx = nexttx;
   7885 
   7886 			DPRINTF(WM_DEBUG_TX,
   7887 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7888 			     "len %#04zx\n",
   7889 			    device_xname(sc->sc_dev), nexttx,
   7890 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7891 			    dmamap->dm_segs[seg].ds_len));
   7892 		}
   7893 
   7894 		KASSERT(lasttx != -1);
   7895 
   7896 		/*
   7897 		 * Set up the command byte on the last descriptor of
   7898 		 * the packet.  If we're in the interrupt delay window,
   7899 		 * delay the interrupt.
   7900 		 */
   7901 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7902 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7903 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7904 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7905 
   7906 		txs->txs_lastdesc = lasttx;
   7907 
   7908 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7909 		    device_xname(sc->sc_dev),
   7910 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7911 
   7912 		/* Sync the descriptors we're using. */
   7913 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7914 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7915 
   7916 		/* Give the packet to the chip. */
   7917 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7918 		sent = true;
   7919 
   7920 		DPRINTF(WM_DEBUG_TX,
   7921 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7922 
   7923 		DPRINTF(WM_DEBUG_TX,
   7924 		    ("%s: TX: finished transmitting packet, job %d\n",
   7925 		    device_xname(sc->sc_dev), txq->txq_snext));
   7926 
   7927 		/* Advance the tx pointer. */
   7928 		txq->txq_free -= txs->txs_ndesc;
   7929 		txq->txq_next = nexttx;
   7930 
   7931 		txq->txq_sfree--;
   7932 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7933 
   7934 		/* Pass the packet to any BPF listeners. */
   7935 		bpf_mtap(ifp, m0);
   7936 	}
   7937 
   7938 	if (m0 != NULL) {
   7939 		if (!is_transmit)
   7940 			ifp->if_flags |= IFF_OACTIVE;
   7941 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7942 		WM_Q_EVCNT_INCR(txq, txdrop);
   7943 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7944 			__func__));
   7945 		m_freem(m0);
   7946 	}
   7947 
   7948 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7949 		/* No more slots; notify upper layer. */
   7950 		if (!is_transmit)
   7951 			ifp->if_flags |= IFF_OACTIVE;
   7952 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7953 	}
   7954 
   7955 	if (sent) {
   7956 		/* Set a watchdog timer in case the chip flakes out. */
   7957 		ifp->if_timer = 5;
   7958 	}
   7959 }
   7960 
   7961 static void
   7962 wm_deferred_start_locked(struct wm_txqueue *txq)
   7963 {
   7964 	struct wm_softc *sc = txq->txq_sc;
   7965 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7966 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7967 	int qid = wmq->wmq_id;
   7968 
   7969 	KASSERT(mutex_owned(txq->txq_lock));
   7970 
   7971 	if (txq->txq_stopping) {
   7972 		mutex_exit(txq->txq_lock);
   7973 		return;
   7974 	}
   7975 
   7976 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7977 		/* XXX need for ALTQ or one CPU system */
   7978 		if (qid == 0)
   7979 			wm_nq_start_locked(ifp);
   7980 		wm_nq_transmit_locked(ifp, txq);
   7981 	} else {
   7982 		/* XXX need for ALTQ or one CPU system */
   7983 		if (qid == 0)
   7984 			wm_start_locked(ifp);
   7985 		wm_transmit_locked(ifp, txq);
   7986 	}
   7987 }
   7988 
   7989 /* Interrupt */
   7990 
   7991 /*
   7992  * wm_txeof:
   7993  *
   7994  *	Helper; handle transmit interrupts.
   7995  */
   7996 static int
   7997 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7998 {
   7999 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8000 	struct wm_txsoft *txs;
   8001 	bool processed = false;
   8002 	int count = 0;
   8003 	int i;
   8004 	uint8_t status;
   8005 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8006 
   8007 	KASSERT(mutex_owned(txq->txq_lock));
   8008 
   8009 	if (txq->txq_stopping)
   8010 		return 0;
   8011 
   8012 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   8013 	/* for ALTQ and legacy(not use multiqueue) ethernet controller */
   8014 	if (wmq->wmq_id == 0)
   8015 		ifp->if_flags &= ~IFF_OACTIVE;
   8016 
   8017 	/*
   8018 	 * Go through the Tx list and free mbufs for those
   8019 	 * frames which have been transmitted.
   8020 	 */
   8021 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8022 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8023 		txs = &txq->txq_soft[i];
   8024 
   8025 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8026 			device_xname(sc->sc_dev), i));
   8027 
   8028 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8029 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8030 
   8031 		status =
   8032 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8033 		if ((status & WTX_ST_DD) == 0) {
   8034 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8035 			    BUS_DMASYNC_PREREAD);
   8036 			break;
   8037 		}
   8038 
   8039 		processed = true;
   8040 		count++;
   8041 		DPRINTF(WM_DEBUG_TX,
   8042 		    ("%s: TX: job %d done: descs %d..%d\n",
   8043 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8044 		    txs->txs_lastdesc));
   8045 
   8046 		/*
   8047 		 * XXX We should probably be using the statistics
   8048 		 * XXX registers, but I don't know if they exist
   8049 		 * XXX on chips before the i82544.
   8050 		 */
   8051 
   8052 #ifdef WM_EVENT_COUNTERS
   8053 		if (status & WTX_ST_TU)
   8054 			WM_Q_EVCNT_INCR(txq, tu);
   8055 #endif /* WM_EVENT_COUNTERS */
   8056 
   8057 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8058 			ifp->if_oerrors++;
   8059 			if (status & WTX_ST_LC)
   8060 				log(LOG_WARNING, "%s: late collision\n",
   8061 				    device_xname(sc->sc_dev));
   8062 			else if (status & WTX_ST_EC) {
   8063 				ifp->if_collisions += 16;
   8064 				log(LOG_WARNING, "%s: excessive collisions\n",
   8065 				    device_xname(sc->sc_dev));
   8066 			}
   8067 		} else
   8068 			ifp->if_opackets++;
   8069 
   8070 		txq->txq_packets++;
   8071 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8072 
   8073 		txq->txq_free += txs->txs_ndesc;
   8074 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8075 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8076 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8077 		m_freem(txs->txs_mbuf);
   8078 		txs->txs_mbuf = NULL;
   8079 	}
   8080 
   8081 	/* Update the dirty transmit buffer pointer. */
   8082 	txq->txq_sdirty = i;
   8083 	DPRINTF(WM_DEBUG_TX,
   8084 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8085 
   8086 	if (count != 0)
   8087 		rnd_add_uint32(&sc->rnd_source, count);
   8088 
   8089 	/*
   8090 	 * If there are no more pending transmissions, cancel the watchdog
   8091 	 * timer.
   8092 	 */
   8093 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8094 		ifp->if_timer = 0;
   8095 
   8096 	return processed;
   8097 }
   8098 
   8099 static inline uint32_t
   8100 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8101 {
   8102 	struct wm_softc *sc = rxq->rxq_sc;
   8103 
   8104 	if (sc->sc_type == WM_T_82574)
   8105 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8106 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8107 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8108 	else
   8109 		return rxq->rxq_descs[idx].wrx_status;
   8110 }
   8111 
   8112 static inline uint32_t
   8113 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8114 {
   8115 	struct wm_softc *sc = rxq->rxq_sc;
   8116 
   8117 	if (sc->sc_type == WM_T_82574)
   8118 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8119 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8120 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8121 	else
   8122 		return rxq->rxq_descs[idx].wrx_errors;
   8123 }
   8124 
   8125 static inline uint16_t
   8126 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8127 {
   8128 	struct wm_softc *sc = rxq->rxq_sc;
   8129 
   8130 	if (sc->sc_type == WM_T_82574)
   8131 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8132 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8133 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8134 	else
   8135 		return rxq->rxq_descs[idx].wrx_special;
   8136 }
   8137 
   8138 static inline int
   8139 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8140 {
   8141 	struct wm_softc *sc = rxq->rxq_sc;
   8142 
   8143 	if (sc->sc_type == WM_T_82574)
   8144 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8145 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8146 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8147 	else
   8148 		return rxq->rxq_descs[idx].wrx_len;
   8149 }
   8150 
   8151 #ifdef WM_DEBUG
   8152 static inline uint32_t
   8153 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8154 {
   8155 	struct wm_softc *sc = rxq->rxq_sc;
   8156 
   8157 	if (sc->sc_type == WM_T_82574)
   8158 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8159 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8160 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8161 	else
   8162 		return 0;
   8163 }
   8164 
   8165 static inline uint8_t
   8166 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8167 {
   8168 	struct wm_softc *sc = rxq->rxq_sc;
   8169 
   8170 	if (sc->sc_type == WM_T_82574)
   8171 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8172 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8173 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8174 	else
   8175 		return 0;
   8176 }
   8177 #endif /* WM_DEBUG */
   8178 
   8179 static inline bool
   8180 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8181     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8182 {
   8183 
   8184 	if (sc->sc_type == WM_T_82574)
   8185 		return (status & ext_bit) != 0;
   8186 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8187 		return (status & nq_bit) != 0;
   8188 	else
   8189 		return (status & legacy_bit) != 0;
   8190 }
   8191 
   8192 static inline bool
   8193 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8194     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8195 {
   8196 
   8197 	if (sc->sc_type == WM_T_82574)
   8198 		return (error & ext_bit) != 0;
   8199 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8200 		return (error & nq_bit) != 0;
   8201 	else
   8202 		return (error & legacy_bit) != 0;
   8203 }
   8204 
   8205 static inline bool
   8206 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8207 {
   8208 
   8209 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8210 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8211 		return true;
   8212 	else
   8213 		return false;
   8214 }
   8215 
   8216 static inline bool
   8217 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8218 {
   8219 	struct wm_softc *sc = rxq->rxq_sc;
   8220 
   8221 	/* XXXX missing error bit for newqueue? */
   8222 	if (wm_rxdesc_is_set_error(sc, errors,
   8223 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8224 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8225 		NQRXC_ERROR_RXE)) {
   8226 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8227 			log(LOG_WARNING, "%s: symbol error\n",
   8228 			    device_xname(sc->sc_dev));
   8229 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8230 			log(LOG_WARNING, "%s: receive sequence error\n",
   8231 			    device_xname(sc->sc_dev));
   8232 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8233 			log(LOG_WARNING, "%s: CRC error\n",
   8234 			    device_xname(sc->sc_dev));
   8235 		return true;
   8236 	}
   8237 
   8238 	return false;
   8239 }
   8240 
   8241 static inline bool
   8242 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8243 {
   8244 	struct wm_softc *sc = rxq->rxq_sc;
   8245 
   8246 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8247 		NQRXC_STATUS_DD)) {
   8248 		/* We have processed all of the receive descriptors. */
   8249 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8250 		return false;
   8251 	}
   8252 
   8253 	return true;
   8254 }
   8255 
   8256 static inline bool
   8257 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8258     struct mbuf *m)
   8259 {
   8260 
   8261 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8262 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8263 		vlan_set_tag(m, le16toh(vlantag));
   8264 	}
   8265 
   8266 	return true;
   8267 }
   8268 
   8269 static inline void
   8270 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8271     uint32_t errors, struct mbuf *m)
   8272 {
   8273 	struct wm_softc *sc = rxq->rxq_sc;
   8274 
   8275 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8276 		if (wm_rxdesc_is_set_status(sc, status,
   8277 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8278 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8279 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8280 			if (wm_rxdesc_is_set_error(sc, errors,
   8281 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8282 				m->m_pkthdr.csum_flags |=
   8283 					M_CSUM_IPv4_BAD;
   8284 		}
   8285 		if (wm_rxdesc_is_set_status(sc, status,
   8286 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8287 			/*
   8288 			 * Note: we don't know if this was TCP or UDP,
   8289 			 * so we just set both bits, and expect the
   8290 			 * upper layers to deal.
   8291 			 */
   8292 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8293 			m->m_pkthdr.csum_flags |=
   8294 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8295 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8296 			if (wm_rxdesc_is_set_error(sc, errors,
   8297 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8298 				m->m_pkthdr.csum_flags |=
   8299 					M_CSUM_TCP_UDP_BAD;
   8300 		}
   8301 	}
   8302 }
   8303 
   8304 /*
   8305  * wm_rxeof:
   8306  *
   8307  *	Helper; handle receive interrupts.
   8308  */
   8309 static void
   8310 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8311 {
   8312 	struct wm_softc *sc = rxq->rxq_sc;
   8313 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8314 	struct wm_rxsoft *rxs;
   8315 	struct mbuf *m;
   8316 	int i, len;
   8317 	int count = 0;
   8318 	uint32_t status, errors;
   8319 	uint16_t vlantag;
   8320 
   8321 	KASSERT(mutex_owned(rxq->rxq_lock));
   8322 
   8323 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8324 		if (limit-- == 0) {
   8325 			rxq->rxq_ptr = i;
   8326 			break;
   8327 		}
   8328 
   8329 		rxs = &rxq->rxq_soft[i];
   8330 
   8331 		DPRINTF(WM_DEBUG_RX,
   8332 		    ("%s: RX: checking descriptor %d\n",
   8333 		    device_xname(sc->sc_dev), i));
   8334 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8335 
   8336 		status = wm_rxdesc_get_status(rxq, i);
   8337 		errors = wm_rxdesc_get_errors(rxq, i);
   8338 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8339 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8340 #ifdef WM_DEBUG
   8341 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8342 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8343 #endif
   8344 
   8345 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8346 			/*
   8347 			 * Update the receive pointer holding rxq_lock
   8348 			 * consistent with increment counter.
   8349 			 */
   8350 			rxq->rxq_ptr = i;
   8351 			break;
   8352 		}
   8353 
   8354 		count++;
   8355 		if (__predict_false(rxq->rxq_discard)) {
   8356 			DPRINTF(WM_DEBUG_RX,
   8357 			    ("%s: RX: discarding contents of descriptor %d\n",
   8358 			    device_xname(sc->sc_dev), i));
   8359 			wm_init_rxdesc(rxq, i);
   8360 			if (wm_rxdesc_is_eop(rxq, status)) {
   8361 				/* Reset our state. */
   8362 				DPRINTF(WM_DEBUG_RX,
   8363 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8364 				    device_xname(sc->sc_dev)));
   8365 				rxq->rxq_discard = 0;
   8366 			}
   8367 			continue;
   8368 		}
   8369 
   8370 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8371 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8372 
   8373 		m = rxs->rxs_mbuf;
   8374 
   8375 		/*
   8376 		 * Add a new receive buffer to the ring, unless of
   8377 		 * course the length is zero. Treat the latter as a
   8378 		 * failed mapping.
   8379 		 */
   8380 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8381 			/*
   8382 			 * Failed, throw away what we've done so
   8383 			 * far, and discard the rest of the packet.
   8384 			 */
   8385 			ifp->if_ierrors++;
   8386 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8387 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8388 			wm_init_rxdesc(rxq, i);
   8389 			if (!wm_rxdesc_is_eop(rxq, status))
   8390 				rxq->rxq_discard = 1;
   8391 			if (rxq->rxq_head != NULL)
   8392 				m_freem(rxq->rxq_head);
   8393 			WM_RXCHAIN_RESET(rxq);
   8394 			DPRINTF(WM_DEBUG_RX,
   8395 			    ("%s: RX: Rx buffer allocation failed, "
   8396 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8397 			    rxq->rxq_discard ? " (discard)" : ""));
   8398 			continue;
   8399 		}
   8400 
   8401 		m->m_len = len;
   8402 		rxq->rxq_len += len;
   8403 		DPRINTF(WM_DEBUG_RX,
   8404 		    ("%s: RX: buffer at %p len %d\n",
   8405 		    device_xname(sc->sc_dev), m->m_data, len));
   8406 
   8407 		/* If this is not the end of the packet, keep looking. */
   8408 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8409 			WM_RXCHAIN_LINK(rxq, m);
   8410 			DPRINTF(WM_DEBUG_RX,
   8411 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8412 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8413 			continue;
   8414 		}
   8415 
   8416 		/*
   8417 		 * Okay, we have the entire packet now.  The chip is
   8418 		 * configured to include the FCS except I350 and I21[01]
   8419 		 * (not all chips can be configured to strip it),
   8420 		 * so we need to trim it.
   8421 		 * May need to adjust length of previous mbuf in the
   8422 		 * chain if the current mbuf is too short.
   8423 		 * For an eratta, the RCTL_SECRC bit in RCTL register
   8424 		 * is always set in I350, so we don't trim it.
   8425 		 */
   8426 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8427 		    && (sc->sc_type != WM_T_I210)
   8428 		    && (sc->sc_type != WM_T_I211)) {
   8429 			if (m->m_len < ETHER_CRC_LEN) {
   8430 				rxq->rxq_tail->m_len
   8431 				    -= (ETHER_CRC_LEN - m->m_len);
   8432 				m->m_len = 0;
   8433 			} else
   8434 				m->m_len -= ETHER_CRC_LEN;
   8435 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8436 		} else
   8437 			len = rxq->rxq_len;
   8438 
   8439 		WM_RXCHAIN_LINK(rxq, m);
   8440 
   8441 		*rxq->rxq_tailp = NULL;
   8442 		m = rxq->rxq_head;
   8443 
   8444 		WM_RXCHAIN_RESET(rxq);
   8445 
   8446 		DPRINTF(WM_DEBUG_RX,
   8447 		    ("%s: RX: have entire packet, len -> %d\n",
   8448 		    device_xname(sc->sc_dev), len));
   8449 
   8450 		/* If an error occurred, update stats and drop the packet. */
   8451 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8452 			m_freem(m);
   8453 			continue;
   8454 		}
   8455 
   8456 		/* No errors.  Receive the packet. */
   8457 		m_set_rcvif(m, ifp);
   8458 		m->m_pkthdr.len = len;
   8459 		/*
   8460 		 * TODO
   8461 		 * should be save rsshash and rsstype to this mbuf.
   8462 		 */
   8463 		DPRINTF(WM_DEBUG_RX,
   8464 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8465 			device_xname(sc->sc_dev), rsstype, rsshash));
   8466 
   8467 		/*
   8468 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8469 		 * for us.  Associate the tag with the packet.
   8470 		 */
   8471 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8472 			continue;
   8473 
   8474 		/* Set up checksum info for this packet. */
   8475 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8476 		/*
   8477 		 * Update the receive pointer holding rxq_lock consistent with
   8478 		 * increment counter.
   8479 		 */
   8480 		rxq->rxq_ptr = i;
   8481 		rxq->rxq_packets++;
   8482 		rxq->rxq_bytes += len;
   8483 		mutex_exit(rxq->rxq_lock);
   8484 
   8485 		/* Pass it on. */
   8486 		if_percpuq_enqueue(sc->sc_ipq, m);
   8487 
   8488 		mutex_enter(rxq->rxq_lock);
   8489 
   8490 		if (rxq->rxq_stopping)
   8491 			break;
   8492 	}
   8493 
   8494 	if (count != 0)
   8495 		rnd_add_uint32(&sc->rnd_source, count);
   8496 
   8497 	DPRINTF(WM_DEBUG_RX,
   8498 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8499 }
   8500 
   8501 /*
   8502  * wm_linkintr_gmii:
   8503  *
   8504  *	Helper; handle link interrupts for GMII.
   8505  */
   8506 static void
   8507 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8508 {
   8509 
   8510 	KASSERT(WM_CORE_LOCKED(sc));
   8511 
   8512 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8513 		__func__));
   8514 
   8515 	if (icr & ICR_LSC) {
   8516 		uint32_t reg;
   8517 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8518 
   8519 		if ((status & STATUS_LU) != 0) {
   8520 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8521 				device_xname(sc->sc_dev),
   8522 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8523 		} else {
   8524 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8525 				device_xname(sc->sc_dev)));
   8526 		}
   8527 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8528 			wm_gig_downshift_workaround_ich8lan(sc);
   8529 
   8530 		if ((sc->sc_type == WM_T_ICH8)
   8531 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8532 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8533 		}
   8534 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8535 			device_xname(sc->sc_dev)));
   8536 		mii_pollstat(&sc->sc_mii);
   8537 		if (sc->sc_type == WM_T_82543) {
   8538 			int miistatus, active;
   8539 
   8540 			/*
   8541 			 * With 82543, we need to force speed and
   8542 			 * duplex on the MAC equal to what the PHY
   8543 			 * speed and duplex configuration is.
   8544 			 */
   8545 			miistatus = sc->sc_mii.mii_media_status;
   8546 
   8547 			if (miistatus & IFM_ACTIVE) {
   8548 				active = sc->sc_mii.mii_media_active;
   8549 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8550 				switch (IFM_SUBTYPE(active)) {
   8551 				case IFM_10_T:
   8552 					sc->sc_ctrl |= CTRL_SPEED_10;
   8553 					break;
   8554 				case IFM_100_TX:
   8555 					sc->sc_ctrl |= CTRL_SPEED_100;
   8556 					break;
   8557 				case IFM_1000_T:
   8558 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8559 					break;
   8560 				default:
   8561 					/*
   8562 					 * fiber?
   8563 					 * Shoud not enter here.
   8564 					 */
   8565 					printf("unknown media (%x)\n", active);
   8566 					break;
   8567 				}
   8568 				if (active & IFM_FDX)
   8569 					sc->sc_ctrl |= CTRL_FD;
   8570 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8571 			}
   8572 		} else if (sc->sc_type == WM_T_PCH) {
   8573 			wm_k1_gig_workaround_hv(sc,
   8574 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8575 		}
   8576 
   8577 		if ((sc->sc_phytype == WMPHY_82578)
   8578 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8579 			== IFM_1000_T)) {
   8580 
   8581 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8582 				delay(200*1000); /* XXX too big */
   8583 
   8584 				/* Link stall fix for link up */
   8585 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8586 				    HV_MUX_DATA_CTRL,
   8587 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8588 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8589 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8590 				    HV_MUX_DATA_CTRL,
   8591 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8592 			}
   8593 		}
   8594 		/*
   8595 		 * I217 Packet Loss issue:
   8596 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8597 		 * on power up.
   8598 		 * Set the Beacon Duration for I217 to 8 usec
   8599 		 */
   8600 		if ((sc->sc_type == WM_T_PCH_LPT)
   8601 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8602 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8603 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8604 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8605 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8606 		}
   8607 
   8608 		/* XXX Work-around I218 hang issue */
   8609 		/* e1000_k1_workaround_lpt_lp() */
   8610 
   8611 		if ((sc->sc_type == WM_T_PCH_LPT)
   8612 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8613 			/*
   8614 			 * Set platform power management values for Latency
   8615 			 * Tolerance Reporting (LTR)
   8616 			 */
   8617 			wm_platform_pm_pch_lpt(sc,
   8618 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8619 				    != 0));
   8620 		}
   8621 
   8622 		/* FEXTNVM6 K1-off workaround */
   8623 		if (sc->sc_type == WM_T_PCH_SPT) {
   8624 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8625 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8626 			    & FEXTNVM6_K1_OFF_ENABLE)
   8627 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8628 			else
   8629 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8630 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8631 		}
   8632 	} else if (icr & ICR_RXSEQ) {
   8633 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8634 			device_xname(sc->sc_dev)));
   8635 	}
   8636 }
   8637 
   8638 /*
   8639  * wm_linkintr_tbi:
   8640  *
   8641  *	Helper; handle link interrupts for TBI mode.
   8642  */
   8643 static void
   8644 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8645 {
   8646 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8647 	uint32_t status;
   8648 
   8649 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8650 		__func__));
   8651 
   8652 	status = CSR_READ(sc, WMREG_STATUS);
   8653 	if (icr & ICR_LSC) {
   8654 		if (status & STATUS_LU) {
   8655 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8656 			    device_xname(sc->sc_dev),
   8657 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8658 			/*
   8659 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8660 			 * so we should update sc->sc_ctrl
   8661 			 */
   8662 
   8663 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8664 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8665 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8666 			if (status & STATUS_FD)
   8667 				sc->sc_tctl |=
   8668 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8669 			else
   8670 				sc->sc_tctl |=
   8671 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8672 			if (sc->sc_ctrl & CTRL_TFCE)
   8673 				sc->sc_fcrtl |= FCRTL_XONE;
   8674 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8675 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8676 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8677 				      sc->sc_fcrtl);
   8678 			sc->sc_tbi_linkup = 1;
   8679 			if_link_state_change(ifp, LINK_STATE_UP);
   8680 		} else {
   8681 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8682 			    device_xname(sc->sc_dev)));
   8683 			sc->sc_tbi_linkup = 0;
   8684 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8685 		}
   8686 		/* Update LED */
   8687 		wm_tbi_serdes_set_linkled(sc);
   8688 	} else if (icr & ICR_RXSEQ) {
   8689 		DPRINTF(WM_DEBUG_LINK,
   8690 		    ("%s: LINK: Receive sequence error\n",
   8691 		    device_xname(sc->sc_dev)));
   8692 	}
   8693 }
   8694 
   8695 /*
   8696  * wm_linkintr_serdes:
   8697  *
   8698  *	Helper; handle link interrupts for TBI mode.
   8699  */
   8700 static void
   8701 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8702 {
   8703 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8704 	struct mii_data *mii = &sc->sc_mii;
   8705 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8706 	uint32_t pcs_adv, pcs_lpab, reg;
   8707 
   8708 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8709 		__func__));
   8710 
   8711 	if (icr & ICR_LSC) {
   8712 		/* Check PCS */
   8713 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8714 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8715 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8716 				device_xname(sc->sc_dev)));
   8717 			mii->mii_media_status |= IFM_ACTIVE;
   8718 			sc->sc_tbi_linkup = 1;
   8719 			if_link_state_change(ifp, LINK_STATE_UP);
   8720 		} else {
   8721 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8722 				device_xname(sc->sc_dev)));
   8723 			mii->mii_media_status |= IFM_NONE;
   8724 			sc->sc_tbi_linkup = 0;
   8725 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8726 			wm_tbi_serdes_set_linkled(sc);
   8727 			return;
   8728 		}
   8729 		mii->mii_media_active |= IFM_1000_SX;
   8730 		if ((reg & PCS_LSTS_FDX) != 0)
   8731 			mii->mii_media_active |= IFM_FDX;
   8732 		else
   8733 			mii->mii_media_active |= IFM_HDX;
   8734 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8735 			/* Check flow */
   8736 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8737 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8738 				DPRINTF(WM_DEBUG_LINK,
   8739 				    ("XXX LINKOK but not ACOMP\n"));
   8740 				return;
   8741 			}
   8742 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8743 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8744 			DPRINTF(WM_DEBUG_LINK,
   8745 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8746 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8747 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8748 				mii->mii_media_active |= IFM_FLOW
   8749 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8750 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8751 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8752 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8753 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8754 				mii->mii_media_active |= IFM_FLOW
   8755 				    | IFM_ETH_TXPAUSE;
   8756 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8757 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8758 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8759 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8760 				mii->mii_media_active |= IFM_FLOW
   8761 				    | IFM_ETH_RXPAUSE;
   8762 		}
   8763 		/* Update LED */
   8764 		wm_tbi_serdes_set_linkled(sc);
   8765 	} else {
   8766 		DPRINTF(WM_DEBUG_LINK,
   8767 		    ("%s: LINK: Receive sequence error\n",
   8768 		    device_xname(sc->sc_dev)));
   8769 	}
   8770 }
   8771 
   8772 /*
   8773  * wm_linkintr:
   8774  *
   8775  *	Helper; handle link interrupts.
   8776  */
   8777 static void
   8778 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8779 {
   8780 
   8781 	KASSERT(WM_CORE_LOCKED(sc));
   8782 
   8783 	if (sc->sc_flags & WM_F_HAS_MII)
   8784 		wm_linkintr_gmii(sc, icr);
   8785 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8786 	    && (sc->sc_type >= WM_T_82575))
   8787 		wm_linkintr_serdes(sc, icr);
   8788 	else
   8789 		wm_linkintr_tbi(sc, icr);
   8790 }
   8791 
   8792 /*
   8793  * wm_intr_legacy:
   8794  *
   8795  *	Interrupt service routine for INTx and MSI.
   8796  */
   8797 static int
   8798 wm_intr_legacy(void *arg)
   8799 {
   8800 	struct wm_softc *sc = arg;
   8801 	struct wm_queue *wmq = &sc->sc_queue[0];
   8802 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8803 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8804 	uint32_t icr, rndval = 0;
   8805 	int handled = 0;
   8806 
   8807 	while (1 /* CONSTCOND */) {
   8808 		icr = CSR_READ(sc, WMREG_ICR);
   8809 		if ((icr & sc->sc_icr) == 0)
   8810 			break;
   8811 		if (handled == 0) {
   8812 			DPRINTF(WM_DEBUG_TX,
   8813 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8814 		}
   8815 		if (rndval == 0)
   8816 			rndval = icr;
   8817 
   8818 		mutex_enter(rxq->rxq_lock);
   8819 
   8820 		if (rxq->rxq_stopping) {
   8821 			mutex_exit(rxq->rxq_lock);
   8822 			break;
   8823 		}
   8824 
   8825 		handled = 1;
   8826 
   8827 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8828 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8829 			DPRINTF(WM_DEBUG_RX,
   8830 			    ("%s: RX: got Rx intr 0x%08x\n",
   8831 			    device_xname(sc->sc_dev),
   8832 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8833 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8834 		}
   8835 #endif
   8836 		/*
   8837 		 * wm_rxeof() does *not* call upper layer functions directly,
   8838 		 * as if_percpuq_enqueue() just call softint_schedule().
   8839 		 * So, we can call wm_rxeof() in interrupt context.
   8840 		 */
   8841 		wm_rxeof(rxq, UINT_MAX);
   8842 
   8843 		mutex_exit(rxq->rxq_lock);
   8844 		mutex_enter(txq->txq_lock);
   8845 
   8846 		if (txq->txq_stopping) {
   8847 			mutex_exit(txq->txq_lock);
   8848 			break;
   8849 		}
   8850 
   8851 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8852 		if (icr & ICR_TXDW) {
   8853 			DPRINTF(WM_DEBUG_TX,
   8854 			    ("%s: TX: got TXDW interrupt\n",
   8855 			    device_xname(sc->sc_dev)));
   8856 			WM_Q_EVCNT_INCR(txq, txdw);
   8857 		}
   8858 #endif
   8859 		wm_txeof(sc, txq);
   8860 
   8861 		mutex_exit(txq->txq_lock);
   8862 		WM_CORE_LOCK(sc);
   8863 
   8864 		if (sc->sc_core_stopping) {
   8865 			WM_CORE_UNLOCK(sc);
   8866 			break;
   8867 		}
   8868 
   8869 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8870 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8871 			wm_linkintr(sc, icr);
   8872 		}
   8873 
   8874 		WM_CORE_UNLOCK(sc);
   8875 
   8876 		if (icr & ICR_RXO) {
   8877 #if defined(WM_DEBUG)
   8878 			log(LOG_WARNING, "%s: Receive overrun\n",
   8879 			    device_xname(sc->sc_dev));
   8880 #endif /* defined(WM_DEBUG) */
   8881 		}
   8882 	}
   8883 
   8884 	rnd_add_uint32(&sc->rnd_source, rndval);
   8885 
   8886 	if (handled) {
   8887 		/* Try to get more packets going. */
   8888 		softint_schedule(wmq->wmq_si);
   8889 	}
   8890 
   8891 	return handled;
   8892 }
   8893 
   8894 static inline void
   8895 wm_txrxintr_disable(struct wm_queue *wmq)
   8896 {
   8897 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8898 
   8899 	if (sc->sc_type == WM_T_82574)
   8900 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8901 	else if (sc->sc_type == WM_T_82575)
   8902 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8903 	else
   8904 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8905 }
   8906 
   8907 static inline void
   8908 wm_txrxintr_enable(struct wm_queue *wmq)
   8909 {
   8910 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8911 
   8912 	wm_itrs_calculate(sc, wmq);
   8913 
   8914 	if (sc->sc_type == WM_T_82574)
   8915 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8916 	else if (sc->sc_type == WM_T_82575)
   8917 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8918 	else
   8919 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8920 }
   8921 
   8922 static int
   8923 wm_txrxintr_msix(void *arg)
   8924 {
   8925 	struct wm_queue *wmq = arg;
   8926 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8927 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8928 	struct wm_softc *sc = txq->txq_sc;
   8929 	u_int limit = sc->sc_rx_intr_process_limit;
   8930 
   8931 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8932 
   8933 	DPRINTF(WM_DEBUG_TX,
   8934 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8935 
   8936 	wm_txrxintr_disable(wmq);
   8937 
   8938 	mutex_enter(txq->txq_lock);
   8939 
   8940 	if (txq->txq_stopping) {
   8941 		mutex_exit(txq->txq_lock);
   8942 		return 0;
   8943 	}
   8944 
   8945 	WM_Q_EVCNT_INCR(txq, txdw);
   8946 	wm_txeof(sc, txq);
   8947 	/* wm_deferred start() is done in wm_handle_queue(). */
   8948 	mutex_exit(txq->txq_lock);
   8949 
   8950 	DPRINTF(WM_DEBUG_RX,
   8951 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8952 	mutex_enter(rxq->rxq_lock);
   8953 
   8954 	if (rxq->rxq_stopping) {
   8955 		mutex_exit(rxq->rxq_lock);
   8956 		return 0;
   8957 	}
   8958 
   8959 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8960 	wm_rxeof(rxq, limit);
   8961 	mutex_exit(rxq->rxq_lock);
   8962 
   8963 	wm_itrs_writereg(sc, wmq);
   8964 
   8965 	softint_schedule(wmq->wmq_si);
   8966 
   8967 	return 1;
   8968 }
   8969 
   8970 static void
   8971 wm_handle_queue(void *arg)
   8972 {
   8973 	struct wm_queue *wmq = arg;
   8974 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8975 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8976 	struct wm_softc *sc = txq->txq_sc;
   8977 	u_int limit = sc->sc_rx_process_limit;
   8978 
   8979 	mutex_enter(txq->txq_lock);
   8980 	if (txq->txq_stopping) {
   8981 		mutex_exit(txq->txq_lock);
   8982 		return;
   8983 	}
   8984 	wm_txeof(sc, txq);
   8985 	wm_deferred_start_locked(txq);
   8986 	mutex_exit(txq->txq_lock);
   8987 
   8988 	mutex_enter(rxq->rxq_lock);
   8989 	if (rxq->rxq_stopping) {
   8990 		mutex_exit(rxq->rxq_lock);
   8991 		return;
   8992 	}
   8993 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8994 	wm_rxeof(rxq, limit);
   8995 	mutex_exit(rxq->rxq_lock);
   8996 
   8997 	wm_txrxintr_enable(wmq);
   8998 }
   8999 
   9000 /*
   9001  * wm_linkintr_msix:
   9002  *
   9003  *	Interrupt service routine for link status change for MSI-X.
   9004  */
   9005 static int
   9006 wm_linkintr_msix(void *arg)
   9007 {
   9008 	struct wm_softc *sc = arg;
   9009 	uint32_t reg;
   9010 
   9011 	DPRINTF(WM_DEBUG_LINK,
   9012 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9013 
   9014 	reg = CSR_READ(sc, WMREG_ICR);
   9015 	WM_CORE_LOCK(sc);
   9016 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   9017 		goto out;
   9018 
   9019 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9020 	wm_linkintr(sc, ICR_LSC);
   9021 
   9022 out:
   9023 	WM_CORE_UNLOCK(sc);
   9024 
   9025 	if (sc->sc_type == WM_T_82574)
   9026 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9027 	else if (sc->sc_type == WM_T_82575)
   9028 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9029 	else
   9030 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9031 
   9032 	return 1;
   9033 }
   9034 
   9035 /*
   9036  * Media related.
   9037  * GMII, SGMII, TBI (and SERDES)
   9038  */
   9039 
   9040 /* Common */
   9041 
   9042 /*
   9043  * wm_tbi_serdes_set_linkled:
   9044  *
   9045  *	Update the link LED on TBI and SERDES devices.
   9046  */
   9047 static void
   9048 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9049 {
   9050 
   9051 	if (sc->sc_tbi_linkup)
   9052 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9053 	else
   9054 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9055 
   9056 	/* 82540 or newer devices are active low */
   9057 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9058 
   9059 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9060 }
   9061 
   9062 /* GMII related */
   9063 
   9064 /*
   9065  * wm_gmii_reset:
   9066  *
   9067  *	Reset the PHY.
   9068  */
   9069 static void
   9070 wm_gmii_reset(struct wm_softc *sc)
   9071 {
   9072 	uint32_t reg;
   9073 	int rv;
   9074 
   9075 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9076 		device_xname(sc->sc_dev), __func__));
   9077 
   9078 	rv = sc->phy.acquire(sc);
   9079 	if (rv != 0) {
   9080 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9081 		    __func__);
   9082 		return;
   9083 	}
   9084 
   9085 	switch (sc->sc_type) {
   9086 	case WM_T_82542_2_0:
   9087 	case WM_T_82542_2_1:
   9088 		/* null */
   9089 		break;
   9090 	case WM_T_82543:
   9091 		/*
   9092 		 * With 82543, we need to force speed and duplex on the MAC
   9093 		 * equal to what the PHY speed and duplex configuration is.
   9094 		 * In addition, we need to perform a hardware reset on the PHY
   9095 		 * to take it out of reset.
   9096 		 */
   9097 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9098 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9099 
   9100 		/* The PHY reset pin is active-low. */
   9101 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9102 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9103 		    CTRL_EXT_SWDPIN(4));
   9104 		reg |= CTRL_EXT_SWDPIO(4);
   9105 
   9106 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9107 		CSR_WRITE_FLUSH(sc);
   9108 		delay(10*1000);
   9109 
   9110 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9111 		CSR_WRITE_FLUSH(sc);
   9112 		delay(150);
   9113 #if 0
   9114 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9115 #endif
   9116 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9117 		break;
   9118 	case WM_T_82544:	/* reset 10000us */
   9119 	case WM_T_82540:
   9120 	case WM_T_82545:
   9121 	case WM_T_82545_3:
   9122 	case WM_T_82546:
   9123 	case WM_T_82546_3:
   9124 	case WM_T_82541:
   9125 	case WM_T_82541_2:
   9126 	case WM_T_82547:
   9127 	case WM_T_82547_2:
   9128 	case WM_T_82571:	/* reset 100us */
   9129 	case WM_T_82572:
   9130 	case WM_T_82573:
   9131 	case WM_T_82574:
   9132 	case WM_T_82575:
   9133 	case WM_T_82576:
   9134 	case WM_T_82580:
   9135 	case WM_T_I350:
   9136 	case WM_T_I354:
   9137 	case WM_T_I210:
   9138 	case WM_T_I211:
   9139 	case WM_T_82583:
   9140 	case WM_T_80003:
   9141 		/* generic reset */
   9142 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9143 		CSR_WRITE_FLUSH(sc);
   9144 		delay(20000);
   9145 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9146 		CSR_WRITE_FLUSH(sc);
   9147 		delay(20000);
   9148 
   9149 		if ((sc->sc_type == WM_T_82541)
   9150 		    || (sc->sc_type == WM_T_82541_2)
   9151 		    || (sc->sc_type == WM_T_82547)
   9152 		    || (sc->sc_type == WM_T_82547_2)) {
   9153 			/* workaround for igp are done in igp_reset() */
   9154 			/* XXX add code to set LED after phy reset */
   9155 		}
   9156 		break;
   9157 	case WM_T_ICH8:
   9158 	case WM_T_ICH9:
   9159 	case WM_T_ICH10:
   9160 	case WM_T_PCH:
   9161 	case WM_T_PCH2:
   9162 	case WM_T_PCH_LPT:
   9163 	case WM_T_PCH_SPT:
   9164 		/* generic reset */
   9165 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9166 		CSR_WRITE_FLUSH(sc);
   9167 		delay(100);
   9168 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9169 		CSR_WRITE_FLUSH(sc);
   9170 		delay(150);
   9171 		break;
   9172 	default:
   9173 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9174 		    __func__);
   9175 		break;
   9176 	}
   9177 
   9178 	sc->phy.release(sc);
   9179 
   9180 	/* get_cfg_done */
   9181 	wm_get_cfg_done(sc);
   9182 
   9183 	/* extra setup */
   9184 	switch (sc->sc_type) {
   9185 	case WM_T_82542_2_0:
   9186 	case WM_T_82542_2_1:
   9187 	case WM_T_82543:
   9188 	case WM_T_82544:
   9189 	case WM_T_82540:
   9190 	case WM_T_82545:
   9191 	case WM_T_82545_3:
   9192 	case WM_T_82546:
   9193 	case WM_T_82546_3:
   9194 	case WM_T_82541_2:
   9195 	case WM_T_82547_2:
   9196 	case WM_T_82571:
   9197 	case WM_T_82572:
   9198 	case WM_T_82573:
   9199 	case WM_T_82574:
   9200 	case WM_T_82583:
   9201 	case WM_T_82575:
   9202 	case WM_T_82576:
   9203 	case WM_T_82580:
   9204 	case WM_T_I350:
   9205 	case WM_T_I354:
   9206 	case WM_T_I210:
   9207 	case WM_T_I211:
   9208 	case WM_T_80003:
   9209 		/* null */
   9210 		break;
   9211 	case WM_T_82541:
   9212 	case WM_T_82547:
   9213 		/* XXX Configure actively LED after PHY reset */
   9214 		break;
   9215 	case WM_T_ICH8:
   9216 	case WM_T_ICH9:
   9217 	case WM_T_ICH10:
   9218 	case WM_T_PCH:
   9219 	case WM_T_PCH2:
   9220 	case WM_T_PCH_LPT:
   9221 	case WM_T_PCH_SPT:
   9222 		wm_phy_post_reset(sc);
   9223 		break;
   9224 	default:
   9225 		panic("%s: unknown type\n", __func__);
   9226 		break;
   9227 	}
   9228 }
   9229 
   9230 /*
   9231  * Setup sc_phytype and mii_{read|write}reg.
   9232  *
   9233  *  To identify PHY type, correct read/write function should be selected.
   9234  * To select correct read/write function, PCI ID or MAC type are required
   9235  * without accessing PHY registers.
   9236  *
   9237  *  On the first call of this function, PHY ID is not known yet. Check
   9238  * PCI ID or MAC type. The list of the PCI ID may not be perfect, so the
   9239  * result might be incorrect.
   9240  *
   9241  *  In the second call, PHY OUI and model is used to identify PHY type.
   9242  * It might not be perfpect because of the lack of compared entry, but it
   9243  * would be better than the first call.
   9244  *
   9245  *  If the detected new result and previous assumption is different,
   9246  * diagnous message will be printed.
   9247  */
   9248 static void
   9249 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9250     uint16_t phy_model)
   9251 {
   9252 	device_t dev = sc->sc_dev;
   9253 	struct mii_data *mii = &sc->sc_mii;
   9254 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9255 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9256 	mii_readreg_t new_readreg;
   9257 	mii_writereg_t new_writereg;
   9258 
   9259 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9260 		device_xname(sc->sc_dev), __func__));
   9261 
   9262 	if (mii->mii_readreg == NULL) {
   9263 		/*
   9264 		 *  This is the first call of this function. For ICH and PCH
   9265 		 * variants, it's difficult to determine the PHY access method
   9266 		 * by sc_type, so use the PCI product ID for some devices.
   9267 		 */
   9268 
   9269 		switch (sc->sc_pcidevid) {
   9270 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9271 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9272 			/* 82577 */
   9273 			new_phytype = WMPHY_82577;
   9274 			break;
   9275 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9276 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9277 			/* 82578 */
   9278 			new_phytype = WMPHY_82578;
   9279 			break;
   9280 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9281 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9282 			/* 82579 */
   9283 			new_phytype = WMPHY_82579;
   9284 			break;
   9285 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9286 		case PCI_PRODUCT_INTEL_82801I_BM:
   9287 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9288 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9289 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9290 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9291 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9292 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9293 			/* ICH8, 9, 10 with 82567 */
   9294 			new_phytype = WMPHY_BM;
   9295 			break;
   9296 		default:
   9297 			break;
   9298 		}
   9299 	} else {
   9300 		/* It's not the first call. Use PHY OUI and model */
   9301 		switch (phy_oui) {
   9302 		case MII_OUI_ATHEROS: /* XXX ??? */
   9303 			switch (phy_model) {
   9304 			case 0x0004: /* XXX */
   9305 				new_phytype = WMPHY_82578;
   9306 				break;
   9307 			default:
   9308 				break;
   9309 			}
   9310 			break;
   9311 		case MII_OUI_xxMARVELL:
   9312 			switch (phy_model) {
   9313 			case MII_MODEL_xxMARVELL_I210:
   9314 				new_phytype = WMPHY_I210;
   9315 				break;
   9316 			case MII_MODEL_xxMARVELL_E1011:
   9317 			case MII_MODEL_xxMARVELL_E1000_3:
   9318 			case MII_MODEL_xxMARVELL_E1000_5:
   9319 			case MII_MODEL_xxMARVELL_E1112:
   9320 				new_phytype = WMPHY_M88;
   9321 				break;
   9322 			case MII_MODEL_xxMARVELL_E1149:
   9323 				new_phytype = WMPHY_BM;
   9324 				break;
   9325 			case MII_MODEL_xxMARVELL_E1111:
   9326 			case MII_MODEL_xxMARVELL_I347:
   9327 			case MII_MODEL_xxMARVELL_E1512:
   9328 			case MII_MODEL_xxMARVELL_E1340M:
   9329 			case MII_MODEL_xxMARVELL_E1543:
   9330 				new_phytype = WMPHY_M88;
   9331 				break;
   9332 			case MII_MODEL_xxMARVELL_I82563:
   9333 				new_phytype = WMPHY_GG82563;
   9334 				break;
   9335 			default:
   9336 				break;
   9337 			}
   9338 			break;
   9339 		case MII_OUI_INTEL:
   9340 			switch (phy_model) {
   9341 			case MII_MODEL_INTEL_I82577:
   9342 				new_phytype = WMPHY_82577;
   9343 				break;
   9344 			case MII_MODEL_INTEL_I82579:
   9345 				new_phytype = WMPHY_82579;
   9346 				break;
   9347 			case MII_MODEL_INTEL_I217:
   9348 				new_phytype = WMPHY_I217;
   9349 				break;
   9350 			case MII_MODEL_INTEL_I82580:
   9351 			case MII_MODEL_INTEL_I350:
   9352 				new_phytype = WMPHY_82580;
   9353 				break;
   9354 			default:
   9355 				break;
   9356 			}
   9357 			break;
   9358 		case MII_OUI_yyINTEL:
   9359 			switch (phy_model) {
   9360 			case MII_MODEL_yyINTEL_I82562G:
   9361 			case MII_MODEL_yyINTEL_I82562EM:
   9362 			case MII_MODEL_yyINTEL_I82562ET:
   9363 				new_phytype = WMPHY_IFE;
   9364 				break;
   9365 			case MII_MODEL_yyINTEL_IGP01E1000:
   9366 				new_phytype = WMPHY_IGP;
   9367 				break;
   9368 			case MII_MODEL_yyINTEL_I82566:
   9369 				new_phytype = WMPHY_IGP_3;
   9370 				break;
   9371 			default:
   9372 				break;
   9373 			}
   9374 			break;
   9375 		default:
   9376 			break;
   9377 		}
   9378 		if (new_phytype == WMPHY_UNKNOWN)
   9379 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9380 			    __func__);
   9381 
   9382 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9383 		    && (sc->sc_phytype != new_phytype )) {
   9384 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   9385 			    "was incorrect. PHY type from PHY ID = %u\n",
   9386 			    sc->sc_phytype, new_phytype);
   9387 		}
   9388 	}
   9389 
   9390 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9391 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9392 		/* SGMII */
   9393 		new_readreg = wm_sgmii_readreg;
   9394 		new_writereg = wm_sgmii_writereg;
   9395 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9396 		/* BM2 (phyaddr == 1) */
   9397 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9398 		    && (new_phytype != WMPHY_BM)
   9399 		    && (new_phytype != WMPHY_UNKNOWN))
   9400 			doubt_phytype = new_phytype;
   9401 		new_phytype = WMPHY_BM;
   9402 		new_readreg = wm_gmii_bm_readreg;
   9403 		new_writereg = wm_gmii_bm_writereg;
   9404 	} else if (sc->sc_type >= WM_T_PCH) {
   9405 		/* All PCH* use _hv_ */
   9406 		new_readreg = wm_gmii_hv_readreg;
   9407 		new_writereg = wm_gmii_hv_writereg;
   9408 	} else if (sc->sc_type >= WM_T_ICH8) {
   9409 		/* non-82567 ICH8, 9 and 10 */
   9410 		new_readreg = wm_gmii_i82544_readreg;
   9411 		new_writereg = wm_gmii_i82544_writereg;
   9412 	} else if (sc->sc_type >= WM_T_80003) {
   9413 		/* 80003 */
   9414 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9415 		    && (new_phytype != WMPHY_GG82563)
   9416 		    && (new_phytype != WMPHY_UNKNOWN))
   9417 			doubt_phytype = new_phytype;
   9418 		new_phytype = WMPHY_GG82563;
   9419 		new_readreg = wm_gmii_i80003_readreg;
   9420 		new_writereg = wm_gmii_i80003_writereg;
   9421 	} else if (sc->sc_type >= WM_T_I210) {
   9422 		/* I210 and I211 */
   9423 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9424 		    && (new_phytype != WMPHY_I210)
   9425 		    && (new_phytype != WMPHY_UNKNOWN))
   9426 			doubt_phytype = new_phytype;
   9427 		new_phytype = WMPHY_I210;
   9428 		new_readreg = wm_gmii_gs40g_readreg;
   9429 		new_writereg = wm_gmii_gs40g_writereg;
   9430 	} else if (sc->sc_type >= WM_T_82580) {
   9431 		/* 82580, I350 and I354 */
   9432 		new_readreg = wm_gmii_82580_readreg;
   9433 		new_writereg = wm_gmii_82580_writereg;
   9434 	} else if (sc->sc_type >= WM_T_82544) {
   9435 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   9436 		new_readreg = wm_gmii_i82544_readreg;
   9437 		new_writereg = wm_gmii_i82544_writereg;
   9438 	} else {
   9439 		new_readreg = wm_gmii_i82543_readreg;
   9440 		new_writereg = wm_gmii_i82543_writereg;
   9441 	}
   9442 
   9443 	if (new_phytype == WMPHY_BM) {
   9444 		/* All BM use _bm_ */
   9445 		new_readreg = wm_gmii_bm_readreg;
   9446 		new_writereg = wm_gmii_bm_writereg;
   9447 	}
   9448 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9449 		/* All PCH* use _hv_ */
   9450 		new_readreg = wm_gmii_hv_readreg;
   9451 		new_writereg = wm_gmii_hv_writereg;
   9452 	}
   9453 
   9454 	/* Diag output */
   9455 	if (doubt_phytype != WMPHY_UNKNOWN)
   9456 		aprint_error_dev(dev, "Assumed new PHY type was "
   9457 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9458 		    new_phytype);
   9459 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9460 	    && (sc->sc_phytype != new_phytype ))
   9461 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
   9462 		    "was incorrect. New PHY type = %u\n",
   9463 		    sc->sc_phytype, new_phytype);
   9464 
   9465 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9466 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9467 
   9468 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9469 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9470 		    "function was incorrect.\n");
   9471 
   9472 	/* Update now */
   9473 	sc->sc_phytype = new_phytype;
   9474 	mii->mii_readreg = new_readreg;
   9475 	mii->mii_writereg = new_writereg;
   9476 }
   9477 
   9478 /*
   9479  * wm_get_phy_id_82575:
   9480  *
   9481  * Return PHY ID. Return -1 if it failed.
   9482  */
   9483 static int
   9484 wm_get_phy_id_82575(struct wm_softc *sc)
   9485 {
   9486 	uint32_t reg;
   9487 	int phyid = -1;
   9488 
   9489 	/* XXX */
   9490 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9491 		return -1;
   9492 
   9493 	if (wm_sgmii_uses_mdio(sc)) {
   9494 		switch (sc->sc_type) {
   9495 		case WM_T_82575:
   9496 		case WM_T_82576:
   9497 			reg = CSR_READ(sc, WMREG_MDIC);
   9498 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9499 			break;
   9500 		case WM_T_82580:
   9501 		case WM_T_I350:
   9502 		case WM_T_I354:
   9503 		case WM_T_I210:
   9504 		case WM_T_I211:
   9505 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9506 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9507 			break;
   9508 		default:
   9509 			return -1;
   9510 		}
   9511 	}
   9512 
   9513 	return phyid;
   9514 }
   9515 
   9516 
   9517 /*
   9518  * wm_gmii_mediainit:
   9519  *
   9520  *	Initialize media for use on 1000BASE-T devices.
   9521  */
   9522 static void
   9523 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9524 {
   9525 	device_t dev = sc->sc_dev;
   9526 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9527 	struct mii_data *mii = &sc->sc_mii;
   9528 	uint32_t reg;
   9529 
   9530 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9531 		device_xname(sc->sc_dev), __func__));
   9532 
   9533 	/* We have GMII. */
   9534 	sc->sc_flags |= WM_F_HAS_MII;
   9535 
   9536 	if (sc->sc_type == WM_T_80003)
   9537 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9538 	else
   9539 		sc->sc_tipg = TIPG_1000T_DFLT;
   9540 
   9541 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9542 	if ((sc->sc_type == WM_T_82580)
   9543 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9544 	    || (sc->sc_type == WM_T_I211)) {
   9545 		reg = CSR_READ(sc, WMREG_PHPM);
   9546 		reg &= ~PHPM_GO_LINK_D;
   9547 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9548 	}
   9549 
   9550 	/*
   9551 	 * Let the chip set speed/duplex on its own based on
   9552 	 * signals from the PHY.
   9553 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9554 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9555 	 */
   9556 	sc->sc_ctrl |= CTRL_SLU;
   9557 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9558 
   9559 	/* Initialize our media structures and probe the GMII. */
   9560 	mii->mii_ifp = ifp;
   9561 
   9562 	mii->mii_statchg = wm_gmii_statchg;
   9563 
   9564 	/* get PHY control from SMBus to PCIe */
   9565 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9566 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9567 		wm_smbustopci(sc);
   9568 
   9569 	wm_gmii_reset(sc);
   9570 
   9571 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9572 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9573 	    wm_gmii_mediastatus);
   9574 
   9575 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9576 	    || (sc->sc_type == WM_T_82580)
   9577 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9578 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9579 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9580 			/* Attach only one port */
   9581 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9582 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9583 		} else {
   9584 			int i, id;
   9585 			uint32_t ctrl_ext;
   9586 
   9587 			id = wm_get_phy_id_82575(sc);
   9588 			if (id != -1) {
   9589 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9590 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9591 			}
   9592 			if ((id == -1)
   9593 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9594 				/* Power on sgmii phy if it is disabled */
   9595 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9596 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9597 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9598 				CSR_WRITE_FLUSH(sc);
   9599 				delay(300*1000); /* XXX too long */
   9600 
   9601 				/* from 1 to 8 */
   9602 				for (i = 1; i < 8; i++)
   9603 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9604 					    0xffffffff, i, MII_OFFSET_ANY,
   9605 					    MIIF_DOPAUSE);
   9606 
   9607 				/* restore previous sfp cage power state */
   9608 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9609 			}
   9610 		}
   9611 	} else {
   9612 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9613 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9614 	}
   9615 
   9616 	/*
   9617 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9618 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9619 	 */
   9620 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9621 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9622 		wm_set_mdio_slow_mode_hv(sc);
   9623 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9624 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9625 	}
   9626 
   9627 	/*
   9628 	 * (For ICH8 variants)
   9629 	 * If PHY detection failed, use BM's r/w function and retry.
   9630 	 */
   9631 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9632 		/* if failed, retry with *_bm_* */
   9633 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9634 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9635 		    sc->sc_phytype);
   9636 		sc->sc_phytype = WMPHY_BM;
   9637 		mii->mii_readreg = wm_gmii_bm_readreg;
   9638 		mii->mii_writereg = wm_gmii_bm_writereg;
   9639 
   9640 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9641 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9642 	}
   9643 
   9644 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9645 		/* Any PHY wasn't find */
   9646 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9647 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9648 		sc->sc_phytype = WMPHY_NONE;
   9649 	} else {
   9650 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9651 
   9652 		/*
   9653 		 * PHY Found! Check PHY type again by the second call of
   9654 		 * wm_gmii_setup_phytype.
   9655 		 */
   9656 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9657 		    child->mii_mpd_model);
   9658 
   9659 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9660 	}
   9661 }
   9662 
   9663 /*
   9664  * wm_gmii_mediachange:	[ifmedia interface function]
   9665  *
   9666  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9667  */
   9668 static int
   9669 wm_gmii_mediachange(struct ifnet *ifp)
   9670 {
   9671 	struct wm_softc *sc = ifp->if_softc;
   9672 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9673 	int rc;
   9674 
   9675 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9676 		device_xname(sc->sc_dev), __func__));
   9677 	if ((ifp->if_flags & IFF_UP) == 0)
   9678 		return 0;
   9679 
   9680 	/* Disable D0 LPLU. */
   9681 	wm_lplu_d0_disable(sc);
   9682 
   9683 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9684 	sc->sc_ctrl |= CTRL_SLU;
   9685 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9686 	    || (sc->sc_type > WM_T_82543)) {
   9687 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9688 	} else {
   9689 		sc->sc_ctrl &= ~CTRL_ASDE;
   9690 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9691 		if (ife->ifm_media & IFM_FDX)
   9692 			sc->sc_ctrl |= CTRL_FD;
   9693 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9694 		case IFM_10_T:
   9695 			sc->sc_ctrl |= CTRL_SPEED_10;
   9696 			break;
   9697 		case IFM_100_TX:
   9698 			sc->sc_ctrl |= CTRL_SPEED_100;
   9699 			break;
   9700 		case IFM_1000_T:
   9701 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9702 			break;
   9703 		default:
   9704 			panic("wm_gmii_mediachange: bad media 0x%x",
   9705 			    ife->ifm_media);
   9706 		}
   9707 	}
   9708 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9709 	CSR_WRITE_FLUSH(sc);
   9710 	if (sc->sc_type <= WM_T_82543)
   9711 		wm_gmii_reset(sc);
   9712 
   9713 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9714 		return 0;
   9715 	return rc;
   9716 }
   9717 
   9718 /*
   9719  * wm_gmii_mediastatus:	[ifmedia interface function]
   9720  *
   9721  *	Get the current interface media status on a 1000BASE-T device.
   9722  */
   9723 static void
   9724 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9725 {
   9726 	struct wm_softc *sc = ifp->if_softc;
   9727 
   9728 	ether_mediastatus(ifp, ifmr);
   9729 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9730 	    | sc->sc_flowflags;
   9731 }
   9732 
   9733 #define	MDI_IO		CTRL_SWDPIN(2)
   9734 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9735 #define	MDI_CLK		CTRL_SWDPIN(3)
   9736 
   9737 static void
   9738 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9739 {
   9740 	uint32_t i, v;
   9741 
   9742 	v = CSR_READ(sc, WMREG_CTRL);
   9743 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9744 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9745 
   9746 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9747 		if (data & i)
   9748 			v |= MDI_IO;
   9749 		else
   9750 			v &= ~MDI_IO;
   9751 		CSR_WRITE(sc, WMREG_CTRL, v);
   9752 		CSR_WRITE_FLUSH(sc);
   9753 		delay(10);
   9754 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9755 		CSR_WRITE_FLUSH(sc);
   9756 		delay(10);
   9757 		CSR_WRITE(sc, WMREG_CTRL, v);
   9758 		CSR_WRITE_FLUSH(sc);
   9759 		delay(10);
   9760 	}
   9761 }
   9762 
   9763 static uint32_t
   9764 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9765 {
   9766 	uint32_t v, i, data = 0;
   9767 
   9768 	v = CSR_READ(sc, WMREG_CTRL);
   9769 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9770 	v |= CTRL_SWDPIO(3);
   9771 
   9772 	CSR_WRITE(sc, WMREG_CTRL, v);
   9773 	CSR_WRITE_FLUSH(sc);
   9774 	delay(10);
   9775 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9776 	CSR_WRITE_FLUSH(sc);
   9777 	delay(10);
   9778 	CSR_WRITE(sc, WMREG_CTRL, v);
   9779 	CSR_WRITE_FLUSH(sc);
   9780 	delay(10);
   9781 
   9782 	for (i = 0; i < 16; i++) {
   9783 		data <<= 1;
   9784 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9785 		CSR_WRITE_FLUSH(sc);
   9786 		delay(10);
   9787 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9788 			data |= 1;
   9789 		CSR_WRITE(sc, WMREG_CTRL, v);
   9790 		CSR_WRITE_FLUSH(sc);
   9791 		delay(10);
   9792 	}
   9793 
   9794 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9795 	CSR_WRITE_FLUSH(sc);
   9796 	delay(10);
   9797 	CSR_WRITE(sc, WMREG_CTRL, v);
   9798 	CSR_WRITE_FLUSH(sc);
   9799 	delay(10);
   9800 
   9801 	return data;
   9802 }
   9803 
   9804 #undef MDI_IO
   9805 #undef MDI_DIR
   9806 #undef MDI_CLK
   9807 
   9808 /*
   9809  * wm_gmii_i82543_readreg:	[mii interface function]
   9810  *
   9811  *	Read a PHY register on the GMII (i82543 version).
   9812  */
   9813 static int
   9814 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9815 {
   9816 	struct wm_softc *sc = device_private(dev);
   9817 	int rv;
   9818 
   9819 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9820 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9821 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9822 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9823 
   9824 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9825 	    device_xname(dev), phy, reg, rv));
   9826 
   9827 	return rv;
   9828 }
   9829 
   9830 /*
   9831  * wm_gmii_i82543_writereg:	[mii interface function]
   9832  *
   9833  *	Write a PHY register on the GMII (i82543 version).
   9834  */
   9835 static void
   9836 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9837 {
   9838 	struct wm_softc *sc = device_private(dev);
   9839 
   9840 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9841 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9842 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9843 	    (MII_COMMAND_START << 30), 32);
   9844 }
   9845 
   9846 /*
   9847  * wm_gmii_mdic_readreg:	[mii interface function]
   9848  *
   9849  *	Read a PHY register on the GMII.
   9850  */
   9851 static int
   9852 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9853 {
   9854 	struct wm_softc *sc = device_private(dev);
   9855 	uint32_t mdic = 0;
   9856 	int i, rv;
   9857 
   9858 	if (reg > MII_ADDRMASK) {
   9859 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9860 		    __func__, sc->sc_phytype, reg);
   9861 		reg &= MII_ADDRMASK;
   9862 	}
   9863 
   9864 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9865 	    MDIC_REGADD(reg));
   9866 
   9867 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9868 		mdic = CSR_READ(sc, WMREG_MDIC);
   9869 		if (mdic & MDIC_READY)
   9870 			break;
   9871 		delay(50);
   9872 	}
   9873 
   9874 	if ((mdic & MDIC_READY) == 0) {
   9875 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9876 		    device_xname(dev), phy, reg);
   9877 		rv = 0;
   9878 	} else if (mdic & MDIC_E) {
   9879 #if 0 /* This is normal if no PHY is present. */
   9880 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9881 		    device_xname(dev), phy, reg);
   9882 #endif
   9883 		rv = 0;
   9884 	} else {
   9885 		rv = MDIC_DATA(mdic);
   9886 		if (rv == 0xffff)
   9887 			rv = 0;
   9888 	}
   9889 
   9890 	return rv;
   9891 }
   9892 
   9893 /*
   9894  * wm_gmii_mdic_writereg:	[mii interface function]
   9895  *
   9896  *	Write a PHY register on the GMII.
   9897  */
   9898 static void
   9899 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9900 {
   9901 	struct wm_softc *sc = device_private(dev);
   9902 	uint32_t mdic = 0;
   9903 	int i;
   9904 
   9905 	if (reg > MII_ADDRMASK) {
   9906 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9907 		    __func__, sc->sc_phytype, reg);
   9908 		reg &= MII_ADDRMASK;
   9909 	}
   9910 
   9911 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9912 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9913 
   9914 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9915 		mdic = CSR_READ(sc, WMREG_MDIC);
   9916 		if (mdic & MDIC_READY)
   9917 			break;
   9918 		delay(50);
   9919 	}
   9920 
   9921 	if ((mdic & MDIC_READY) == 0)
   9922 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9923 		    device_xname(dev), phy, reg);
   9924 	else if (mdic & MDIC_E)
   9925 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9926 		    device_xname(dev), phy, reg);
   9927 }
   9928 
   9929 /*
   9930  * wm_gmii_i82544_readreg:	[mii interface function]
   9931  *
   9932  *	Read a PHY register on the GMII.
   9933  */
   9934 static int
   9935 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9936 {
   9937 	struct wm_softc *sc = device_private(dev);
   9938 	int rv;
   9939 
   9940 	if (sc->phy.acquire(sc)) {
   9941 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9942 		return 0;
   9943 	}
   9944 
   9945 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9946 		switch (sc->sc_phytype) {
   9947 		case WMPHY_IGP:
   9948 		case WMPHY_IGP_2:
   9949 		case WMPHY_IGP_3:
   9950 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9951 			break;
   9952 		default:
   9953 #ifdef WM_DEBUG
   9954 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9955 			    __func__, sc->sc_phytype, reg);
   9956 #endif
   9957 			break;
   9958 		}
   9959 	}
   9960 
   9961 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9962 	sc->phy.release(sc);
   9963 
   9964 	return rv;
   9965 }
   9966 
   9967 /*
   9968  * wm_gmii_i82544_writereg:	[mii interface function]
   9969  *
   9970  *	Write a PHY register on the GMII.
   9971  */
   9972 static void
   9973 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9974 {
   9975 	struct wm_softc *sc = device_private(dev);
   9976 
   9977 	if (sc->phy.acquire(sc)) {
   9978 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9979 		return;
   9980 	}
   9981 
   9982 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9983 		switch (sc->sc_phytype) {
   9984 		case WMPHY_IGP:
   9985 		case WMPHY_IGP_2:
   9986 		case WMPHY_IGP_3:
   9987 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9988 			break;
   9989 		default:
   9990 #ifdef WM_DEBUG
   9991 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   9992 			    __func__, sc->sc_phytype, reg);
   9993 #endif
   9994 			break;
   9995 		}
   9996 	}
   9997 
   9998 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9999 	sc->phy.release(sc);
   10000 }
   10001 
   10002 /*
   10003  * wm_gmii_i80003_readreg:	[mii interface function]
   10004  *
   10005  *	Read a PHY register on the kumeran
   10006  * This could be handled by the PHY layer if we didn't have to lock the
   10007  * ressource ...
   10008  */
   10009 static int
   10010 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10011 {
   10012 	struct wm_softc *sc = device_private(dev);
   10013 	int page_select, temp;
   10014 	int rv;
   10015 
   10016 	if (phy != 1) /* only one PHY on kumeran bus */
   10017 		return 0;
   10018 
   10019 	if (sc->phy.acquire(sc)) {
   10020 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10021 		return 0;
   10022 	}
   10023 
   10024 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10025 		page_select = GG82563_PHY_PAGE_SELECT;
   10026 	else {
   10027 		/*
   10028 		 * Use Alternative Page Select register to access registers
   10029 		 * 30 and 31.
   10030 		 */
   10031 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10032 	}
   10033 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10034 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10035 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10036 		/*
   10037 		 * Wait more 200us for a bug of the ready bit in the MDIC
   10038 		 * register.
   10039 		 */
   10040 		delay(200);
   10041 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10042 			device_printf(dev, "%s failed\n", __func__);
   10043 			rv = 0; /* XXX */
   10044 			goto out;
   10045 		}
   10046 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10047 		delay(200);
   10048 	} else
   10049 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10050 
   10051 out:
   10052 	sc->phy.release(sc);
   10053 	return rv;
   10054 }
   10055 
   10056 /*
   10057  * wm_gmii_i80003_writereg:	[mii interface function]
   10058  *
   10059  *	Write a PHY register on the kumeran.
   10060  * This could be handled by the PHY layer if we didn't have to lock the
   10061  * ressource ...
   10062  */
   10063 static void
   10064 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10065 {
   10066 	struct wm_softc *sc = device_private(dev);
   10067 	int page_select, temp;
   10068 
   10069 	if (phy != 1) /* only one PHY on kumeran bus */
   10070 		return;
   10071 
   10072 	if (sc->phy.acquire(sc)) {
   10073 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10074 		return;
   10075 	}
   10076 
   10077 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10078 		page_select = GG82563_PHY_PAGE_SELECT;
   10079 	else {
   10080 		/*
   10081 		 * Use Alternative Page Select register to access registers
   10082 		 * 30 and 31.
   10083 		 */
   10084 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10085 	}
   10086 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10087 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10088 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10089 		/*
   10090 		 * Wait more 200us for a bug of the ready bit in the MDIC
   10091 		 * register.
   10092 		 */
   10093 		delay(200);
   10094 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10095 			device_printf(dev, "%s failed\n", __func__);
   10096 			goto out;
   10097 		}
   10098 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10099 		delay(200);
   10100 	} else
   10101 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10102 
   10103 out:
   10104 	sc->phy.release(sc);
   10105 }
   10106 
   10107 /*
   10108  * wm_gmii_bm_readreg:	[mii interface function]
   10109  *
   10110  *	Read a PHY register on the kumeran
   10111  * This could be handled by the PHY layer if we didn't have to lock the
   10112  * ressource ...
   10113  */
   10114 static int
   10115 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10116 {
   10117 	struct wm_softc *sc = device_private(dev);
   10118 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10119 	uint16_t val;
   10120 	int rv;
   10121 
   10122 	if (sc->phy.acquire(sc)) {
   10123 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10124 		return 0;
   10125 	}
   10126 
   10127 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10128 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10129 		    || (reg == 31)) ? 1 : phy;
   10130 	/* Page 800 works differently than the rest so it has its own func */
   10131 	if (page == BM_WUC_PAGE) {
   10132 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10133 		rv = val;
   10134 		goto release;
   10135 	}
   10136 
   10137 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10138 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10139 		    && (sc->sc_type != WM_T_82583))
   10140 			wm_gmii_mdic_writereg(dev, phy,
   10141 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10142 		else
   10143 			wm_gmii_mdic_writereg(dev, phy,
   10144 			    BME1000_PHY_PAGE_SELECT, page);
   10145 	}
   10146 
   10147 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10148 
   10149 release:
   10150 	sc->phy.release(sc);
   10151 	return rv;
   10152 }
   10153 
   10154 /*
   10155  * wm_gmii_bm_writereg:	[mii interface function]
   10156  *
   10157  *	Write a PHY register on the kumeran.
   10158  * This could be handled by the PHY layer if we didn't have to lock the
   10159  * ressource ...
   10160  */
   10161 static void
   10162 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10163 {
   10164 	struct wm_softc *sc = device_private(dev);
   10165 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10166 
   10167 	if (sc->phy.acquire(sc)) {
   10168 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10169 		return;
   10170 	}
   10171 
   10172 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10173 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10174 		    || (reg == 31)) ? 1 : phy;
   10175 	/* Page 800 works differently than the rest so it has its own func */
   10176 	if (page == BM_WUC_PAGE) {
   10177 		uint16_t tmp;
   10178 
   10179 		tmp = val;
   10180 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10181 		goto release;
   10182 	}
   10183 
   10184 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10185 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10186 		    && (sc->sc_type != WM_T_82583))
   10187 			wm_gmii_mdic_writereg(dev, phy,
   10188 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10189 		else
   10190 			wm_gmii_mdic_writereg(dev, phy,
   10191 			    BME1000_PHY_PAGE_SELECT, page);
   10192 	}
   10193 
   10194 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10195 
   10196 release:
   10197 	sc->phy.release(sc);
   10198 }
   10199 
   10200 static void
   10201 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10202 {
   10203 	struct wm_softc *sc = device_private(dev);
   10204 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10205 	uint16_t wuce, reg;
   10206 
   10207 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10208 		device_xname(dev), __func__));
   10209 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10210 	if (sc->sc_type == WM_T_PCH) {
   10211 		/* XXX e1000 driver do nothing... why? */
   10212 	}
   10213 
   10214 	/*
   10215 	 * 1) Enable PHY wakeup register first.
   10216 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10217 	 */
   10218 
   10219 	/* Set page 769 */
   10220 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10221 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10222 
   10223 	/* Read WUCE and save it */
   10224 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10225 
   10226 	reg = wuce | BM_WUC_ENABLE_BIT;
   10227 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10228 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10229 
   10230 	/* Select page 800 */
   10231 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10232 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10233 
   10234 	/*
   10235 	 * 2) Access PHY wakeup register.
   10236 	 * See e1000_access_phy_wakeup_reg_bm.
   10237 	 */
   10238 
   10239 	/* Write page 800 */
   10240 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10241 
   10242 	if (rd)
   10243 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10244 	else
   10245 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10246 
   10247 	/*
   10248 	 * 3) Disable PHY wakeup register.
   10249 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10250 	 */
   10251 	/* Set page 769 */
   10252 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10253 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10254 
   10255 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10256 }
   10257 
   10258 /*
   10259  * wm_gmii_hv_readreg:	[mii interface function]
   10260  *
   10261  *	Read a PHY register on the kumeran
   10262  * This could be handled by the PHY layer if we didn't have to lock the
   10263  * ressource ...
   10264  */
   10265 static int
   10266 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10267 {
   10268 	struct wm_softc *sc = device_private(dev);
   10269 	int rv;
   10270 
   10271 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10272 		device_xname(dev), __func__));
   10273 	if (sc->phy.acquire(sc)) {
   10274 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10275 		return 0;
   10276 	}
   10277 
   10278 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10279 	sc->phy.release(sc);
   10280 	return rv;
   10281 }
   10282 
   10283 static int
   10284 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10285 {
   10286 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10287 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10288 	uint16_t val;
   10289 	int rv;
   10290 
   10291 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10292 
   10293 	/* Page 800 works differently than the rest so it has its own func */
   10294 	if (page == BM_WUC_PAGE) {
   10295 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10296 		return val;
   10297 	}
   10298 
   10299 	/*
   10300 	 * Lower than page 768 works differently than the rest so it has its
   10301 	 * own func
   10302 	 */
   10303 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10304 		printf("gmii_hv_readreg!!!\n");
   10305 		return 0;
   10306 	}
   10307 
   10308 	/*
   10309 	 * XXX I21[789] documents say that the SMBus Address register is at
   10310 	 * PHY address 01, Page 0 (not 768), Register 26.
   10311 	 */
   10312 	if (page == HV_INTC_FC_PAGE_START)
   10313 		page = 0;
   10314 
   10315 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10316 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10317 		    page << BME1000_PAGE_SHIFT);
   10318 	}
   10319 
   10320 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10321 	return rv;
   10322 }
   10323 
   10324 /*
   10325  * wm_gmii_hv_writereg:	[mii interface function]
   10326  *
   10327  *	Write a PHY register on the kumeran.
   10328  * This could be handled by the PHY layer if we didn't have to lock the
   10329  * ressource ...
   10330  */
   10331 static void
   10332 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10333 {
   10334 	struct wm_softc *sc = device_private(dev);
   10335 
   10336 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10337 		device_xname(dev), __func__));
   10338 
   10339 	if (sc->phy.acquire(sc)) {
   10340 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10341 		return;
   10342 	}
   10343 
   10344 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10345 	sc->phy.release(sc);
   10346 }
   10347 
   10348 static void
   10349 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10350 {
   10351 	struct wm_softc *sc = device_private(dev);
   10352 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10353 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10354 
   10355 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10356 
   10357 	/* Page 800 works differently than the rest so it has its own func */
   10358 	if (page == BM_WUC_PAGE) {
   10359 		uint16_t tmp;
   10360 
   10361 		tmp = val;
   10362 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10363 		return;
   10364 	}
   10365 
   10366 	/*
   10367 	 * Lower than page 768 works differently than the rest so it has its
   10368 	 * own func
   10369 	 */
   10370 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10371 		printf("gmii_hv_writereg!!!\n");
   10372 		return;
   10373 	}
   10374 
   10375 	{
   10376 		/*
   10377 		 * XXX I21[789] documents say that the SMBus Address register
   10378 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10379 		 */
   10380 		if (page == HV_INTC_FC_PAGE_START)
   10381 			page = 0;
   10382 
   10383 		/*
   10384 		 * XXX Workaround MDIO accesses being disabled after entering
   10385 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10386 		 * register is set)
   10387 		 */
   10388 		if (sc->sc_phytype == WMPHY_82578) {
   10389 			struct mii_softc *child;
   10390 
   10391 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10392 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10393 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10394 			    && ((val & (1 << 11)) != 0)) {
   10395 				printf("XXX need workaround\n");
   10396 			}
   10397 		}
   10398 
   10399 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10400 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10401 			    page << BME1000_PAGE_SHIFT);
   10402 		}
   10403 	}
   10404 
   10405 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10406 }
   10407 
   10408 /*
   10409  * wm_gmii_82580_readreg:	[mii interface function]
   10410  *
   10411  *	Read a PHY register on the 82580 and I350.
   10412  * This could be handled by the PHY layer if we didn't have to lock the
   10413  * ressource ...
   10414  */
   10415 static int
   10416 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10417 {
   10418 	struct wm_softc *sc = device_private(dev);
   10419 	int rv;
   10420 
   10421 	if (sc->phy.acquire(sc) != 0) {
   10422 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10423 		return 0;
   10424 	}
   10425 
   10426 #ifdef DIAGNOSTIC
   10427 	if (reg > MII_ADDRMASK) {
   10428 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10429 		    __func__, sc->sc_phytype, reg);
   10430 		reg &= MII_ADDRMASK;
   10431 	}
   10432 #endif
   10433 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10434 
   10435 	sc->phy.release(sc);
   10436 	return rv;
   10437 }
   10438 
   10439 /*
   10440  * wm_gmii_82580_writereg:	[mii interface function]
   10441  *
   10442  *	Write a PHY register on the 82580 and I350.
   10443  * This could be handled by the PHY layer if we didn't have to lock the
   10444  * ressource ...
   10445  */
   10446 static void
   10447 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10448 {
   10449 	struct wm_softc *sc = device_private(dev);
   10450 
   10451 	if (sc->phy.acquire(sc) != 0) {
   10452 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10453 		return;
   10454 	}
   10455 
   10456 #ifdef DIAGNOSTIC
   10457 	if (reg > MII_ADDRMASK) {
   10458 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10459 		    __func__, sc->sc_phytype, reg);
   10460 		reg &= MII_ADDRMASK;
   10461 	}
   10462 #endif
   10463 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10464 
   10465 	sc->phy.release(sc);
   10466 }
   10467 
   10468 /*
   10469  * wm_gmii_gs40g_readreg:	[mii interface function]
   10470  *
   10471  *	Read a PHY register on the I2100 and I211.
   10472  * This could be handled by the PHY layer if we didn't have to lock the
   10473  * ressource ...
   10474  */
   10475 static int
   10476 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10477 {
   10478 	struct wm_softc *sc = device_private(dev);
   10479 	int page, offset;
   10480 	int rv;
   10481 
   10482 	/* Acquire semaphore */
   10483 	if (sc->phy.acquire(sc)) {
   10484 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10485 		return 0;
   10486 	}
   10487 
   10488 	/* Page select */
   10489 	page = reg >> GS40G_PAGE_SHIFT;
   10490 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10491 
   10492 	/* Read reg */
   10493 	offset = reg & GS40G_OFFSET_MASK;
   10494 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10495 
   10496 	sc->phy.release(sc);
   10497 	return rv;
   10498 }
   10499 
   10500 /*
   10501  * wm_gmii_gs40g_writereg:	[mii interface function]
   10502  *
   10503  *	Write a PHY register on the I210 and I211.
   10504  * This could be handled by the PHY layer if we didn't have to lock the
   10505  * ressource ...
   10506  */
   10507 static void
   10508 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10509 {
   10510 	struct wm_softc *sc = device_private(dev);
   10511 	int page, offset;
   10512 
   10513 	/* Acquire semaphore */
   10514 	if (sc->phy.acquire(sc)) {
   10515 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10516 		return;
   10517 	}
   10518 
   10519 	/* Page select */
   10520 	page = reg >> GS40G_PAGE_SHIFT;
   10521 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10522 
   10523 	/* Write reg */
   10524 	offset = reg & GS40G_OFFSET_MASK;
   10525 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10526 
   10527 	/* Release semaphore */
   10528 	sc->phy.release(sc);
   10529 }
   10530 
   10531 /*
   10532  * wm_gmii_statchg:	[mii interface function]
   10533  *
   10534  *	Callback from MII layer when media changes.
   10535  */
   10536 static void
   10537 wm_gmii_statchg(struct ifnet *ifp)
   10538 {
   10539 	struct wm_softc *sc = ifp->if_softc;
   10540 	struct mii_data *mii = &sc->sc_mii;
   10541 
   10542 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10543 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10544 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10545 
   10546 	/*
   10547 	 * Get flow control negotiation result.
   10548 	 */
   10549 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10550 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10551 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10552 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10553 	}
   10554 
   10555 	if (sc->sc_flowflags & IFM_FLOW) {
   10556 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10557 			sc->sc_ctrl |= CTRL_TFCE;
   10558 			sc->sc_fcrtl |= FCRTL_XONE;
   10559 		}
   10560 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10561 			sc->sc_ctrl |= CTRL_RFCE;
   10562 	}
   10563 
   10564 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10565 		DPRINTF(WM_DEBUG_LINK,
   10566 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10567 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10568 	} else {
   10569 		DPRINTF(WM_DEBUG_LINK,
   10570 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10571 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10572 	}
   10573 
   10574 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10575 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10576 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10577 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10578 	if (sc->sc_type == WM_T_80003) {
   10579 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10580 		case IFM_1000_T:
   10581 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10582 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10583 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10584 			break;
   10585 		default:
   10586 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10587 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10588 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10589 			break;
   10590 		}
   10591 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10592 	}
   10593 }
   10594 
   10595 /* kumeran related (80003, ICH* and PCH*) */
   10596 
   10597 /*
   10598  * wm_kmrn_readreg:
   10599  *
   10600  *	Read a kumeran register
   10601  */
   10602 static int
   10603 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10604 {
   10605 	int rv;
   10606 
   10607 	if (sc->sc_type == WM_T_80003)
   10608 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10609 	else
   10610 		rv = sc->phy.acquire(sc);
   10611 	if (rv != 0) {
   10612 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10613 		    __func__);
   10614 		return rv;
   10615 	}
   10616 
   10617 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10618 
   10619 	if (sc->sc_type == WM_T_80003)
   10620 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10621 	else
   10622 		sc->phy.release(sc);
   10623 
   10624 	return rv;
   10625 }
   10626 
   10627 static int
   10628 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10629 {
   10630 
   10631 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10632 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10633 	    KUMCTRLSTA_REN);
   10634 	CSR_WRITE_FLUSH(sc);
   10635 	delay(2);
   10636 
   10637 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10638 
   10639 	return 0;
   10640 }
   10641 
   10642 /*
   10643  * wm_kmrn_writereg:
   10644  *
   10645  *	Write a kumeran register
   10646  */
   10647 static int
   10648 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10649 {
   10650 	int rv;
   10651 
   10652 	if (sc->sc_type == WM_T_80003)
   10653 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10654 	else
   10655 		rv = sc->phy.acquire(sc);
   10656 	if (rv != 0) {
   10657 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10658 		    __func__);
   10659 		return rv;
   10660 	}
   10661 
   10662 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10663 
   10664 	if (sc->sc_type == WM_T_80003)
   10665 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10666 	else
   10667 		sc->phy.release(sc);
   10668 
   10669 	return rv;
   10670 }
   10671 
   10672 static int
   10673 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10674 {
   10675 
   10676 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10677 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10678 
   10679 	return 0;
   10680 }
   10681 
   10682 /* SGMII related */
   10683 
   10684 /*
   10685  * wm_sgmii_uses_mdio
   10686  *
   10687  * Check whether the transaction is to the internal PHY or the external
   10688  * MDIO interface. Return true if it's MDIO.
   10689  */
   10690 static bool
   10691 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10692 {
   10693 	uint32_t reg;
   10694 	bool ismdio = false;
   10695 
   10696 	switch (sc->sc_type) {
   10697 	case WM_T_82575:
   10698 	case WM_T_82576:
   10699 		reg = CSR_READ(sc, WMREG_MDIC);
   10700 		ismdio = ((reg & MDIC_DEST) != 0);
   10701 		break;
   10702 	case WM_T_82580:
   10703 	case WM_T_I350:
   10704 	case WM_T_I354:
   10705 	case WM_T_I210:
   10706 	case WM_T_I211:
   10707 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10708 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10709 		break;
   10710 	default:
   10711 		break;
   10712 	}
   10713 
   10714 	return ismdio;
   10715 }
   10716 
   10717 /*
   10718  * wm_sgmii_readreg:	[mii interface function]
   10719  *
   10720  *	Read a PHY register on the SGMII
   10721  * This could be handled by the PHY layer if we didn't have to lock the
   10722  * ressource ...
   10723  */
   10724 static int
   10725 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10726 {
   10727 	struct wm_softc *sc = device_private(dev);
   10728 	uint32_t i2ccmd;
   10729 	int i, rv;
   10730 
   10731 	if (sc->phy.acquire(sc)) {
   10732 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10733 		return 0;
   10734 	}
   10735 
   10736 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10737 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10738 	    | I2CCMD_OPCODE_READ;
   10739 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10740 
   10741 	/* Poll the ready bit */
   10742 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10743 		delay(50);
   10744 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10745 		if (i2ccmd & I2CCMD_READY)
   10746 			break;
   10747 	}
   10748 	if ((i2ccmd & I2CCMD_READY) == 0)
   10749 		device_printf(dev, "I2CCMD Read did not complete\n");
   10750 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10751 		device_printf(dev, "I2CCMD Error bit set\n");
   10752 
   10753 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10754 
   10755 	sc->phy.release(sc);
   10756 	return rv;
   10757 }
   10758 
   10759 /*
   10760  * wm_sgmii_writereg:	[mii interface function]
   10761  *
   10762  *	Write a PHY register on the SGMII.
   10763  * This could be handled by the PHY layer if we didn't have to lock the
   10764  * ressource ...
   10765  */
   10766 static void
   10767 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10768 {
   10769 	struct wm_softc *sc = device_private(dev);
   10770 	uint32_t i2ccmd;
   10771 	int i;
   10772 	int val_swapped;
   10773 
   10774 	if (sc->phy.acquire(sc) != 0) {
   10775 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10776 		return;
   10777 	}
   10778 	/* Swap the data bytes for the I2C interface */
   10779 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10780 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10781 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10782 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10783 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10784 
   10785 	/* Poll the ready bit */
   10786 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10787 		delay(50);
   10788 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10789 		if (i2ccmd & I2CCMD_READY)
   10790 			break;
   10791 	}
   10792 	if ((i2ccmd & I2CCMD_READY) == 0)
   10793 		device_printf(dev, "I2CCMD Write did not complete\n");
   10794 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10795 		device_printf(dev, "I2CCMD Error bit set\n");
   10796 
   10797 	sc->phy.release(sc);
   10798 }
   10799 
   10800 /* TBI related */
   10801 
   10802 /*
   10803  * wm_tbi_mediainit:
   10804  *
   10805  *	Initialize media for use on 1000BASE-X devices.
   10806  */
   10807 static void
   10808 wm_tbi_mediainit(struct wm_softc *sc)
   10809 {
   10810 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10811 	const char *sep = "";
   10812 
   10813 	if (sc->sc_type < WM_T_82543)
   10814 		sc->sc_tipg = TIPG_WM_DFLT;
   10815 	else
   10816 		sc->sc_tipg = TIPG_LG_DFLT;
   10817 
   10818 	sc->sc_tbi_serdes_anegticks = 5;
   10819 
   10820 	/* Initialize our media structures */
   10821 	sc->sc_mii.mii_ifp = ifp;
   10822 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10823 
   10824 	if ((sc->sc_type >= WM_T_82575)
   10825 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10826 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10827 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10828 	else
   10829 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10830 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10831 
   10832 	/*
   10833 	 * SWD Pins:
   10834 	 *
   10835 	 *	0 = Link LED (output)
   10836 	 *	1 = Loss Of Signal (input)
   10837 	 */
   10838 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10839 
   10840 	/* XXX Perhaps this is only for TBI */
   10841 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10842 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10843 
   10844 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10845 		sc->sc_ctrl &= ~CTRL_LRST;
   10846 
   10847 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10848 
   10849 #define	ADD(ss, mm, dd)							\
   10850 do {									\
   10851 	aprint_normal("%s%s", sep, ss);					\
   10852 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10853 	sep = ", ";							\
   10854 } while (/*CONSTCOND*/0)
   10855 
   10856 	aprint_normal_dev(sc->sc_dev, "");
   10857 
   10858 	if (sc->sc_type == WM_T_I354) {
   10859 		uint32_t status;
   10860 
   10861 		status = CSR_READ(sc, WMREG_STATUS);
   10862 		if (((status & STATUS_2P5_SKU) != 0)
   10863 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10864 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10865 		} else
   10866 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10867 	} else if (sc->sc_type == WM_T_82545) {
   10868 		/* Only 82545 is LX (XXX except SFP) */
   10869 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10870 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10871 	} else {
   10872 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10873 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10874 	}
   10875 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10876 	aprint_normal("\n");
   10877 
   10878 #undef ADD
   10879 
   10880 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10881 }
   10882 
   10883 /*
   10884  * wm_tbi_mediachange:	[ifmedia interface function]
   10885  *
   10886  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10887  */
   10888 static int
   10889 wm_tbi_mediachange(struct ifnet *ifp)
   10890 {
   10891 	struct wm_softc *sc = ifp->if_softc;
   10892 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10893 	uint32_t status;
   10894 	int i;
   10895 
   10896 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10897 		/* XXX need some work for >= 82571 and < 82575 */
   10898 		if (sc->sc_type < WM_T_82575)
   10899 			return 0;
   10900 	}
   10901 
   10902 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10903 	    || (sc->sc_type >= WM_T_82575))
   10904 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10905 
   10906 	sc->sc_ctrl &= ~CTRL_LRST;
   10907 	sc->sc_txcw = TXCW_ANE;
   10908 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10909 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10910 	else if (ife->ifm_media & IFM_FDX)
   10911 		sc->sc_txcw |= TXCW_FD;
   10912 	else
   10913 		sc->sc_txcw |= TXCW_HD;
   10914 
   10915 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10916 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10917 
   10918 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10919 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10920 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10921 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10922 	CSR_WRITE_FLUSH(sc);
   10923 	delay(1000);
   10924 
   10925 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10926 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10927 
   10928 	/*
   10929 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   10930 	 * optics detect a signal, 0 if they don't.
   10931 	 */
   10932 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10933 		/* Have signal; wait for the link to come up. */
   10934 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10935 			delay(10000);
   10936 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10937 				break;
   10938 		}
   10939 
   10940 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10941 			    device_xname(sc->sc_dev),i));
   10942 
   10943 		status = CSR_READ(sc, WMREG_STATUS);
   10944 		DPRINTF(WM_DEBUG_LINK,
   10945 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10946 			device_xname(sc->sc_dev),status, STATUS_LU));
   10947 		if (status & STATUS_LU) {
   10948 			/* Link is up. */
   10949 			DPRINTF(WM_DEBUG_LINK,
   10950 			    ("%s: LINK: set media -> link up %s\n",
   10951 			    device_xname(sc->sc_dev),
   10952 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10953 
   10954 			/*
   10955 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10956 			 * so we should update sc->sc_ctrl
   10957 			 */
   10958 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10959 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10960 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10961 			if (status & STATUS_FD)
   10962 				sc->sc_tctl |=
   10963 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10964 			else
   10965 				sc->sc_tctl |=
   10966 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10967 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10968 				sc->sc_fcrtl |= FCRTL_XONE;
   10969 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10970 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10971 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10972 				      sc->sc_fcrtl);
   10973 			sc->sc_tbi_linkup = 1;
   10974 		} else {
   10975 			if (i == WM_LINKUP_TIMEOUT)
   10976 				wm_check_for_link(sc);
   10977 			/* Link is down. */
   10978 			DPRINTF(WM_DEBUG_LINK,
   10979 			    ("%s: LINK: set media -> link down\n",
   10980 			    device_xname(sc->sc_dev)));
   10981 			sc->sc_tbi_linkup = 0;
   10982 		}
   10983 	} else {
   10984 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10985 		    device_xname(sc->sc_dev)));
   10986 		sc->sc_tbi_linkup = 0;
   10987 	}
   10988 
   10989 	wm_tbi_serdes_set_linkled(sc);
   10990 
   10991 	return 0;
   10992 }
   10993 
   10994 /*
   10995  * wm_tbi_mediastatus:	[ifmedia interface function]
   10996  *
   10997  *	Get the current interface media status on a 1000BASE-X device.
   10998  */
   10999 static void
   11000 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11001 {
   11002 	struct wm_softc *sc = ifp->if_softc;
   11003 	uint32_t ctrl, status;
   11004 
   11005 	ifmr->ifm_status = IFM_AVALID;
   11006 	ifmr->ifm_active = IFM_ETHER;
   11007 
   11008 	status = CSR_READ(sc, WMREG_STATUS);
   11009 	if ((status & STATUS_LU) == 0) {
   11010 		ifmr->ifm_active |= IFM_NONE;
   11011 		return;
   11012 	}
   11013 
   11014 	ifmr->ifm_status |= IFM_ACTIVE;
   11015 	/* Only 82545 is LX */
   11016 	if (sc->sc_type == WM_T_82545)
   11017 		ifmr->ifm_active |= IFM_1000_LX;
   11018 	else
   11019 		ifmr->ifm_active |= IFM_1000_SX;
   11020 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11021 		ifmr->ifm_active |= IFM_FDX;
   11022 	else
   11023 		ifmr->ifm_active |= IFM_HDX;
   11024 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11025 	if (ctrl & CTRL_RFCE)
   11026 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11027 	if (ctrl & CTRL_TFCE)
   11028 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11029 }
   11030 
   11031 /* XXX TBI only */
   11032 static int
   11033 wm_check_for_link(struct wm_softc *sc)
   11034 {
   11035 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11036 	uint32_t rxcw;
   11037 	uint32_t ctrl;
   11038 	uint32_t status;
   11039 	uint32_t sig;
   11040 
   11041 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11042 		/* XXX need some work for >= 82571 */
   11043 		if (sc->sc_type >= WM_T_82571) {
   11044 			sc->sc_tbi_linkup = 1;
   11045 			return 0;
   11046 		}
   11047 	}
   11048 
   11049 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11050 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11051 	status = CSR_READ(sc, WMREG_STATUS);
   11052 
   11053 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11054 
   11055 	DPRINTF(WM_DEBUG_LINK,
   11056 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11057 		device_xname(sc->sc_dev), __func__,
   11058 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11059 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11060 
   11061 	/*
   11062 	 * SWDPIN   LU RXCW
   11063 	 *      0    0    0
   11064 	 *      0    0    1	(should not happen)
   11065 	 *      0    1    0	(should not happen)
   11066 	 *      0    1    1	(should not happen)
   11067 	 *      1    0    0	Disable autonego and force linkup
   11068 	 *      1    0    1	got /C/ but not linkup yet
   11069 	 *      1    1    0	(linkup)
   11070 	 *      1    1    1	If IFM_AUTO, back to autonego
   11071 	 *
   11072 	 */
   11073 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11074 	    && ((status & STATUS_LU) == 0)
   11075 	    && ((rxcw & RXCW_C) == 0)) {
   11076 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11077 			__func__));
   11078 		sc->sc_tbi_linkup = 0;
   11079 		/* Disable auto-negotiation in the TXCW register */
   11080 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11081 
   11082 		/*
   11083 		 * Force link-up and also force full-duplex.
   11084 		 *
   11085 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
   11086 		 * so we should update sc->sc_ctrl
   11087 		 */
   11088 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11089 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11090 	} else if (((status & STATUS_LU) != 0)
   11091 	    && ((rxcw & RXCW_C) != 0)
   11092 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11093 		sc->sc_tbi_linkup = 1;
   11094 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11095 			__func__));
   11096 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11097 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11098 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11099 	    && ((rxcw & RXCW_C) != 0)) {
   11100 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11101 	} else {
   11102 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11103 			status));
   11104 	}
   11105 
   11106 	return 0;
   11107 }
   11108 
   11109 /*
   11110  * wm_tbi_tick:
   11111  *
   11112  *	Check the link on TBI devices.
   11113  *	This function acts as mii_tick().
   11114  */
   11115 static void
   11116 wm_tbi_tick(struct wm_softc *sc)
   11117 {
   11118 	struct mii_data *mii = &sc->sc_mii;
   11119 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11120 	uint32_t status;
   11121 
   11122 	KASSERT(WM_CORE_LOCKED(sc));
   11123 
   11124 	status = CSR_READ(sc, WMREG_STATUS);
   11125 
   11126 	/* XXX is this needed? */
   11127 	(void)CSR_READ(sc, WMREG_RXCW);
   11128 	(void)CSR_READ(sc, WMREG_CTRL);
   11129 
   11130 	/* set link status */
   11131 	if ((status & STATUS_LU) == 0) {
   11132 		DPRINTF(WM_DEBUG_LINK,
   11133 		    ("%s: LINK: checklink -> down\n",
   11134 			device_xname(sc->sc_dev)));
   11135 		sc->sc_tbi_linkup = 0;
   11136 	} else if (sc->sc_tbi_linkup == 0) {
   11137 		DPRINTF(WM_DEBUG_LINK,
   11138 		    ("%s: LINK: checklink -> up %s\n",
   11139 			device_xname(sc->sc_dev),
   11140 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11141 		sc->sc_tbi_linkup = 1;
   11142 		sc->sc_tbi_serdes_ticks = 0;
   11143 	}
   11144 
   11145 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11146 		goto setled;
   11147 
   11148 	if ((status & STATUS_LU) == 0) {
   11149 		sc->sc_tbi_linkup = 0;
   11150 		/* If the timer expired, retry autonegotiation */
   11151 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11152 		    && (++sc->sc_tbi_serdes_ticks
   11153 			>= sc->sc_tbi_serdes_anegticks)) {
   11154 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11155 			sc->sc_tbi_serdes_ticks = 0;
   11156 			/*
   11157 			 * Reset the link, and let autonegotiation do
   11158 			 * its thing
   11159 			 */
   11160 			sc->sc_ctrl |= CTRL_LRST;
   11161 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11162 			CSR_WRITE_FLUSH(sc);
   11163 			delay(1000);
   11164 			sc->sc_ctrl &= ~CTRL_LRST;
   11165 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11166 			CSR_WRITE_FLUSH(sc);
   11167 			delay(1000);
   11168 			CSR_WRITE(sc, WMREG_TXCW,
   11169 			    sc->sc_txcw & ~TXCW_ANE);
   11170 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11171 		}
   11172 	}
   11173 
   11174 setled:
   11175 	wm_tbi_serdes_set_linkled(sc);
   11176 }
   11177 
   11178 /* SERDES related */
   11179 static void
   11180 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11181 {
   11182 	uint32_t reg;
   11183 
   11184 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11185 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11186 		return;
   11187 
   11188 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11189 	reg |= PCS_CFG_PCS_EN;
   11190 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11191 
   11192 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11193 	reg &= ~CTRL_EXT_SWDPIN(3);
   11194 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11195 	CSR_WRITE_FLUSH(sc);
   11196 }
   11197 
   11198 static int
   11199 wm_serdes_mediachange(struct ifnet *ifp)
   11200 {
   11201 	struct wm_softc *sc = ifp->if_softc;
   11202 	bool pcs_autoneg = true; /* XXX */
   11203 	uint32_t ctrl_ext, pcs_lctl, reg;
   11204 
   11205 	/* XXX Currently, this function is not called on 8257[12] */
   11206 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11207 	    || (sc->sc_type >= WM_T_82575))
   11208 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11209 
   11210 	wm_serdes_power_up_link_82575(sc);
   11211 
   11212 	sc->sc_ctrl |= CTRL_SLU;
   11213 
   11214 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11215 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11216 
   11217 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11218 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11219 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11220 	case CTRL_EXT_LINK_MODE_SGMII:
   11221 		pcs_autoneg = true;
   11222 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11223 		break;
   11224 	case CTRL_EXT_LINK_MODE_1000KX:
   11225 		pcs_autoneg = false;
   11226 		/* FALLTHROUGH */
   11227 	default:
   11228 		if ((sc->sc_type == WM_T_82575)
   11229 		    || (sc->sc_type == WM_T_82576)) {
   11230 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11231 				pcs_autoneg = false;
   11232 		}
   11233 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11234 		    | CTRL_FRCFDX;
   11235 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11236 	}
   11237 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11238 
   11239 	if (pcs_autoneg) {
   11240 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11241 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11242 
   11243 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11244 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11245 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11246 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11247 	} else
   11248 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11249 
   11250 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11251 
   11252 
   11253 	return 0;
   11254 }
   11255 
   11256 static void
   11257 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11258 {
   11259 	struct wm_softc *sc = ifp->if_softc;
   11260 	struct mii_data *mii = &sc->sc_mii;
   11261 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11262 	uint32_t pcs_adv, pcs_lpab, reg;
   11263 
   11264 	ifmr->ifm_status = IFM_AVALID;
   11265 	ifmr->ifm_active = IFM_ETHER;
   11266 
   11267 	/* Check PCS */
   11268 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11269 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11270 		ifmr->ifm_active |= IFM_NONE;
   11271 		sc->sc_tbi_linkup = 0;
   11272 		goto setled;
   11273 	}
   11274 
   11275 	sc->sc_tbi_linkup = 1;
   11276 	ifmr->ifm_status |= IFM_ACTIVE;
   11277 	if (sc->sc_type == WM_T_I354) {
   11278 		uint32_t status;
   11279 
   11280 		status = CSR_READ(sc, WMREG_STATUS);
   11281 		if (((status & STATUS_2P5_SKU) != 0)
   11282 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11283 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11284 		} else
   11285 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11286 	} else {
   11287 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11288 		case PCS_LSTS_SPEED_10:
   11289 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11290 			break;
   11291 		case PCS_LSTS_SPEED_100:
   11292 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11293 			break;
   11294 		case PCS_LSTS_SPEED_1000:
   11295 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11296 			break;
   11297 		default:
   11298 			device_printf(sc->sc_dev, "Unknown speed\n");
   11299 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11300 			break;
   11301 		}
   11302 	}
   11303 	if ((reg & PCS_LSTS_FDX) != 0)
   11304 		ifmr->ifm_active |= IFM_FDX;
   11305 	else
   11306 		ifmr->ifm_active |= IFM_HDX;
   11307 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11308 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11309 		/* Check flow */
   11310 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11311 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11312 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11313 			goto setled;
   11314 		}
   11315 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11316 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11317 		DPRINTF(WM_DEBUG_LINK,
   11318 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11319 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11320 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11321 			mii->mii_media_active |= IFM_FLOW
   11322 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11323 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11324 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11325 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11326 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11327 			mii->mii_media_active |= IFM_FLOW
   11328 			    | IFM_ETH_TXPAUSE;
   11329 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11330 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11331 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11332 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11333 			mii->mii_media_active |= IFM_FLOW
   11334 			    | IFM_ETH_RXPAUSE;
   11335 		}
   11336 	}
   11337 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11338 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11339 setled:
   11340 	wm_tbi_serdes_set_linkled(sc);
   11341 }
   11342 
   11343 /*
   11344  * wm_serdes_tick:
   11345  *
   11346  *	Check the link on serdes devices.
   11347  */
   11348 static void
   11349 wm_serdes_tick(struct wm_softc *sc)
   11350 {
   11351 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11352 	struct mii_data *mii = &sc->sc_mii;
   11353 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11354 	uint32_t reg;
   11355 
   11356 	KASSERT(WM_CORE_LOCKED(sc));
   11357 
   11358 	mii->mii_media_status = IFM_AVALID;
   11359 	mii->mii_media_active = IFM_ETHER;
   11360 
   11361 	/* Check PCS */
   11362 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11363 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11364 		mii->mii_media_status |= IFM_ACTIVE;
   11365 		sc->sc_tbi_linkup = 1;
   11366 		sc->sc_tbi_serdes_ticks = 0;
   11367 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11368 		if ((reg & PCS_LSTS_FDX) != 0)
   11369 			mii->mii_media_active |= IFM_FDX;
   11370 		else
   11371 			mii->mii_media_active |= IFM_HDX;
   11372 	} else {
   11373 		mii->mii_media_status |= IFM_NONE;
   11374 		sc->sc_tbi_linkup = 0;
   11375 		/* If the timer expired, retry autonegotiation */
   11376 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11377 		    && (++sc->sc_tbi_serdes_ticks
   11378 			>= sc->sc_tbi_serdes_anegticks)) {
   11379 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11380 			sc->sc_tbi_serdes_ticks = 0;
   11381 			/* XXX */
   11382 			wm_serdes_mediachange(ifp);
   11383 		}
   11384 	}
   11385 
   11386 	wm_tbi_serdes_set_linkled(sc);
   11387 }
   11388 
   11389 /* SFP related */
   11390 
   11391 static int
   11392 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11393 {
   11394 	uint32_t i2ccmd;
   11395 	int i;
   11396 
   11397 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11398 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11399 
   11400 	/* Poll the ready bit */
   11401 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11402 		delay(50);
   11403 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11404 		if (i2ccmd & I2CCMD_READY)
   11405 			break;
   11406 	}
   11407 	if ((i2ccmd & I2CCMD_READY) == 0)
   11408 		return -1;
   11409 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11410 		return -1;
   11411 
   11412 	*data = i2ccmd & 0x00ff;
   11413 
   11414 	return 0;
   11415 }
   11416 
   11417 static uint32_t
   11418 wm_sfp_get_media_type(struct wm_softc *sc)
   11419 {
   11420 	uint32_t ctrl_ext;
   11421 	uint8_t val = 0;
   11422 	int timeout = 3;
   11423 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11424 	int rv = -1;
   11425 
   11426 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11427 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11428 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11429 	CSR_WRITE_FLUSH(sc);
   11430 
   11431 	/* Read SFP module data */
   11432 	while (timeout) {
   11433 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11434 		if (rv == 0)
   11435 			break;
   11436 		delay(100*1000); /* XXX too big */
   11437 		timeout--;
   11438 	}
   11439 	if (rv != 0)
   11440 		goto out;
   11441 	switch (val) {
   11442 	case SFF_SFP_ID_SFF:
   11443 		aprint_normal_dev(sc->sc_dev,
   11444 		    "Module/Connector soldered to board\n");
   11445 		break;
   11446 	case SFF_SFP_ID_SFP:
   11447 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11448 		break;
   11449 	case SFF_SFP_ID_UNKNOWN:
   11450 		goto out;
   11451 	default:
   11452 		break;
   11453 	}
   11454 
   11455 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11456 	if (rv != 0) {
   11457 		goto out;
   11458 	}
   11459 
   11460 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11461 		mediatype = WM_MEDIATYPE_SERDES;
   11462 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   11463 		sc->sc_flags |= WM_F_SGMII;
   11464 		mediatype = WM_MEDIATYPE_COPPER;
   11465 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   11466 		sc->sc_flags |= WM_F_SGMII;
   11467 		mediatype = WM_MEDIATYPE_SERDES;
   11468 	}
   11469 
   11470 out:
   11471 	/* Restore I2C interface setting */
   11472 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11473 
   11474 	return mediatype;
   11475 }
   11476 
   11477 /*
   11478  * NVM related.
   11479  * Microwire, SPI (w/wo EERD) and Flash.
   11480  */
   11481 
   11482 /* Both spi and uwire */
   11483 
   11484 /*
   11485  * wm_eeprom_sendbits:
   11486  *
   11487  *	Send a series of bits to the EEPROM.
   11488  */
   11489 static void
   11490 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11491 {
   11492 	uint32_t reg;
   11493 	int x;
   11494 
   11495 	reg = CSR_READ(sc, WMREG_EECD);
   11496 
   11497 	for (x = nbits; x > 0; x--) {
   11498 		if (bits & (1U << (x - 1)))
   11499 			reg |= EECD_DI;
   11500 		else
   11501 			reg &= ~EECD_DI;
   11502 		CSR_WRITE(sc, WMREG_EECD, reg);
   11503 		CSR_WRITE_FLUSH(sc);
   11504 		delay(2);
   11505 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11506 		CSR_WRITE_FLUSH(sc);
   11507 		delay(2);
   11508 		CSR_WRITE(sc, WMREG_EECD, reg);
   11509 		CSR_WRITE_FLUSH(sc);
   11510 		delay(2);
   11511 	}
   11512 }
   11513 
   11514 /*
   11515  * wm_eeprom_recvbits:
   11516  *
   11517  *	Receive a series of bits from the EEPROM.
   11518  */
   11519 static void
   11520 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11521 {
   11522 	uint32_t reg, val;
   11523 	int x;
   11524 
   11525 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11526 
   11527 	val = 0;
   11528 	for (x = nbits; x > 0; x--) {
   11529 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11530 		CSR_WRITE_FLUSH(sc);
   11531 		delay(2);
   11532 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11533 			val |= (1U << (x - 1));
   11534 		CSR_WRITE(sc, WMREG_EECD, reg);
   11535 		CSR_WRITE_FLUSH(sc);
   11536 		delay(2);
   11537 	}
   11538 	*valp = val;
   11539 }
   11540 
   11541 /* Microwire */
   11542 
   11543 /*
   11544  * wm_nvm_read_uwire:
   11545  *
   11546  *	Read a word from the EEPROM using the MicroWire protocol.
   11547  */
   11548 static int
   11549 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11550 {
   11551 	uint32_t reg, val;
   11552 	int i;
   11553 
   11554 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11555 		device_xname(sc->sc_dev), __func__));
   11556 
   11557 	if (sc->nvm.acquire(sc) != 0)
   11558 		return -1;
   11559 
   11560 	for (i = 0; i < wordcnt; i++) {
   11561 		/* Clear SK and DI. */
   11562 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11563 		CSR_WRITE(sc, WMREG_EECD, reg);
   11564 
   11565 		/*
   11566 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11567 		 * and Xen.
   11568 		 *
   11569 		 * We use this workaround only for 82540 because qemu's
   11570 		 * e1000 act as 82540.
   11571 		 */
   11572 		if (sc->sc_type == WM_T_82540) {
   11573 			reg |= EECD_SK;
   11574 			CSR_WRITE(sc, WMREG_EECD, reg);
   11575 			reg &= ~EECD_SK;
   11576 			CSR_WRITE(sc, WMREG_EECD, reg);
   11577 			CSR_WRITE_FLUSH(sc);
   11578 			delay(2);
   11579 		}
   11580 		/* XXX: end of workaround */
   11581 
   11582 		/* Set CHIP SELECT. */
   11583 		reg |= EECD_CS;
   11584 		CSR_WRITE(sc, WMREG_EECD, reg);
   11585 		CSR_WRITE_FLUSH(sc);
   11586 		delay(2);
   11587 
   11588 		/* Shift in the READ command. */
   11589 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11590 
   11591 		/* Shift in address. */
   11592 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11593 
   11594 		/* Shift out the data. */
   11595 		wm_eeprom_recvbits(sc, &val, 16);
   11596 		data[i] = val & 0xffff;
   11597 
   11598 		/* Clear CHIP SELECT. */
   11599 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11600 		CSR_WRITE(sc, WMREG_EECD, reg);
   11601 		CSR_WRITE_FLUSH(sc);
   11602 		delay(2);
   11603 	}
   11604 
   11605 	sc->nvm.release(sc);
   11606 	return 0;
   11607 }
   11608 
   11609 /* SPI */
   11610 
   11611 /*
   11612  * Set SPI and FLASH related information from the EECD register.
   11613  * For 82541 and 82547, the word size is taken from EEPROM.
   11614  */
   11615 static int
   11616 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11617 {
   11618 	int size;
   11619 	uint32_t reg;
   11620 	uint16_t data;
   11621 
   11622 	reg = CSR_READ(sc, WMREG_EECD);
   11623 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11624 
   11625 	/* Read the size of NVM from EECD by default */
   11626 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11627 	switch (sc->sc_type) {
   11628 	case WM_T_82541:
   11629 	case WM_T_82541_2:
   11630 	case WM_T_82547:
   11631 	case WM_T_82547_2:
   11632 		/* Set dummy value to access EEPROM */
   11633 		sc->sc_nvm_wordsize = 64;
   11634 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11635 			aprint_error_dev(sc->sc_dev,
   11636 			    "%s: failed to read EEPROM size\n", __func__);
   11637 		}
   11638 		reg = data;
   11639 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11640 		if (size == 0)
   11641 			size = 6; /* 64 word size */
   11642 		else
   11643 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11644 		break;
   11645 	case WM_T_80003:
   11646 	case WM_T_82571:
   11647 	case WM_T_82572:
   11648 	case WM_T_82573: /* SPI case */
   11649 	case WM_T_82574: /* SPI case */
   11650 	case WM_T_82583: /* SPI case */
   11651 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11652 		if (size > 14)
   11653 			size = 14;
   11654 		break;
   11655 	case WM_T_82575:
   11656 	case WM_T_82576:
   11657 	case WM_T_82580:
   11658 	case WM_T_I350:
   11659 	case WM_T_I354:
   11660 	case WM_T_I210:
   11661 	case WM_T_I211:
   11662 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11663 		if (size > 15)
   11664 			size = 15;
   11665 		break;
   11666 	default:
   11667 		aprint_error_dev(sc->sc_dev,
   11668 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11669 		return -1;
   11670 		break;
   11671 	}
   11672 
   11673 	sc->sc_nvm_wordsize = 1 << size;
   11674 
   11675 	return 0;
   11676 }
   11677 
   11678 /*
   11679  * wm_nvm_ready_spi:
   11680  *
   11681  *	Wait for a SPI EEPROM to be ready for commands.
   11682  */
   11683 static int
   11684 wm_nvm_ready_spi(struct wm_softc *sc)
   11685 {
   11686 	uint32_t val;
   11687 	int usec;
   11688 
   11689 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11690 		device_xname(sc->sc_dev), __func__));
   11691 
   11692 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11693 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11694 		wm_eeprom_recvbits(sc, &val, 8);
   11695 		if ((val & SPI_SR_RDY) == 0)
   11696 			break;
   11697 	}
   11698 	if (usec >= SPI_MAX_RETRIES) {
   11699 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11700 		return -1;
   11701 	}
   11702 	return 0;
   11703 }
   11704 
   11705 /*
   11706  * wm_nvm_read_spi:
   11707  *
   11708  *	Read a work from the EEPROM using the SPI protocol.
   11709  */
   11710 static int
   11711 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11712 {
   11713 	uint32_t reg, val;
   11714 	int i;
   11715 	uint8_t opc;
   11716 	int rv = 0;
   11717 
   11718 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11719 		device_xname(sc->sc_dev), __func__));
   11720 
   11721 	if (sc->nvm.acquire(sc) != 0)
   11722 		return -1;
   11723 
   11724 	/* Clear SK and CS. */
   11725 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11726 	CSR_WRITE(sc, WMREG_EECD, reg);
   11727 	CSR_WRITE_FLUSH(sc);
   11728 	delay(2);
   11729 
   11730 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11731 		goto out;
   11732 
   11733 	/* Toggle CS to flush commands. */
   11734 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11735 	CSR_WRITE_FLUSH(sc);
   11736 	delay(2);
   11737 	CSR_WRITE(sc, WMREG_EECD, reg);
   11738 	CSR_WRITE_FLUSH(sc);
   11739 	delay(2);
   11740 
   11741 	opc = SPI_OPC_READ;
   11742 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11743 		opc |= SPI_OPC_A8;
   11744 
   11745 	wm_eeprom_sendbits(sc, opc, 8);
   11746 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11747 
   11748 	for (i = 0; i < wordcnt; i++) {
   11749 		wm_eeprom_recvbits(sc, &val, 16);
   11750 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11751 	}
   11752 
   11753 	/* Raise CS and clear SK. */
   11754 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11755 	CSR_WRITE(sc, WMREG_EECD, reg);
   11756 	CSR_WRITE_FLUSH(sc);
   11757 	delay(2);
   11758 
   11759 out:
   11760 	sc->nvm.release(sc);
   11761 	return rv;
   11762 }
   11763 
   11764 /* Using with EERD */
   11765 
   11766 static int
   11767 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11768 {
   11769 	uint32_t attempts = 100000;
   11770 	uint32_t i, reg = 0;
   11771 	int32_t done = -1;
   11772 
   11773 	for (i = 0; i < attempts; i++) {
   11774 		reg = CSR_READ(sc, rw);
   11775 
   11776 		if (reg & EERD_DONE) {
   11777 			done = 0;
   11778 			break;
   11779 		}
   11780 		delay(5);
   11781 	}
   11782 
   11783 	return done;
   11784 }
   11785 
   11786 static int
   11787 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11788     uint16_t *data)
   11789 {
   11790 	int i, eerd = 0;
   11791 	int rv = 0;
   11792 
   11793 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11794 		device_xname(sc->sc_dev), __func__));
   11795 
   11796 	if (sc->nvm.acquire(sc) != 0)
   11797 		return -1;
   11798 
   11799 	for (i = 0; i < wordcnt; i++) {
   11800 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11801 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11802 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11803 		if (rv != 0) {
   11804 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11805 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11806 			break;
   11807 		}
   11808 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11809 	}
   11810 
   11811 	sc->nvm.release(sc);
   11812 	return rv;
   11813 }
   11814 
   11815 /* Flash */
   11816 
   11817 static int
   11818 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11819 {
   11820 	uint32_t eecd;
   11821 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11822 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11823 	uint8_t sig_byte = 0;
   11824 
   11825 	switch (sc->sc_type) {
   11826 	case WM_T_PCH_SPT:
   11827 		/*
   11828 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11829 		 * sector valid bits from the NVM.
   11830 		 */
   11831 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11832 		if ((*bank == 0) || (*bank == 1)) {
   11833 			aprint_error_dev(sc->sc_dev,
   11834 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11835 				*bank);
   11836 			return -1;
   11837 		} else {
   11838 			*bank = *bank - 2;
   11839 			return 0;
   11840 		}
   11841 	case WM_T_ICH8:
   11842 	case WM_T_ICH9:
   11843 		eecd = CSR_READ(sc, WMREG_EECD);
   11844 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11845 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11846 			return 0;
   11847 		}
   11848 		/* FALLTHROUGH */
   11849 	default:
   11850 		/* Default to 0 */
   11851 		*bank = 0;
   11852 
   11853 		/* Check bank 0 */
   11854 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11855 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11856 			*bank = 0;
   11857 			return 0;
   11858 		}
   11859 
   11860 		/* Check bank 1 */
   11861 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11862 		    &sig_byte);
   11863 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11864 			*bank = 1;
   11865 			return 0;
   11866 		}
   11867 	}
   11868 
   11869 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11870 		device_xname(sc->sc_dev)));
   11871 	return -1;
   11872 }
   11873 
   11874 /******************************************************************************
   11875  * This function does initial flash setup so that a new read/write/erase cycle
   11876  * can be started.
   11877  *
   11878  * sc - The pointer to the hw structure
   11879  ****************************************************************************/
   11880 static int32_t
   11881 wm_ich8_cycle_init(struct wm_softc *sc)
   11882 {
   11883 	uint16_t hsfsts;
   11884 	int32_t error = 1;
   11885 	int32_t i     = 0;
   11886 
   11887 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11888 
   11889 	/* May be check the Flash Des Valid bit in Hw status */
   11890 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11891 		return error;
   11892 	}
   11893 
   11894 	/* Clear FCERR in Hw status by writing 1 */
   11895 	/* Clear DAEL in Hw status by writing a 1 */
   11896 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11897 
   11898 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11899 
   11900 	/*
   11901 	 * Either we should have a hardware SPI cycle in progress bit to check
   11902 	 * against, in order to start a new cycle or FDONE bit should be
   11903 	 * changed in the hardware so that it is 1 after harware reset, which
   11904 	 * can then be used as an indication whether a cycle is in progress or
   11905 	 * has been completed .. we should also have some software semaphore
   11906 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   11907 	 * threads access to those bits can be sequentiallized or a way so that
   11908 	 * 2 threads dont start the cycle at the same time
   11909 	 */
   11910 
   11911 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11912 		/*
   11913 		 * There is no cycle running at present, so we can start a
   11914 		 * cycle
   11915 		 */
   11916 
   11917 		/* Begin by setting Flash Cycle Done. */
   11918 		hsfsts |= HSFSTS_DONE;
   11919 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11920 		error = 0;
   11921 	} else {
   11922 		/*
   11923 		 * otherwise poll for sometime so the current cycle has a
   11924 		 * chance to end before giving up.
   11925 		 */
   11926 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11927 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11928 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11929 				error = 0;
   11930 				break;
   11931 			}
   11932 			delay(1);
   11933 		}
   11934 		if (error == 0) {
   11935 			/*
   11936 			 * Successful in waiting for previous cycle to timeout,
   11937 			 * now set the Flash Cycle Done.
   11938 			 */
   11939 			hsfsts |= HSFSTS_DONE;
   11940 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11941 		}
   11942 	}
   11943 	return error;
   11944 }
   11945 
   11946 /******************************************************************************
   11947  * This function starts a flash cycle and waits for its completion
   11948  *
   11949  * sc - The pointer to the hw structure
   11950  ****************************************************************************/
   11951 static int32_t
   11952 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11953 {
   11954 	uint16_t hsflctl;
   11955 	uint16_t hsfsts;
   11956 	int32_t error = 1;
   11957 	uint32_t i = 0;
   11958 
   11959 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11960 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11961 	hsflctl |= HSFCTL_GO;
   11962 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11963 
   11964 	/* Wait till FDONE bit is set to 1 */
   11965 	do {
   11966 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11967 		if (hsfsts & HSFSTS_DONE)
   11968 			break;
   11969 		delay(1);
   11970 		i++;
   11971 	} while (i < timeout);
   11972 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   11973 		error = 0;
   11974 
   11975 	return error;
   11976 }
   11977 
   11978 /******************************************************************************
   11979  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11980  *
   11981  * sc - The pointer to the hw structure
   11982  * index - The index of the byte or word to read.
   11983  * size - Size of data to read, 1=byte 2=word, 4=dword
   11984  * data - Pointer to the word to store the value read.
   11985  *****************************************************************************/
   11986 static int32_t
   11987 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11988     uint32_t size, uint32_t *data)
   11989 {
   11990 	uint16_t hsfsts;
   11991 	uint16_t hsflctl;
   11992 	uint32_t flash_linear_address;
   11993 	uint32_t flash_data = 0;
   11994 	int32_t error = 1;
   11995 	int32_t count = 0;
   11996 
   11997 	if (size < 1  || size > 4 || data == 0x0 ||
   11998 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11999 		return error;
   12000 
   12001 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12002 	    sc->sc_ich8_flash_base;
   12003 
   12004 	do {
   12005 		delay(1);
   12006 		/* Steps */
   12007 		error = wm_ich8_cycle_init(sc);
   12008 		if (error)
   12009 			break;
   12010 
   12011 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12012 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   12013 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12014 		    & HSFCTL_BCOUNT_MASK;
   12015 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12016 		if (sc->sc_type == WM_T_PCH_SPT) {
   12017 			/*
   12018 			 * In SPT, This register is in Lan memory space, not
   12019 			 * flash. Therefore, only 32 bit access is supported.
   12020 			 */
   12021 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12022 			    (uint32_t)hsflctl);
   12023 		} else
   12024 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12025 
   12026 		/*
   12027 		 * Write the last 24 bits of index into Flash Linear address
   12028 		 * field in Flash Address
   12029 		 */
   12030 		/* TODO: TBD maybe check the index against the size of flash */
   12031 
   12032 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12033 
   12034 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12035 
   12036 		/*
   12037 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   12038 		 * the whole sequence a few more times, else read in (shift in)
   12039 		 * the Flash Data0, the order is least significant byte first
   12040 		 * msb to lsb
   12041 		 */
   12042 		if (error == 0) {
   12043 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12044 			if (size == 1)
   12045 				*data = (uint8_t)(flash_data & 0x000000FF);
   12046 			else if (size == 2)
   12047 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12048 			else if (size == 4)
   12049 				*data = (uint32_t)flash_data;
   12050 			break;
   12051 		} else {
   12052 			/*
   12053 			 * If we've gotten here, then things are probably
   12054 			 * completely hosed, but if the error condition is
   12055 			 * detected, it won't hurt to give it another try...
   12056 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12057 			 */
   12058 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12059 			if (hsfsts & HSFSTS_ERR) {
   12060 				/* Repeat for some time before giving up. */
   12061 				continue;
   12062 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12063 				break;
   12064 		}
   12065 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12066 
   12067 	return error;
   12068 }
   12069 
   12070 /******************************************************************************
   12071  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12072  *
   12073  * sc - pointer to wm_hw structure
   12074  * index - The index of the byte to read.
   12075  * data - Pointer to a byte to store the value read.
   12076  *****************************************************************************/
   12077 static int32_t
   12078 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12079 {
   12080 	int32_t status;
   12081 	uint32_t word = 0;
   12082 
   12083 	status = wm_read_ich8_data(sc, index, 1, &word);
   12084 	if (status == 0)
   12085 		*data = (uint8_t)word;
   12086 	else
   12087 		*data = 0;
   12088 
   12089 	return status;
   12090 }
   12091 
   12092 /******************************************************************************
   12093  * Reads a word from the NVM using the ICH8 flash access registers.
   12094  *
   12095  * sc - pointer to wm_hw structure
   12096  * index - The starting byte index of the word to read.
   12097  * data - Pointer to a word to store the value read.
   12098  *****************************************************************************/
   12099 static int32_t
   12100 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12101 {
   12102 	int32_t status;
   12103 	uint32_t word = 0;
   12104 
   12105 	status = wm_read_ich8_data(sc, index, 2, &word);
   12106 	if (status == 0)
   12107 		*data = (uint16_t)word;
   12108 	else
   12109 		*data = 0;
   12110 
   12111 	return status;
   12112 }
   12113 
   12114 /******************************************************************************
   12115  * Reads a dword from the NVM using the ICH8 flash access registers.
   12116  *
   12117  * sc - pointer to wm_hw structure
   12118  * index - The starting byte index of the word to read.
   12119  * data - Pointer to a word to store the value read.
   12120  *****************************************************************************/
   12121 static int32_t
   12122 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12123 {
   12124 	int32_t status;
   12125 
   12126 	status = wm_read_ich8_data(sc, index, 4, data);
   12127 	return status;
   12128 }
   12129 
   12130 /******************************************************************************
   12131  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12132  * register.
   12133  *
   12134  * sc - Struct containing variables accessed by shared code
   12135  * offset - offset of word in the EEPROM to read
   12136  * data - word read from the EEPROM
   12137  * words - number of words to read
   12138  *****************************************************************************/
   12139 static int
   12140 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12141 {
   12142 	int32_t  rv = 0;
   12143 	uint32_t flash_bank = 0;
   12144 	uint32_t act_offset = 0;
   12145 	uint32_t bank_offset = 0;
   12146 	uint16_t word = 0;
   12147 	uint16_t i = 0;
   12148 
   12149 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12150 		device_xname(sc->sc_dev), __func__));
   12151 
   12152 	if (sc->nvm.acquire(sc) != 0)
   12153 		return -1;
   12154 
   12155 	/*
   12156 	 * We need to know which is the valid flash bank.  In the event
   12157 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12158 	 * managing flash_bank.  So it cannot be trusted and needs
   12159 	 * to be updated with each read.
   12160 	 */
   12161 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12162 	if (rv) {
   12163 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12164 			device_xname(sc->sc_dev)));
   12165 		flash_bank = 0;
   12166 	}
   12167 
   12168 	/*
   12169 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12170 	 * size
   12171 	 */
   12172 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12173 
   12174 	for (i = 0; i < words; i++) {
   12175 		/* The NVM part needs a byte offset, hence * 2 */
   12176 		act_offset = bank_offset + ((offset + i) * 2);
   12177 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12178 		if (rv) {
   12179 			aprint_error_dev(sc->sc_dev,
   12180 			    "%s: failed to read NVM\n", __func__);
   12181 			break;
   12182 		}
   12183 		data[i] = word;
   12184 	}
   12185 
   12186 	sc->nvm.release(sc);
   12187 	return rv;
   12188 }
   12189 
   12190 /******************************************************************************
   12191  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12192  * register.
   12193  *
   12194  * sc - Struct containing variables accessed by shared code
   12195  * offset - offset of word in the EEPROM to read
   12196  * data - word read from the EEPROM
   12197  * words - number of words to read
   12198  *****************************************************************************/
   12199 static int
   12200 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12201 {
   12202 	int32_t  rv = 0;
   12203 	uint32_t flash_bank = 0;
   12204 	uint32_t act_offset = 0;
   12205 	uint32_t bank_offset = 0;
   12206 	uint32_t dword = 0;
   12207 	uint16_t i = 0;
   12208 
   12209 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12210 		device_xname(sc->sc_dev), __func__));
   12211 
   12212 	if (sc->nvm.acquire(sc) != 0)
   12213 		return -1;
   12214 
   12215 	/*
   12216 	 * We need to know which is the valid flash bank.  In the event
   12217 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12218 	 * managing flash_bank.  So it cannot be trusted and needs
   12219 	 * to be updated with each read.
   12220 	 */
   12221 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12222 	if (rv) {
   12223 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12224 			device_xname(sc->sc_dev)));
   12225 		flash_bank = 0;
   12226 	}
   12227 
   12228 	/*
   12229 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12230 	 * size
   12231 	 */
   12232 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12233 
   12234 	for (i = 0; i < words; i++) {
   12235 		/* The NVM part needs a byte offset, hence * 2 */
   12236 		act_offset = bank_offset + ((offset + i) * 2);
   12237 		/* but we must read dword aligned, so mask ... */
   12238 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12239 		if (rv) {
   12240 			aprint_error_dev(sc->sc_dev,
   12241 			    "%s: failed to read NVM\n", __func__);
   12242 			break;
   12243 		}
   12244 		/* ... and pick out low or high word */
   12245 		if ((act_offset & 0x2) == 0)
   12246 			data[i] = (uint16_t)(dword & 0xFFFF);
   12247 		else
   12248 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12249 	}
   12250 
   12251 	sc->nvm.release(sc);
   12252 	return rv;
   12253 }
   12254 
   12255 /* iNVM */
   12256 
   12257 static int
   12258 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12259 {
   12260 	int32_t  rv = 0;
   12261 	uint32_t invm_dword;
   12262 	uint16_t i;
   12263 	uint8_t record_type, word_address;
   12264 
   12265 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12266 		device_xname(sc->sc_dev), __func__));
   12267 
   12268 	for (i = 0; i < INVM_SIZE; i++) {
   12269 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12270 		/* Get record type */
   12271 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12272 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12273 			break;
   12274 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12275 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12276 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12277 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12278 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12279 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12280 			if (word_address == address) {
   12281 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12282 				rv = 0;
   12283 				break;
   12284 			}
   12285 		}
   12286 	}
   12287 
   12288 	return rv;
   12289 }
   12290 
   12291 static int
   12292 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12293 {
   12294 	int rv = 0;
   12295 	int i;
   12296 
   12297 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12298 		device_xname(sc->sc_dev), __func__));
   12299 
   12300 	if (sc->nvm.acquire(sc) != 0)
   12301 		return -1;
   12302 
   12303 	for (i = 0; i < words; i++) {
   12304 		switch (offset + i) {
   12305 		case NVM_OFF_MACADDR:
   12306 		case NVM_OFF_MACADDR1:
   12307 		case NVM_OFF_MACADDR2:
   12308 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12309 			if (rv != 0) {
   12310 				data[i] = 0xffff;
   12311 				rv = -1;
   12312 			}
   12313 			break;
   12314 		case NVM_OFF_CFG2:
   12315 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12316 			if (rv != 0) {
   12317 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12318 				rv = 0;
   12319 			}
   12320 			break;
   12321 		case NVM_OFF_CFG4:
   12322 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12323 			if (rv != 0) {
   12324 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12325 				rv = 0;
   12326 			}
   12327 			break;
   12328 		case NVM_OFF_LED_1_CFG:
   12329 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12330 			if (rv != 0) {
   12331 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12332 				rv = 0;
   12333 			}
   12334 			break;
   12335 		case NVM_OFF_LED_0_2_CFG:
   12336 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12337 			if (rv != 0) {
   12338 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12339 				rv = 0;
   12340 			}
   12341 			break;
   12342 		case NVM_OFF_ID_LED_SETTINGS:
   12343 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12344 			if (rv != 0) {
   12345 				*data = ID_LED_RESERVED_FFFF;
   12346 				rv = 0;
   12347 			}
   12348 			break;
   12349 		default:
   12350 			DPRINTF(WM_DEBUG_NVM,
   12351 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12352 			*data = NVM_RESERVED_WORD;
   12353 			break;
   12354 		}
   12355 	}
   12356 
   12357 	sc->nvm.release(sc);
   12358 	return rv;
   12359 }
   12360 
   12361 /* Lock, detecting NVM type, validate checksum, version and read */
   12362 
   12363 static int
   12364 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12365 {
   12366 	uint32_t eecd = 0;
   12367 
   12368 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12369 	    || sc->sc_type == WM_T_82583) {
   12370 		eecd = CSR_READ(sc, WMREG_EECD);
   12371 
   12372 		/* Isolate bits 15 & 16 */
   12373 		eecd = ((eecd >> 15) & 0x03);
   12374 
   12375 		/* If both bits are set, device is Flash type */
   12376 		if (eecd == 0x03)
   12377 			return 0;
   12378 	}
   12379 	return 1;
   12380 }
   12381 
   12382 static int
   12383 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12384 {
   12385 	uint32_t eec;
   12386 
   12387 	eec = CSR_READ(sc, WMREG_EEC);
   12388 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12389 		return 1;
   12390 
   12391 	return 0;
   12392 }
   12393 
   12394 /*
   12395  * wm_nvm_validate_checksum
   12396  *
   12397  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12398  */
   12399 static int
   12400 wm_nvm_validate_checksum(struct wm_softc *sc)
   12401 {
   12402 	uint16_t checksum;
   12403 	uint16_t eeprom_data;
   12404 #ifdef WM_DEBUG
   12405 	uint16_t csum_wordaddr, valid_checksum;
   12406 #endif
   12407 	int i;
   12408 
   12409 	checksum = 0;
   12410 
   12411 	/* Don't check for I211 */
   12412 	if (sc->sc_type == WM_T_I211)
   12413 		return 0;
   12414 
   12415 #ifdef WM_DEBUG
   12416 	if (sc->sc_type == WM_T_PCH_LPT) {
   12417 		csum_wordaddr = NVM_OFF_COMPAT;
   12418 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12419 	} else {
   12420 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12421 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12422 	}
   12423 
   12424 	/* Dump EEPROM image for debug */
   12425 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12426 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12427 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12428 		/* XXX PCH_SPT? */
   12429 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12430 		if ((eeprom_data & valid_checksum) == 0) {
   12431 			DPRINTF(WM_DEBUG_NVM,
   12432 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12433 				device_xname(sc->sc_dev), eeprom_data,
   12434 				    valid_checksum));
   12435 		}
   12436 	}
   12437 
   12438 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12439 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12440 		for (i = 0; i < NVM_SIZE; i++) {
   12441 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12442 				printf("XXXX ");
   12443 			else
   12444 				printf("%04hx ", eeprom_data);
   12445 			if (i % 8 == 7)
   12446 				printf("\n");
   12447 		}
   12448 	}
   12449 
   12450 #endif /* WM_DEBUG */
   12451 
   12452 	for (i = 0; i < NVM_SIZE; i++) {
   12453 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12454 			return 1;
   12455 		checksum += eeprom_data;
   12456 	}
   12457 
   12458 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12459 #ifdef WM_DEBUG
   12460 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12461 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12462 #endif
   12463 	}
   12464 
   12465 	return 0;
   12466 }
   12467 
   12468 static void
   12469 wm_nvm_version_invm(struct wm_softc *sc)
   12470 {
   12471 	uint32_t dword;
   12472 
   12473 	/*
   12474 	 * Linux's code to decode version is very strange, so we don't
   12475 	 * obey that algorithm and just use word 61 as the document.
   12476 	 * Perhaps it's not perfect though...
   12477 	 *
   12478 	 * Example:
   12479 	 *
   12480 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12481 	 */
   12482 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12483 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12484 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12485 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12486 }
   12487 
   12488 static void
   12489 wm_nvm_version(struct wm_softc *sc)
   12490 {
   12491 	uint16_t major, minor, build, patch;
   12492 	uint16_t uid0, uid1;
   12493 	uint16_t nvm_data;
   12494 	uint16_t off;
   12495 	bool check_version = false;
   12496 	bool check_optionrom = false;
   12497 	bool have_build = false;
   12498 	bool have_uid = true;
   12499 
   12500 	/*
   12501 	 * Version format:
   12502 	 *
   12503 	 * XYYZ
   12504 	 * X0YZ
   12505 	 * X0YY
   12506 	 *
   12507 	 * Example:
   12508 	 *
   12509 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12510 	 *	82571	0x50a6	5.10.6?
   12511 	 *	82572	0x506a	5.6.10?
   12512 	 *	82572EI	0x5069	5.6.9?
   12513 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12514 	 *		0x2013	2.1.3?
   12515 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
   12516 	 */
   12517 
   12518 	/*
   12519 	 * XXX
   12520 	 * Qemu's e1000e emulation (82574L)'s SPI has only 64 words.
   12521 	 * I've never seen on real 82574 hardware with such small SPI ROM.
   12522 	 */
   12523 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12524 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12525 		have_uid = false;
   12526 
   12527 	switch (sc->sc_type) {
   12528 	case WM_T_82571:
   12529 	case WM_T_82572:
   12530 	case WM_T_82574:
   12531 	case WM_T_82583:
   12532 		check_version = true;
   12533 		check_optionrom = true;
   12534 		have_build = true;
   12535 		break;
   12536 	case WM_T_82575:
   12537 	case WM_T_82576:
   12538 	case WM_T_82580:
   12539 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12540 			check_version = true;
   12541 		break;
   12542 	case WM_T_I211:
   12543 		wm_nvm_version_invm(sc);
   12544 		have_uid = false;
   12545 		goto printver;
   12546 	case WM_T_I210:
   12547 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12548 			wm_nvm_version_invm(sc);
   12549 			have_uid = false;
   12550 			goto printver;
   12551 		}
   12552 		/* FALLTHROUGH */
   12553 	case WM_T_I350:
   12554 	case WM_T_I354:
   12555 		check_version = true;
   12556 		check_optionrom = true;
   12557 		break;
   12558 	default:
   12559 		return;
   12560 	}
   12561 	if (check_version
   12562 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12563 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12564 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12565 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12566 			build = nvm_data & NVM_BUILD_MASK;
   12567 			have_build = true;
   12568 		} else
   12569 			minor = nvm_data & 0x00ff;
   12570 
   12571 		/* Decimal */
   12572 		minor = (minor / 16) * 10 + (minor % 16);
   12573 		sc->sc_nvm_ver_major = major;
   12574 		sc->sc_nvm_ver_minor = minor;
   12575 
   12576 printver:
   12577 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12578 		    sc->sc_nvm_ver_minor);
   12579 		if (have_build) {
   12580 			sc->sc_nvm_ver_build = build;
   12581 			aprint_verbose(".%d", build);
   12582 		}
   12583 	}
   12584 
   12585 	/* Assume the Option ROM area is at avove NVM_SIZE */
   12586 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12587 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12588 		/* Option ROM Version */
   12589 		if ((off != 0x0000) && (off != 0xffff)) {
   12590 			int rv;
   12591 
   12592 			off += NVM_COMBO_VER_OFF;
   12593 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12594 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12595 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12596 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12597 				/* 16bits */
   12598 				major = uid0 >> 8;
   12599 				build = (uid0 << 8) | (uid1 >> 8);
   12600 				patch = uid1 & 0x00ff;
   12601 				aprint_verbose(", option ROM Version %d.%d.%d",
   12602 				    major, build, patch);
   12603 			}
   12604 		}
   12605 	}
   12606 
   12607 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12608 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12609 }
   12610 
   12611 /*
   12612  * wm_nvm_read:
   12613  *
   12614  *	Read data from the serial EEPROM.
   12615  */
   12616 static int
   12617 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12618 {
   12619 	int rv;
   12620 
   12621 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12622 		device_xname(sc->sc_dev), __func__));
   12623 
   12624 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12625 		return -1;
   12626 
   12627 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12628 
   12629 	return rv;
   12630 }
   12631 
   12632 /*
   12633  * Hardware semaphores.
   12634  * Very complexed...
   12635  */
   12636 
   12637 static int
   12638 wm_get_null(struct wm_softc *sc)
   12639 {
   12640 
   12641 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12642 		device_xname(sc->sc_dev), __func__));
   12643 	return 0;
   12644 }
   12645 
   12646 static void
   12647 wm_put_null(struct wm_softc *sc)
   12648 {
   12649 
   12650 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12651 		device_xname(sc->sc_dev), __func__));
   12652 	return;
   12653 }
   12654 
   12655 static int
   12656 wm_get_eecd(struct wm_softc *sc)
   12657 {
   12658 	uint32_t reg;
   12659 	int x;
   12660 
   12661 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12662 		device_xname(sc->sc_dev), __func__));
   12663 
   12664 	reg = CSR_READ(sc, WMREG_EECD);
   12665 
   12666 	/* Request EEPROM access. */
   12667 	reg |= EECD_EE_REQ;
   12668 	CSR_WRITE(sc, WMREG_EECD, reg);
   12669 
   12670 	/* ..and wait for it to be granted. */
   12671 	for (x = 0; x < 1000; x++) {
   12672 		reg = CSR_READ(sc, WMREG_EECD);
   12673 		if (reg & EECD_EE_GNT)
   12674 			break;
   12675 		delay(5);
   12676 	}
   12677 	if ((reg & EECD_EE_GNT) == 0) {
   12678 		aprint_error_dev(sc->sc_dev,
   12679 		    "could not acquire EEPROM GNT\n");
   12680 		reg &= ~EECD_EE_REQ;
   12681 		CSR_WRITE(sc, WMREG_EECD, reg);
   12682 		return -1;
   12683 	}
   12684 
   12685 	return 0;
   12686 }
   12687 
   12688 static void
   12689 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12690 {
   12691 
   12692 	*eecd |= EECD_SK;
   12693 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12694 	CSR_WRITE_FLUSH(sc);
   12695 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12696 		delay(1);
   12697 	else
   12698 		delay(50);
   12699 }
   12700 
   12701 static void
   12702 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12703 {
   12704 
   12705 	*eecd &= ~EECD_SK;
   12706 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12707 	CSR_WRITE_FLUSH(sc);
   12708 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12709 		delay(1);
   12710 	else
   12711 		delay(50);
   12712 }
   12713 
   12714 static void
   12715 wm_put_eecd(struct wm_softc *sc)
   12716 {
   12717 	uint32_t reg;
   12718 
   12719 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12720 		device_xname(sc->sc_dev), __func__));
   12721 
   12722 	/* Stop nvm */
   12723 	reg = CSR_READ(sc, WMREG_EECD);
   12724 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12725 		/* Pull CS high */
   12726 		reg |= EECD_CS;
   12727 		wm_nvm_eec_clock_lower(sc, &reg);
   12728 	} else {
   12729 		/* CS on Microwire is active-high */
   12730 		reg &= ~(EECD_CS | EECD_DI);
   12731 		CSR_WRITE(sc, WMREG_EECD, reg);
   12732 		wm_nvm_eec_clock_raise(sc, &reg);
   12733 		wm_nvm_eec_clock_lower(sc, &reg);
   12734 	}
   12735 
   12736 	reg = CSR_READ(sc, WMREG_EECD);
   12737 	reg &= ~EECD_EE_REQ;
   12738 	CSR_WRITE(sc, WMREG_EECD, reg);
   12739 
   12740 	return;
   12741 }
   12742 
   12743 /*
   12744  * Get hardware semaphore.
   12745  * Same as e1000_get_hw_semaphore_generic()
   12746  */
   12747 static int
   12748 wm_get_swsm_semaphore(struct wm_softc *sc)
   12749 {
   12750 	int32_t timeout;
   12751 	uint32_t swsm;
   12752 
   12753 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12754 		device_xname(sc->sc_dev), __func__));
   12755 	KASSERT(sc->sc_nvm_wordsize > 0);
   12756 
   12757 retry:
   12758 	/* Get the SW semaphore. */
   12759 	timeout = sc->sc_nvm_wordsize + 1;
   12760 	while (timeout) {
   12761 		swsm = CSR_READ(sc, WMREG_SWSM);
   12762 
   12763 		if ((swsm & SWSM_SMBI) == 0)
   12764 			break;
   12765 
   12766 		delay(50);
   12767 		timeout--;
   12768 	}
   12769 
   12770 	if (timeout == 0) {
   12771 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12772 			/*
   12773 			 * In rare circumstances, the SW semaphore may already
   12774 			 * be held unintentionally. Clear the semaphore once
   12775 			 * before giving up.
   12776 			 */
   12777 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12778 			wm_put_swsm_semaphore(sc);
   12779 			goto retry;
   12780 		}
   12781 		aprint_error_dev(sc->sc_dev,
   12782 		    "could not acquire SWSM SMBI\n");
   12783 		return 1;
   12784 	}
   12785 
   12786 	/* Get the FW semaphore. */
   12787 	timeout = sc->sc_nvm_wordsize + 1;
   12788 	while (timeout) {
   12789 		swsm = CSR_READ(sc, WMREG_SWSM);
   12790 		swsm |= SWSM_SWESMBI;
   12791 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12792 		/* If we managed to set the bit we got the semaphore. */
   12793 		swsm = CSR_READ(sc, WMREG_SWSM);
   12794 		if (swsm & SWSM_SWESMBI)
   12795 			break;
   12796 
   12797 		delay(50);
   12798 		timeout--;
   12799 	}
   12800 
   12801 	if (timeout == 0) {
   12802 		aprint_error_dev(sc->sc_dev,
   12803 		    "could not acquire SWSM SWESMBI\n");
   12804 		/* Release semaphores */
   12805 		wm_put_swsm_semaphore(sc);
   12806 		return 1;
   12807 	}
   12808 	return 0;
   12809 }
   12810 
   12811 /*
   12812  * Put hardware semaphore.
   12813  * Same as e1000_put_hw_semaphore_generic()
   12814  */
   12815 static void
   12816 wm_put_swsm_semaphore(struct wm_softc *sc)
   12817 {
   12818 	uint32_t swsm;
   12819 
   12820 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12821 		device_xname(sc->sc_dev), __func__));
   12822 
   12823 	swsm = CSR_READ(sc, WMREG_SWSM);
   12824 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12825 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12826 }
   12827 
   12828 /*
   12829  * Get SW/FW semaphore.
   12830  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12831  */
   12832 static int
   12833 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12834 {
   12835 	uint32_t swfw_sync;
   12836 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12837 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12838 	int timeout;
   12839 
   12840 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12841 		device_xname(sc->sc_dev), __func__));
   12842 
   12843 	if (sc->sc_type == WM_T_80003)
   12844 		timeout = 50;
   12845 	else
   12846 		timeout = 200;
   12847 
   12848 	for (timeout = 0; timeout < 200; timeout++) {
   12849 		if (wm_get_swsm_semaphore(sc)) {
   12850 			aprint_error_dev(sc->sc_dev,
   12851 			    "%s: failed to get semaphore\n",
   12852 			    __func__);
   12853 			return 1;
   12854 		}
   12855 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12856 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12857 			swfw_sync |= swmask;
   12858 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12859 			wm_put_swsm_semaphore(sc);
   12860 			return 0;
   12861 		}
   12862 		wm_put_swsm_semaphore(sc);
   12863 		delay(5000);
   12864 	}
   12865 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12866 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12867 	return 1;
   12868 }
   12869 
   12870 static void
   12871 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12872 {
   12873 	uint32_t swfw_sync;
   12874 
   12875 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12876 		device_xname(sc->sc_dev), __func__));
   12877 
   12878 	while (wm_get_swsm_semaphore(sc) != 0)
   12879 		continue;
   12880 
   12881 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12882 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12883 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12884 
   12885 	wm_put_swsm_semaphore(sc);
   12886 }
   12887 
   12888 static int
   12889 wm_get_nvm_80003(struct wm_softc *sc)
   12890 {
   12891 	int rv;
   12892 
   12893 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12894 		device_xname(sc->sc_dev), __func__));
   12895 
   12896 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12897 		aprint_error_dev(sc->sc_dev,
   12898 		    "%s: failed to get semaphore(SWFW)\n",
   12899 		    __func__);
   12900 		return rv;
   12901 	}
   12902 
   12903 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12904 	    && (rv = wm_get_eecd(sc)) != 0) {
   12905 		aprint_error_dev(sc->sc_dev,
   12906 		    "%s: failed to get semaphore(EECD)\n",
   12907 		    __func__);
   12908 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12909 		return rv;
   12910 	}
   12911 
   12912 	return 0;
   12913 }
   12914 
   12915 static void
   12916 wm_put_nvm_80003(struct wm_softc *sc)
   12917 {
   12918 
   12919 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12920 		device_xname(sc->sc_dev), __func__));
   12921 
   12922 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12923 		wm_put_eecd(sc);
   12924 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12925 }
   12926 
   12927 static int
   12928 wm_get_nvm_82571(struct wm_softc *sc)
   12929 {
   12930 	int rv;
   12931 
   12932 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12933 		device_xname(sc->sc_dev), __func__));
   12934 
   12935 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12936 		return rv;
   12937 
   12938 	switch (sc->sc_type) {
   12939 	case WM_T_82573:
   12940 		break;
   12941 	default:
   12942 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12943 			rv = wm_get_eecd(sc);
   12944 		break;
   12945 	}
   12946 
   12947 	if (rv != 0) {
   12948 		aprint_error_dev(sc->sc_dev,
   12949 		    "%s: failed to get semaphore\n",
   12950 		    __func__);
   12951 		wm_put_swsm_semaphore(sc);
   12952 	}
   12953 
   12954 	return rv;
   12955 }
   12956 
   12957 static void
   12958 wm_put_nvm_82571(struct wm_softc *sc)
   12959 {
   12960 
   12961 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12962 		device_xname(sc->sc_dev), __func__));
   12963 
   12964 	switch (sc->sc_type) {
   12965 	case WM_T_82573:
   12966 		break;
   12967 	default:
   12968 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12969 			wm_put_eecd(sc);
   12970 		break;
   12971 	}
   12972 
   12973 	wm_put_swsm_semaphore(sc);
   12974 }
   12975 
   12976 static int
   12977 wm_get_phy_82575(struct wm_softc *sc)
   12978 {
   12979 
   12980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12981 		device_xname(sc->sc_dev), __func__));
   12982 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12983 }
   12984 
   12985 static void
   12986 wm_put_phy_82575(struct wm_softc *sc)
   12987 {
   12988 
   12989 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12990 		device_xname(sc->sc_dev), __func__));
   12991 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12992 }
   12993 
   12994 static int
   12995 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12996 {
   12997 	uint32_t ext_ctrl;
   12998 	int timeout = 200;
   12999 
   13000 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13001 		device_xname(sc->sc_dev), __func__));
   13002 
   13003 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13004 	for (timeout = 0; timeout < 200; timeout++) {
   13005 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13006 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13007 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13008 
   13009 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13010 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13011 			return 0;
   13012 		delay(5000);
   13013 	}
   13014 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13015 	    device_xname(sc->sc_dev), ext_ctrl);
   13016 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13017 	return 1;
   13018 }
   13019 
   13020 static void
   13021 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13022 {
   13023 	uint32_t ext_ctrl;
   13024 
   13025 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13026 		device_xname(sc->sc_dev), __func__));
   13027 
   13028 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13029 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13030 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13031 
   13032 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13033 }
   13034 
   13035 static int
   13036 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13037 {
   13038 	uint32_t ext_ctrl;
   13039 	int timeout;
   13040 
   13041 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13042 		device_xname(sc->sc_dev), __func__));
   13043 	mutex_enter(sc->sc_ich_phymtx);
   13044 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13045 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13046 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13047 			break;
   13048 		delay(1000);
   13049 	}
   13050 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13051 		printf("%s: SW has already locked the resource\n",
   13052 		    device_xname(sc->sc_dev));
   13053 		goto out;
   13054 	}
   13055 
   13056 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13057 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13058 	for (timeout = 0; timeout < 1000; timeout++) {
   13059 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13060 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13061 			break;
   13062 		delay(1000);
   13063 	}
   13064 	if (timeout >= 1000) {
   13065 		printf("%s: failed to acquire semaphore\n",
   13066 		    device_xname(sc->sc_dev));
   13067 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13068 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13069 		goto out;
   13070 	}
   13071 	return 0;
   13072 
   13073 out:
   13074 	mutex_exit(sc->sc_ich_phymtx);
   13075 	return 1;
   13076 }
   13077 
   13078 static void
   13079 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13080 {
   13081 	uint32_t ext_ctrl;
   13082 
   13083 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13084 		device_xname(sc->sc_dev), __func__));
   13085 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13086 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13087 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13088 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13089 	} else {
   13090 		printf("%s: Semaphore unexpectedly released\n",
   13091 		    device_xname(sc->sc_dev));
   13092 	}
   13093 
   13094 	mutex_exit(sc->sc_ich_phymtx);
   13095 }
   13096 
   13097 static int
   13098 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13099 {
   13100 
   13101 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13102 		device_xname(sc->sc_dev), __func__));
   13103 	mutex_enter(sc->sc_ich_nvmmtx);
   13104 
   13105 	return 0;
   13106 }
   13107 
   13108 static void
   13109 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13110 {
   13111 
   13112 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13113 		device_xname(sc->sc_dev), __func__));
   13114 	mutex_exit(sc->sc_ich_nvmmtx);
   13115 }
   13116 
   13117 static int
   13118 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13119 {
   13120 	int i = 0;
   13121 	uint32_t reg;
   13122 
   13123 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13124 		device_xname(sc->sc_dev), __func__));
   13125 
   13126 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13127 	do {
   13128 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13129 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13130 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13131 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13132 			break;
   13133 		delay(2*1000);
   13134 		i++;
   13135 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13136 
   13137 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13138 		wm_put_hw_semaphore_82573(sc);
   13139 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13140 		    device_xname(sc->sc_dev));
   13141 		return -1;
   13142 	}
   13143 
   13144 	return 0;
   13145 }
   13146 
   13147 static void
   13148 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13149 {
   13150 	uint32_t reg;
   13151 
   13152 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13153 		device_xname(sc->sc_dev), __func__));
   13154 
   13155 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13156 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13157 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13158 }
   13159 
   13160 /*
   13161  * Management mode and power management related subroutines.
   13162  * BMC, AMT, suspend/resume and EEE.
   13163  */
   13164 
   13165 #ifdef WM_WOL
   13166 static int
   13167 wm_check_mng_mode(struct wm_softc *sc)
   13168 {
   13169 	int rv;
   13170 
   13171 	switch (sc->sc_type) {
   13172 	case WM_T_ICH8:
   13173 	case WM_T_ICH9:
   13174 	case WM_T_ICH10:
   13175 	case WM_T_PCH:
   13176 	case WM_T_PCH2:
   13177 	case WM_T_PCH_LPT:
   13178 	case WM_T_PCH_SPT:
   13179 		rv = wm_check_mng_mode_ich8lan(sc);
   13180 		break;
   13181 	case WM_T_82574:
   13182 	case WM_T_82583:
   13183 		rv = wm_check_mng_mode_82574(sc);
   13184 		break;
   13185 	case WM_T_82571:
   13186 	case WM_T_82572:
   13187 	case WM_T_82573:
   13188 	case WM_T_80003:
   13189 		rv = wm_check_mng_mode_generic(sc);
   13190 		break;
   13191 	default:
   13192 		/* noting to do */
   13193 		rv = 0;
   13194 		break;
   13195 	}
   13196 
   13197 	return rv;
   13198 }
   13199 
   13200 static int
   13201 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13202 {
   13203 	uint32_t fwsm;
   13204 
   13205 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13206 
   13207 	if (((fwsm & FWSM_FW_VALID) != 0)
   13208 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13209 		return 1;
   13210 
   13211 	return 0;
   13212 }
   13213 
   13214 static int
   13215 wm_check_mng_mode_82574(struct wm_softc *sc)
   13216 {
   13217 	uint16_t data;
   13218 
   13219 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13220 
   13221 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13222 		return 1;
   13223 
   13224 	return 0;
   13225 }
   13226 
   13227 static int
   13228 wm_check_mng_mode_generic(struct wm_softc *sc)
   13229 {
   13230 	uint32_t fwsm;
   13231 
   13232 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13233 
   13234 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13235 		return 1;
   13236 
   13237 	return 0;
   13238 }
   13239 #endif /* WM_WOL */
   13240 
   13241 static int
   13242 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13243 {
   13244 	uint32_t manc, fwsm, factps;
   13245 
   13246 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13247 		return 0;
   13248 
   13249 	manc = CSR_READ(sc, WMREG_MANC);
   13250 
   13251 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13252 		device_xname(sc->sc_dev), manc));
   13253 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13254 		return 0;
   13255 
   13256 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13257 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13258 		factps = CSR_READ(sc, WMREG_FACTPS);
   13259 		if (((factps & FACTPS_MNGCG) == 0)
   13260 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13261 			return 1;
   13262 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13263 		uint16_t data;
   13264 
   13265 		factps = CSR_READ(sc, WMREG_FACTPS);
   13266 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13267 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13268 			device_xname(sc->sc_dev), factps, data));
   13269 		if (((factps & FACTPS_MNGCG) == 0)
   13270 		    && ((data & NVM_CFG2_MNGM_MASK)
   13271 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13272 			return 1;
   13273 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13274 	    && ((manc & MANC_ASF_EN) == 0))
   13275 		return 1;
   13276 
   13277 	return 0;
   13278 }
   13279 
   13280 static bool
   13281 wm_phy_resetisblocked(struct wm_softc *sc)
   13282 {
   13283 	bool blocked = false;
   13284 	uint32_t reg;
   13285 	int i = 0;
   13286 
   13287 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13288 		device_xname(sc->sc_dev), __func__));
   13289 
   13290 	switch (sc->sc_type) {
   13291 	case WM_T_ICH8:
   13292 	case WM_T_ICH9:
   13293 	case WM_T_ICH10:
   13294 	case WM_T_PCH:
   13295 	case WM_T_PCH2:
   13296 	case WM_T_PCH_LPT:
   13297 	case WM_T_PCH_SPT:
   13298 		do {
   13299 			reg = CSR_READ(sc, WMREG_FWSM);
   13300 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13301 				blocked = true;
   13302 				delay(10*1000);
   13303 				continue;
   13304 			}
   13305 			blocked = false;
   13306 		} while (blocked && (i++ < 30));
   13307 		return blocked;
   13308 		break;
   13309 	case WM_T_82571:
   13310 	case WM_T_82572:
   13311 	case WM_T_82573:
   13312 	case WM_T_82574:
   13313 	case WM_T_82583:
   13314 	case WM_T_80003:
   13315 		reg = CSR_READ(sc, WMREG_MANC);
   13316 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13317 			return true;
   13318 		else
   13319 			return false;
   13320 		break;
   13321 	default:
   13322 		/* no problem */
   13323 		break;
   13324 	}
   13325 
   13326 	return false;
   13327 }
   13328 
   13329 static void
   13330 wm_get_hw_control(struct wm_softc *sc)
   13331 {
   13332 	uint32_t reg;
   13333 
   13334 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13335 		device_xname(sc->sc_dev), __func__));
   13336 
   13337 	if (sc->sc_type == WM_T_82573) {
   13338 		reg = CSR_READ(sc, WMREG_SWSM);
   13339 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13340 	} else if (sc->sc_type >= WM_T_82571) {
   13341 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13342 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13343 	}
   13344 }
   13345 
   13346 static void
   13347 wm_release_hw_control(struct wm_softc *sc)
   13348 {
   13349 	uint32_t reg;
   13350 
   13351 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13352 		device_xname(sc->sc_dev), __func__));
   13353 
   13354 	if (sc->sc_type == WM_T_82573) {
   13355 		reg = CSR_READ(sc, WMREG_SWSM);
   13356 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13357 	} else if (sc->sc_type >= WM_T_82571) {
   13358 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13359 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13360 	}
   13361 }
   13362 
   13363 static void
   13364 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13365 {
   13366 	uint32_t reg;
   13367 
   13368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13369 		device_xname(sc->sc_dev), __func__));
   13370 
   13371 	if (sc->sc_type < WM_T_PCH2)
   13372 		return;
   13373 
   13374 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13375 
   13376 	if (gate)
   13377 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13378 	else
   13379 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13380 
   13381 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13382 }
   13383 
   13384 static void
   13385 wm_smbustopci(struct wm_softc *sc)
   13386 {
   13387 	uint32_t fwsm, reg;
   13388 	int rv = 0;
   13389 
   13390 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13391 		device_xname(sc->sc_dev), __func__));
   13392 
   13393 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13394 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13395 
   13396 	/* Disable ULP */
   13397 	wm_ulp_disable(sc);
   13398 
   13399 	/* Acquire PHY semaphore */
   13400 	sc->phy.acquire(sc);
   13401 
   13402 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13403 	switch (sc->sc_type) {
   13404 	case WM_T_PCH_LPT:
   13405 	case WM_T_PCH_SPT:
   13406 		if (wm_phy_is_accessible_pchlan(sc))
   13407 			break;
   13408 
   13409 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13410 		reg |= CTRL_EXT_FORCE_SMBUS;
   13411 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13412 #if 0
   13413 		/* XXX Isn't this required??? */
   13414 		CSR_WRITE_FLUSH(sc);
   13415 #endif
   13416 		delay(50 * 1000);
   13417 		/* FALLTHROUGH */
   13418 	case WM_T_PCH2:
   13419 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13420 			break;
   13421 		/* FALLTHROUGH */
   13422 	case WM_T_PCH:
   13423 		if (sc->sc_type == WM_T_PCH)
   13424 			if ((fwsm & FWSM_FW_VALID) != 0)
   13425 				break;
   13426 
   13427 		if (wm_phy_resetisblocked(sc) == true) {
   13428 			printf("XXX reset is blocked(3)\n");
   13429 			break;
   13430 		}
   13431 
   13432 		wm_toggle_lanphypc_pch_lpt(sc);
   13433 
   13434 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13435 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13436 				break;
   13437 
   13438 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13439 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13440 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13441 
   13442 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13443 				break;
   13444 			rv = -1;
   13445 		}
   13446 		break;
   13447 	default:
   13448 		break;
   13449 	}
   13450 
   13451 	/* Release semaphore */
   13452 	sc->phy.release(sc);
   13453 
   13454 	if (rv == 0) {
   13455 		if (wm_phy_resetisblocked(sc)) {
   13456 			printf("XXX reset is blocked(4)\n");
   13457 			goto out;
   13458 		}
   13459 		wm_reset_phy(sc);
   13460 		if (wm_phy_resetisblocked(sc))
   13461 			printf("XXX reset is blocked(4)\n");
   13462 	}
   13463 
   13464 out:
   13465 	/*
   13466 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13467 	 */
   13468 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13469 		delay(10*1000);
   13470 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13471 	}
   13472 }
   13473 
   13474 static void
   13475 wm_init_manageability(struct wm_softc *sc)
   13476 {
   13477 
   13478 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13479 		device_xname(sc->sc_dev), __func__));
   13480 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13481 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13482 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13483 
   13484 		/* Disable hardware interception of ARP */
   13485 		manc &= ~MANC_ARP_EN;
   13486 
   13487 		/* Enable receiving management packets to the host */
   13488 		if (sc->sc_type >= WM_T_82571) {
   13489 			manc |= MANC_EN_MNG2HOST;
   13490 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
   13491 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13492 		}
   13493 
   13494 		CSR_WRITE(sc, WMREG_MANC, manc);
   13495 	}
   13496 }
   13497 
   13498 static void
   13499 wm_release_manageability(struct wm_softc *sc)
   13500 {
   13501 
   13502 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13503 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13504 
   13505 		manc |= MANC_ARP_EN;
   13506 		if (sc->sc_type >= WM_T_82571)
   13507 			manc &= ~MANC_EN_MNG2HOST;
   13508 
   13509 		CSR_WRITE(sc, WMREG_MANC, manc);
   13510 	}
   13511 }
   13512 
   13513 static void
   13514 wm_get_wakeup(struct wm_softc *sc)
   13515 {
   13516 
   13517 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13518 	switch (sc->sc_type) {
   13519 	case WM_T_82573:
   13520 	case WM_T_82583:
   13521 		sc->sc_flags |= WM_F_HAS_AMT;
   13522 		/* FALLTHROUGH */
   13523 	case WM_T_80003:
   13524 	case WM_T_82575:
   13525 	case WM_T_82576:
   13526 	case WM_T_82580:
   13527 	case WM_T_I350:
   13528 	case WM_T_I354:
   13529 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13530 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13531 		/* FALLTHROUGH */
   13532 	case WM_T_82541:
   13533 	case WM_T_82541_2:
   13534 	case WM_T_82547:
   13535 	case WM_T_82547_2:
   13536 	case WM_T_82571:
   13537 	case WM_T_82572:
   13538 	case WM_T_82574:
   13539 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13540 		break;
   13541 	case WM_T_ICH8:
   13542 	case WM_T_ICH9:
   13543 	case WM_T_ICH10:
   13544 	case WM_T_PCH:
   13545 	case WM_T_PCH2:
   13546 	case WM_T_PCH_LPT:
   13547 	case WM_T_PCH_SPT:
   13548 		sc->sc_flags |= WM_F_HAS_AMT;
   13549 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13550 		break;
   13551 	default:
   13552 		break;
   13553 	}
   13554 
   13555 	/* 1: HAS_MANAGE */
   13556 	if (wm_enable_mng_pass_thru(sc) != 0)
   13557 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13558 
   13559 	/*
   13560 	 * Note that the WOL flags is set after the resetting of the eeprom
   13561 	 * stuff
   13562 	 */
   13563 }
   13564 
   13565 /*
   13566  * Unconfigure Ultra Low Power mode.
   13567  * Only for I217 and newer (see below).
   13568  */
   13569 static void
   13570 wm_ulp_disable(struct wm_softc *sc)
   13571 {
   13572 	uint32_t reg;
   13573 	int i = 0;
   13574 
   13575 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13576 		device_xname(sc->sc_dev), __func__));
   13577 	/* Exclude old devices */
   13578 	if ((sc->sc_type < WM_T_PCH_LPT)
   13579 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13580 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13581 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13582 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13583 		return;
   13584 
   13585 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13586 		/* Request ME un-configure ULP mode in the PHY */
   13587 		reg = CSR_READ(sc, WMREG_H2ME);
   13588 		reg &= ~H2ME_ULP;
   13589 		reg |= H2ME_ENFORCE_SETTINGS;
   13590 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13591 
   13592 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13593 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13594 			if (i++ == 30) {
   13595 				printf("%s timed out\n", __func__);
   13596 				return;
   13597 			}
   13598 			delay(10 * 1000);
   13599 		}
   13600 		reg = CSR_READ(sc, WMREG_H2ME);
   13601 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13602 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13603 
   13604 		return;
   13605 	}
   13606 
   13607 	/* Acquire semaphore */
   13608 	sc->phy.acquire(sc);
   13609 
   13610 	/* Toggle LANPHYPC */
   13611 	wm_toggle_lanphypc_pch_lpt(sc);
   13612 
   13613 	/* Unforce SMBus mode in PHY */
   13614 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13615 	if (reg == 0x0000 || reg == 0xffff) {
   13616 		uint32_t reg2;
   13617 
   13618 		printf("%s: Force SMBus first.\n", __func__);
   13619 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13620 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13621 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13622 		delay(50 * 1000);
   13623 
   13624 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13625 	}
   13626 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13627 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13628 
   13629 	/* Unforce SMBus mode in MAC */
   13630 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13631 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13632 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13633 
   13634 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13635 	reg |= HV_PM_CTRL_K1_ENA;
   13636 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13637 
   13638 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13639 	reg &= ~(I218_ULP_CONFIG1_IND
   13640 	    | I218_ULP_CONFIG1_STICKY_ULP
   13641 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13642 	    | I218_ULP_CONFIG1_WOL_HOST
   13643 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13644 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13645 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13646 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13647 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13648 	reg |= I218_ULP_CONFIG1_START;
   13649 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13650 
   13651 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13652 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13653 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13654 
   13655 	/* Release semaphore */
   13656 	sc->phy.release(sc);
   13657 	wm_gmii_reset(sc);
   13658 	delay(50 * 1000);
   13659 }
   13660 
   13661 /* WOL in the newer chipset interfaces (pchlan) */
   13662 static void
   13663 wm_enable_phy_wakeup(struct wm_softc *sc)
   13664 {
   13665 #if 0
   13666 	uint16_t preg;
   13667 
   13668 	/* Copy MAC RARs to PHY RARs */
   13669 
   13670 	/* Copy MAC MTA to PHY MTA */
   13671 
   13672 	/* Configure PHY Rx Control register */
   13673 
   13674 	/* Enable PHY wakeup in MAC register */
   13675 
   13676 	/* Configure and enable PHY wakeup in PHY registers */
   13677 
   13678 	/* Activate PHY wakeup */
   13679 
   13680 	/* XXX */
   13681 #endif
   13682 }
   13683 
   13684 /* Power down workaround on D3 */
   13685 static void
   13686 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13687 {
   13688 	uint32_t reg;
   13689 	int i;
   13690 
   13691 	for (i = 0; i < 2; i++) {
   13692 		/* Disable link */
   13693 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13694 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13695 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13696 
   13697 		/*
   13698 		 * Call gig speed drop workaround on Gig disable before
   13699 		 * accessing any PHY registers
   13700 		 */
   13701 		if (sc->sc_type == WM_T_ICH8)
   13702 			wm_gig_downshift_workaround_ich8lan(sc);
   13703 
   13704 		/* Write VR power-down enable */
   13705 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13706 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13707 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13708 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13709 
   13710 		/* Read it back and test */
   13711 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13712 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13713 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13714 			break;
   13715 
   13716 		/* Issue PHY reset and repeat at most one more time */
   13717 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13718 	}
   13719 }
   13720 
   13721 static void
   13722 wm_enable_wakeup(struct wm_softc *sc)
   13723 {
   13724 	uint32_t reg, pmreg;
   13725 	pcireg_t pmode;
   13726 
   13727 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13728 		device_xname(sc->sc_dev), __func__));
   13729 
   13730 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13731 		&pmreg, NULL) == 0)
   13732 		return;
   13733 
   13734 	/* Advertise the wakeup capability */
   13735 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13736 	    | CTRL_SWDPIN(3));
   13737 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13738 
   13739 	/* ICH workaround */
   13740 	switch (sc->sc_type) {
   13741 	case WM_T_ICH8:
   13742 	case WM_T_ICH9:
   13743 	case WM_T_ICH10:
   13744 	case WM_T_PCH:
   13745 	case WM_T_PCH2:
   13746 	case WM_T_PCH_LPT:
   13747 	case WM_T_PCH_SPT:
   13748 		/* Disable gig during WOL */
   13749 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13750 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13751 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13752 		if (sc->sc_type == WM_T_PCH)
   13753 			wm_gmii_reset(sc);
   13754 
   13755 		/* Power down workaround */
   13756 		if (sc->sc_phytype == WMPHY_82577) {
   13757 			struct mii_softc *child;
   13758 
   13759 			/* Assume that the PHY is copper */
   13760 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13761 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13762 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13763 				    (768 << 5) | 25, 0x0444); /* magic num */
   13764 		}
   13765 		break;
   13766 	default:
   13767 		break;
   13768 	}
   13769 
   13770 	/* Keep the laser running on fiber adapters */
   13771 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13772 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13773 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13774 		reg |= CTRL_EXT_SWDPIN(3);
   13775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13776 	}
   13777 
   13778 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13779 #if 0	/* for the multicast packet */
   13780 	reg |= WUFC_MC;
   13781 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13782 #endif
   13783 
   13784 	if (sc->sc_type >= WM_T_PCH)
   13785 		wm_enable_phy_wakeup(sc);
   13786 	else {
   13787 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13788 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13789 	}
   13790 
   13791 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13792 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13793 		|| (sc->sc_type == WM_T_PCH2))
   13794 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13795 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13796 
   13797 	/* Request PME */
   13798 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13799 #if 0
   13800 	/* Disable WOL */
   13801 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13802 #else
   13803 	/* For WOL */
   13804 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13805 #endif
   13806 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13807 }
   13808 
   13809 /* LPLU */
   13810 
   13811 static void
   13812 wm_lplu_d0_disable(struct wm_softc *sc)
   13813 {
   13814 	struct mii_data *mii = &sc->sc_mii;
   13815 	uint32_t reg;
   13816 
   13817 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13818 		device_xname(sc->sc_dev), __func__));
   13819 
   13820 	if (sc->sc_phytype == WMPHY_IFE)
   13821 		return;
   13822 
   13823 	switch (sc->sc_type) {
   13824 	case WM_T_82571:
   13825 	case WM_T_82572:
   13826 	case WM_T_82573:
   13827 	case WM_T_82575:
   13828 	case WM_T_82576:
   13829 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13830 		reg &= ~PMR_D0_LPLU;
   13831 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13832 		break;
   13833 	case WM_T_82580:
   13834 	case WM_T_I350:
   13835 	case WM_T_I210:
   13836 	case WM_T_I211:
   13837 		reg = CSR_READ(sc, WMREG_PHPM);
   13838 		reg &= ~PHPM_D0A_LPLU;
   13839 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13840 		break;
   13841 	case WM_T_82574:
   13842 	case WM_T_82583:
   13843 	case WM_T_ICH8:
   13844 	case WM_T_ICH9:
   13845 	case WM_T_ICH10:
   13846 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13847 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13848 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13849 		CSR_WRITE_FLUSH(sc);
   13850 		break;
   13851 	case WM_T_PCH:
   13852 	case WM_T_PCH2:
   13853 	case WM_T_PCH_LPT:
   13854 	case WM_T_PCH_SPT:
   13855 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13856 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13857 		if (wm_phy_resetisblocked(sc) == false)
   13858 			reg |= HV_OEM_BITS_ANEGNOW;
   13859 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13860 		break;
   13861 	default:
   13862 		break;
   13863 	}
   13864 }
   13865 
   13866 /* EEE */
   13867 
   13868 static void
   13869 wm_set_eee_i350(struct wm_softc *sc)
   13870 {
   13871 	uint32_t ipcnfg, eeer;
   13872 
   13873 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13874 	eeer = CSR_READ(sc, WMREG_EEER);
   13875 
   13876 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13877 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13878 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13879 		    | EEER_LPI_FC);
   13880 	} else {
   13881 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13882 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13883 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13884 		    | EEER_LPI_FC);
   13885 	}
   13886 
   13887 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13888 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13889 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13890 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13891 }
   13892 
   13893 /*
   13894  * Workarounds (mainly PHY related).
   13895  * Basically, PHY's workarounds are in the PHY drivers.
   13896  */
   13897 
   13898 /* Work-around for 82566 Kumeran PCS lock loss */
   13899 static void
   13900 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13901 {
   13902 	struct mii_data *mii = &sc->sc_mii;
   13903 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13904 	int i;
   13905 	int reg;
   13906 
   13907 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13908 		device_xname(sc->sc_dev), __func__));
   13909 
   13910 	/* If the link is not up, do nothing */
   13911 	if ((status & STATUS_LU) == 0)
   13912 		return;
   13913 
   13914 	/* Nothing to do if the link is other than 1Gbps */
   13915 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13916 		return;
   13917 
   13918 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13919 	for (i = 0; i < 10; i++) {
   13920 		/* read twice */
   13921 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13922 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13923 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13924 			goto out;	/* GOOD! */
   13925 
   13926 		/* Reset the PHY */
   13927 		wm_reset_phy(sc);
   13928 		delay(5*1000);
   13929 	}
   13930 
   13931 	/* Disable GigE link negotiation */
   13932 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13933 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13934 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13935 
   13936 	/*
   13937 	 * Call gig speed drop workaround on Gig disable before accessing
   13938 	 * any PHY registers.
   13939 	 */
   13940 	wm_gig_downshift_workaround_ich8lan(sc);
   13941 
   13942 out:
   13943 	return;
   13944 }
   13945 
   13946 /* WOL from S5 stops working */
   13947 static void
   13948 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13949 {
   13950 	uint16_t kmreg;
   13951 
   13952 	/* Only for igp3 */
   13953 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13954 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13955 			return;
   13956 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13957 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13958 			return;
   13959 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13960 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13961 	}
   13962 }
   13963 
   13964 /*
   13965  * Workaround for pch's PHYs
   13966  * XXX should be moved to new PHY driver?
   13967  */
   13968 static void
   13969 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13970 {
   13971 
   13972 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13973 		device_xname(sc->sc_dev), __func__));
   13974 	KASSERT(sc->sc_type == WM_T_PCH);
   13975 
   13976 	if (sc->sc_phytype == WMPHY_82577)
   13977 		wm_set_mdio_slow_mode_hv(sc);
   13978 
   13979 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13980 
   13981 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13982 
   13983 	/* 82578 */
   13984 	if (sc->sc_phytype == WMPHY_82578) {
   13985 		struct mii_softc *child;
   13986 
   13987 		/*
   13988 		 * Return registers to default by doing a soft reset then
   13989 		 * writing 0x3140 to the control register
   13990 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13991 		 */
   13992 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13993 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13994 			PHY_RESET(child);
   13995 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13996 			    0x3140);
   13997 		}
   13998 	}
   13999 
   14000 	/* Select page 0 */
   14001 	sc->phy.acquire(sc);
   14002 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14003 	sc->phy.release(sc);
   14004 
   14005 	/*
   14006 	 * Configure the K1 Si workaround during phy reset assuming there is
   14007 	 * link so that it disables K1 if link is in 1Gbps.
   14008 	 */
   14009 	wm_k1_gig_workaround_hv(sc, 1);
   14010 }
   14011 
   14012 static void
   14013 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14014 {
   14015 
   14016 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14017 		device_xname(sc->sc_dev), __func__));
   14018 	KASSERT(sc->sc_type == WM_T_PCH2);
   14019 
   14020 	wm_set_mdio_slow_mode_hv(sc);
   14021 }
   14022 
   14023 static int
   14024 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14025 {
   14026 	int k1_enable = sc->sc_nvm_k1_enabled;
   14027 
   14028 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14029 		device_xname(sc->sc_dev), __func__));
   14030 
   14031 	if (sc->phy.acquire(sc) != 0)
   14032 		return -1;
   14033 
   14034 	if (link) {
   14035 		k1_enable = 0;
   14036 
   14037 		/* Link stall fix for link up */
   14038 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14039 	} else {
   14040 		/* Link stall fix for link down */
   14041 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14042 	}
   14043 
   14044 	wm_configure_k1_ich8lan(sc, k1_enable);
   14045 	sc->phy.release(sc);
   14046 
   14047 	return 0;
   14048 }
   14049 
   14050 static void
   14051 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14052 {
   14053 	uint32_t reg;
   14054 
   14055 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14056 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14057 	    reg | HV_KMRN_MDIO_SLOW);
   14058 }
   14059 
   14060 static void
   14061 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14062 {
   14063 	uint32_t ctrl, ctrl_ext, tmp;
   14064 	uint16_t kmreg;
   14065 	int rv;
   14066 
   14067 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14068 	if (rv != 0)
   14069 		return;
   14070 
   14071 	if (k1_enable)
   14072 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14073 	else
   14074 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14075 
   14076 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14077 	if (rv != 0)
   14078 		return;
   14079 
   14080 	delay(20);
   14081 
   14082 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14083 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14084 
   14085 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14086 	tmp |= CTRL_FRCSPD;
   14087 
   14088 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14089 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14090 	CSR_WRITE_FLUSH(sc);
   14091 	delay(20);
   14092 
   14093 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14094 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14095 	CSR_WRITE_FLUSH(sc);
   14096 	delay(20);
   14097 
   14098 	return;
   14099 }
   14100 
   14101 /* special case - for 82575 - need to do manual init ... */
   14102 static void
   14103 wm_reset_init_script_82575(struct wm_softc *sc)
   14104 {
   14105 	/*
   14106 	 * remark: this is untested code - we have no board without EEPROM
   14107 	 *  same setup as mentioned int the FreeBSD driver for the i82575
   14108 	 */
   14109 
   14110 	/* SerDes configuration via SERDESCTRL */
   14111 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14112 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14113 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14114 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14115 
   14116 	/* CCM configuration via CCMCTL register */
   14117 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14118 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14119 
   14120 	/* PCIe lanes configuration */
   14121 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14122 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14123 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14124 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14125 
   14126 	/* PCIe PLL Configuration */
   14127 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14128 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14129 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14130 }
   14131 
   14132 static void
   14133 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14134 {
   14135 	uint32_t reg;
   14136 	uint16_t nvmword;
   14137 	int rv;
   14138 
   14139 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14140 		return;
   14141 
   14142 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14143 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14144 	if (rv != 0) {
   14145 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14146 		    __func__);
   14147 		return;
   14148 	}
   14149 
   14150 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14151 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14152 		reg |= MDICNFG_DEST;
   14153 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14154 		reg |= MDICNFG_COM_MDIO;
   14155 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14156 }
   14157 
   14158 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14159 
   14160 static bool
   14161 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14162 {
   14163 	int i;
   14164 	uint32_t reg;
   14165 	uint16_t id1, id2;
   14166 
   14167 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14168 		device_xname(sc->sc_dev), __func__));
   14169 	id1 = id2 = 0xffff;
   14170 	for (i = 0; i < 2; i++) {
   14171 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14172 		if (MII_INVALIDID(id1))
   14173 			continue;
   14174 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14175 		if (MII_INVALIDID(id2))
   14176 			continue;
   14177 		break;
   14178 	}
   14179 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   14180 		goto out;
   14181 	}
   14182 
   14183 	if (sc->sc_type < WM_T_PCH_LPT) {
   14184 		sc->phy.release(sc);
   14185 		wm_set_mdio_slow_mode_hv(sc);
   14186 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14187 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14188 		sc->phy.acquire(sc);
   14189 	}
   14190 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14191 		printf("XXX return with false\n");
   14192 		return false;
   14193 	}
   14194 out:
   14195 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14196 		/* Only unforce SMBus if ME is not active */
   14197 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14198 			/* Unforce SMBus mode in PHY */
   14199 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14200 			    CV_SMB_CTRL);
   14201 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14202 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14203 			    CV_SMB_CTRL, reg);
   14204 
   14205 			/* Unforce SMBus mode in MAC */
   14206 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14207 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14208 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14209 		}
   14210 	}
   14211 	return true;
   14212 }
   14213 
   14214 static void
   14215 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14216 {
   14217 	uint32_t reg;
   14218 	int i;
   14219 
   14220 	/* Set PHY Config Counter to 50msec */
   14221 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14222 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14223 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14224 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14225 
   14226 	/* Toggle LANPHYPC */
   14227 	reg = CSR_READ(sc, WMREG_CTRL);
   14228 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14229 	reg &= ~CTRL_LANPHYPC_VALUE;
   14230 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14231 	CSR_WRITE_FLUSH(sc);
   14232 	delay(1000);
   14233 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14234 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14235 	CSR_WRITE_FLUSH(sc);
   14236 
   14237 	if (sc->sc_type < WM_T_PCH_LPT)
   14238 		delay(50 * 1000);
   14239 	else {
   14240 		i = 20;
   14241 
   14242 		do {
   14243 			delay(5 * 1000);
   14244 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14245 		    && i--);
   14246 
   14247 		delay(30 * 1000);
   14248 	}
   14249 }
   14250 
   14251 static int
   14252 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14253 {
   14254 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14255 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14256 	uint32_t rxa;
   14257 	uint16_t scale = 0, lat_enc = 0;
   14258 	int32_t obff_hwm = 0;
   14259 	int64_t lat_ns, value;
   14260 
   14261 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14262 		device_xname(sc->sc_dev), __func__));
   14263 
   14264 	if (link) {
   14265 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14266 		uint32_t status;
   14267 		uint16_t speed;
   14268 		pcireg_t preg;
   14269 
   14270 		status = CSR_READ(sc, WMREG_STATUS);
   14271 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14272 		case STATUS_SPEED_10:
   14273 			speed = 10;
   14274 			break;
   14275 		case STATUS_SPEED_100:
   14276 			speed = 100;
   14277 			break;
   14278 		case STATUS_SPEED_1000:
   14279 			speed = 1000;
   14280 			break;
   14281 		default:
   14282 			device_printf(sc->sc_dev, "Unknown speed "
   14283 			    "(status = %08x)\n", status);
   14284 			return -1;
   14285 		}
   14286 
   14287 		/* Rx Packet Buffer Allocation size (KB) */
   14288 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14289 
   14290 		/*
   14291 		 * Determine the maximum latency tolerated by the device.
   14292 		 *
   14293 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14294 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14295 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14296 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14297 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14298 		 */
   14299 		lat_ns = ((int64_t)rxa * 1024 -
   14300 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14301 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14302 		if (lat_ns < 0)
   14303 			lat_ns = 0;
   14304 		else
   14305 			lat_ns /= speed;
   14306 		value = lat_ns;
   14307 
   14308 		while (value > LTRV_VALUE) {
   14309 			scale ++;
   14310 			value = howmany(value, __BIT(5));
   14311 		}
   14312 		if (scale > LTRV_SCALE_MAX) {
   14313 			printf("%s: Invalid LTR latency scale %d\n",
   14314 			    device_xname(sc->sc_dev), scale);
   14315 			return -1;
   14316 		}
   14317 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14318 
   14319 		/* Determine the maximum latency tolerated by the platform */
   14320 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14321 		    WM_PCI_LTR_CAP_LPT);
   14322 		max_snoop = preg & 0xffff;
   14323 		max_nosnoop = preg >> 16;
   14324 
   14325 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14326 
   14327 		if (lat_enc > max_ltr_enc) {
   14328 			lat_enc = max_ltr_enc;
   14329 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14330 			    * PCI_LTR_SCALETONS(
   14331 				    __SHIFTOUT(lat_enc,
   14332 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14333 		}
   14334 
   14335 		if (lat_ns) {
   14336 			lat_ns *= speed * 1000;
   14337 			lat_ns /= 8;
   14338 			lat_ns /= 1000000000;
   14339 			obff_hwm = (int32_t)(rxa - lat_ns);
   14340 		}
   14341 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14342 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   14343 			    "(rxa = %d, lat_ns = %d)\n",
   14344 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14345 			return -1;
   14346 		}
   14347 	}
   14348 	/* Snoop and No-Snoop latencies the same */
   14349 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14350 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14351 
   14352 	/* Set OBFF high water mark */
   14353 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14354 	reg |= obff_hwm;
   14355 	CSR_WRITE(sc, WMREG_SVT, reg);
   14356 
   14357 	/* Enable OBFF */
   14358 	reg = CSR_READ(sc, WMREG_SVCR);
   14359 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14360 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14361 
   14362 	return 0;
   14363 }
   14364 
   14365 /*
   14366  * I210 Errata 25 and I211 Errata 10
   14367  * Slow System Clock.
   14368  */
   14369 static void
   14370 wm_pll_workaround_i210(struct wm_softc *sc)
   14371 {
   14372 	uint32_t mdicnfg, wuc;
   14373 	uint32_t reg;
   14374 	pcireg_t pcireg;
   14375 	uint32_t pmreg;
   14376 	uint16_t nvmword, tmp_nvmword;
   14377 	int phyval;
   14378 	bool wa_done = false;
   14379 	int i;
   14380 
   14381 	/* Save WUC and MDICNFG registers */
   14382 	wuc = CSR_READ(sc, WMREG_WUC);
   14383 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14384 
   14385 	reg = mdicnfg & ~MDICNFG_DEST;
   14386 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14387 
   14388 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14389 		nvmword = INVM_DEFAULT_AL;
   14390 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14391 
   14392 	/* Get Power Management cap offset */
   14393 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14394 		&pmreg, NULL) == 0)
   14395 		return;
   14396 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14397 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14398 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14399 
   14400 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14401 			break; /* OK */
   14402 		}
   14403 
   14404 		wa_done = true;
   14405 		/* Directly reset the internal PHY */
   14406 		reg = CSR_READ(sc, WMREG_CTRL);
   14407 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14408 
   14409 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14410 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14411 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14412 
   14413 		CSR_WRITE(sc, WMREG_WUC, 0);
   14414 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14415 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14416 
   14417 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14418 		    pmreg + PCI_PMCSR);
   14419 		pcireg |= PCI_PMCSR_STATE_D3;
   14420 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14421 		    pmreg + PCI_PMCSR, pcireg);
   14422 		delay(1000);
   14423 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14424 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14425 		    pmreg + PCI_PMCSR, pcireg);
   14426 
   14427 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14428 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14429 
   14430 		/* Restore WUC register */
   14431 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14432 	}
   14433 
   14434 	/* Restore MDICNFG setting */
   14435 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14436 	if (wa_done)
   14437 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14438 }
   14439 
   14440 static void
   14441 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14442 {
   14443 	uint32_t reg;
   14444 
   14445 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14446 		device_xname(sc->sc_dev), __func__));
   14447 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14448 
   14449 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14450 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14451 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14452 
   14453 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14454 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14455 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14456 }
   14457