/*	$NetBSD: if_wm.c,v 1.529 2017/07/20 10:00:25 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.529 2017/07/20 10:00:25 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
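
/*
 * Editor's usage sketch (not driver code): the doubled parentheses in
 * DPRINTF()'s second argument pass a complete printf() argument list
 * through the single macro parameter.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif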

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
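
/*
 * Editor's sketch (not driver code): WM_NTXDESC() is a power of two, so
 * WM_NEXTTX() above wraps the ring index with a cheap AND of the mask
 * instead of a modulo operation.
 */
#if 0
	/* With WM_NTXDESC(txq) == 4096: (4095 + 1) & 4095 == 0 */
	int next = (4095 + 1) & (4096 - 1);
#endif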

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
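
/*
 * Editor's usage sketch (not driver code): each LAN function of a
 * multi-port chip (sc_funcid, 0 to 3) picks its own SW/FW PHY
 * semaphore bit from the table above.
 */
#if 0
	uint16_t sem = swfwphysem[sc->sc_funcid];
#endif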

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
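
/*
 * Editor's note: in WM_Q_EVCNT_DEFINE() the literal "qname##XX##evname"
 * is never token-pasted (## is inert inside a string literal); it only
 * sizes the name buffer, which WM_Q_EVCNT_ATTACH()'s snprintf() later
 * fills with e.g. "txq00txdw".  A sketch of what
 * WM_Q_EVCNT_DEFINE(txq, txdw) expands to:
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif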

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > the number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of the Tx/Rx queue pair */
	int wmq_intr_idx;		/* index into the MSI-X table */

	uint32_t wmq_itr;		/* interrupt interval per queue */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;			/* softint cookie */
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
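
/*
 * Editor's usage sketch (not driver code; assumes acquire() returns 0
 * on success): PHY access is bracketed by the per-chip ops.
 */
#if 0
	if (sc->phy.acquire(sc) == 0) {
		/* ... access PHY registers ... */
		sc->phy.release(sc);
	}
#endif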

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
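
/*
 * Editor's sketch of the tail-pointer idiom above (m1 and m2 are
 * hypothetical mbufs): appending never walks the chain.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_tailp = &rxq_head, head = NULL */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
#endif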

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */
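
/*
 * Editor's usage sketch (not driver code): the first argument is both
 * the pointer variable and the pasted member prefix, so the call site
 * variable must literally be named "txq" (or "rxq").
 */
#if 0
	WM_Q_EVCNT_INCR(txq, txdw);	/* (&(txq)->txq_ev_txdw)->ev_count++ */
#endif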

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
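
/*
 * Editor's usage sketch (not driver code): reading WMREG_STATUS forces
 * preceding posted PCI writes out to the device.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);
#endif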

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
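
/*
 * Editor's usage sketch (not driver code): the _LO/_HI macros above
 * split a (possibly 64-bit) descriptor ring address for the chip's
 * 32-bit register pairs; _HI is 0 when bus_addr_t is 32 bits wide.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);
#endif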

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
   1332 	  WM_T_82580,		WMP_F_FIBER },
   1333 
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1335 	  "82580 1000BaseT Ethernet (SERDES)",
   1336 	  WM_T_82580,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1339 	  "82580 gigabit Ethernet (SGMII)",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1342 	  "82580 dual-1000BaseT Ethernet",
   1343 	  WM_T_82580,		WMP_F_COPPER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1346 	  "82580 quad-1000BaseX Ethernet",
   1347 	  WM_T_82580,		WMP_F_FIBER },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1350 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1354 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1355 	  WM_T_82580,		WMP_F_SERDES },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1358 	  "DH89XXCC 1000BASE-KX Ethernet",
   1359 	  WM_T_82580,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1362 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1363 	  WM_T_82580,		WMP_F_SERDES },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1366 	  "I350 Gigabit Network Connection",
   1367 	  WM_T_I350,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1370 	  "I350 Gigabit Fiber Network Connection",
   1371 	  WM_T_I350,		WMP_F_FIBER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1374 	  "I350 Gigabit Backplane Connection",
   1375 	  WM_T_I350,		WMP_F_SERDES },
   1376 
   1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1378 	  "I350 Quad Port Gigabit Ethernet",
   1379 	  WM_T_I350,		WMP_F_SERDES },
   1380 
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1382 	  "I350 Gigabit Connection",
   1383 	  WM_T_I350,		WMP_F_COPPER },
   1384 
   1385 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1386 	  "I354 Gigabit Ethernet (KX)",
   1387 	  WM_T_I354,		WMP_F_SERDES },
   1388 
   1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1390 	  "I354 Gigabit Ethernet (SGMII)",
   1391 	  WM_T_I354,		WMP_F_COPPER },
   1392 
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1394 	  "I354 Gigabit Ethernet (2.5G)",
   1395 	  WM_T_I354,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1398 	  "I210-T1 Ethernet Server Adapter",
   1399 	  WM_T_I210,		WMP_F_COPPER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1402 	  "I210 Ethernet (Copper OEM)",
   1403 	  WM_T_I210,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1406 	  "I210 Ethernet (Copper IT)",
   1407 	  WM_T_I210,		WMP_F_COPPER },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1410 	  "I210 Ethernet (FLASH less)",
   1411 	  WM_T_I210,		WMP_F_COPPER },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1414 	  "I210 Gigabit Ethernet (Fiber)",
   1415 	  WM_T_I210,		WMP_F_FIBER },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1418 	  "I210 Gigabit Ethernet (SERDES)",
   1419 	  WM_T_I210,		WMP_F_SERDES },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1422 	  "I210 Gigabit Ethernet (FLASH less)",
   1423 	  WM_T_I210,		WMP_F_SERDES },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1426 	  "I210 Gigabit Ethernet (SGMII)",
   1427 	  WM_T_I210,		WMP_F_COPPER },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1430 	  "I211 Ethernet (COPPER)",
   1431 	  WM_T_I211,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1433 	  "I217 V Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1436 	  "I217 LM Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1445 	  "I218 V Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1454 	  "I218 LM Ethernet Connection",
   1455 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1456 #if 0
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1467 	  "I219 V Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1482 	  "I219 LM Ethernet Connection",
   1483 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1484 #endif
   1485 	{ 0,			0,
   1486 	  NULL,
   1487 	  0,			0 },
   1488 };
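         /* The all-zero entry above terminates the table for wm_lookup(). */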
   1489 
   1490 /*
   1491  * Register read/write functions.
   1492  * Other than CSR_{READ|WRITE}().
   1493  */
   1494 
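         /*
          * Indirect I/O access (used by the functions below) goes through an
          * address/data register pair: write the target register's offset to
          * the window at I/O BAR offset 0, then read or write the value
          * through the window at offset 4.
          */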
   1495 #if 0 /* Not currently used */
   1496 static inline uint32_t
   1497 wm_io_read(struct wm_softc *sc, int reg)
   1498 {
   1499 
   1500 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1501 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1502 }
   1503 #endif
   1504 
   1505 static inline void
   1506 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1507 {
   1508 
   1509 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1510 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1511 }
   1512 
   1513 static inline void
   1514 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1515     uint32_t data)
   1516 {
   1517 	uint32_t regval;
   1518 	int i;
   1519 
   1520 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1521 
   1522 	CSR_WRITE(sc, reg, regval);
   1523 
   1524 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1525 		delay(5);
   1526 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1527 			break;
   1528 	}
   1529 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1530 		aprint_error("%s: WARNING:"
   1531 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1532 		    device_xname(sc->sc_dev), reg);
   1533 	}
   1534 }
   1535 
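         /*
          * Store a bus address into a wiseman descriptor address field as
          * little-endian low/high 32-bit halves; the high half is zero on
          * platforms with a 32-bit bus_addr_t.
          */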
   1536 static inline void
   1537 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1538 {
   1539 	wa->wa_low = htole32(v & 0xffffffffU);
   1540 	if (sizeof(bus_addr_t) == 8)
   1541 		wa->wa_high = htole32((uint64_t) v >> 32);
   1542 	else
   1543 		wa->wa_high = 0;
   1544 }
   1545 
   1546 /*
   1547  * Descriptor sync/init functions.
   1548  */
   1549 static inline void
   1550 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1551 {
   1552 	struct wm_softc *sc = txq->txq_sc;
   1553 
   1554 	/* If it will wrap around, sync to the end of the ring. */
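         	/*
         	 * Example: in a 256-descriptor ring, start == 250 with
         	 * num == 10 syncs descriptors 250-255 here and leaves
         	 * descriptors 0-3 for the sync below.
         	 */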
   1555 	if ((start + num) > WM_NTXDESC(txq)) {
   1556 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1557 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1558 		    (WM_NTXDESC(txq) - start), ops);
   1559 		num -= (WM_NTXDESC(txq) - start);
   1560 		start = 0;
   1561 	}
   1562 
   1563 	/* Now sync whatever is left. */
   1564 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1565 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1566 }
   1567 
   1568 static inline void
   1569 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1570 {
   1571 	struct wm_softc *sc = rxq->rxq_sc;
   1572 
   1573 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1574 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1575 }
   1576 
   1577 static inline void
   1578 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1579 {
   1580 	struct wm_softc *sc = rxq->rxq_sc;
   1581 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1582 	struct mbuf *m = rxs->rxs_mbuf;
   1583 
   1584 	/*
   1585 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1586 	 * so that the payload after the Ethernet header is aligned
   1587 	 * to a 4-byte boundary.
    1588 	 *
   1589 	 * XXX BRAINDAMAGE ALERT!
   1590 	 * The stupid chip uses the same size for every buffer, which
   1591 	 * is set in the Receive Control register.  We are using the 2K
   1592 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1593 	 * reason, we can't "scoot" packets longer than the standard
   1594 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1595 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1596 	 * the upper layer copy the headers.
   1597 	 */
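         	/*
         	 * Example: with the 2-byte tweak, the 14-byte Ethernet header
         	 * starts at offset 2, so the payload (e.g. an IP header) lands
         	 * at offset 16, which is 4-byte aligned.
         	 */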
   1598 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1599 
   1600 	if (sc->sc_type == WM_T_82574) {
   1601 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1602 		rxd->erx_data.erxd_addr =
   1603 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1604 		rxd->erx_data.erxd_dd = 0;
   1605 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1606 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1607 
   1608 		rxd->nqrx_data.nrxd_paddr =
   1609 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1610 		/* Currently, split header is not supported. */
   1611 		rxd->nqrx_data.nrxd_haddr = 0;
   1612 	} else {
   1613 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1614 
   1615 		wm_set_dma_addr(&rxd->wrx_addr,
   1616 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1617 		rxd->wrx_len = 0;
   1618 		rxd->wrx_cksum = 0;
   1619 		rxd->wrx_status = 0;
   1620 		rxd->wrx_errors = 0;
   1621 		rxd->wrx_special = 0;
   1622 	}
   1623 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1624 
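         	/*
         	 * Advance the Rx descriptor tail (RDT) to hand this freshly
         	 * initialized descriptor back to the hardware.
         	 */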
   1625 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1626 }
   1627 
   1628 /*
   1629  * Device driver interface functions and commonly used functions.
   1630  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1631  */
   1632 
   1633 /* Lookup supported device table */
   1634 static const struct wm_product *
   1635 wm_lookup(const struct pci_attach_args *pa)
   1636 {
   1637 	const struct wm_product *wmp;
   1638 
   1639 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1640 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1641 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1642 			return wmp;
   1643 	}
   1644 	return NULL;
   1645 }
   1646 
   1647 /* The match function (ca_match) */
   1648 static int
   1649 wm_match(device_t parent, cfdata_t cf, void *aux)
   1650 {
   1651 	struct pci_attach_args *pa = aux;
   1652 
   1653 	if (wm_lookup(pa) != NULL)
   1654 		return 1;
   1655 
   1656 	return 0;
   1657 }
   1658 
   1659 /* The attach function (ca_attach) */
   1660 static void
   1661 wm_attach(device_t parent, device_t self, void *aux)
   1662 {
   1663 	struct wm_softc *sc = device_private(self);
   1664 	struct pci_attach_args *pa = aux;
   1665 	prop_dictionary_t dict;
   1666 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1667 	pci_chipset_tag_t pc = pa->pa_pc;
   1668 	int counts[PCI_INTR_TYPE_SIZE];
   1669 	pci_intr_type_t max_type;
   1670 	const char *eetype, *xname;
   1671 	bus_space_tag_t memt;
   1672 	bus_space_handle_t memh;
   1673 	bus_size_t memsize;
   1674 	int memh_valid;
   1675 	int i, error;
   1676 	const struct wm_product *wmp;
   1677 	prop_data_t ea;
   1678 	prop_number_t pn;
   1679 	uint8_t enaddr[ETHER_ADDR_LEN];
   1680 	char buf[256];
   1681 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1682 	pcireg_t preg, memtype;
   1683 	uint16_t eeprom_data, apme_mask;
   1684 	bool force_clear_smbi;
   1685 	uint32_t link_mode;
   1686 	uint32_t reg;
   1687 
   1688 	sc->sc_dev = self;
   1689 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1690 	sc->sc_core_stopping = false;
   1691 
   1692 	wmp = wm_lookup(pa);
   1693 #ifdef DIAGNOSTIC
   1694 	if (wmp == NULL) {
   1695 		printf("\n");
   1696 		panic("wm_attach: impossible");
   1697 	}
   1698 #endif
   1699 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1700 
   1701 	sc->sc_pc = pa->pa_pc;
   1702 	sc->sc_pcitag = pa->pa_tag;
   1703 
   1704 	if (pci_dma64_available(pa))
   1705 		sc->sc_dmat = pa->pa_dmat64;
   1706 	else
   1707 		sc->sc_dmat = pa->pa_dmat;
   1708 
   1709 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1710 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1711 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1712 
   1713 	sc->sc_type = wmp->wmp_type;
   1714 
   1715 	/* Set default function pointers */
   1716 	sc->phy.acquire = wm_get_null;
   1717 	sc->phy.release = wm_put_null;
   1718 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1719 
   1720 	if (sc->sc_type < WM_T_82543) {
   1721 		if (sc->sc_rev < 2) {
   1722 			aprint_error_dev(sc->sc_dev,
   1723 			    "i82542 must be at least rev. 2\n");
   1724 			return;
   1725 		}
   1726 		if (sc->sc_rev < 3)
   1727 			sc->sc_type = WM_T_82542_2_0;
   1728 	}
   1729 
   1730 	/*
   1731 	 * Disable MSI for Errata:
   1732 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1733 	 *
   1734 	 *  82544: Errata 25
   1735 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1736 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1737 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1738 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1739 	 *
   1740 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1741 	 *
   1742 	 *  82571 & 82572: Errata 63
   1743 	 */
   1744 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1745 	    || (sc->sc_type == WM_T_82572))
   1746 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1747 
   1748 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1749 	    || (sc->sc_type == WM_T_82580)
   1750 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1751 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1752 		sc->sc_flags |= WM_F_NEWQUEUE;
   1753 
   1754 	/* Set device properties (mactype) */
   1755 	dict = device_properties(sc->sc_dev);
   1756 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1757 
   1758 	/*
    1759 	 * Map the device.  All devices support memory-mapped access,
   1760 	 * and it is really required for normal operation.
   1761 	 */
   1762 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1763 	switch (memtype) {
   1764 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1765 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1766 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1767 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1768 		break;
   1769 	default:
   1770 		memh_valid = 0;
   1771 		break;
   1772 	}
   1773 
   1774 	if (memh_valid) {
   1775 		sc->sc_st = memt;
   1776 		sc->sc_sh = memh;
   1777 		sc->sc_ss = memsize;
   1778 	} else {
   1779 		aprint_error_dev(sc->sc_dev,
   1780 		    "unable to map device registers\n");
   1781 		return;
   1782 	}
   1783 
   1784 	/*
   1785 	 * In addition, i82544 and later support I/O mapped indirect
   1786 	 * register access.  It is not desirable (nor supported in
   1787 	 * this driver) to use it for normal operation, though it is
   1788 	 * required to work around bugs in some chip versions.
   1789 	 */
   1790 	if (sc->sc_type >= WM_T_82544) {
   1791 		/* First we have to find the I/O BAR. */
   1792 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1793 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1794 			if (memtype == PCI_MAPREG_TYPE_IO)
   1795 				break;
   1796 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1797 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1798 				i += 4;	/* skip high bits, too */
   1799 		}
   1800 		if (i < PCI_MAPREG_END) {
   1801 			/*
    1802 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1803 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1804 			 * That is no problem, because the newer chips don't
    1805 			 * have this bug.
    1806 			 *
    1807 			 * The i8254x apparently doesn't respond when the
    1808 			 * I/O BAR is 0, which looks as if it has not
    1809 			 * been configured.
   1810 			 */
   1811 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1812 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1813 				aprint_error_dev(sc->sc_dev,
   1814 				    "WARNING: I/O BAR at zero.\n");
   1815 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1816 					0, &sc->sc_iot, &sc->sc_ioh,
   1817 					NULL, &sc->sc_ios) == 0) {
   1818 				sc->sc_flags |= WM_F_IOH_VALID;
   1819 			} else {
   1820 				aprint_error_dev(sc->sc_dev,
   1821 				    "WARNING: unable to map I/O space\n");
   1822 			}
   1823 		}
   1825 	}
   1826 
   1827 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1828 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1829 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1830 	if (sc->sc_type < WM_T_82542_2_1)
   1831 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1832 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1833 
   1834 	/* power up chip */
   1835 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1836 	    NULL)) && error != EOPNOTSUPP) {
   1837 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1838 		return;
   1839 	}
   1840 
   1841 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1842 
   1843 	/* Allocation settings */
   1844 	max_type = PCI_INTR_TYPE_MSIX;
   1845 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1846 	counts[PCI_INTR_TYPE_MSI] = 1;
   1847 	counts[PCI_INTR_TYPE_INTX] = 1;
   1848 	/* overridden by disable flags */
   1849 	if (wm_disable_msi != 0) {
   1850 		counts[PCI_INTR_TYPE_MSI] = 0;
   1851 		if (wm_disable_msix != 0) {
   1852 			max_type = PCI_INTR_TYPE_INTX;
   1853 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1854 		}
   1855 	} else if (wm_disable_msix != 0) {
   1856 		max_type = PCI_INTR_TYPE_MSI;
   1857 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1858 	}
   1859 
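         	/*
         	 * The fallback order is MSI-X -> MSI -> INTx: if setting up one
         	 * interrupt type fails below, its count is zeroed, max_type is
         	 * lowered and the allocation is retried here.
         	 */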
   1860 alloc_retry:
   1861 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1862 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1863 		return;
   1864 	}
   1865 
   1866 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1867 		error = wm_setup_msix(sc);
   1868 		if (error) {
   1869 			pci_intr_release(pc, sc->sc_intrs,
   1870 			    counts[PCI_INTR_TYPE_MSIX]);
   1871 
   1872 			/* Setup for MSI: Disable MSI-X */
   1873 			max_type = PCI_INTR_TYPE_MSI;
   1874 			counts[PCI_INTR_TYPE_MSI] = 1;
   1875 			counts[PCI_INTR_TYPE_INTX] = 1;
   1876 			goto alloc_retry;
   1877 		}
    1878 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1879 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1880 		error = wm_setup_legacy(sc);
   1881 		if (error) {
   1882 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1883 			    counts[PCI_INTR_TYPE_MSI]);
   1884 
   1885 			/* The next try is for INTx: Disable MSI */
   1886 			max_type = PCI_INTR_TYPE_INTX;
   1887 			counts[PCI_INTR_TYPE_INTX] = 1;
   1888 			goto alloc_retry;
   1889 		}
   1890 	} else {
   1891 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1892 		error = wm_setup_legacy(sc);
   1893 		if (error) {
   1894 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1895 			    counts[PCI_INTR_TYPE_INTX]);
   1896 			return;
   1897 		}
   1898 	}
   1899 
   1900 	/*
    1901 	 * Check the function ID (the port number on a multi-port chip).
   1902 	 */
   1903 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1904 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1905 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1906 	    || (sc->sc_type == WM_T_82580)
   1907 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1908 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1909 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1910 	else
   1911 		sc->sc_funcid = 0;
   1912 
   1913 	/*
   1914 	 * Determine a few things about the bus we're connected to.
   1915 	 */
   1916 	if (sc->sc_type < WM_T_82543) {
   1917 		/* We don't really know the bus characteristics here. */
   1918 		sc->sc_bus_speed = 33;
   1919 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1920 		/*
    1921 		 * CSA (Communication Streaming Architecture) is about as fast
    1922 		 * as a 32-bit 66MHz PCI bus.
   1923 		 */
   1924 		sc->sc_flags |= WM_F_CSA;
   1925 		sc->sc_bus_speed = 66;
   1926 		aprint_verbose_dev(sc->sc_dev,
   1927 		    "Communication Streaming Architecture\n");
   1928 		if (sc->sc_type == WM_T_82547) {
   1929 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1930 			callout_setfunc(&sc->sc_txfifo_ch,
   1931 					wm_82547_txfifo_stall, sc);
   1932 			aprint_verbose_dev(sc->sc_dev,
   1933 			    "using 82547 Tx FIFO stall work-around\n");
   1934 		}
   1935 	} else if (sc->sc_type >= WM_T_82571) {
   1936 		sc->sc_flags |= WM_F_PCIE;
   1937 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1938 		    && (sc->sc_type != WM_T_ICH10)
   1939 		    && (sc->sc_type != WM_T_PCH)
   1940 		    && (sc->sc_type != WM_T_PCH2)
   1941 		    && (sc->sc_type != WM_T_PCH_LPT)
   1942 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1943 			/* ICH* and PCH* have no PCIe capability registers */
   1944 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1945 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1946 				NULL) == 0)
   1947 				aprint_error_dev(sc->sc_dev,
   1948 				    "unable to find PCIe capability\n");
   1949 		}
   1950 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1951 	} else {
   1952 		reg = CSR_READ(sc, WMREG_STATUS);
   1953 		if (reg & STATUS_BUS64)
   1954 			sc->sc_flags |= WM_F_BUS64;
   1955 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1956 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1957 
   1958 			sc->sc_flags |= WM_F_PCIX;
   1959 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1960 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1961 				aprint_error_dev(sc->sc_dev,
   1962 				    "unable to find PCIX capability\n");
   1963 			else if (sc->sc_type != WM_T_82545_3 &&
   1964 				 sc->sc_type != WM_T_82546_3) {
   1965 				/*
   1966 				 * Work around a problem caused by the BIOS
   1967 				 * setting the max memory read byte count
   1968 				 * incorrectly.
   1969 				 */
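         				/*
         				 * MMRBC is encoded as a power of
         				 * two, 512 << n bytes, so 0..3 map
         				 * to 512/1024/2048/4096.
         				 */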
   1970 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1971 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1972 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1973 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1974 
   1975 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1976 				    PCIX_CMD_BYTECNT_SHIFT;
   1977 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1978 				    PCIX_STATUS_MAXB_SHIFT;
   1979 				if (bytecnt > maxb) {
   1980 					aprint_verbose_dev(sc->sc_dev,
   1981 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1982 					    512 << bytecnt, 512 << maxb);
   1983 					pcix_cmd = (pcix_cmd &
   1984 					    ~PCIX_CMD_BYTECNT_MASK) |
   1985 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1986 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1987 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1988 					    pcix_cmd);
   1989 				}
   1990 			}
   1991 		}
   1992 		/*
   1993 		 * The quad port adapter is special; it has a PCIX-PCIX
   1994 		 * bridge on the board, and can run the secondary bus at
   1995 		 * a higher speed.
   1996 		 */
   1997 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1998 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1999 								      : 66;
   2000 		} else if (sc->sc_flags & WM_F_PCIX) {
   2001 			switch (reg & STATUS_PCIXSPD_MASK) {
   2002 			case STATUS_PCIXSPD_50_66:
   2003 				sc->sc_bus_speed = 66;
   2004 				break;
   2005 			case STATUS_PCIXSPD_66_100:
   2006 				sc->sc_bus_speed = 100;
   2007 				break;
   2008 			case STATUS_PCIXSPD_100_133:
   2009 				sc->sc_bus_speed = 133;
   2010 				break;
   2011 			default:
   2012 				aprint_error_dev(sc->sc_dev,
   2013 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2014 				    reg & STATUS_PCIXSPD_MASK);
   2015 				sc->sc_bus_speed = 66;
   2016 				break;
   2017 			}
   2018 		} else
   2019 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2020 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2021 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2022 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2023 	}
   2024 
    2025 	/* Clear interesting stat counters; they are cleared on read. */
   2026 	CSR_READ(sc, WMREG_COLC);
   2027 	CSR_READ(sc, WMREG_RXERRC);
   2028 
   2029 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2030 	    || (sc->sc_type >= WM_T_ICH8))
   2031 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2032 	if (sc->sc_type >= WM_T_ICH8)
   2033 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2034 
   2035 	/* Set PHY, NVM mutex related stuff */
   2036 	switch (sc->sc_type) {
   2037 	case WM_T_82542_2_0:
   2038 	case WM_T_82542_2_1:
   2039 	case WM_T_82543:
   2040 	case WM_T_82544:
   2041 		/* Microwire */
   2042 		sc->sc_nvm_wordsize = 64;
   2043 		sc->sc_nvm_addrbits = 6;
   2044 		break;
   2045 	case WM_T_82540:
   2046 	case WM_T_82545:
   2047 	case WM_T_82545_3:
   2048 	case WM_T_82546:
   2049 	case WM_T_82546_3:
   2050 		/* Microwire */
   2051 		reg = CSR_READ(sc, WMREG_EECD);
   2052 		if (reg & EECD_EE_SIZE) {
   2053 			sc->sc_nvm_wordsize = 256;
   2054 			sc->sc_nvm_addrbits = 8;
   2055 		} else {
   2056 			sc->sc_nvm_wordsize = 64;
   2057 			sc->sc_nvm_addrbits = 6;
   2058 		}
   2059 		sc->sc_flags |= WM_F_LOCK_EECD;
   2060 		break;
   2061 	case WM_T_82541:
   2062 	case WM_T_82541_2:
   2063 	case WM_T_82547:
   2064 	case WM_T_82547_2:
   2065 		sc->sc_flags |= WM_F_LOCK_EECD;
   2066 		reg = CSR_READ(sc, WMREG_EECD);
   2067 		if (reg & EECD_EE_TYPE) {
   2068 			/* SPI */
   2069 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2070 			wm_nvm_set_addrbits_size_eecd(sc);
   2071 		} else {
   2072 			/* Microwire */
   2073 			if ((reg & EECD_EE_ABITS) != 0) {
   2074 				sc->sc_nvm_wordsize = 256;
   2075 				sc->sc_nvm_addrbits = 8;
   2076 			} else {
   2077 				sc->sc_nvm_wordsize = 64;
   2078 				sc->sc_nvm_addrbits = 6;
   2079 			}
   2080 		}
   2081 		break;
   2082 	case WM_T_82571:
   2083 	case WM_T_82572:
   2084 		/* SPI */
   2085 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2086 		wm_nvm_set_addrbits_size_eecd(sc);
   2087 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2088 		sc->phy.acquire = wm_get_swsm_semaphore;
   2089 		sc->phy.release = wm_put_swsm_semaphore;
   2090 		break;
   2091 	case WM_T_82573:
   2092 	case WM_T_82574:
   2093 	case WM_T_82583:
   2094 		if (sc->sc_type == WM_T_82573) {
   2095 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2096 			sc->phy.acquire = wm_get_swsm_semaphore;
   2097 			sc->phy.release = wm_put_swsm_semaphore;
   2098 		} else {
   2099 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2100 			/* Both PHY and NVM use the same semaphore. */
    2101 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2102 			sc->phy.release = wm_put_swfwhw_semaphore;
   2105 		}
   2106 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2107 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2108 			sc->sc_nvm_wordsize = 2048;
   2109 		} else {
   2110 			/* SPI */
   2111 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2112 			wm_nvm_set_addrbits_size_eecd(sc);
   2113 		}
   2114 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2115 		break;
   2116 	case WM_T_82575:
   2117 	case WM_T_82576:
   2118 	case WM_T_82580:
   2119 	case WM_T_I350:
   2120 	case WM_T_I354:
   2121 	case WM_T_80003:
   2122 		/* SPI */
   2123 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2124 		wm_nvm_set_addrbits_size_eecd(sc);
   2125 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2126 		    | WM_F_LOCK_SWSM;
   2127 		sc->phy.acquire = wm_get_phy_82575;
   2128 		sc->phy.release = wm_put_phy_82575;
   2129 		break;
   2130 	case WM_T_ICH8:
   2131 	case WM_T_ICH9:
   2132 	case WM_T_ICH10:
   2133 	case WM_T_PCH:
   2134 	case WM_T_PCH2:
   2135 	case WM_T_PCH_LPT:
   2136 		/* FLASH */
   2137 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2138 		sc->sc_nvm_wordsize = 2048;
   2139 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2140 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2141 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2142 			aprint_error_dev(sc->sc_dev,
   2143 			    "can't map FLASH registers\n");
   2144 			goto out;
   2145 		}
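         		/*
         		 * GFPREG describes the NVM portion of the flash in
         		 * sector units: the base in the low bits and the limit
         		 * above bit 16, both masked with ICH_GFPREG_BASE_MASK.
         		 * The computation below converts sectors to bytes, then
         		 * to 16-bit words, and halves the result because the
         		 * region is assumed to hold two banks (as for PCH_SPT
         		 * below).
         		 */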
   2146 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2147 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2148 		    ICH_FLASH_SECTOR_SIZE;
   2149 		sc->sc_ich8_flash_bank_size =
   2150 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2151 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2152 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2153 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2154 		sc->sc_flashreg_offset = 0;
   2155 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2156 		sc->phy.release = wm_put_swflag_ich8lan;
   2157 		break;
   2158 	case WM_T_PCH_SPT:
   2159 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2160 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2161 		sc->sc_flasht = sc->sc_st;
   2162 		sc->sc_flashh = sc->sc_sh;
   2163 		sc->sc_ich8_flash_base = 0;
   2164 		sc->sc_nvm_wordsize =
   2165 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2166 			* NVM_SIZE_MULTIPLIER;
    2167 		/* This is the size in bytes; we want words. */
   2168 		sc->sc_nvm_wordsize /= 2;
   2169 		/* assume 2 banks */
   2170 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2171 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2172 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2173 		sc->phy.release = wm_put_swflag_ich8lan;
   2174 		break;
   2175 	case WM_T_I210:
   2176 	case WM_T_I211:
   2177 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2178 			wm_nvm_set_addrbits_size_eecd(sc);
   2179 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2180 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2181 		} else {
   2182 			sc->sc_nvm_wordsize = INVM_SIZE;
   2183 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2184 		}
   2185 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2186 		sc->phy.acquire = wm_get_phy_82575;
   2187 		sc->phy.release = wm_put_phy_82575;
   2188 		break;
   2189 	default:
   2190 		break;
   2191 	}
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
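         	/*
         	 * If SMBI is still set at this point, firmware (e.g. an old
         	 * boot agent, per the message below) left the software
         	 * semaphore held; clear it so the first NVM/PHY access
         	 * doesn't stall waiting for it.
         	 */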
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
    2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
   2231 		/*
    2232 		 * Check a second time because some PCI-e parts fail the
    2233 		 * first check due to the link being in a sleep state.
   2234 		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2240 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2241 	else {
   2242 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2243 		    sc->sc_nvm_wordsize);
   2244 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2245 			aprint_verbose("iNVM");
   2246 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2247 			aprint_verbose("FLASH(HW)");
   2248 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2249 			aprint_verbose("FLASH");
   2250 		else {
   2251 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2252 				eetype = "SPI";
   2253 			else
   2254 				eetype = "MicroWire";
   2255 			aprint_verbose("(%d address bits) %s EEPROM",
   2256 			    sc->sc_nvm_addrbits, eetype);
   2257 		}
   2258 	}
   2259 	wm_nvm_version(sc);
   2260 	aprint_verbose("\n");
   2261 
   2262 	/*
    2263 	 * XXX This is the first call of wm_gmii_setup_phytype. The result
    2264 	 * might be incorrect.
   2265 	 */
   2266 	wm_gmii_setup_phytype(sc, 0, 0);
   2267 
   2268 	/* Reset the chip to a known state. */
   2269 	wm_reset(sc);
   2270 
   2271 	/* Check for I21[01] PLL workaround */
   2272 	if (sc->sc_type == WM_T_I210)
   2273 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2274 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2275 		/* NVM image release 3.25 has a workaround */
   2276 		if ((sc->sc_nvm_ver_major < 3)
   2277 		    || ((sc->sc_nvm_ver_major == 3)
   2278 			&& (sc->sc_nvm_ver_minor < 25))) {
   2279 			aprint_verbose_dev(sc->sc_dev,
   2280 			    "ROM image version %d.%d is older than 3.25\n",
   2281 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2282 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2283 		}
   2284 	}
   2285 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2286 		wm_pll_workaround_i210(sc);
   2287 
   2288 	wm_get_wakeup(sc);
   2289 
   2290 	/* Non-AMT based hardware can now take control from firmware */
   2291 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2292 		wm_get_hw_control(sc);
   2293 
   2294 	/*
   2295 	 * Read the Ethernet address from the EEPROM, if not first found
   2296 	 * in device properties.
   2297 	 */
   2298 	ea = prop_dictionary_get(dict, "mac-address");
   2299 	if (ea != NULL) {
   2300 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2301 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2302 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2303 	} else {
   2304 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2305 			aprint_error_dev(sc->sc_dev,
   2306 			    "unable to read Ethernet address\n");
   2307 			goto out;
   2308 		}
   2309 	}
   2310 
   2311 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2312 	    ether_sprintf(enaddr));
   2313 
   2314 	/*
   2315 	 * Read the config info from the EEPROM, and set up various
   2316 	 * bits in the control registers based on their contents.
   2317 	 */
   2318 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2319 	if (pn != NULL) {
   2320 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2321 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2322 	} else {
   2323 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2324 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2325 			goto out;
   2326 		}
   2327 	}
   2328 
   2329 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2330 	if (pn != NULL) {
   2331 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2332 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2333 	} else {
   2334 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2335 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2336 			goto out;
   2337 		}
   2338 	}
   2339 
   2340 	/* check for WM_F_WOL */
   2341 	switch (sc->sc_type) {
   2342 	case WM_T_82542_2_0:
   2343 	case WM_T_82542_2_1:
   2344 	case WM_T_82543:
   2345 		/* dummy? */
   2346 		eeprom_data = 0;
   2347 		apme_mask = NVM_CFG3_APME;
   2348 		break;
   2349 	case WM_T_82544:
   2350 		apme_mask = NVM_CFG2_82544_APM_EN;
   2351 		eeprom_data = cfg2;
   2352 		break;
   2353 	case WM_T_82546:
   2354 	case WM_T_82546_3:
   2355 	case WM_T_82571:
   2356 	case WM_T_82572:
   2357 	case WM_T_82573:
   2358 	case WM_T_82574:
   2359 	case WM_T_82583:
   2360 	case WM_T_80003:
   2361 	default:
   2362 		apme_mask = NVM_CFG3_APME;
   2363 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2364 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2365 		break;
   2366 	case WM_T_82575:
   2367 	case WM_T_82576:
   2368 	case WM_T_82580:
   2369 	case WM_T_I350:
   2370 	case WM_T_I354: /* XXX ok? */
   2371 	case WM_T_ICH8:
   2372 	case WM_T_ICH9:
   2373 	case WM_T_ICH10:
   2374 	case WM_T_PCH:
   2375 	case WM_T_PCH2:
   2376 	case WM_T_PCH_LPT:
   2377 	case WM_T_PCH_SPT:
   2378 		/* XXX The funcid should be checked on some devices */
   2379 		apme_mask = WUC_APME;
   2380 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2381 		break;
   2382 	}
   2383 
   2384 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2385 	if ((eeprom_data & apme_mask) != 0)
   2386 		sc->sc_flags |= WM_F_WOL;
   2387 
   2388 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2389 		/* Check NVM for autonegotiation */
   2390 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2391 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2392 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2393 		}
   2394 	}
   2395 
   2396 	/*
    2397 	 * XXX Some multi-port cards need special handling to disable
    2398 	 * a particular port.
   2399 	 */
   2400 
   2401 	if (sc->sc_type >= WM_T_82544) {
   2402 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2403 		if (pn != NULL) {
   2404 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2405 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2406 		} else {
   2407 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2408 				aprint_error_dev(sc->sc_dev,
   2409 				    "unable to read SWDPIN\n");
   2410 				goto out;
   2411 			}
   2412 		}
   2413 	}
   2414 
   2415 	if (cfg1 & NVM_CFG1_ILOS)
   2416 		sc->sc_ctrl |= CTRL_ILOS;
   2417 
   2418 	/*
   2419 	 * XXX
    2420 	 * This code isn't correct because pins 2 and 3 are located
    2421 	 * at different positions on newer chips. Check all the datasheets.
    2422 	 *
    2423 	 * Until this is resolved, only apply it to the 82580 and older chips.
   2424 	 */
   2425 	if (sc->sc_type <= WM_T_82580) {
   2426 		if (sc->sc_type >= WM_T_82544) {
   2427 			sc->sc_ctrl |=
   2428 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2429 			    CTRL_SWDPIO_SHIFT;
   2430 			sc->sc_ctrl |=
   2431 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2432 			    CTRL_SWDPINS_SHIFT;
   2433 		} else {
   2434 			sc->sc_ctrl |=
   2435 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2436 			    CTRL_SWDPIO_SHIFT;
   2437 		}
   2438 	}
   2439 
   2440 	/* XXX For other than 82580? */
   2441 	if (sc->sc_type == WM_T_82580) {
   2442 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2443 		if (nvmword & __BIT(13))
   2444 			sc->sc_ctrl |= CTRL_ILOS;
   2445 	}
   2446 
   2447 #if 0
   2448 	if (sc->sc_type >= WM_T_82544) {
   2449 		if (cfg1 & NVM_CFG1_IPS0)
   2450 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2451 		if (cfg1 & NVM_CFG1_IPS1)
   2452 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2453 		sc->sc_ctrl_ext |=
   2454 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2455 		    CTRL_EXT_SWDPIO_SHIFT;
   2456 		sc->sc_ctrl_ext |=
   2457 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2458 		    CTRL_EXT_SWDPINS_SHIFT;
   2459 	} else {
   2460 		sc->sc_ctrl_ext |=
   2461 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2462 		    CTRL_EXT_SWDPIO_SHIFT;
   2463 	}
   2464 #endif
   2465 
   2466 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2467 #if 0
   2468 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2469 #endif
   2470 
   2471 	if (sc->sc_type == WM_T_PCH) {
   2472 		uint16_t val;
   2473 
   2474 		/* Save the NVM K1 bit setting */
   2475 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2476 
   2477 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2478 			sc->sc_nvm_k1_enabled = 1;
   2479 		else
   2480 			sc->sc_nvm_k1_enabled = 0;
   2481 	}
   2482 
   2483 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2484 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2485 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2486 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2487 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2488 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2489 		/* Copper only */
   2490 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2491 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2492 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2493 	    || (sc->sc_type == WM_T_I211)) {
   2494 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2495 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2496 		switch (link_mode) {
   2497 		case CTRL_EXT_LINK_MODE_1000KX:
   2498 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2499 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2500 			break;
   2501 		case CTRL_EXT_LINK_MODE_SGMII:
   2502 			if (wm_sgmii_uses_mdio(sc)) {
   2503 				aprint_verbose_dev(sc->sc_dev,
   2504 				    "SGMII(MDIO)\n");
   2505 				sc->sc_flags |= WM_F_SGMII;
   2506 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2507 				break;
   2508 			}
   2509 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2510 			/*FALLTHROUGH*/
   2511 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2512 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2513 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2514 				if (link_mode
   2515 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2516 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2517 					sc->sc_flags |= WM_F_SGMII;
   2518 				} else {
   2519 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2520 					aprint_verbose_dev(sc->sc_dev,
   2521 					    "SERDES\n");
   2522 				}
   2523 				break;
   2524 			}
   2525 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2526 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2527 
   2528 			/* Change current link mode setting */
   2529 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2530 			switch (sc->sc_mediatype) {
   2531 			case WM_MEDIATYPE_COPPER:
   2532 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2533 				break;
   2534 			case WM_MEDIATYPE_SERDES:
   2535 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2536 				break;
   2537 			default:
   2538 				break;
   2539 			}
   2540 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2541 			break;
   2542 		case CTRL_EXT_LINK_MODE_GMII:
   2543 		default:
   2544 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2546 			break;
   2547 		}
   2548 
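         		/* The I2C interface is needed only for SGMII/SFP media. */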
    2550 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2551 			reg |= CTRL_EXT_I2C_ENA;
    2552 		else
    2553 			reg &= ~CTRL_EXT_I2C_ENA;
   2554 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2555 	} else if (sc->sc_type < WM_T_82543 ||
   2556 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2557 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2558 			aprint_error_dev(sc->sc_dev,
   2559 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2560 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2561 		}
   2562 	} else {
   2563 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2564 			aprint_error_dev(sc->sc_dev,
   2565 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2566 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2567 		}
   2568 	}
   2569 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2570 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2571 
   2572 	/* Set device properties (macflags) */
   2573 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2574 
   2575 	/* Initialize the media structures accordingly. */
   2576 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2577 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2578 	else
   2579 		wm_tbi_mediainit(sc); /* All others */
   2580 
   2581 	ifp = &sc->sc_ethercom.ec_if;
   2582 	xname = device_xname(sc->sc_dev);
   2583 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2584 	ifp->if_softc = sc;
   2585 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2586 #ifdef WM_MPSAFE
   2587 	ifp->if_extflags = IFEF_START_MPSAFE;
   2588 #endif
   2589 	ifp->if_ioctl = wm_ioctl;
   2590 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2591 		ifp->if_start = wm_nq_start;
   2592 		/*
    2593 		 * When the number of CPUs is one and the controller can use
    2594 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2595 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2596 		 * other for link status changes.
   2597 		 * In this situation, wm_nq_transmit() is disadvantageous
   2598 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2599 		 */
   2600 		if (wm_is_using_multiqueue(sc))
   2601 			ifp->if_transmit = wm_nq_transmit;
   2602 	} else {
   2603 		ifp->if_start = wm_start;
   2604 		/*
    2605 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2606 		 */
   2607 		if (wm_is_using_multiqueue(sc))
   2608 			ifp->if_transmit = wm_transmit;
   2609 	}
   2610 	ifp->if_watchdog = wm_watchdog;
   2611 	ifp->if_init = wm_init;
   2612 	ifp->if_stop = wm_stop;
   2613 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2614 	IFQ_SET_READY(&ifp->if_snd);
   2615 
   2616 	/* Check for jumbo frame */
   2617 	switch (sc->sc_type) {
   2618 	case WM_T_82573:
   2619 		/* XXX limited to 9234 if ASPM is disabled */
   2620 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2621 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2622 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2623 		break;
   2624 	case WM_T_82571:
   2625 	case WM_T_82572:
   2626 	case WM_T_82574:
   2627 	case WM_T_82575:
   2628 	case WM_T_82576:
   2629 	case WM_T_82580:
   2630 	case WM_T_I350:
    2631 	case WM_T_I354: /* XXX ok? */
   2632 	case WM_T_I210:
   2633 	case WM_T_I211:
   2634 	case WM_T_80003:
   2635 	case WM_T_ICH9:
   2636 	case WM_T_ICH10:
   2637 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2638 	case WM_T_PCH_LPT:
   2639 	case WM_T_PCH_SPT:
   2640 		/* XXX limited to 9234 */
   2641 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2642 		break;
   2643 	case WM_T_PCH:
   2644 		/* XXX limited to 4096 */
   2645 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2646 		break;
   2647 	case WM_T_82542_2_0:
   2648 	case WM_T_82542_2_1:
   2649 	case WM_T_82583:
   2650 	case WM_T_ICH8:
   2651 		/* No support for jumbo frame */
   2652 		break;
   2653 	default:
   2654 		/* ETHER_MAX_LEN_JUMBO */
   2655 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2656 		break;
   2657 	}
   2658 
   2659 	/* If we're a i82543 or greater, we can support VLANs. */
   2660 	if (sc->sc_type >= WM_T_82543)
   2661 		sc->sc_ethercom.ec_capabilities |=
   2662 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2663 
   2664 	/*
    2665 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2666 	 * on i82543 and later.
   2667 	 */
   2668 	if (sc->sc_type >= WM_T_82543) {
   2669 		ifp->if_capabilities |=
   2670 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2671 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2672 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2673 		    IFCAP_CSUM_TCPv6_Tx |
   2674 		    IFCAP_CSUM_UDPv6_Tx;
   2675 	}
   2676 
   2677 	/*
   2678 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2679 	 *
   2680 	 *	82541GI (8086:1076) ... no
   2681 	 *	82572EI (8086:10b9) ... yes
   2682 	 */
   2683 	if (sc->sc_type >= WM_T_82571) {
   2684 		ifp->if_capabilities |=
   2685 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2686 	}
   2687 
   2688 	/*
   2689 	 * If we're a i82544 or greater (except i82547), we can do
   2690 	 * TCP segmentation offload.
   2691 	 */
   2692 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2693 		ifp->if_capabilities |= IFCAP_TSOv4;
   2694 	}
   2695 
   2696 	if (sc->sc_type >= WM_T_82571) {
   2697 		ifp->if_capabilities |= IFCAP_TSOv6;
   2698 	}
   2699 
   2700 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2701 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2702 
   2703 #ifdef WM_MPSAFE
   2704 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2705 #else
   2706 	sc->sc_core_lock = NULL;
   2707 #endif
   2708 
   2709 	/* Attach the interface. */
   2710 	if_initialize(ifp);
   2711 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2712 	ether_ifattach(ifp, enaddr);
   2713 	if_register(ifp);
   2714 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2715 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2716 			  RND_FLAG_DEFAULT);
   2717 
   2718 #ifdef WM_EVENT_COUNTERS
   2719 	/* Attach event counters. */
   2720 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2721 	    NULL, xname, "linkintr");
   2722 
   2723 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "tx_xoff");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "tx_xon");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_xoff");
   2729 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2730 	    NULL, xname, "rx_xon");
   2731 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2732 	    NULL, xname, "rx_macctl");
   2733 #endif /* WM_EVENT_COUNTERS */
   2734 
   2735 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2736 		pmf_class_network_register(self, ifp);
   2737 	else
   2738 		aprint_error_dev(self, "couldn't establish power handler\n");
   2739 
   2740 	sc->sc_flags |= WM_F_ATTACHED;
   2741  out:
   2742 	return;
   2743 }
   2744 
   2745 /* The detach function (ca_detach) */
   2746 static int
   2747 wm_detach(device_t self, int flags __unused)
   2748 {
   2749 	struct wm_softc *sc = device_private(self);
   2750 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2751 	int i;
   2752 
   2753 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2754 		return 0;
   2755 
    2756 	/* Stop the interface.  Callouts are stopped in wm_stop(). */
   2757 	wm_stop(ifp, 1);
   2758 
   2759 	pmf_device_deregister(self);
   2760 
   2761 #ifdef WM_EVENT_COUNTERS
   2762 	evcnt_detach(&sc->sc_ev_linkintr);
   2763 
   2764 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2765 	evcnt_detach(&sc->sc_ev_tx_xon);
   2766 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2767 	evcnt_detach(&sc->sc_ev_rx_xon);
   2768 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2769 #endif /* WM_EVENT_COUNTERS */
   2770 
   2771 	/* Tell the firmware about the release */
   2772 	WM_CORE_LOCK(sc);
   2773 	wm_release_manageability(sc);
   2774 	wm_release_hw_control(sc);
   2775 	wm_enable_wakeup(sc);
   2776 	WM_CORE_UNLOCK(sc);
   2777 
   2778 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2779 
   2780 	/* Delete all remaining media. */
   2781 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2782 
   2783 	ether_ifdetach(ifp);
   2784 	if_detach(ifp);
   2785 	if_percpuq_destroy(sc->sc_ipq);
   2786 
   2787 	/* Unload RX dmamaps and free mbufs */
   2788 	for (i = 0; i < sc->sc_nqueues; i++) {
   2789 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2790 		mutex_enter(rxq->rxq_lock);
   2791 		wm_rxdrain(rxq);
   2792 		mutex_exit(rxq->rxq_lock);
   2793 	}
   2794 	/* Must unlock here */
   2795 
   2796 	/* Disestablish the interrupt handler */
   2797 	for (i = 0; i < sc->sc_nintrs; i++) {
   2798 		if (sc->sc_ihs[i] != NULL) {
   2799 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2800 			sc->sc_ihs[i] = NULL;
   2801 		}
   2802 	}
   2803 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2804 
   2805 	wm_free_txrx_queues(sc);
   2806 
   2807 	/* Unmap the registers */
   2808 	if (sc->sc_ss) {
   2809 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2810 		sc->sc_ss = 0;
   2811 	}
   2812 	if (sc->sc_ios) {
   2813 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2814 		sc->sc_ios = 0;
   2815 	}
   2816 	if (sc->sc_flashs) {
   2817 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2818 		sc->sc_flashs = 0;
   2819 	}
   2820 
   2821 	if (sc->sc_core_lock)
   2822 		mutex_obj_free(sc->sc_core_lock);
   2823 	if (sc->sc_ich_phymtx)
   2824 		mutex_obj_free(sc->sc_ich_phymtx);
   2825 	if (sc->sc_ich_nvmmtx)
   2826 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2827 
   2828 	return 0;
   2829 }
   2830 
   2831 static bool
   2832 wm_suspend(device_t self, const pmf_qual_t *qual)
   2833 {
   2834 	struct wm_softc *sc = device_private(self);
   2835 
   2836 	wm_release_manageability(sc);
   2837 	wm_release_hw_control(sc);
   2838 	wm_enable_wakeup(sc);
   2839 
   2840 	return true;
   2841 }
   2842 
   2843 static bool
   2844 wm_resume(device_t self, const pmf_qual_t *qual)
   2845 {
   2846 	struct wm_softc *sc = device_private(self);
   2847 
   2848 	wm_init_manageability(sc);
   2849 
   2850 	return true;
   2851 }
   2852 
   2853 /*
   2854  * wm_watchdog:		[ifnet interface function]
   2855  *
   2856  *	Watchdog timer handler.
   2857  */
   2858 static void
   2859 wm_watchdog(struct ifnet *ifp)
   2860 {
   2861 	int qid;
   2862 	struct wm_softc *sc = ifp->if_softc;
   2863 
   2864 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2865 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2866 
   2867 		wm_watchdog_txq(ifp, txq);
   2868 	}
   2869 
   2870 	/* Reset the interface. */
   2871 	(void) wm_init(ifp);
   2872 
   2873 	/*
    2874 	 * Some upper-layer processing, e.g. ALTQ or a single-CPU
    2875 	 * system, may still call ifp->if_start().
   2876 	 */
   2877 	/* Try to get more packets going. */
   2878 	ifp->if_start(ifp);
   2879 }
   2880 
   2881 static void
   2882 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2883 {
   2884 	struct wm_softc *sc = ifp->if_softc;
   2885 
   2886 	/*
   2887 	 * Since we're using delayed interrupts, sweep up
   2888 	 * before we report an error.
   2889 	 */
   2890 	mutex_enter(txq->txq_lock);
   2891 	wm_txeof(sc, txq);
   2892 	mutex_exit(txq->txq_lock);
   2893 
   2894 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2895 #ifdef WM_DEBUG
   2896 		int i, j;
   2897 		struct wm_txsoft *txs;
   2898 #endif
   2899 		log(LOG_ERR,
   2900 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2901 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2902 		    txq->txq_next);
   2903 		ifp->if_oerrors++;
   2904 #ifdef WM_DEBUG
    2905 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2906 		    i = WM_NEXTTXS(txq, i)) {
    2907 			txs = &txq->txq_soft[i];
    2908 			printf("txs %d tx %d -> %d\n",
    2909 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2910 			for (j = txs->txs_firstdesc; ;
    2911 			    j = WM_NEXTTX(txq, j)) {
    2912 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2913 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2914 				printf("\t %#08x%08x\n",
    2915 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2916 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2917 				if (j == txs->txs_lastdesc)
    2918 					break;
    2919 			}
    2920 		}
   2921 #endif
   2922 	}
   2923 }
   2924 
   2925 /*
   2926  * wm_tick:
   2927  *
   2928  *	One second timer, used to check link status, sweep up
   2929  *	completed transmit jobs, etc.
   2930  */
   2931 static void
   2932 wm_tick(void *arg)
   2933 {
   2934 	struct wm_softc *sc = arg;
   2935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2936 #ifndef WM_MPSAFE
   2937 	int s = splnet();
   2938 #endif
   2939 
   2940 	WM_CORE_LOCK(sc);
   2941 
   2942 	if (sc->sc_core_stopping)
   2943 		goto out;
   2944 
   2945 	if (sc->sc_type >= WM_T_82542_2_1) {
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2947 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2948 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2949 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2950 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2951 	}
   2952 
   2953 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2954 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2955 	    + CSR_READ(sc, WMREG_CRCERRS)
   2956 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2957 	    + CSR_READ(sc, WMREG_SYMERRC)
   2958 	    + CSR_READ(sc, WMREG_RXERRC)
   2959 	    + CSR_READ(sc, WMREG_SEC)
   2960 	    + CSR_READ(sc, WMREG_CEXTERR)
   2961 	    + CSR_READ(sc, WMREG_RLEC);
    2962 	/*
    2963 	 * WMREG_RNBC is incremented when there are no available buffers
    2964 	 * in host memory. It does not count dropped packets, because the
    2965 	 * ethernet controller can still receive packets in that case if
    2966 	 * there is space in the PHY's FIFO.
    2967 	 *
    2968 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    2969 	 * if_iqdrops.
    2970 	 */
   2971 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
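
	/*
	 * A minimal sketch (hypothetical sc_ev_rx_nobuf member, not part
	 * of this driver) of counting RNBC with a private event counter,
	 * as suggested above:
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rx_nobuf, EVCNT_TYPE_MISC,
	 *	    NULL, device_xname(sc->sc_dev), "rx_nobuf");
	 *	...
	 *	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
	 */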
   2972 
   2973 	if (sc->sc_flags & WM_F_HAS_MII)
   2974 		mii_tick(&sc->sc_mii);
   2975 	else if ((sc->sc_type >= WM_T_82575)
   2976 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2977 		wm_serdes_tick(sc);
   2978 	else
   2979 		wm_tbi_tick(sc);
   2980 
   2981 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2982 out:
   2983 	WM_CORE_UNLOCK(sc);
   2984 #ifndef WM_MPSAFE
   2985 	splx(s);
   2986 #endif
   2987 }
   2988 
   2989 static int
   2990 wm_ifflags_cb(struct ethercom *ec)
   2991 {
   2992 	struct ifnet *ifp = &ec->ec_if;
   2993 	struct wm_softc *sc = ifp->if_softc;
   2994 	int rc = 0;
   2995 
   2996 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2997 		device_xname(sc->sc_dev), __func__));
   2998 
   2999 	WM_CORE_LOCK(sc);
   3000 
   3001 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3002 	sc->sc_if_flags = ifp->if_flags;
   3003 
   3004 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3005 		rc = ENETRESET;
   3006 		goto out;
   3007 	}
   3008 
   3009 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3010 		wm_set_filter(sc);
   3011 
   3012 	wm_set_vlan(sc);
   3013 
   3014 out:
   3015 	WM_CORE_UNLOCK(sc);
   3016 
   3017 	return rc;
   3018 }
   3019 
   3020 /*
   3021  * wm_ioctl:		[ifnet interface function]
   3022  *
   3023  *	Handle control requests from the operator.
   3024  */
   3025 static int
   3026 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3027 {
   3028 	struct wm_softc *sc = ifp->if_softc;
   3029 	struct ifreq *ifr = (struct ifreq *) data;
   3030 	struct ifaddr *ifa = (struct ifaddr *)data;
   3031 	struct sockaddr_dl *sdl;
   3032 	int s, error;
   3033 
   3034 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3035 		device_xname(sc->sc_dev), __func__));
   3036 
   3037 #ifndef WM_MPSAFE
   3038 	s = splnet();
   3039 #endif
   3040 	switch (cmd) {
   3041 	case SIOCSIFMEDIA:
   3042 	case SIOCGIFMEDIA:
   3043 		WM_CORE_LOCK(sc);
   3044 		/* Flow control requires full-duplex mode. */
   3045 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3046 		    (ifr->ifr_media & IFM_FDX) == 0)
   3047 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3048 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3049 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3050 				/* We can do both TXPAUSE and RXPAUSE. */
   3051 				ifr->ifr_media |=
   3052 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3053 			}
   3054 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3055 		}
   3056 		WM_CORE_UNLOCK(sc);
   3057 #ifdef WM_MPSAFE
   3058 		s = splnet();
   3059 #endif
   3060 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3061 #ifdef WM_MPSAFE
   3062 		splx(s);
   3063 #endif
   3064 		break;
   3065 	case SIOCINITIFADDR:
   3066 		WM_CORE_LOCK(sc);
   3067 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3068 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3069 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3070 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3071 			/* unicast address is first multicast entry */
   3072 			wm_set_filter(sc);
   3073 			error = 0;
   3074 			WM_CORE_UNLOCK(sc);
   3075 			break;
   3076 		}
   3077 		WM_CORE_UNLOCK(sc);
   3078 		/*FALLTHROUGH*/
   3079 	default:
   3080 #ifdef WM_MPSAFE
   3081 		s = splnet();
   3082 #endif
   3083 		/* It may call wm_start, so unlock here */
   3084 		error = ether_ioctl(ifp, cmd, data);
   3085 #ifdef WM_MPSAFE
   3086 		splx(s);
   3087 #endif
   3088 		if (error != ENETRESET)
   3089 			break;
   3090 
   3091 		error = 0;
   3092 
   3093 		if (cmd == SIOCSIFCAP) {
   3094 			error = (*ifp->if_init)(ifp);
   3095 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3096 			;
   3097 		else if (ifp->if_flags & IFF_RUNNING) {
   3098 			/*
   3099 			 * Multicast list has changed; set the hardware filter
   3100 			 * accordingly.
   3101 			 */
   3102 			WM_CORE_LOCK(sc);
   3103 			wm_set_filter(sc);
   3104 			WM_CORE_UNLOCK(sc);
   3105 		}
   3106 		break;
   3107 	}
   3108 
   3109 #ifndef WM_MPSAFE
   3110 	splx(s);
   3111 #endif
   3112 	return error;
   3113 }
   3114 
   3115 /* MAC address related */
   3116 
   3117 /*
    3118  * Get the offset of the MAC address and return it.
    3119  * If an error occurs, offset 0 is used.
   3120  */
   3121 static uint16_t
   3122 wm_check_alt_mac_addr(struct wm_softc *sc)
   3123 {
   3124 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3125 	uint16_t offset = NVM_OFF_MACADDR;
   3126 
   3127 	/* Try to read alternative MAC address pointer */
   3128 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3129 		return 0;
   3130 
    3131 	/* Check whether the pointer is valid. */
   3132 	if ((offset == 0x0000) || (offset == 0xffff))
   3133 		return 0;
   3134 
   3135 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3136 	/*
    3137 	 * Check whether the alternative MAC address is valid.
    3138 	 * Some cards have a non-0xffff pointer but don't actually use
    3139 	 * an alternative MAC address.
    3140 	 *
    3141 	 * To validate, check that the multicast (group) bit is clear.
    3142 	 */
   3143 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3144 		if (((myea[0] & 0xff) & 0x01) == 0)
   3145 			return offset; /* Found */
   3146 
   3147 	/* Not found */
   3148 	return 0;
   3149 }
   3150 
   3151 static int
   3152 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3153 {
   3154 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3155 	uint16_t offset = NVM_OFF_MACADDR;
   3156 	int do_invert = 0;
   3157 
   3158 	switch (sc->sc_type) {
   3159 	case WM_T_82580:
   3160 	case WM_T_I350:
   3161 	case WM_T_I354:
   3162 		/* EEPROM Top Level Partitioning */
   3163 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3164 		break;
   3165 	case WM_T_82571:
   3166 	case WM_T_82575:
   3167 	case WM_T_82576:
   3168 	case WM_T_80003:
   3169 	case WM_T_I210:
   3170 	case WM_T_I211:
   3171 		offset = wm_check_alt_mac_addr(sc);
   3172 		if (offset == 0)
   3173 			if ((sc->sc_funcid & 0x01) == 1)
   3174 				do_invert = 1;
   3175 		break;
   3176 	default:
   3177 		if ((sc->sc_funcid & 0x01) == 1)
   3178 			do_invert = 1;
   3179 		break;
   3180 	}
   3181 
   3182 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3183 		goto bad;
   3184 
   3185 	enaddr[0] = myea[0] & 0xff;
   3186 	enaddr[1] = myea[0] >> 8;
   3187 	enaddr[2] = myea[1] & 0xff;
   3188 	enaddr[3] = myea[1] >> 8;
   3189 	enaddr[4] = myea[2] & 0xff;
   3190 	enaddr[5] = myea[2] >> 8;
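
	/*
	 * Illustration: NVM words are little-endian, so for
	 * myea[] = { 0x1100, 0x3322, 0x5544 } the station address
	 * unpacks to 00:11:22:33:44:55.
	 */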
   3191 
   3192 	/*
   3193 	 * Toggle the LSB of the MAC address on the second port
   3194 	 * of some dual port cards.
   3195 	 */
   3196 	if (do_invert != 0)
   3197 		enaddr[5] ^= 1;
   3198 
   3199 	return 0;
   3200 
   3201  bad:
   3202 	return -1;
   3203 }
   3204 
   3205 /*
   3206  * wm_set_ral:
   3207  *
    3208  *	Set an entry in the receive address list.
   3209  */
   3210 static void
   3211 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3212 {
   3213 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3214 	uint32_t wlock_mac;
   3215 	int rv;
   3216 
   3217 	if (enaddr != NULL) {
   3218 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3219 		    (enaddr[3] << 24);
   3220 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3221 		ral_hi |= RAL_AV;
   3222 	} else {
   3223 		ral_lo = 0;
   3224 		ral_hi = 0;
   3225 	}
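
	/*
	 * Illustration: for enaddr 00:11:22:33:44:55 this packs to
	 * ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544.
	 */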
   3226 
   3227 	switch (sc->sc_type) {
   3228 	case WM_T_82542_2_0:
   3229 	case WM_T_82542_2_1:
   3230 	case WM_T_82543:
   3231 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3232 		CSR_WRITE_FLUSH(sc);
   3233 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3234 		CSR_WRITE_FLUSH(sc);
   3235 		break;
   3236 	case WM_T_PCH2:
   3237 	case WM_T_PCH_LPT:
   3238 	case WM_T_PCH_SPT:
   3239 		if (idx == 0) {
   3240 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3241 			CSR_WRITE_FLUSH(sc);
   3242 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3243 			CSR_WRITE_FLUSH(sc);
   3244 			return;
   3245 		}
   3246 		if (sc->sc_type != WM_T_PCH2) {
   3247 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3248 			    FWSM_WLOCK_MAC);
   3249 			addrl = WMREG_SHRAL(idx - 1);
   3250 			addrh = WMREG_SHRAH(idx - 1);
   3251 		} else {
   3252 			wlock_mac = 0;
   3253 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3254 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3255 		}
   3256 
   3257 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3258 			rv = wm_get_swflag_ich8lan(sc);
   3259 			if (rv != 0)
   3260 				return;
   3261 			CSR_WRITE(sc, addrl, ral_lo);
   3262 			CSR_WRITE_FLUSH(sc);
   3263 			CSR_WRITE(sc, addrh, ral_hi);
   3264 			CSR_WRITE_FLUSH(sc);
   3265 			wm_put_swflag_ich8lan(sc);
   3266 		}
   3267 
   3268 		break;
   3269 	default:
   3270 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3271 		CSR_WRITE_FLUSH(sc);
   3272 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3273 		CSR_WRITE_FLUSH(sc);
   3274 		break;
   3275 	}
   3276 }
   3277 
   3278 /*
   3279  * wm_mchash:
   3280  *
    3281  *	Compute the hash of the multicast address for the 4096-bit
    3282  *	multicast filter (1024-bit on the ICH/PCH variants).
   3283  */
   3284 static uint32_t
   3285 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3286 {
   3287 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3288 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3289 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3290 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3291 	uint32_t hash;
   3292 
   3293 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3294 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3295 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3296 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3297 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3298 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3299 		return (hash & 0x3ff);
   3300 	}
   3301 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3302 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3303 
   3304 	return (hash & 0xfff);
   3305 }
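
/*
 * Worked example (illustration only): with sc_mchash_type == 0 on a
 * non-ICH chip, enaddr[4] = 0x44 and enaddr[5] = 0x55 give
 * hash = (0x44 >> 4) | (0x55 << 4) = 0x554, so wm_set_filter() below
 * sets bit (0x554 & 0x1f) = 20 in MTA register (0x554 >> 5) = 0x2a.
 */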
   3306 
   3307 /*
   3308  * wm_set_filter:
   3309  *
   3310  *	Set up the receive filter.
   3311  */
   3312 static void
   3313 wm_set_filter(struct wm_softc *sc)
   3314 {
   3315 	struct ethercom *ec = &sc->sc_ethercom;
   3316 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3317 	struct ether_multi *enm;
   3318 	struct ether_multistep step;
   3319 	bus_addr_t mta_reg;
   3320 	uint32_t hash, reg, bit;
   3321 	int i, size, ralmax;
   3322 
   3323 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3324 		device_xname(sc->sc_dev), __func__));
   3325 
   3326 	if (sc->sc_type >= WM_T_82544)
   3327 		mta_reg = WMREG_CORDOVA_MTA;
   3328 	else
   3329 		mta_reg = WMREG_MTA;
   3330 
   3331 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3332 
   3333 	if (ifp->if_flags & IFF_BROADCAST)
   3334 		sc->sc_rctl |= RCTL_BAM;
   3335 	if (ifp->if_flags & IFF_PROMISC) {
   3336 		sc->sc_rctl |= RCTL_UPE;
   3337 		goto allmulti;
   3338 	}
   3339 
   3340 	/*
   3341 	 * Set the station address in the first RAL slot, and
   3342 	 * clear the remaining slots.
   3343 	 */
   3344 	if (sc->sc_type == WM_T_ICH8)
    3345 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3346 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3347 	    || (sc->sc_type == WM_T_PCH))
   3348 		size = WM_RAL_TABSIZE_ICH8;
   3349 	else if (sc->sc_type == WM_T_PCH2)
   3350 		size = WM_RAL_TABSIZE_PCH2;
   3351 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3352 		size = WM_RAL_TABSIZE_PCH_LPT;
   3353 	else if (sc->sc_type == WM_T_82575)
   3354 		size = WM_RAL_TABSIZE_82575;
   3355 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3356 		size = WM_RAL_TABSIZE_82576;
   3357 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3358 		size = WM_RAL_TABSIZE_I350;
   3359 	else
   3360 		size = WM_RAL_TABSIZE;
   3361 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3362 
   3363 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3364 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3365 		switch (i) {
   3366 		case 0:
   3367 			/* We can use all entries */
   3368 			ralmax = size;
   3369 			break;
   3370 		case 1:
   3371 			/* Only RAR[0] */
   3372 			ralmax = 1;
   3373 			break;
   3374 		default:
   3375 			/* available SHRA + RAR[0] */
   3376 			ralmax = i + 1;
   3377 		}
   3378 	} else
   3379 		ralmax = size;
   3380 	for (i = 1; i < size; i++) {
   3381 		if (i < ralmax)
   3382 			wm_set_ral(sc, NULL, i);
   3383 	}
   3384 
   3385 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3386 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3387 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3388 	    || (sc->sc_type == WM_T_PCH_SPT))
   3389 		size = WM_ICH8_MC_TABSIZE;
   3390 	else
   3391 		size = WM_MC_TABSIZE;
   3392 	/* Clear out the multicast table. */
   3393 	for (i = 0; i < size; i++) {
   3394 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3395 		CSR_WRITE_FLUSH(sc);
   3396 	}
   3397 
   3398 	ETHER_LOCK(ec);
   3399 	ETHER_FIRST_MULTI(step, ec, enm);
   3400 	while (enm != NULL) {
   3401 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3402 			ETHER_UNLOCK(ec);
   3403 			/*
   3404 			 * We must listen to a range of multicast addresses.
   3405 			 * For now, just accept all multicasts, rather than
   3406 			 * trying to set only those filter bits needed to match
   3407 			 * the range.  (At this time, the only use of address
   3408 			 * ranges is for IP multicast routing, for which the
   3409 			 * range is big enough to require all bits set.)
   3410 			 */
   3411 			goto allmulti;
   3412 		}
   3413 
   3414 		hash = wm_mchash(sc, enm->enm_addrlo);
   3415 
   3416 		reg = (hash >> 5);
   3417 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3418 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3419 		    || (sc->sc_type == WM_T_PCH2)
   3420 		    || (sc->sc_type == WM_T_PCH_LPT)
   3421 		    || (sc->sc_type == WM_T_PCH_SPT))
   3422 			reg &= 0x1f;
   3423 		else
   3424 			reg &= 0x7f;
   3425 		bit = hash & 0x1f;
   3426 
   3427 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3428 		hash |= 1U << bit;
   3429 
   3430 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3431 			/*
   3432 			 * 82544 Errata 9: Certain register cannot be written
   3433 			 * with particular alignments in PCI-X bus operation
   3434 			 * (FCAH, MTA and VFTA).
   3435 			 */
   3436 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3437 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3438 			CSR_WRITE_FLUSH(sc);
   3439 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3440 			CSR_WRITE_FLUSH(sc);
   3441 		} else {
   3442 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3443 			CSR_WRITE_FLUSH(sc);
   3444 		}
   3445 
   3446 		ETHER_NEXT_MULTI(step, enm);
   3447 	}
   3448 	ETHER_UNLOCK(ec);
   3449 
   3450 	ifp->if_flags &= ~IFF_ALLMULTI;
   3451 	goto setit;
   3452 
   3453  allmulti:
   3454 	ifp->if_flags |= IFF_ALLMULTI;
   3455 	sc->sc_rctl |= RCTL_MPE;
   3456 
   3457  setit:
   3458 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3459 }
   3460 
   3461 /* Reset and init related */
   3462 
   3463 static void
   3464 wm_set_vlan(struct wm_softc *sc)
   3465 {
   3466 
   3467 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3468 		device_xname(sc->sc_dev), __func__));
   3469 
   3470 	/* Deal with VLAN enables. */
   3471 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3472 		sc->sc_ctrl |= CTRL_VME;
   3473 	else
   3474 		sc->sc_ctrl &= ~CTRL_VME;
   3475 
   3476 	/* Write the control registers. */
   3477 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3478 }
   3479 
   3480 static void
   3481 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3482 {
   3483 	uint32_t gcr;
   3484 	pcireg_t ctrl2;
   3485 
   3486 	gcr = CSR_READ(sc, WMREG_GCR);
   3487 
   3488 	/* Only take action if timeout value is defaulted to 0 */
   3489 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3490 		goto out;
   3491 
   3492 	if ((gcr & GCR_CAP_VER2) == 0) {
   3493 		gcr |= GCR_CMPL_TMOUT_10MS;
   3494 		goto out;
   3495 	}
   3496 
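	/*
	 * For devices with PCIe capability version 2, set the completion
	 * timeout directly in the Device Control 2 register;
	 * WM_PCIE_DCSR2_16MS selects the (up to) 16ms timeout range.
	 */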
   3497 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3498 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3499 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3500 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3501 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3502 
   3503 out:
   3504 	/* Disable completion timeout resend */
   3505 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3506 
   3507 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3508 }
   3509 
   3510 void
   3511 wm_get_auto_rd_done(struct wm_softc *sc)
   3512 {
   3513 	int i;
   3514 
   3515 	/* wait for eeprom to reload */
   3516 	switch (sc->sc_type) {
   3517 	case WM_T_82571:
   3518 	case WM_T_82572:
   3519 	case WM_T_82573:
   3520 	case WM_T_82574:
   3521 	case WM_T_82583:
   3522 	case WM_T_82575:
   3523 	case WM_T_82576:
   3524 	case WM_T_82580:
   3525 	case WM_T_I350:
   3526 	case WM_T_I354:
   3527 	case WM_T_I210:
   3528 	case WM_T_I211:
   3529 	case WM_T_80003:
   3530 	case WM_T_ICH8:
   3531 	case WM_T_ICH9:
   3532 		for (i = 0; i < 10; i++) {
   3533 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3534 				break;
   3535 			delay(1000);
   3536 		}
   3537 		if (i == 10) {
   3538 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3539 			    "complete\n", device_xname(sc->sc_dev));
   3540 		}
   3541 		break;
   3542 	default:
   3543 		break;
   3544 	}
   3545 }
   3546 
   3547 void
   3548 wm_lan_init_done(struct wm_softc *sc)
   3549 {
   3550 	uint32_t reg = 0;
   3551 	int i;
   3552 
   3553 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3554 		device_xname(sc->sc_dev), __func__));
   3555 
   3556 	/* Wait for eeprom to reload */
   3557 	switch (sc->sc_type) {
   3558 	case WM_T_ICH10:
   3559 	case WM_T_PCH:
   3560 	case WM_T_PCH2:
   3561 	case WM_T_PCH_LPT:
   3562 	case WM_T_PCH_SPT:
   3563 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3564 			reg = CSR_READ(sc, WMREG_STATUS);
   3565 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3566 				break;
   3567 			delay(100);
   3568 		}
   3569 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3570 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3571 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3572 		}
   3573 		break;
   3574 	default:
   3575 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3576 		    __func__);
   3577 		break;
   3578 	}
   3579 
   3580 	reg &= ~STATUS_LAN_INIT_DONE;
   3581 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3582 }
   3583 
   3584 void
   3585 wm_get_cfg_done(struct wm_softc *sc)
   3586 {
   3587 	int mask;
   3588 	uint32_t reg;
   3589 	int i;
   3590 
   3591 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3592 		device_xname(sc->sc_dev), __func__));
   3593 
   3594 	/* Wait for eeprom to reload */
   3595 	switch (sc->sc_type) {
   3596 	case WM_T_82542_2_0:
   3597 	case WM_T_82542_2_1:
   3598 		/* null */
   3599 		break;
   3600 	case WM_T_82543:
   3601 	case WM_T_82544:
   3602 	case WM_T_82540:
   3603 	case WM_T_82545:
   3604 	case WM_T_82545_3:
   3605 	case WM_T_82546:
   3606 	case WM_T_82546_3:
   3607 	case WM_T_82541:
   3608 	case WM_T_82541_2:
   3609 	case WM_T_82547:
   3610 	case WM_T_82547_2:
   3611 	case WM_T_82573:
   3612 	case WM_T_82574:
   3613 	case WM_T_82583:
   3614 		/* generic */
   3615 		delay(10*1000);
   3616 		break;
   3617 	case WM_T_80003:
   3618 	case WM_T_82571:
   3619 	case WM_T_82572:
   3620 	case WM_T_82575:
   3621 	case WM_T_82576:
   3622 	case WM_T_82580:
   3623 	case WM_T_I350:
   3624 	case WM_T_I354:
   3625 	case WM_T_I210:
   3626 	case WM_T_I211:
   3627 		if (sc->sc_type == WM_T_82571) {
   3628 			/* Only 82571 shares port 0 */
   3629 			mask = EEMNGCTL_CFGDONE_0;
   3630 		} else
   3631 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3632 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3633 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3634 				break;
   3635 			delay(1000);
   3636 		}
   3637 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3638 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3639 				device_xname(sc->sc_dev), __func__));
   3640 		}
   3641 		break;
   3642 	case WM_T_ICH8:
   3643 	case WM_T_ICH9:
   3644 	case WM_T_ICH10:
   3645 	case WM_T_PCH:
   3646 	case WM_T_PCH2:
   3647 	case WM_T_PCH_LPT:
   3648 	case WM_T_PCH_SPT:
   3649 		delay(10*1000);
   3650 		if (sc->sc_type >= WM_T_ICH10)
   3651 			wm_lan_init_done(sc);
   3652 		else
   3653 			wm_get_auto_rd_done(sc);
   3654 
   3655 		reg = CSR_READ(sc, WMREG_STATUS);
   3656 		if ((reg & STATUS_PHYRA) != 0)
   3657 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3658 		break;
   3659 	default:
   3660 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3661 		    __func__);
   3662 		break;
   3663 	}
   3664 }
   3665 
   3666 void
   3667 wm_phy_post_reset(struct wm_softc *sc)
   3668 {
   3669 	uint32_t reg;
   3670 
   3671 	/* This function is only for ICH8 and newer. */
   3672 	if (sc->sc_type < WM_T_ICH8)
   3673 		return;
   3674 
   3675 	if (wm_phy_resetisblocked(sc)) {
   3676 		/* XXX */
   3677 		device_printf(sc->sc_dev, " PHY is blocked\n");
   3678 		return;
   3679 	}
   3680 
   3681 	/* Allow time for h/w to get to quiescent state after reset */
   3682 	delay(10*1000);
   3683 
   3684 	/* Perform any necessary post-reset workarounds */
   3685 	if (sc->sc_type == WM_T_PCH)
   3686 		wm_hv_phy_workaround_ich8lan(sc);
   3687 	if (sc->sc_type == WM_T_PCH2)
   3688 		wm_lv_phy_workaround_ich8lan(sc);
   3689 
   3690 	/* Clear the host wakeup bit after lcd reset */
   3691 	if (sc->sc_type >= WM_T_PCH) {
   3692 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3693 		    BM_PORT_GEN_CFG);
   3694 		reg &= ~BM_WUC_HOST_WU_BIT;
   3695 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3696 		    BM_PORT_GEN_CFG, reg);
   3697 	}
   3698 
   3699 	/* Configure the LCD with the extended configuration region in NVM */
   3700 	wm_init_lcd_from_nvm(sc);
   3701 
   3702 	/* Configure the LCD with the OEM bits in NVM */
   3703 }
   3704 
   3705 /* Only for PCH and newer */
   3706 static void
   3707 wm_write_smbus_addr(struct wm_softc *sc)
   3708 {
   3709 	uint32_t strap, freq;
   3710 	uint32_t phy_data;
   3711 
   3712 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3713 		device_xname(sc->sc_dev), __func__));
   3714 
   3715 	strap = CSR_READ(sc, WMREG_STRAP);
   3716 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3717 
   3718 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3719 
   3720 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3721 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3722 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3723 
   3724 	if (sc->sc_phytype == WMPHY_I217) {
   3725 		/* Restore SMBus frequency */
    3726 		if (freq--) {
   3727 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3728 			    | HV_SMB_ADDR_FREQ_HIGH);
   3729 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3730 			    HV_SMB_ADDR_FREQ_LOW);
   3731 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3732 			    HV_SMB_ADDR_FREQ_HIGH);
   3733 		} else {
   3734 			DPRINTF(WM_DEBUG_INIT,
   3735 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3736 				device_xname(sc->sc_dev), __func__));
   3737 		}
   3738 	}
   3739 
   3740 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3741 }
   3742 
   3743 void
   3744 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3745 {
   3746 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3747 	uint16_t phy_page = 0;
   3748 
   3749 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3750 		device_xname(sc->sc_dev), __func__));
   3751 
   3752 	switch (sc->sc_type) {
   3753 	case WM_T_ICH8:
   3754 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3755 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3756 			return;
   3757 
   3758 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3759 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3760 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3761 			break;
   3762 		}
   3763 		/* FALLTHROUGH */
   3764 	case WM_T_PCH:
   3765 	case WM_T_PCH2:
   3766 	case WM_T_PCH_LPT:
   3767 	case WM_T_PCH_SPT:
   3768 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3769 		break;
   3770 	default:
   3771 		return;
   3772 	}
   3773 
   3774 	sc->phy.acquire(sc);
   3775 
   3776 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3777 	if ((reg & sw_cfg_mask) == 0)
   3778 		goto release;
   3779 
   3780 	/*
   3781 	 * Make sure HW does not configure LCD from PHY extended configuration
   3782 	 * before SW configuration
   3783 	 */
   3784 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3785 	if ((sc->sc_type < WM_T_PCH2)
   3786 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3787 		goto release;
   3788 
   3789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3790 		device_xname(sc->sc_dev), __func__));
   3791 	/* word_addr is in DWORD */
   3792 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3793 
   3794 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3795 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3796 
   3797 	if (((sc->sc_type == WM_T_PCH)
   3798 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3799 	    || (sc->sc_type > WM_T_PCH)) {
   3800 		/*
   3801 		 * HW configures the SMBus address and LEDs when the OEM and
   3802 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3803 		 * are cleared, SW will configure them instead.
   3804 		 */
   3805 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3806 			device_xname(sc->sc_dev), __func__));
   3807 		wm_write_smbus_addr(sc);
   3808 
   3809 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3810 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3811 	}
   3812 
   3813 	/* Configure LCD from extended configuration region. */
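	/*
	 * Each entry in the region is a pair of NVM words: the data word
	 * first, then the PHY register address. A write to
	 * MII_IGPHY_PAGE_SELECT updates phy_page so that subsequent
	 * addresses map into the selected page.
	 */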
   3814 	for (i = 0; i < cnf_size; i++) {
   3815 		uint16_t reg_data, reg_addr;
   3816 
   3817 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3818 			goto release;
   3819 
   3820 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3821 			goto release;
   3822 
   3823 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3824 			phy_page = reg_data;
   3825 
   3826 		reg_addr &= IGPHY_MAXREGADDR;
   3827 		reg_addr |= phy_page;
   3828 
   3829 		sc->phy.release(sc); /* XXX */
   3830 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3831 		sc->phy.acquire(sc); /* XXX */
   3832 	}
   3833 
   3834 release:
   3835 	sc->phy.release(sc);
   3836 	return;
   3837 }
   3838 
   3839 
   3840 /* Init hardware bits */
   3841 void
   3842 wm_initialize_hardware_bits(struct wm_softc *sc)
   3843 {
   3844 	uint32_t tarc0, tarc1, reg;
   3845 
   3846 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3847 		device_xname(sc->sc_dev), __func__));
   3848 
   3849 	/* For 82571 variant, 80003 and ICHs */
   3850 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3851 	    || (sc->sc_type >= WM_T_80003)) {
   3852 
   3853 		/* Transmit Descriptor Control 0 */
   3854 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3855 		reg |= TXDCTL_COUNT_DESC;
   3856 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3857 
   3858 		/* Transmit Descriptor Control 1 */
   3859 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3860 		reg |= TXDCTL_COUNT_DESC;
   3861 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3862 
   3863 		/* TARC0 */
   3864 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3865 		switch (sc->sc_type) {
   3866 		case WM_T_82571:
   3867 		case WM_T_82572:
   3868 		case WM_T_82573:
   3869 		case WM_T_82574:
   3870 		case WM_T_82583:
   3871 		case WM_T_80003:
   3872 			/* Clear bits 30..27 */
   3873 			tarc0 &= ~__BITS(30, 27);
   3874 			break;
   3875 		default:
   3876 			break;
   3877 		}
   3878 
   3879 		switch (sc->sc_type) {
   3880 		case WM_T_82571:
   3881 		case WM_T_82572:
   3882 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3883 
   3884 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3885 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3886 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3887 			/* 8257[12] Errata No.7 */
    3888 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3889 
   3890 			/* TARC1 bit 28 */
   3891 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3892 				tarc1 &= ~__BIT(28);
   3893 			else
   3894 				tarc1 |= __BIT(28);
   3895 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3896 
   3897 			/*
   3898 			 * 8257[12] Errata No.13
    3899 			 * Disable Dynamic Clock Gating.
   3900 			 */
   3901 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3902 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3903 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3904 			break;
   3905 		case WM_T_82573:
   3906 		case WM_T_82574:
   3907 		case WM_T_82583:
   3908 			if ((sc->sc_type == WM_T_82574)
   3909 			    || (sc->sc_type == WM_T_82583))
   3910 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3911 
   3912 			/* Extended Device Control */
   3913 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3914 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3915 			reg |= __BIT(22);	/* Set bit 22 */
   3916 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3917 
   3918 			/* Device Control */
   3919 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3920 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3921 
   3922 			/* PCIe Control Register */
   3923 			/*
   3924 			 * 82573 Errata (unknown).
   3925 			 *
   3926 			 * 82574 Errata 25 and 82583 Errata 12
   3927 			 * "Dropped Rx Packets":
    3928 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   3929 			 */
   3930 			reg = CSR_READ(sc, WMREG_GCR);
   3931 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3932 			CSR_WRITE(sc, WMREG_GCR, reg);
   3933 
   3934 			if ((sc->sc_type == WM_T_82574)
   3935 			    || (sc->sc_type == WM_T_82583)) {
   3936 				/*
   3937 				 * Document says this bit must be set for
   3938 				 * proper operation.
   3939 				 */
   3940 				reg = CSR_READ(sc, WMREG_GCR);
   3941 				reg |= __BIT(22);
   3942 				CSR_WRITE(sc, WMREG_GCR, reg);
   3943 
   3944 				/*
    3945 				 * Apply a workaround for the hardware
    3946 				 * errata documented in the errata docs.
    3947 				 * It fixes an issue where error-prone or
    3948 				 * unreliable PCIe completions occur,
    3949 				 * particularly with ASPM enabled. Without
    3950 				 * the fix, the issue can cause Tx timeouts.
   3951 				 */
   3952 				reg = CSR_READ(sc, WMREG_GCR2);
   3953 				reg |= __BIT(0);
   3954 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3955 			}
   3956 			break;
   3957 		case WM_T_80003:
   3958 			/* TARC0 */
   3959 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3960 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3961 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   3962 
   3963 			/* TARC1 bit 28 */
   3964 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3965 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3966 				tarc1 &= ~__BIT(28);
   3967 			else
   3968 				tarc1 |= __BIT(28);
   3969 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3970 			break;
   3971 		case WM_T_ICH8:
   3972 		case WM_T_ICH9:
   3973 		case WM_T_ICH10:
   3974 		case WM_T_PCH:
   3975 		case WM_T_PCH2:
   3976 		case WM_T_PCH_LPT:
   3977 		case WM_T_PCH_SPT:
   3978 			/* TARC0 */
   3979 			if ((sc->sc_type == WM_T_ICH8)
   3980 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3981 				/* Set TARC0 bits 29 and 28 */
   3982 				tarc0 |= __BITS(29, 28);
   3983 			}
   3984 			/* Set TARC0 bits 23,24,26,27 */
   3985 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3986 
   3987 			/* CTRL_EXT */
   3988 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3989 			reg |= __BIT(22);	/* Set bit 22 */
   3990 			/*
   3991 			 * Enable PHY low-power state when MAC is at D3
   3992 			 * w/o WoL
   3993 			 */
   3994 			if (sc->sc_type >= WM_T_PCH)
   3995 				reg |= CTRL_EXT_PHYPDEN;
   3996 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3997 
   3998 			/* TARC1 */
   3999 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4000 			/* bit 28 */
   4001 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4002 				tarc1 &= ~__BIT(28);
   4003 			else
   4004 				tarc1 |= __BIT(28);
   4005 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4006 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4007 
   4008 			/* Device Status */
   4009 			if (sc->sc_type == WM_T_ICH8) {
   4010 				reg = CSR_READ(sc, WMREG_STATUS);
   4011 				reg &= ~__BIT(31);
   4012 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4013 
   4014 			}
   4015 
   4016 			/* IOSFPC */
   4017 			if (sc->sc_type == WM_T_PCH_SPT) {
   4018 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4019 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4020 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4021 			}
   4022 			/*
   4023 			 * Work-around descriptor data corruption issue during
   4024 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4025 			 * capability.
   4026 			 */
   4027 			reg = CSR_READ(sc, WMREG_RFCTL);
   4028 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4029 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4030 			break;
   4031 		default:
   4032 			break;
   4033 		}
   4034 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4035 
   4036 		switch (sc->sc_type) {
   4037 		/*
   4038 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4039 		 * Avoid RSS Hash Value bug.
   4040 		 */
   4041 		case WM_T_82571:
   4042 		case WM_T_82572:
   4043 		case WM_T_82573:
   4044 		case WM_T_80003:
   4045 		case WM_T_ICH8:
   4046 			reg = CSR_READ(sc, WMREG_RFCTL);
   4047 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4048 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4049 			break;
   4050 		case WM_T_82574:
    4051 			/* Use extended Rx descriptors. */
   4052 			reg = CSR_READ(sc, WMREG_RFCTL);
   4053 			reg |= WMREG_RFCTL_EXSTEN;
   4054 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4055 			break;
   4056 		default:
   4057 			break;
   4058 		}
   4059 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4060 		/*
   4061 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4062 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4063 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4064 		 * Correctly by the Device"
   4065 		 *
   4066 		 * I354(C2000) Errata AVR53:
   4067 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4068 		 * Hang"
   4069 		 */
   4070 		reg = CSR_READ(sc, WMREG_RFCTL);
   4071 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4072 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4073 	}
   4074 }
   4075 
   4076 static uint32_t
   4077 wm_rxpbs_adjust_82580(uint32_t val)
   4078 {
   4079 	uint32_t rv = 0;
   4080 
   4081 	if (val < __arraycount(wm_82580_rxpbs_table))
   4082 		rv = wm_82580_rxpbs_table[val];
   4083 
   4084 	return rv;
   4085 }
   4086 
   4087 /*
   4088  * wm_reset_phy:
   4089  *
   4090  *	generic PHY reset function.
   4091  *	Same as e1000_phy_hw_reset_generic()
   4092  */
   4093 static void
   4094 wm_reset_phy(struct wm_softc *sc)
   4095 {
   4096 	uint32_t reg;
   4097 
   4098 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4099 		device_xname(sc->sc_dev), __func__));
   4100 	if (wm_phy_resetisblocked(sc))
   4101 		return;
   4102 
   4103 	sc->phy.acquire(sc);
   4104 
   4105 	reg = CSR_READ(sc, WMREG_CTRL);
   4106 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4107 	CSR_WRITE_FLUSH(sc);
   4108 
   4109 	delay(sc->phy.reset_delay_us);
   4110 
   4111 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4112 	CSR_WRITE_FLUSH(sc);
   4113 
   4114 	delay(150);
   4115 
   4116 	sc->phy.release(sc);
   4117 
   4118 	wm_get_cfg_done(sc);
   4119 	wm_phy_post_reset(sc);
   4120 }
   4121 
   4122 static void
   4123 wm_flush_desc_rings(struct wm_softc *sc)
   4124 {
   4125 	pcireg_t preg;
   4126 	uint32_t reg;
   4127 	struct wm_txqueue *txq;
   4128 	wiseman_txdesc_t *txd;
   4129 	int nexttx;
   4130 	uint32_t rctl;
   4131 
   4132 	/* First, disable MULR fix in FEXTNVM11 */
   4133 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4134 	reg |= FEXTNVM11_DIS_MULRFIX;
   4135 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4136 
   4137 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4138 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4139 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4140 		return;
   4141 
   4142 	/* TX */
   4143 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4144 	    device_xname(sc->sc_dev), preg, reg);
   4145 	reg = CSR_READ(sc, WMREG_TCTL);
   4146 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4147 
   4148 	txq = &sc->sc_queue[0].wmq_txq;
   4149 	nexttx = txq->txq_next;
   4150 	txd = &txq->txq_descs[nexttx];
   4151 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4152 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4153 	txd->wtx_fields.wtxu_status = 0;
   4154 	txd->wtx_fields.wtxu_options = 0;
   4155 	txd->wtx_fields.wtxu_vlan = 0;
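
	/*
	 * The dummy 512-byte descriptor above (with FCS insertion) gives
	 * the hardware one transmit to complete; advancing TDT below
	 * lets the stuck ring drain.
	 */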
   4156 
   4157 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4158 	    BUS_SPACE_BARRIER_WRITE);
   4159 
   4160 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4161 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4162 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4163 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4164 	delay(250);
   4165 
   4166 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4167 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4168 		return;
   4169 
   4170 	/* RX */
   4171 	printf("%s: Need RX flush (reg = %08x)\n",
   4172 	    device_xname(sc->sc_dev), preg);
   4173 	rctl = CSR_READ(sc, WMREG_RCTL);
   4174 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4175 	CSR_WRITE_FLUSH(sc);
   4176 	delay(150);
   4177 
   4178 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4179 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4180 	reg &= 0xffffc000;
   4181 	/*
   4182 	 * update thresholds: prefetch threshold to 31, host threshold
   4183 	 * to 1 and make sure the granularity is "descriptors" and not
   4184 	 * "cache lines"
   4185 	 */
   4186 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4187 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4188 
   4189 	/*
   4190 	 * momentarily enable the RX ring for the changes to take
   4191 	 * effect
   4192 	 */
   4193 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4194 	CSR_WRITE_FLUSH(sc);
   4195 	delay(150);
   4196 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4197 }
   4198 
   4199 /*
   4200  * wm_reset:
   4201  *
   4202  *	Reset the i82542 chip.
   4203  */
   4204 static void
   4205 wm_reset(struct wm_softc *sc)
   4206 {
   4207 	int phy_reset = 0;
   4208 	int i, error = 0;
   4209 	uint32_t reg;
   4210 
   4211 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4212 		device_xname(sc->sc_dev), __func__));
   4213 	KASSERT(sc->sc_type != 0);
   4214 
   4215 	/*
   4216 	 * Allocate on-chip memory according to the MTU size.
   4217 	 * The Packet Buffer Allocation register must be written
   4218 	 * before the chip is reset.
   4219 	 */
   4220 	switch (sc->sc_type) {
   4221 	case WM_T_82547:
   4222 	case WM_T_82547_2:
   4223 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4224 		    PBA_22K : PBA_30K;
   4225 		for (i = 0; i < sc->sc_nqueues; i++) {
   4226 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4227 			txq->txq_fifo_head = 0;
   4228 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4229 			txq->txq_fifo_size =
   4230 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4231 			txq->txq_fifo_stall = 0;
   4232 		}
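		/*
		 * Worked example (assuming the PBA_* constants are in KB
		 * units): with the default PBA_30K split, the Rx FIFO
		 * gets 30KB and the Tx FIFO the remaining
		 * (40 - 30)KB = 10KB of the 40KB packet buffer.
		 */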
   4233 		break;
   4234 	case WM_T_82571:
   4235 	case WM_T_82572:
   4236 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4237 	case WM_T_80003:
   4238 		sc->sc_pba = PBA_32K;
   4239 		break;
   4240 	case WM_T_82573:
   4241 		sc->sc_pba = PBA_12K;
   4242 		break;
   4243 	case WM_T_82574:
   4244 	case WM_T_82583:
   4245 		sc->sc_pba = PBA_20K;
   4246 		break;
   4247 	case WM_T_82576:
   4248 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4249 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4250 		break;
   4251 	case WM_T_82580:
   4252 	case WM_T_I350:
   4253 	case WM_T_I354:
   4254 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4255 		break;
   4256 	case WM_T_I210:
   4257 	case WM_T_I211:
   4258 		sc->sc_pba = PBA_34K;
   4259 		break;
   4260 	case WM_T_ICH8:
   4261 		/* Workaround for a bit corruption issue in FIFO memory */
   4262 		sc->sc_pba = PBA_8K;
   4263 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4264 		break;
   4265 	case WM_T_ICH9:
   4266 	case WM_T_ICH10:
   4267 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4268 		    PBA_14K : PBA_10K;
   4269 		break;
   4270 	case WM_T_PCH:
   4271 	case WM_T_PCH2:
   4272 	case WM_T_PCH_LPT:
   4273 	case WM_T_PCH_SPT:
   4274 		sc->sc_pba = PBA_26K;
   4275 		break;
   4276 	default:
   4277 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4278 		    PBA_40K : PBA_48K;
   4279 		break;
   4280 	}
   4281 	/*
    4282 	 * Only old or non-multiqueue devices have the PBA register.
   4283 	 * XXX Need special handling for 82575.
   4284 	 */
   4285 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4286 	    || (sc->sc_type == WM_T_82575))
   4287 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4288 
   4289 	/* Prevent the PCI-E bus from sticking */
   4290 	if (sc->sc_flags & WM_F_PCIE) {
   4291 		int timeout = 800;
   4292 
   4293 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4294 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4295 
   4296 		while (timeout--) {
   4297 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4298 			    == 0)
   4299 				break;
   4300 			delay(100);
   4301 		}
   4302 		if (timeout == 0)
   4303 			device_printf(sc->sc_dev,
   4304 			    "failed to disable busmastering\n");
   4305 	}
   4306 
   4307 	/* Set the completion timeout for interface */
   4308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4309 	    || (sc->sc_type == WM_T_82580)
   4310 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4311 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4312 		wm_set_pcie_completion_timeout(sc);
   4313 
   4314 	/* Clear interrupt */
   4315 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4316 	if (wm_is_using_msix(sc)) {
   4317 		if (sc->sc_type != WM_T_82574) {
   4318 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4319 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4320 		} else {
   4321 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4322 		}
   4323 	}
   4324 
   4325 	/* Stop the transmit and receive processes. */
   4326 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4327 	sc->sc_rctl &= ~RCTL_EN;
   4328 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4329 	CSR_WRITE_FLUSH(sc);
   4330 
   4331 	/* XXX set_tbi_sbp_82543() */
   4332 
   4333 	delay(10*1000);
   4334 
   4335 	/* Must acquire the MDIO ownership before MAC reset */
   4336 	switch (sc->sc_type) {
   4337 	case WM_T_82573:
   4338 	case WM_T_82574:
   4339 	case WM_T_82583:
   4340 		error = wm_get_hw_semaphore_82573(sc);
   4341 		break;
   4342 	default:
   4343 		break;
   4344 	}
   4345 
   4346 	/*
   4347 	 * 82541 Errata 29? & 82547 Errata 28?
   4348 	 * See also the description about PHY_RST bit in CTRL register
   4349 	 * in 8254x_GBe_SDM.pdf.
   4350 	 */
   4351 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4352 		CSR_WRITE(sc, WMREG_CTRL,
   4353 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4354 		CSR_WRITE_FLUSH(sc);
   4355 		delay(5000);
   4356 	}
   4357 
   4358 	switch (sc->sc_type) {
   4359 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4360 	case WM_T_82541:
   4361 	case WM_T_82541_2:
   4362 	case WM_T_82547:
   4363 	case WM_T_82547_2:
   4364 		/*
   4365 		 * On some chipsets, a reset through a memory-mapped write
   4366 		 * cycle can cause the chip to reset before completing the
   4367 		 * write cycle.  This causes major headache that can be
   4368 		 * avoided by issuing the reset via indirect register writes
   4369 		 * through I/O space.
   4370 		 *
   4371 		 * So, if we successfully mapped the I/O BAR at attach time,
   4372 		 * use that.  Otherwise, try our luck with a memory-mapped
   4373 		 * reset.
   4374 		 */
   4375 		if (sc->sc_flags & WM_F_IOH_VALID)
   4376 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4377 		else
   4378 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4379 		break;
   4380 	case WM_T_82545_3:
   4381 	case WM_T_82546_3:
   4382 		/* Use the shadow control register on these chips. */
   4383 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4384 		break;
   4385 	case WM_T_80003:
   4386 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4387 		sc->phy.acquire(sc);
   4388 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4389 		sc->phy.release(sc);
   4390 		break;
   4391 	case WM_T_ICH8:
   4392 	case WM_T_ICH9:
   4393 	case WM_T_ICH10:
   4394 	case WM_T_PCH:
   4395 	case WM_T_PCH2:
   4396 	case WM_T_PCH_LPT:
   4397 	case WM_T_PCH_SPT:
   4398 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4399 		if (wm_phy_resetisblocked(sc) == false) {
   4400 			/*
   4401 			 * Gate automatic PHY configuration by hardware on
   4402 			 * non-managed 82579
   4403 			 */
   4404 			if ((sc->sc_type == WM_T_PCH2)
   4405 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4406 				== 0))
   4407 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4408 
   4409 			reg |= CTRL_PHY_RESET;
   4410 			phy_reset = 1;
   4411 		} else
   4412 			printf("XXX reset is blocked!!!\n");
   4413 		sc->phy.acquire(sc);
   4414 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4415 		/* Don't insert a completion barrier while resetting */
   4416 		delay(20*1000);
   4417 		mutex_exit(sc->sc_ich_phymtx);
   4418 		break;
   4419 	case WM_T_82580:
   4420 	case WM_T_I350:
   4421 	case WM_T_I354:
   4422 	case WM_T_I210:
   4423 	case WM_T_I211:
   4424 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4425 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4426 			CSR_WRITE_FLUSH(sc);
   4427 		delay(5000);
   4428 		break;
   4429 	case WM_T_82542_2_0:
   4430 	case WM_T_82542_2_1:
   4431 	case WM_T_82543:
   4432 	case WM_T_82540:
   4433 	case WM_T_82545:
   4434 	case WM_T_82546:
   4435 	case WM_T_82571:
   4436 	case WM_T_82572:
   4437 	case WM_T_82573:
   4438 	case WM_T_82574:
   4439 	case WM_T_82575:
   4440 	case WM_T_82576:
   4441 	case WM_T_82583:
   4442 	default:
   4443 		/* Everything else can safely use the documented method. */
   4444 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4445 		break;
   4446 	}
   4447 
   4448 	/* Must release the MDIO ownership after MAC reset */
   4449 	switch (sc->sc_type) {
   4450 	case WM_T_82573:
   4451 	case WM_T_82574:
   4452 	case WM_T_82583:
   4453 		if (error == 0)
   4454 			wm_put_hw_semaphore_82573(sc);
   4455 		break;
   4456 	default:
   4457 		break;
   4458 	}
   4459 
   4460 	if (phy_reset != 0)
   4461 		wm_get_cfg_done(sc);
   4462 
   4463 	/* reload EEPROM */
   4464 	switch (sc->sc_type) {
   4465 	case WM_T_82542_2_0:
   4466 	case WM_T_82542_2_1:
   4467 	case WM_T_82543:
   4468 	case WM_T_82544:
   4469 		delay(10);
   4470 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4471 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4472 		CSR_WRITE_FLUSH(sc);
   4473 		delay(2000);
   4474 		break;
   4475 	case WM_T_82540:
   4476 	case WM_T_82545:
   4477 	case WM_T_82545_3:
   4478 	case WM_T_82546:
   4479 	case WM_T_82546_3:
   4480 		delay(5*1000);
   4481 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4482 		break;
   4483 	case WM_T_82541:
   4484 	case WM_T_82541_2:
   4485 	case WM_T_82547:
   4486 	case WM_T_82547_2:
   4487 		delay(20000);
   4488 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4489 		break;
   4490 	case WM_T_82571:
   4491 	case WM_T_82572:
   4492 	case WM_T_82573:
   4493 	case WM_T_82574:
   4494 	case WM_T_82583:
   4495 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4496 			delay(10);
   4497 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4498 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4499 			CSR_WRITE_FLUSH(sc);
   4500 		}
   4501 		/* check EECD_EE_AUTORD */
   4502 		wm_get_auto_rd_done(sc);
   4503 		/*
   4504 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4505 		 * is set.
   4506 		 */
   4507 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4508 		    || (sc->sc_type == WM_T_82583))
   4509 			delay(25*1000);
   4510 		break;
   4511 	case WM_T_82575:
   4512 	case WM_T_82576:
   4513 	case WM_T_82580:
   4514 	case WM_T_I350:
   4515 	case WM_T_I354:
   4516 	case WM_T_I210:
   4517 	case WM_T_I211:
   4518 	case WM_T_80003:
   4519 		/* check EECD_EE_AUTORD */
   4520 		wm_get_auto_rd_done(sc);
   4521 		break;
   4522 	case WM_T_ICH8:
   4523 	case WM_T_ICH9:
   4524 	case WM_T_ICH10:
   4525 	case WM_T_PCH:
   4526 	case WM_T_PCH2:
   4527 	case WM_T_PCH_LPT:
   4528 	case WM_T_PCH_SPT:
   4529 		break;
   4530 	default:
   4531 		panic("%s: unknown type\n", __func__);
   4532 	}
   4533 
   4534 	/* Check whether EEPROM is present or not */
   4535 	switch (sc->sc_type) {
   4536 	case WM_T_82575:
   4537 	case WM_T_82576:
   4538 	case WM_T_82580:
   4539 	case WM_T_I350:
   4540 	case WM_T_I354:
   4541 	case WM_T_ICH8:
   4542 	case WM_T_ICH9:
   4543 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4544 			/* Not found */
   4545 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4546 			if (sc->sc_type == WM_T_82575)
   4547 				wm_reset_init_script_82575(sc);
   4548 		}
   4549 		break;
   4550 	default:
   4551 		break;
   4552 	}
   4553 
   4554 	if (phy_reset != 0)
   4555 		wm_phy_post_reset(sc);
   4556 
   4557 	if ((sc->sc_type == WM_T_82580)
   4558 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4559 		/* clear global device reset status bit */
   4560 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4561 	}
   4562 
   4563 	/* Clear any pending interrupt events. */
   4564 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4565 	reg = CSR_READ(sc, WMREG_ICR);
   4566 	if (wm_is_using_msix(sc)) {
   4567 		if (sc->sc_type != WM_T_82574) {
   4568 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4569 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4570 		} else
   4571 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4572 	}
   4573 
   4574 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4575 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4576 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4577 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4578 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4579 		reg |= KABGTXD_BGSQLBIAS;
   4580 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4581 	}
   4582 
   4583 	/* reload sc_ctrl */
   4584 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4585 
   4586 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4587 		wm_set_eee_i350(sc);
   4588 
   4589 	/*
   4590 	 * For PCH, this write will make sure that any noise will be detected
   4591 	 * as a CRC error and be dropped rather than show up as a bad packet
   4592 	 * to the DMA engine
   4593 	 */
   4594 	if (sc->sc_type == WM_T_PCH)
   4595 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4596 
   4597 	if (sc->sc_type >= WM_T_82544)
   4598 		CSR_WRITE(sc, WMREG_WUC, 0);
   4599 
   4600 	wm_reset_mdicnfg_82580(sc);
   4601 
   4602 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4603 		wm_pll_workaround_i210(sc);
   4604 }
   4605 
   4606 /*
   4607  * wm_add_rxbuf:
   4608  *
    4609  *	Add a receive buffer to the indicated descriptor.
   4610  */
   4611 static int
   4612 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4613 {
   4614 	struct wm_softc *sc = rxq->rxq_sc;
   4615 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4616 	struct mbuf *m;
   4617 	int error;
   4618 
   4619 	KASSERT(mutex_owned(rxq->rxq_lock));
   4620 
   4621 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4622 	if (m == NULL)
   4623 		return ENOBUFS;
   4624 
   4625 	MCLGET(m, M_DONTWAIT);
   4626 	if ((m->m_flags & M_EXT) == 0) {
   4627 		m_freem(m);
   4628 		return ENOBUFS;
   4629 	}
   4630 
   4631 	if (rxs->rxs_mbuf != NULL)
   4632 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4633 
   4634 	rxs->rxs_mbuf = m;
   4635 
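         	/* Use the whole cluster so a maximum-sized frame fits in one buffer. */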
   4636 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4637 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4638 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4639 	if (error) {
   4640 		/* XXX XXX XXX */
   4641 		aprint_error_dev(sc->sc_dev,
   4642 		    "unable to load rx DMA map %d, error = %d\n",
   4643 		    idx, error);
   4644 		panic("wm_add_rxbuf");
   4645 	}
   4646 
   4647 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4648 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4649 
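         	/*
         	 * On NEWQUEUE (82575 and later) chips, write the Rx descriptor
         	 * here only while the receiver is already enabled; during
         	 * initialization, wm_init_locked() writes all the descriptors
         	 * itself.
         	 */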
   4650 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4651 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4652 			wm_init_rxdesc(rxq, idx);
   4653 	} else
   4654 		wm_init_rxdesc(rxq, idx);
   4655 
   4656 	return 0;
   4657 }
   4658 
   4659 /*
   4660  * wm_rxdrain:
   4661  *
   4662  *	Drain the receive queue.
   4663  */
   4664 static void
   4665 wm_rxdrain(struct wm_rxqueue *rxq)
   4666 {
   4667 	struct wm_softc *sc = rxq->rxq_sc;
   4668 	struct wm_rxsoft *rxs;
   4669 	int i;
   4670 
   4671 	KASSERT(mutex_owned(rxq->rxq_lock));
   4672 
   4673 	for (i = 0; i < WM_NRXDESC; i++) {
   4674 		rxs = &rxq->rxq_soft[i];
   4675 		if (rxs->rxs_mbuf != NULL) {
   4676 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4677 			m_freem(rxs->rxs_mbuf);
   4678 			rxs->rxs_mbuf = NULL;
   4679 		}
   4680 	}
   4681 }
   4682 
   4683 
   4684 /*
   4685  * XXX copy from FreeBSD's sys/net/rss_config.c
   4686  */
   4687 /*
   4688  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4689  * effectiveness may be limited by algorithm choice and available entropy
   4690  * during the boot.
   4691  *
   4692  * XXXRW: And that we don't randomize it yet!
   4693  *
   4694  * This is the default Microsoft RSS specification key which is also
   4695  * the Chelsio T5 firmware default key.
   4696  */
   4697 #define RSS_KEYSIZE 40
   4698 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4699 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4700 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4701 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4702 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4703 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4704 };
   4705 
   4706 /*
    4707  * Caller must pass an array of size sizeof(wm_rss_key).
   4708  *
   4709  * XXX
    4710  * Since if_ixgbe may also use this function, it should not be
    4711  * an if_wm specific function.
   4712  */
   4713 static void
   4714 wm_rss_getkey(uint8_t *key)
   4715 {
   4716 
   4717 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4718 }
   4719 
   4720 /*
   4721  * Setup registers for RSS.
   4722  *
    4723  * XXX VMDq is not yet supported.
   4724  */
   4725 static void
   4726 wm_init_rss(struct wm_softc *sc)
   4727 {
   4728 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4729 	int i;
   4730 
   4731 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4732 
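         	/*
         	 * Fill the redirection table (RETA): entry i maps hash bucket i
         	 * to queue (i % sc_nqueues), e.g. with 4 queues the entries
         	 * cycle through 0, 1, 2, 3, 0, 1, ...
         	 */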
   4733 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4734 		int qid, reta_ent;
   4735 
   4736 		qid  = i % sc->sc_nqueues;
    4737 		switch (sc->sc_type) {
   4738 		case WM_T_82574:
   4739 			reta_ent = __SHIFTIN(qid,
   4740 			    RETA_ENT_QINDEX_MASK_82574);
   4741 			break;
   4742 		case WM_T_82575:
   4743 			reta_ent = __SHIFTIN(qid,
   4744 			    RETA_ENT_QINDEX1_MASK_82575);
   4745 			break;
   4746 		default:
   4747 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4748 			break;
   4749 		}
   4750 
   4751 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4752 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4753 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4754 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4755 	}
   4756 
   4757 	wm_rss_getkey((uint8_t *)rss_key);
   4758 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4759 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4760 
   4761 	if (sc->sc_type == WM_T_82574)
   4762 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4763 	else
   4764 		mrqc = MRQC_ENABLE_RSS_MQ;
   4765 
   4766 	/*
    4767 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4768 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4769 	 */
   4770 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4771 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4772 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4773 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4774 
   4775 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4776 }
   4777 
   4778 /*
    4779  * Adjust the TX and RX queue numbers which the system actually uses.
    4780  *
    4781  * The numbers are affected by the following parameters:
    4782  *     - The number of hardware queues
   4783  *     - The number of MSI-X vectors (= "nvectors" argument)
   4784  *     - ncpu
   4785  */
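         /*
          * For example (a hypothetical configuration): the 82576 has 16
          * hardware queues, so with nvectors == 5 on an 8-CPU machine the
          * result is sc_nqueues = min(16, 5 - 1, 8) = 4.
          */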
   4786 static void
   4787 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4788 {
   4789 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4790 
   4791 	if (nvectors < 2) {
   4792 		sc->sc_nqueues = 1;
   4793 		return;
   4794 	}
   4795 
    4796 	switch (sc->sc_type) {
   4797 	case WM_T_82572:
   4798 		hw_ntxqueues = 2;
   4799 		hw_nrxqueues = 2;
   4800 		break;
   4801 	case WM_T_82574:
   4802 		hw_ntxqueues = 2;
   4803 		hw_nrxqueues = 2;
   4804 		break;
   4805 	case WM_T_82575:
   4806 		hw_ntxqueues = 4;
   4807 		hw_nrxqueues = 4;
   4808 		break;
   4809 	case WM_T_82576:
   4810 		hw_ntxqueues = 16;
   4811 		hw_nrxqueues = 16;
   4812 		break;
   4813 	case WM_T_82580:
   4814 	case WM_T_I350:
   4815 	case WM_T_I354:
   4816 		hw_ntxqueues = 8;
   4817 		hw_nrxqueues = 8;
   4818 		break;
   4819 	case WM_T_I210:
   4820 		hw_ntxqueues = 4;
   4821 		hw_nrxqueues = 4;
   4822 		break;
   4823 	case WM_T_I211:
   4824 		hw_ntxqueues = 2;
   4825 		hw_nrxqueues = 2;
   4826 		break;
   4827 		/*
    4828 		 * As the following Ethernet controllers do not support MSI-X,
    4829 		 * this driver does not use multiqueue on them:
   4830 		 *     - WM_T_80003
   4831 		 *     - WM_T_ICH8
   4832 		 *     - WM_T_ICH9
   4833 		 *     - WM_T_ICH10
   4834 		 *     - WM_T_PCH
   4835 		 *     - WM_T_PCH2
   4836 		 *     - WM_T_PCH_LPT
   4837 		 */
   4838 	default:
   4839 		hw_ntxqueues = 1;
   4840 		hw_nrxqueues = 1;
   4841 		break;
   4842 	}
   4843 
   4844 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4845 
   4846 	/*
    4847 	 * Since using more queues than MSI-X vectors cannot improve scaling,
    4848 	 * we limit the number of queues actually used.
   4849 	 */
   4850 	if (nvectors < hw_nqueues + 1) {
   4851 		sc->sc_nqueues = nvectors - 1;
   4852 	} else {
   4853 		sc->sc_nqueues = hw_nqueues;
   4854 	}
   4855 
   4856 	/*
    4857 	 * Since using more queues than CPUs cannot improve scaling, we limit
    4858 	 * the number of queues actually used.
   4859 	 */
   4860 	if (ncpu < sc->sc_nqueues)
   4861 		sc->sc_nqueues = ncpu;
   4862 }
   4863 
   4864 static inline bool
   4865 wm_is_using_msix(struct wm_softc *sc)
   4866 {
   4867 
   4868 	return (sc->sc_nintrs > 1);
   4869 }
   4870 
   4871 static inline bool
   4872 wm_is_using_multiqueue(struct wm_softc *sc)
   4873 {
   4874 
   4875 	return (sc->sc_nqueues > 1);
   4876 }
   4877 
   4878 static int
   4879 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4880 {
   4881 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4882 	wmq->wmq_id = qidx;
   4883 	wmq->wmq_intr_idx = intr_idx;
   4884 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4885 #ifdef WM_MPSAFE
   4886 	    | SOFTINT_MPSAFE
   4887 #endif
   4888 	    , wm_handle_queue, wmq);
   4889 	if (wmq->wmq_si != NULL)
   4890 		return 0;
   4891 
   4892 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4893 	    wmq->wmq_id);
   4894 
   4895 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4896 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4897 	return ENOMEM;
   4898 }
   4899 
   4900 /*
    4901  * Both single-interrupt MSI and INTx can use this function.
   4902  */
   4903 static int
   4904 wm_setup_legacy(struct wm_softc *sc)
   4905 {
   4906 	pci_chipset_tag_t pc = sc->sc_pc;
   4907 	const char *intrstr = NULL;
   4908 	char intrbuf[PCI_INTRSTR_LEN];
   4909 	int error;
   4910 
   4911 	error = wm_alloc_txrx_queues(sc);
   4912 	if (error) {
   4913 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4914 		    error);
   4915 		return ENOMEM;
   4916 	}
   4917 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4918 	    sizeof(intrbuf));
   4919 #ifdef WM_MPSAFE
   4920 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4921 #endif
   4922 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4923 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4924 	if (sc->sc_ihs[0] == NULL) {
   4925 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4926 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4927 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4928 		return ENOMEM;
   4929 	}
   4930 
   4931 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4932 	sc->sc_nintrs = 1;
   4933 
   4934 	return wm_softint_establish(sc, 0, 0);
   4935 }
   4936 
   4937 static int
   4938 wm_setup_msix(struct wm_softc *sc)
   4939 {
   4940 	void *vih;
   4941 	kcpuset_t *affinity;
   4942 	int qidx, error, intr_idx, txrx_established;
   4943 	pci_chipset_tag_t pc = sc->sc_pc;
   4944 	const char *intrstr = NULL;
   4945 	char intrbuf[PCI_INTRSTR_LEN];
   4946 	char intr_xname[INTRDEVNAMEBUF];
   4947 
   4948 	if (sc->sc_nqueues < ncpu) {
   4949 		/*
    4950 		 * To avoid interference with other devices' interrupts, the
    4951 		 * affinity of the Tx/Rx interrupts starts from CPU#1.
   4952 		 */
   4953 		sc->sc_affinity_offset = 1;
   4954 	} else {
   4955 		/*
    4956 		 * In this case, this device uses all CPUs, so we unify the
    4957 		 * affinity cpu_index with the MSI-X vector number for readability.
   4958 		 */
   4959 		sc->sc_affinity_offset = 0;
   4960 	}
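         	/*
         	 * For example, with 4 queues on an 8-CPU system, the loop below
         	 * binds the TXRX vectors round-robin to CPU#1..CPU#4.
         	 */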
   4961 
   4962 	error = wm_alloc_txrx_queues(sc);
   4963 	if (error) {
   4964 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4965 		    error);
   4966 		return ENOMEM;
   4967 	}
   4968 
   4969 	kcpuset_create(&affinity, false);
   4970 	intr_idx = 0;
   4971 
   4972 	/*
   4973 	 * TX and RX
   4974 	 */
   4975 	txrx_established = 0;
   4976 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4977 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4978 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4979 
   4980 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4981 		    sizeof(intrbuf));
   4982 #ifdef WM_MPSAFE
   4983 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4984 		    PCI_INTR_MPSAFE, true);
   4985 #endif
   4986 		memset(intr_xname, 0, sizeof(intr_xname));
   4987 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4988 		    device_xname(sc->sc_dev), qidx);
   4989 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4990 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4991 		if (vih == NULL) {
   4992 			aprint_error_dev(sc->sc_dev,
   4993 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4994 			    intrstr ? " at " : "",
   4995 			    intrstr ? intrstr : "");
   4996 
   4997 			goto fail;
   4998 		}
   4999 		kcpuset_zero(affinity);
   5000 		/* Round-robin affinity */
   5001 		kcpuset_set(affinity, affinity_to);
   5002 		error = interrupt_distribute(vih, affinity, NULL);
   5003 		if (error == 0) {
   5004 			aprint_normal_dev(sc->sc_dev,
   5005 			    "for TX and RX interrupting at %s affinity to %u\n",
   5006 			    intrstr, affinity_to);
   5007 		} else {
   5008 			aprint_normal_dev(sc->sc_dev,
   5009 			    "for TX and RX interrupting at %s\n", intrstr);
   5010 		}
   5011 		sc->sc_ihs[intr_idx] = vih;
   5012 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5013 			goto fail;
   5014 		txrx_established++;
   5015 		intr_idx++;
   5016 	}
   5017 
   5018 	/*
   5019 	 * LINK
   5020 	 */
   5021 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5022 	    sizeof(intrbuf));
   5023 #ifdef WM_MPSAFE
   5024 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5025 #endif
   5026 	memset(intr_xname, 0, sizeof(intr_xname));
   5027 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5028 	    device_xname(sc->sc_dev));
   5029 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5030 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5031 	if (vih == NULL) {
   5032 		aprint_error_dev(sc->sc_dev,
   5033 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5034 		    intrstr ? " at " : "",
   5035 		    intrstr ? intrstr : "");
   5036 
   5037 		goto fail;
   5038 	}
    5039 	/* Keep the default affinity for the LINK interrupt. */
   5040 	aprint_normal_dev(sc->sc_dev,
   5041 	    "for LINK interrupting at %s\n", intrstr);
   5042 	sc->sc_ihs[intr_idx] = vih;
   5043 	sc->sc_link_intr_idx = intr_idx;
   5044 
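         	/* One TXRX vector per queue plus one LINK vector. */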
   5045 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5046 	kcpuset_destroy(affinity);
   5047 	return 0;
   5048 
   5049  fail:
   5050 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5051 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5052 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5053 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5054 	}
   5055 
   5056 	kcpuset_destroy(affinity);
   5057 	return ENOMEM;
   5058 }
   5059 
   5060 static void
   5061 wm_turnon(struct wm_softc *sc)
   5062 {
   5063 	int i;
   5064 
   5065 	KASSERT(WM_CORE_LOCKED(sc));
   5066 
   5067 	/*
    5068 	 * Must unset the stopping flags in ascending order.
   5069 	 */
    5070 	for (i = 0; i < sc->sc_nqueues; i++) {
   5071 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5072 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5073 
   5074 		mutex_enter(txq->txq_lock);
   5075 		txq->txq_stopping = false;
   5076 		mutex_exit(txq->txq_lock);
   5077 
   5078 		mutex_enter(rxq->rxq_lock);
   5079 		rxq->rxq_stopping = false;
   5080 		mutex_exit(rxq->rxq_lock);
   5081 	}
   5082 
   5083 	sc->sc_core_stopping = false;
   5084 }
   5085 
   5086 static void
   5087 wm_turnoff(struct wm_softc *sc)
   5088 {
   5089 	int i;
   5090 
   5091 	KASSERT(WM_CORE_LOCKED(sc));
   5092 
   5093 	sc->sc_core_stopping = true;
   5094 
   5095 	/*
    5096 	 * Must set the stopping flags in ascending order.
   5097 	 */
    5098 	for (i = 0; i < sc->sc_nqueues; i++) {
   5099 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5100 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5101 
   5102 		mutex_enter(rxq->rxq_lock);
   5103 		rxq->rxq_stopping = true;
   5104 		mutex_exit(rxq->rxq_lock);
   5105 
   5106 		mutex_enter(txq->txq_lock);
   5107 		txq->txq_stopping = true;
   5108 		mutex_exit(txq->txq_lock);
   5109 	}
   5110 }
   5111 
   5112 /*
    5113  * Write the interrupt interval value to the ITR or EITR register.
   5114  */
   5115 static void
   5116 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5117 {
   5118 
   5119 	if (!wmq->wmq_set_itr)
   5120 		return;
   5121 
   5122 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5123 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5124 
   5125 		/*
    5126 		 * The 82575 doesn't have the CNT_INGR field, so
    5127 		 * overwrite the counter field in software.
   5128 		 */
   5129 		if (sc->sc_type == WM_T_82575)
   5130 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5131 		else
   5132 			eitr |= EITR_CNT_INGR;
   5133 
   5134 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5135 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5136 		/*
    5137 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5138 		 * the multiqueue function with MSI-X.
   5139 		 */
   5140 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5141 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5142 	} else {
   5143 		KASSERT(wmq->wmq_id == 0);
   5144 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5145 	}
   5146 
   5147 	wmq->wmq_set_itr = false;
   5148 }
   5149 
   5150 /*
   5151  * TODO
    5152  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5153  * however, it does not fit wm(4). So we keep AIM disabled until we find
    5154  * an appropriate ITR calculation.
   5155  */
   5156 /*
    5157  * Calculate the interrupt interval value to be written to the register
    5158  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5159  */
   5160 static void
   5161 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5162 {
   5163 #ifdef NOTYET
   5164 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5165 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5166 	uint32_t avg_size = 0;
   5167 	uint32_t new_itr;
   5168 
   5169 	if (rxq->rxq_packets)
   5170 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5171 	if (txq->txq_packets)
   5172 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5173 
   5174 	if (avg_size == 0) {
   5175 		new_itr = 450; /* restore default value */
   5176 		goto out;
   5177 	}
   5178 
   5179 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5180 	avg_size += 24;
   5181 
   5182 	/* Don't starve jumbo frames */
   5183 	avg_size = min(avg_size, 3000);
   5184 
   5185 	/* Give a little boost to mid-size frames */
   5186 	if ((avg_size > 300) && (avg_size < 1200))
   5187 		new_itr = avg_size / 3;
   5188 	else
   5189 		new_itr = avg_size / 2;
   5190 
   5191 out:
   5192 	/*
    5193 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5194 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5195 	 */
   5196 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5197 		new_itr *= 4;
   5198 
   5199 	if (new_itr != wmq->wmq_itr) {
   5200 		wmq->wmq_itr = new_itr;
   5201 		wmq->wmq_set_itr = true;
   5202 	} else
   5203 		wmq->wmq_set_itr = false;
   5204 
   5205 	rxq->rxq_packets = 0;
   5206 	rxq->rxq_bytes = 0;
   5207 	txq->txq_packets = 0;
   5208 	txq->txq_bytes = 0;
   5209 #endif
   5210 }
   5211 
   5212 /*
   5213  * wm_init:		[ifnet interface function]
   5214  *
   5215  *	Initialize the interface.
   5216  */
   5217 static int
   5218 wm_init(struct ifnet *ifp)
   5219 {
   5220 	struct wm_softc *sc = ifp->if_softc;
   5221 	int ret;
   5222 
   5223 	WM_CORE_LOCK(sc);
   5224 	ret = wm_init_locked(ifp);
   5225 	WM_CORE_UNLOCK(sc);
   5226 
   5227 	return ret;
   5228 }
   5229 
   5230 static int
   5231 wm_init_locked(struct ifnet *ifp)
   5232 {
   5233 	struct wm_softc *sc = ifp->if_softc;
   5234 	int i, j, trynum, error = 0;
   5235 	uint32_t reg;
   5236 
   5237 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5238 		device_xname(sc->sc_dev), __func__));
   5239 	KASSERT(WM_CORE_LOCKED(sc));
   5240 
   5241 	/*
    5242 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5243 	 * There is a small but measurable benefit to avoiding the adjustment
   5244 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5245 	 * on such platforms.  One possibility is that the DMA itself is
   5246 	 * slightly more efficient if the front of the entire packet (instead
   5247 	 * of the front of the headers) is aligned.
   5248 	 *
   5249 	 * Note we must always set align_tweak to 0 if we are using
   5250 	 * jumbo frames.
   5251 	 */
   5252 #ifdef __NO_STRICT_ALIGNMENT
   5253 	sc->sc_align_tweak = 0;
   5254 #else
   5255 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5256 		sc->sc_align_tweak = 0;
   5257 	else
   5258 		sc->sc_align_tweak = 2;
   5259 #endif /* __NO_STRICT_ALIGNMENT */
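         	/*
         	 * (A 2-byte tweak offsets the 14-byte Ethernet header so that
         	 * the IP header which follows it is 4-byte aligned.)
         	 */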
   5260 
   5261 	/* Cancel any pending I/O. */
   5262 	wm_stop_locked(ifp, 0);
   5263 
   5264 	/* update statistics before reset */
   5265 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5266 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5267 
   5268 	/* PCH_SPT hardware workaround */
   5269 	if (sc->sc_type == WM_T_PCH_SPT)
   5270 		wm_flush_desc_rings(sc);
   5271 
   5272 	/* Reset the chip to a known state. */
   5273 	wm_reset(sc);
   5274 
   5275 	/*
    5276 	 * AMT-based hardware can now take control from firmware.
   5277 	 * Do this after reset.
   5278 	 */
   5279 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5280 		wm_get_hw_control(sc);
   5281 
   5282 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5283 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5284 		wm_legacy_irq_quirk_spt(sc);
   5285 
   5286 	/* Init hardware bits */
   5287 	wm_initialize_hardware_bits(sc);
   5288 
   5289 	/* Reset the PHY. */
   5290 	if (sc->sc_flags & WM_F_HAS_MII)
   5291 		wm_gmii_reset(sc);
   5292 
   5293 	/* Calculate (E)ITR value */
   5294 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5295 		/*
   5296 		 * For NEWQUEUE's EITR (except for 82575).
    5297 		 * The 82575's EITR should be set to the same throttling value
    5298 		 * as the other old controllers' ITR because the interrupts/sec
    5299 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5300 		 *
    5301 		 * The 82574's EITR should be set to the same throttling value
    5302 		 * as its ITR.
    5303 		 * For N interrupts/sec, set this value to:
    5304 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5305 		 */
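         		/* e.g. 450 => 1,000,000 / 450 = ~2222 interrupts/sec */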
   5306 		sc->sc_itr_init = 450;
   5307 	} else if (sc->sc_type >= WM_T_82543) {
   5308 		/*
   5309 		 * Set up the interrupt throttling register (units of 256ns)
   5310 		 * Note that a footnote in Intel's documentation says this
   5311 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5312 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5313 		 * that that is also true for the 1024ns units of the other
   5314 		 * interrupt-related timer registers -- so, really, we ought
   5315 		 * to divide this value by 4 when the link speed is low.
   5316 		 *
   5317 		 * XXX implement this division at link speed change!
   5318 		 */
   5319 
   5320 		/*
   5321 		 * For N interrupts/sec, set this value to:
   5322 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5323 		 * absolute and packet timer values to this value
   5324 		 * divided by 4 to get "simple timer" behavior.
   5325 		 */
   5326 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5327 	}
   5328 
   5329 	error = wm_init_txrx_queues(sc);
   5330 	if (error)
   5331 		goto out;
   5332 
   5333 	/*
   5334 	 * Clear out the VLAN table -- we don't use it (yet).
   5335 	 */
   5336 	CSR_WRITE(sc, WMREG_VET, 0);
   5337 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5338 		trynum = 10; /* Due to hw errata */
   5339 	else
   5340 		trynum = 1;
   5341 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5342 		for (j = 0; j < trynum; j++)
   5343 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5344 
   5345 	/*
   5346 	 * Set up flow-control parameters.
   5347 	 *
   5348 	 * XXX Values could probably stand some tuning.
   5349 	 */
   5350 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5351 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5352 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5353 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5354 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5355 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5356 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5357 	}
   5358 
   5359 	sc->sc_fcrtl = FCRTL_DFLT;
   5360 	if (sc->sc_type < WM_T_82543) {
   5361 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5362 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5363 	} else {
   5364 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5365 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5366 	}
   5367 
   5368 	if (sc->sc_type == WM_T_80003)
   5369 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5370 	else
   5371 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5372 
   5373 	/* Writes the control register. */
   5374 	wm_set_vlan(sc);
   5375 
   5376 	if (sc->sc_flags & WM_F_HAS_MII) {
   5377 		int val;
   5378 
   5379 		switch (sc->sc_type) {
   5380 		case WM_T_80003:
   5381 		case WM_T_ICH8:
   5382 		case WM_T_ICH9:
   5383 		case WM_T_ICH10:
   5384 		case WM_T_PCH:
   5385 		case WM_T_PCH2:
   5386 		case WM_T_PCH_LPT:
   5387 		case WM_T_PCH_SPT:
   5388 			/*
   5389 			 * Set the mac to wait the maximum time between each
   5390 			 * iteration and increase the max iterations when
   5391 			 * polling the phy; this fixes erroneous timeouts at
   5392 			 * 10Mbps.
   5393 			 */
   5394 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5395 			    0xFFFF);
   5396 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5397 			val |= 0x3F;
   5398 			wm_kmrn_writereg(sc,
   5399 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5400 			break;
   5401 		default:
   5402 			break;
   5403 		}
   5404 
   5405 		if (sc->sc_type == WM_T_80003) {
   5406 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5407 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5408 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5409 
   5410 			/* Bypass RX and TX FIFO's */
   5411 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5412 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5413 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5414 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5415 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5416 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5417 		}
   5418 	}
   5419 #if 0
   5420 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5421 #endif
   5422 
   5423 	/* Set up checksum offload parameters. */
   5424 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5425 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5426 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5427 		reg |= RXCSUM_IPOFL;
   5428 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5429 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5430 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5431 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5432 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5433 
   5434 	/* Set registers about MSI-X */
   5435 	if (wm_is_using_msix(sc)) {
   5436 		uint32_t ivar;
   5437 		struct wm_queue *wmq;
   5438 		int qid, qintr_idx;
   5439 
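         		/*
         		 * Route each queue's TX/RX interrupt causes to that
         		 * queue's MSI-X vector, and the link cause to the link
         		 * vector, using the chip family's routing registers
         		 * (MSIXBM on the 82575, IVAR on the 82574 and on 82576
         		 * and later).
         		 */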
   5440 		if (sc->sc_type == WM_T_82575) {
   5441 			/* Interrupt control */
   5442 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5443 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5444 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5445 
   5446 			/* TX and RX */
   5447 			for (i = 0; i < sc->sc_nqueues; i++) {
   5448 				wmq = &sc->sc_queue[i];
   5449 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5450 				    EITR_TX_QUEUE(wmq->wmq_id)
   5451 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5452 			}
   5453 			/* Link status */
   5454 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5455 			    EITR_OTHER);
   5456 		} else if (sc->sc_type == WM_T_82574) {
   5457 			/* Interrupt control */
   5458 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5459 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5460 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5461 
   5462 			/*
    5463 			 * Work around an issue with spurious interrupts
    5464 			 * in MSI-X mode.
    5465 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5466 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5467 			 */
   5468 			reg = CSR_READ(sc, WMREG_RFCTL);
   5469 			reg |= WMREG_RFCTL_ACKDIS;
   5470 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5471 
   5472 			ivar = 0;
   5473 			/* TX and RX */
   5474 			for (i = 0; i < sc->sc_nqueues; i++) {
   5475 				wmq = &sc->sc_queue[i];
   5476 				qid = wmq->wmq_id;
   5477 				qintr_idx = wmq->wmq_intr_idx;
   5478 
   5479 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5480 				    IVAR_TX_MASK_Q_82574(qid));
   5481 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5482 				    IVAR_RX_MASK_Q_82574(qid));
   5483 			}
   5484 			/* Link status */
   5485 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5486 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5487 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5488 		} else {
   5489 			/* Interrupt control */
   5490 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5491 			    | GPIE_EIAME | GPIE_PBA);
   5492 
   5493 			switch (sc->sc_type) {
   5494 			case WM_T_82580:
   5495 			case WM_T_I350:
   5496 			case WM_T_I354:
   5497 			case WM_T_I210:
   5498 			case WM_T_I211:
   5499 				/* TX and RX */
   5500 				for (i = 0; i < sc->sc_nqueues; i++) {
   5501 					wmq = &sc->sc_queue[i];
   5502 					qid = wmq->wmq_id;
   5503 					qintr_idx = wmq->wmq_intr_idx;
   5504 
   5505 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5506 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5507 					ivar |= __SHIFTIN((qintr_idx
   5508 						| IVAR_VALID),
   5509 					    IVAR_TX_MASK_Q(qid));
   5510 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5511 					ivar |= __SHIFTIN((qintr_idx
   5512 						| IVAR_VALID),
   5513 					    IVAR_RX_MASK_Q(qid));
   5514 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5515 				}
   5516 				break;
   5517 			case WM_T_82576:
   5518 				/* TX and RX */
   5519 				for (i = 0; i < sc->sc_nqueues; i++) {
   5520 					wmq = &sc->sc_queue[i];
   5521 					qid = wmq->wmq_id;
   5522 					qintr_idx = wmq->wmq_intr_idx;
   5523 
   5524 					ivar = CSR_READ(sc,
   5525 					    WMREG_IVAR_Q_82576(qid));
   5526 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5527 					ivar |= __SHIFTIN((qintr_idx
   5528 						| IVAR_VALID),
   5529 					    IVAR_TX_MASK_Q_82576(qid));
   5530 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5531 					ivar |= __SHIFTIN((qintr_idx
   5532 						| IVAR_VALID),
   5533 					    IVAR_RX_MASK_Q_82576(qid));
   5534 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5535 					    ivar);
   5536 				}
   5537 				break;
   5538 			default:
   5539 				break;
   5540 			}
   5541 
   5542 			/* Link status */
   5543 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5544 			    IVAR_MISC_OTHER);
   5545 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5546 		}
   5547 
   5548 		if (wm_is_using_multiqueue(sc)) {
   5549 			wm_init_rss(sc);
   5550 
   5551 			/*
    5552 			 * NOTE: Receive Full-Packet Checksum Offload
    5553 			 * is mutually exclusive with Multiqueue. However,
    5554 			 * this is not the same as TCP/IP checksums, which
    5555 			 * still work.
    5556 			 */
   5557 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5558 			reg |= RXCSUM_PCSD;
   5559 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5560 		}
   5561 	}
   5562 
   5563 	/* Set up the interrupt registers. */
   5564 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5565 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5566 	    ICR_RXO | ICR_RXT0;
   5567 	if (wm_is_using_msix(sc)) {
   5568 		uint32_t mask;
   5569 		struct wm_queue *wmq;
   5570 
   5571 		switch (sc->sc_type) {
   5572 		case WM_T_82574:
   5573 			mask = 0;
   5574 			for (i = 0; i < sc->sc_nqueues; i++) {
   5575 				wmq = &sc->sc_queue[i];
   5576 				mask |= ICR_TXQ(wmq->wmq_id);
   5577 				mask |= ICR_RXQ(wmq->wmq_id);
   5578 			}
   5579 			mask |= ICR_OTHER;
   5580 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5581 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5582 			break;
   5583 		default:
   5584 			if (sc->sc_type == WM_T_82575) {
   5585 				mask = 0;
   5586 				for (i = 0; i < sc->sc_nqueues; i++) {
   5587 					wmq = &sc->sc_queue[i];
   5588 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5589 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5590 				}
   5591 				mask |= EITR_OTHER;
   5592 			} else {
   5593 				mask = 0;
   5594 				for (i = 0; i < sc->sc_nqueues; i++) {
   5595 					wmq = &sc->sc_queue[i];
   5596 					mask |= 1 << wmq->wmq_intr_idx;
   5597 				}
   5598 				mask |= 1 << sc->sc_link_intr_idx;
   5599 			}
   5600 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5601 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5602 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5603 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5604 			break;
   5605 		}
   5606 	} else
   5607 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5608 
   5609 	/* Set up the inter-packet gap. */
   5610 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5611 
   5612 	if (sc->sc_type >= WM_T_82543) {
   5613 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5614 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5615 			wm_itrs_writereg(sc, wmq);
   5616 		}
   5617 		/*
    5618 		 * Link interrupts occur much less frequently than TX
    5619 		 * and RX interrupts, so we don't tune the
    5620 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
    5621 		 * if_igb does.
   5622 		 */
   5623 	}
   5624 
   5625 	/* Set the VLAN ethernetype. */
   5626 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5627 
   5628 	/*
   5629 	 * Set up the transmit control register; we start out with
    5630 	 * a collision distance suitable for FDX, but update it when
   5631 	 * we resolve the media type.
   5632 	 */
   5633 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5634 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5635 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5636 	if (sc->sc_type >= WM_T_82571)
   5637 		sc->sc_tctl |= TCTL_MULR;
   5638 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5639 
   5640 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5641 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5642 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5643 	}
   5644 
   5645 	if (sc->sc_type == WM_T_80003) {
   5646 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5647 		reg &= ~TCTL_EXT_GCEX_MASK;
   5648 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5649 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5650 	}
   5651 
   5652 	/* Set the media. */
   5653 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5654 		goto out;
   5655 
   5656 	/* Configure for OS presence */
   5657 	wm_init_manageability(sc);
   5658 
   5659 	/*
   5660 	 * Set up the receive control register; we actually program
   5661 	 * the register when we set the receive filter.  Use multicast
   5662 	 * address offset type 0.
   5663 	 *
   5664 	 * Only the i82544 has the ability to strip the incoming
   5665 	 * CRC, so we don't enable that feature.
   5666 	 */
   5667 	sc->sc_mchash_type = 0;
   5668 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5669 	    | RCTL_MO(sc->sc_mchash_type);
   5670 
   5671 	/*
    5672 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5673 	 */
   5674 	if (sc->sc_type == WM_T_82574)
   5675 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5676 
   5677 	/*
   5678 	 * The I350 has a bug where it always strips the CRC whether
    5679 	 * asked to or not. So ask for a stripped CRC here and cope in rxeof.
   5680 	 */
   5681 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5682 	    || (sc->sc_type == WM_T_I210))
   5683 		sc->sc_rctl |= RCTL_SECRC;
   5684 
   5685 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5686 	    && (ifp->if_mtu > ETHERMTU)) {
   5687 		sc->sc_rctl |= RCTL_LPE;
   5688 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5689 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5690 	}
   5691 
   5692 	if (MCLBYTES == 2048) {
   5693 		sc->sc_rctl |= RCTL_2k;
   5694 	} else {
   5695 		if (sc->sc_type >= WM_T_82543) {
   5696 			switch (MCLBYTES) {
   5697 			case 4096:
   5698 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5699 				break;
   5700 			case 8192:
   5701 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5702 				break;
   5703 			case 16384:
   5704 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5705 				break;
   5706 			default:
   5707 				panic("wm_init: MCLBYTES %d unsupported",
   5708 				    MCLBYTES);
   5709 				break;
   5710 			}
   5711 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5712 	}
   5713 
   5714 	/* Enable ECC */
   5715 	switch (sc->sc_type) {
   5716 	case WM_T_82571:
   5717 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5718 		reg |= PBA_ECC_CORR_EN;
   5719 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5720 		break;
   5721 	case WM_T_PCH_LPT:
   5722 	case WM_T_PCH_SPT:
   5723 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5724 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5725 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5726 
   5727 		sc->sc_ctrl |= CTRL_MEHE;
   5728 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5729 		break;
   5730 	default:
   5731 		break;
   5732 	}
   5733 
    5734 	/* On 82575 and later, set RDT only if RX is enabled. */
   5735 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5736 		int qidx;
   5737 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5738 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5739 			for (i = 0; i < WM_NRXDESC; i++) {
   5740 				mutex_enter(rxq->rxq_lock);
   5741 				wm_init_rxdesc(rxq, i);
   5742 				mutex_exit(rxq->rxq_lock);
   5743 
   5744 			}
   5745 		}
   5746 	}
   5747 
   5748 	/* Set the receive filter. */
   5749 	wm_set_filter(sc);
   5750 
   5751 	wm_turnon(sc);
   5752 
   5753 	/* Start the one second link check clock. */
   5754 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5755 
   5756 	/* ...all done! */
   5757 	ifp->if_flags |= IFF_RUNNING;
   5758 	ifp->if_flags &= ~IFF_OACTIVE;
   5759 
   5760  out:
   5761 	sc->sc_if_flags = ifp->if_flags;
   5762 	if (error)
   5763 		log(LOG_ERR, "%s: interface not running\n",
   5764 		    device_xname(sc->sc_dev));
   5765 	return error;
   5766 }
   5767 
   5768 /*
   5769  * wm_stop:		[ifnet interface function]
   5770  *
   5771  *	Stop transmission on the interface.
   5772  */
   5773 static void
   5774 wm_stop(struct ifnet *ifp, int disable)
   5775 {
   5776 	struct wm_softc *sc = ifp->if_softc;
   5777 
   5778 	WM_CORE_LOCK(sc);
   5779 	wm_stop_locked(ifp, disable);
   5780 	WM_CORE_UNLOCK(sc);
   5781 }
   5782 
   5783 static void
   5784 wm_stop_locked(struct ifnet *ifp, int disable)
   5785 {
   5786 	struct wm_softc *sc = ifp->if_softc;
   5787 	struct wm_txsoft *txs;
   5788 	int i, qidx;
   5789 
   5790 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5791 		device_xname(sc->sc_dev), __func__));
   5792 	KASSERT(WM_CORE_LOCKED(sc));
   5793 
   5794 	wm_turnoff(sc);
   5795 
   5796 	/* Stop the one second clock. */
   5797 	callout_stop(&sc->sc_tick_ch);
   5798 
   5799 	/* Stop the 82547 Tx FIFO stall check timer. */
   5800 	if (sc->sc_type == WM_T_82547)
   5801 		callout_stop(&sc->sc_txfifo_ch);
   5802 
   5803 	if (sc->sc_flags & WM_F_HAS_MII) {
   5804 		/* Down the MII. */
   5805 		mii_down(&sc->sc_mii);
   5806 	} else {
   5807 #if 0
   5808 		/* Should we clear PHY's status properly? */
   5809 		wm_reset(sc);
   5810 #endif
   5811 	}
   5812 
   5813 	/* Stop the transmit and receive processes. */
   5814 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5815 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5816 	sc->sc_rctl &= ~RCTL_EN;
   5817 
   5818 	/*
   5819 	 * Clear the interrupt mask to ensure the device cannot assert its
   5820 	 * interrupt line.
   5821 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5822 	 * service any currently pending or shared interrupt.
   5823 	 */
   5824 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5825 	sc->sc_icr = 0;
   5826 	if (wm_is_using_msix(sc)) {
   5827 		if (sc->sc_type != WM_T_82574) {
   5828 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5829 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5830 		} else
   5831 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5832 	}
   5833 
   5834 	/* Release any queued transmit buffers. */
   5835 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5836 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5837 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5838 		mutex_enter(txq->txq_lock);
   5839 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5840 			txs = &txq->txq_soft[i];
   5841 			if (txs->txs_mbuf != NULL) {
   5842 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5843 				m_freem(txs->txs_mbuf);
   5844 				txs->txs_mbuf = NULL;
   5845 			}
   5846 		}
   5847 		mutex_exit(txq->txq_lock);
   5848 	}
   5849 
   5850 	/* Mark the interface as down and cancel the watchdog timer. */
   5851 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5852 	ifp->if_timer = 0;
   5853 
   5854 	if (disable) {
   5855 		for (i = 0; i < sc->sc_nqueues; i++) {
   5856 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5857 			mutex_enter(rxq->rxq_lock);
   5858 			wm_rxdrain(rxq);
   5859 			mutex_exit(rxq->rxq_lock);
   5860 		}
   5861 	}
   5862 
   5863 #if 0 /* notyet */
   5864 	if (sc->sc_type >= WM_T_82544)
   5865 		CSR_WRITE(sc, WMREG_WUC, 0);
   5866 #endif
   5867 }
   5868 
   5869 static void
   5870 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5871 {
   5872 	struct mbuf *m;
   5873 	int i;
   5874 
   5875 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5876 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5877 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5878 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5879 		    m->m_data, m->m_len, m->m_flags);
   5880 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5881 	    i, i == 1 ? "" : "s");
   5882 }
   5883 
   5884 /*
   5885  * wm_82547_txfifo_stall:
   5886  *
   5887  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5888  *	reset the FIFO pointers, and restart packet transmission.
   5889  */
   5890 static void
   5891 wm_82547_txfifo_stall(void *arg)
   5892 {
   5893 	struct wm_softc *sc = arg;
   5894 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5895 
   5896 	mutex_enter(txq->txq_lock);
   5897 
   5898 	if (txq->txq_stopping)
   5899 		goto out;
   5900 
   5901 	if (txq->txq_fifo_stall) {
   5902 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5903 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5904 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5905 			/*
   5906 			 * Packets have drained.  Stop transmitter, reset
   5907 			 * FIFO pointers, restart transmitter, and kick
   5908 			 * the packet queue.
   5909 			 */
   5910 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5911 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5912 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5913 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5914 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5915 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5916 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5917 			CSR_WRITE_FLUSH(sc);
   5918 
   5919 			txq->txq_fifo_head = 0;
   5920 			txq->txq_fifo_stall = 0;
   5921 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5922 		} else {
   5923 			/*
   5924 			 * Still waiting for packets to drain; try again in
   5925 			 * another tick.
   5926 			 */
   5927 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5928 		}
   5929 	}
   5930 
   5931 out:
   5932 	mutex_exit(txq->txq_lock);
   5933 }
   5934 
   5935 /*
   5936  * wm_82547_txfifo_bugchk:
   5937  *
   5938  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5939  *	prevent enqueueing a packet that would wrap around the end
    5940  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5941  *
   5942  *	We do this by checking the amount of space before the end
   5943  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5944  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5945  *	the internal FIFO pointers to the beginning, and restart
   5946  *	transmission on the interface.
   5947  */
   5948 #define	WM_FIFO_HDR		0x10
   5949 #define	WM_82547_PAD_LEN	0x3e0
   5950 static int
   5951 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5952 {
   5953 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5954 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
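         	/*
         	 * A packet occupies its length plus a FIFO header, rounded up
         	 * to a WM_FIFO_HDR (16-byte) boundary.
         	 */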
   5955 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5956 
   5957 	/* Just return if already stalled. */
   5958 	if (txq->txq_fifo_stall)
   5959 		return 1;
   5960 
   5961 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5962 		/* Stall only occurs in half-duplex mode. */
   5963 		goto send_packet;
   5964 	}
   5965 
   5966 	if (len >= WM_82547_PAD_LEN + space) {
   5967 		txq->txq_fifo_stall = 1;
   5968 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5969 		return 1;
   5970 	}
   5971 
   5972  send_packet:
   5973 	txq->txq_fifo_head += len;
   5974 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5975 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5976 
   5977 	return 0;
   5978 }
   5979 
   5980 static int
   5981 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5982 {
   5983 	int error;
   5984 
   5985 	/*
   5986 	 * Allocate the control data structures, and create and load the
   5987 	 * DMA map for it.
   5988 	 *
   5989 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5990 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5991 	 * both sets within the same 4G segment.
   5992 	 */
   5993 	if (sc->sc_type < WM_T_82544)
   5994 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5995 	else
   5996 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5997 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5998 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5999 	else
   6000 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6001 
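         	/*
         	 * The 0x100000000ULL boundary argument to bus_dmamem_alloc()
         	 * keeps the allocation from crossing a 4GB boundary, which
         	 * satisfies the 4G-segment requirement noted above.
         	 */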
   6002 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6003 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6004 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6005 		aprint_error_dev(sc->sc_dev,
   6006 		    "unable to allocate TX control data, error = %d\n",
   6007 		    error);
   6008 		goto fail_0;
   6009 	}
   6010 
   6011 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6012 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6013 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6014 		aprint_error_dev(sc->sc_dev,
   6015 		    "unable to map TX control data, error = %d\n", error);
   6016 		goto fail_1;
   6017 	}
   6018 
   6019 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6020 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6021 		aprint_error_dev(sc->sc_dev,
   6022 		    "unable to create TX control data DMA map, error = %d\n",
   6023 		    error);
   6024 		goto fail_2;
   6025 	}
   6026 
   6027 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6028 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6029 		aprint_error_dev(sc->sc_dev,
   6030 		    "unable to load TX control data DMA map, error = %d\n",
   6031 		    error);
   6032 		goto fail_3;
   6033 	}
   6034 
   6035 	return 0;
   6036 
   6037  fail_3:
   6038 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6039  fail_2:
   6040 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6041 	    WM_TXDESCS_SIZE(txq));
   6042  fail_1:
   6043 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6044  fail_0:
   6045 	return error;
   6046 }
   6047 
   6048 static void
   6049 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6050 {
   6051 
   6052 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6053 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6054 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6055 	    WM_TXDESCS_SIZE(txq));
   6056 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6057 }
   6058 
   6059 static int
   6060 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6061 {
   6062 	int error;
   6063 	size_t rxq_descs_size;
   6064 
   6065 	/*
   6066 	 * Allocate the control data structures, and create and load the
   6067 	 * DMA map for it.
   6068 	 *
   6069 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6070 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6071 	 * both sets within the same 4G segment.
   6072 	 */
   6073 	rxq->rxq_ndesc = WM_NRXDESC;
   6074 	if (sc->sc_type == WM_T_82574)
   6075 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6076 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6077 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6078 	else
   6079 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6080 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6081 
   6082 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6083 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6084 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6085 		aprint_error_dev(sc->sc_dev,
   6086 		    "unable to allocate RX control data, error = %d\n",
   6087 		    error);
   6088 		goto fail_0;
   6089 	}
   6090 
   6091 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6092 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6093 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6094 		aprint_error_dev(sc->sc_dev,
   6095 		    "unable to map RX control data, error = %d\n", error);
   6096 		goto fail_1;
   6097 	}
   6098 
   6099 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6100 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6101 		aprint_error_dev(sc->sc_dev,
   6102 		    "unable to create RX control data DMA map, error = %d\n",
   6103 		    error);
   6104 		goto fail_2;
   6105 	}
   6106 
   6107 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6108 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6109 		aprint_error_dev(sc->sc_dev,
   6110 		    "unable to load RX control data DMA map, error = %d\n",
   6111 		    error);
   6112 		goto fail_3;
   6113 	}
   6114 
   6115 	return 0;
   6116 
   6117  fail_3:
   6118 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6119  fail_2:
   6120 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6121 	    rxq_descs_size);
   6122  fail_1:
   6123 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6124  fail_0:
   6125 	return error;
   6126 }
   6127 
   6128 static void
   6129 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6130 {
   6131 
   6132 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6133 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6134 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6135 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6136 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6137 }
   6138 
   6139 
   6140 static int
   6141 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6142 {
   6143 	int i, error;
   6144 
   6145 	/* Create the transmit buffer DMA maps. */
   6146 	WM_TXQUEUELEN(txq) =
   6147 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6148 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6149 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6150 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6151 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6152 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6153 			aprint_error_dev(sc->sc_dev,
   6154 			    "unable to create Tx DMA map %d, error = %d\n",
   6155 			    i, error);
   6156 			goto fail;
   6157 		}
   6158 	}
   6159 
   6160 	return 0;
   6161 
   6162  fail:
   6163 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6164 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6165 			bus_dmamap_destroy(sc->sc_dmat,
   6166 			    txq->txq_soft[i].txs_dmamap);
   6167 	}
   6168 	return error;
   6169 }
   6170 
   6171 static void
   6172 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6173 {
   6174 	int i;
   6175 
   6176 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6177 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6178 			bus_dmamap_destroy(sc->sc_dmat,
   6179 			    txq->txq_soft[i].txs_dmamap);
   6180 	}
   6181 }
   6182 
   6183 static int
   6184 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6185 {
   6186 	int i, error;
   6187 
   6188 	/* Create the receive buffer DMA maps. */
   6189 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6190 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6191 			    MCLBYTES, 0, 0,
   6192 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6193 			aprint_error_dev(sc->sc_dev,
   6194 			    "unable to create Rx DMA map %d error = %d\n",
   6195 			    i, error);
   6196 			goto fail;
   6197 		}
   6198 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6199 	}
   6200 
   6201 	return 0;
   6202 
   6203  fail:
   6204 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6205 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6206 			bus_dmamap_destroy(sc->sc_dmat,
   6207 			    rxq->rxq_soft[i].rxs_dmamap);
   6208 	}
   6209 	return error;
   6210 }
   6211 
   6212 static void
   6213 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6214 {
   6215 	int i;
   6216 
   6217 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6218 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6219 			bus_dmamap_destroy(sc->sc_dmat,
   6220 			    rxq->rxq_soft[i].rxs_dmamap);
   6221 	}
   6222 }
   6223 
   6224 /*
    6225  * wm_alloc_txrx_queues:
    6226  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6227  */
   6228 static int
   6229 wm_alloc_txrx_queues(struct wm_softc *sc)
   6230 {
   6231 	int i, error, tx_done, rx_done;
   6232 
   6233 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6234 	    KM_SLEEP);
   6235 	if (sc->sc_queue == NULL) {
   6236 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6237 		error = ENOMEM;
   6238 		goto fail_0;
   6239 	}
   6240 
   6241 	/*
   6242 	 * For transmission
   6243 	 */
   6244 	error = 0;
   6245 	tx_done = 0;
   6246 	for (i = 0; i < sc->sc_nqueues; i++) {
   6247 #ifdef WM_EVENT_COUNTERS
   6248 		int j;
   6249 		const char *xname;
   6250 #endif
   6251 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6252 		txq->txq_sc = sc;
   6253 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6254 
   6255 		error = wm_alloc_tx_descs(sc, txq);
   6256 		if (error)
   6257 			break;
   6258 		error = wm_alloc_tx_buffer(sc, txq);
   6259 		if (error) {
   6260 			wm_free_tx_descs(sc, txq);
   6261 			break;
   6262 		}
   6263 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6264 		if (txq->txq_interq == NULL) {
   6265 			wm_free_tx_descs(sc, txq);
   6266 			wm_free_tx_buffer(sc, txq);
   6267 			error = ENOMEM;
   6268 			break;
   6269 		}
   6270 
   6271 #ifdef WM_EVENT_COUNTERS
   6272 		xname = device_xname(sc->sc_dev);
   6273 
   6274 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6275 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6276 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6277 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6278 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6279 
   6280 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6281 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6282 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6283 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6284 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6285 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6286 
   6287 		for (j = 0; j < WM_NTXSEGS; j++) {
   6288 			snprintf(txq->txq_txseg_evcnt_names[j],
   6289 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6290 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6291 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6292 		}
   6293 
   6294 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6295 
   6296 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6297 #endif /* WM_EVENT_COUNTERS */
   6298 
   6299 		tx_done++;
   6300 	}
   6301 	if (error)
   6302 		goto fail_1;
   6303 
   6304 	/*
    6305 	 * For receive
   6306 	 */
   6307 	error = 0;
   6308 	rx_done = 0;
   6309 	for (i = 0; i < sc->sc_nqueues; i++) {
   6310 #ifdef WM_EVENT_COUNTERS
   6311 		const char *xname;
   6312 #endif
   6313 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6314 		rxq->rxq_sc = sc;
   6315 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6316 
   6317 		error = wm_alloc_rx_descs(sc, rxq);
   6318 		if (error)
   6319 			break;
   6320 
   6321 		error = wm_alloc_rx_buffer(sc, rxq);
   6322 		if (error) {
   6323 			wm_free_rx_descs(sc, rxq);
   6324 			break;
   6325 		}
   6326 
   6327 #ifdef WM_EVENT_COUNTERS
   6328 		xname = device_xname(sc->sc_dev);
   6329 
   6330 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6331 
   6332 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6333 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6334 #endif /* WM_EVENT_COUNTERS */
   6335 
   6336 		rx_done++;
   6337 	}
   6338 	if (error)
   6339 		goto fail_2;
   6340 
   6341 	return 0;
   6342 
   6343  fail_2:
   6344 	for (i = 0; i < rx_done; i++) {
   6345 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6346 		wm_free_rx_buffer(sc, rxq);
   6347 		wm_free_rx_descs(sc, rxq);
   6348 		if (rxq->rxq_lock)
   6349 			mutex_obj_free(rxq->rxq_lock);
   6350 	}
   6351  fail_1:
   6352 	for (i = 0; i < tx_done; i++) {
   6353 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6354 		pcq_destroy(txq->txq_interq);
   6355 		wm_free_tx_buffer(sc, txq);
   6356 		wm_free_tx_descs(sc, txq);
   6357 		if (txq->txq_lock)
   6358 			mutex_obj_free(txq->txq_lock);
   6359 	}
   6360 
   6361 	kmem_free(sc->sc_queue,
   6362 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6363  fail_0:
   6364 	return error;
   6365 }
   6366 
   6367 /*
    6368  * wm_free_txrx_queues:
   6369  *	Free {tx,rx}descs and {tx,rx} buffers
   6370  */
   6371 static void
   6372 wm_free_txrx_queues(struct wm_softc *sc)
   6373 {
   6374 	int i;
   6375 
   6376 	for (i = 0; i < sc->sc_nqueues; i++) {
   6377 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6378 
   6379 #ifdef WM_EVENT_COUNTERS
   6380 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6381 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6382 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6383 #endif /* WM_EVENT_COUNTERS */
   6384 
   6385 		wm_free_rx_buffer(sc, rxq);
   6386 		wm_free_rx_descs(sc, rxq);
   6387 		if (rxq->rxq_lock)
   6388 			mutex_obj_free(rxq->rxq_lock);
   6389 	}
   6390 
   6391 	for (i = 0; i < sc->sc_nqueues; i++) {
   6392 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6393 		struct mbuf *m;
   6394 #ifdef WM_EVENT_COUNTERS
   6395 		int j;
   6396 
   6397 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6398 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6399 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6400 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6401 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6402 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6403 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6404 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6405 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6406 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6407 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6408 
   6409 		for (j = 0; j < WM_NTXSEGS; j++)
   6410 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6411 
   6412 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6413 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6414 #endif /* WM_EVENT_COUNTERS */
   6415 
   6416 		/* drain txq_interq */
   6417 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6418 			m_freem(m);
   6419 		pcq_destroy(txq->txq_interq);
   6420 
   6421 		wm_free_tx_buffer(sc, txq);
   6422 		wm_free_tx_descs(sc, txq);
   6423 		if (txq->txq_lock)
   6424 			mutex_obj_free(txq->txq_lock);
   6425 	}
   6426 
   6427 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6428 }
   6429 
   6430 static void
   6431 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6432 {
   6433 
   6434 	KASSERT(mutex_owned(txq->txq_lock));
   6435 
   6436 	/* Initialize the transmit descriptor ring. */
   6437 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6438 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6439 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6440 	txq->txq_free = WM_NTXDESC(txq);
   6441 	txq->txq_next = 0;
   6442 }
   6443 
   6444 static void
   6445 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6446     struct wm_txqueue *txq)
   6447 {
   6448 
   6449 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6450 		device_xname(sc->sc_dev), __func__));
   6451 	KASSERT(mutex_owned(txq->txq_lock));
   6452 
   6453 	if (sc->sc_type < WM_T_82543) {
   6454 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6455 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6456 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6457 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6458 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6459 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6460 	} else {
   6461 		int qid = wmq->wmq_id;
   6462 
   6463 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6464 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6465 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6466 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6467 
   6468 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6469 			/*
   6470 			 * Don't write TDT before TCTL.EN is set.
    6471 			 * See the documentation.
   6472 			 */
   6473 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6474 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6475 			    | TXDCTL_WTHRESH(0));
   6476 		else {
   6477 			/* XXX should update with AIM? */
   6478 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6479 			if (sc->sc_type >= WM_T_82540) {
    6480 				/* Should be the same value as TIDV */
   6481 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6482 			}
   6483 
   6484 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6485 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6486 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6487 		}
   6488 	}
   6489 }
   6490 
   6491 static void
   6492 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6493 {
   6494 	int i;
   6495 
   6496 	KASSERT(mutex_owned(txq->txq_lock));
   6497 
   6498 	/* Initialize the transmit job descriptors. */
   6499 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6500 		txq->txq_soft[i].txs_mbuf = NULL;
   6501 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6502 	txq->txq_snext = 0;
   6503 	txq->txq_sdirty = 0;
   6504 }
   6505 
   6506 static void
   6507 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6508     struct wm_txqueue *txq)
   6509 {
   6510 
   6511 	KASSERT(mutex_owned(txq->txq_lock));
   6512 
   6513 	/*
   6514 	 * Set up some register offsets that are different between
   6515 	 * the i82542 and the i82543 and later chips.
   6516 	 */
   6517 	if (sc->sc_type < WM_T_82543)
   6518 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6519 	else
   6520 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6521 
   6522 	wm_init_tx_descs(sc, txq);
   6523 	wm_init_tx_regs(sc, wmq, txq);
   6524 	wm_init_tx_buffer(sc, txq);
   6525 }
   6526 
   6527 static void
   6528 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6529     struct wm_rxqueue *rxq)
   6530 {
   6531 
   6532 	KASSERT(mutex_owned(rxq->rxq_lock));
   6533 
   6534 	/*
   6535 	 * Initialize the receive descriptor and receive job
   6536 	 * descriptor rings.
   6537 	 */
   6538 	if (sc->sc_type < WM_T_82543) {
   6539 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6540 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6541 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6542 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6543 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6544 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6545 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6546 
   6547 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6548 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6549 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6550 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6551 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6552 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6553 	} else {
   6554 		int qid = wmq->wmq_id;
   6555 
   6556 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6557 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6558 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6559 
   6560 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6561 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6562 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6563 
    6564 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6565 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6566 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
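         			/*
         			 * Worked example (a sketch, assuming the usual
         			 * MCLBYTES of 2048 and an SRRCTL_BSIZEPKT_SHIFT
         			 * of 10, i.e. a 1KB packet-buffer granularity):
         			 * 2048 >> 10 == 2, so the BSIZEPKT field is
         			 * programmed as 2.  This is why MCLBYTES must be
         			 * a multiple of (1 << SRRCTL_BSIZEPKT_SHIFT),
         			 * which the panic above enforces.
         			 */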
   6567 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6568 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6569 			    | RXDCTL_WTHRESH(1));
   6570 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6571 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6572 		} else {
   6573 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6574 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6575 			/* XXX should update with AIM? */
   6576 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6577 			/* MUST be the same value as RDTR */
   6578 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6579 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6580 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6581 		}
   6582 	}
   6583 }
   6584 
   6585 static int
   6586 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6587 {
   6588 	struct wm_rxsoft *rxs;
   6589 	int error, i;
   6590 
   6591 	KASSERT(mutex_owned(rxq->rxq_lock));
   6592 
   6593 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6594 		rxs = &rxq->rxq_soft[i];
   6595 		if (rxs->rxs_mbuf == NULL) {
   6596 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6597 				log(LOG_ERR, "%s: unable to allocate or map "
   6598 				    "rx buffer %d, error = %d\n",
   6599 				    device_xname(sc->sc_dev), i, error);
   6600 				/*
   6601 				 * XXX Should attempt to run with fewer receive
   6602 				 * XXX buffers instead of just failing.
   6603 				 */
   6604 				wm_rxdrain(rxq);
   6605 				return ENOMEM;
   6606 			}
   6607 		} else {
   6608 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6609 				wm_init_rxdesc(rxq, i);
   6610 			/*
    6611 			 * For 82575 and newer devices, the Rx descriptors
    6612 			 * must be initialized after RCTL.EN is set in
    6613 			 * wm_set_filter().
   6614 			 */
   6615 		}
   6616 	}
   6617 	rxq->rxq_ptr = 0;
   6618 	rxq->rxq_discard = 0;
   6619 	WM_RXCHAIN_RESET(rxq);
   6620 
   6621 	return 0;
   6622 }
   6623 
   6624 static int
   6625 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6626     struct wm_rxqueue *rxq)
   6627 {
   6628 
   6629 	KASSERT(mutex_owned(rxq->rxq_lock));
   6630 
   6631 	/*
   6632 	 * Set up some register offsets that are different between
   6633 	 * the i82542 and the i82543 and later chips.
   6634 	 */
   6635 	if (sc->sc_type < WM_T_82543)
   6636 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6637 	else
   6638 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6639 
   6640 	wm_init_rx_regs(sc, wmq, rxq);
   6641 	return wm_init_rx_buffer(sc, rxq);
   6642 }
   6643 
   6644 /*
    6645  * wm_init_txrx_queues:
   6646  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6647  */
   6648 static int
   6649 wm_init_txrx_queues(struct wm_softc *sc)
   6650 {
   6651 	int i, error = 0;
   6652 
   6653 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6654 		device_xname(sc->sc_dev), __func__));
   6655 
   6656 	for (i = 0; i < sc->sc_nqueues; i++) {
   6657 		struct wm_queue *wmq = &sc->sc_queue[i];
   6658 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6659 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6660 
   6661 		/*
   6662 		 * TODO
    6663 		 * Currently, a constant value is used instead of AIM.
    6664 		 * Furthermore, the interrupt interval of multiqueue (which
    6665 		 * uses polling mode) is set lower than the default value.
    6666 		 * More tuning and AIM support are required.
   6667 		 */
   6668 		if (wm_is_using_multiqueue(sc))
   6669 			wmq->wmq_itr = 50;
   6670 		else
   6671 			wmq->wmq_itr = sc->sc_itr_init;
   6672 		wmq->wmq_set_itr = true;
   6673 
   6674 		mutex_enter(txq->txq_lock);
   6675 		wm_init_tx_queue(sc, wmq, txq);
   6676 		mutex_exit(txq->txq_lock);
   6677 
   6678 		mutex_enter(rxq->rxq_lock);
   6679 		error = wm_init_rx_queue(sc, wmq, rxq);
   6680 		mutex_exit(rxq->rxq_lock);
   6681 		if (error)
   6682 			break;
   6683 	}
   6684 
   6685 	return error;
   6686 }
   6687 
   6688 /*
   6689  * wm_tx_offload:
   6690  *
   6691  *	Set up TCP/IP checksumming parameters for the
   6692  *	specified packet.
   6693  */
   6694 static int
   6695 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6696     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6697 {
   6698 	struct mbuf *m0 = txs->txs_mbuf;
   6699 	struct livengood_tcpip_ctxdesc *t;
   6700 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6701 	uint32_t ipcse;
   6702 	struct ether_header *eh;
   6703 	int offset, iphl;
   6704 	uint8_t fields;
   6705 
   6706 	/*
   6707 	 * XXX It would be nice if the mbuf pkthdr had offset
   6708 	 * fields for the protocol headers.
   6709 	 */
   6710 
   6711 	eh = mtod(m0, struct ether_header *);
   6712 	switch (htons(eh->ether_type)) {
   6713 	case ETHERTYPE_IP:
   6714 	case ETHERTYPE_IPV6:
   6715 		offset = ETHER_HDR_LEN;
   6716 		break;
   6717 
   6718 	case ETHERTYPE_VLAN:
   6719 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6720 		break;
   6721 
   6722 	default:
   6723 		/*
   6724 		 * Don't support this protocol or encapsulation.
   6725 		 */
   6726 		*fieldsp = 0;
   6727 		*cmdp = 0;
   6728 		return 0;
   6729 	}
   6730 
   6731 	if ((m0->m_pkthdr.csum_flags &
   6732 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6733 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6734 	} else {
   6735 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6736 	}
   6737 	ipcse = offset + iphl - 1;
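         	/*
         	 * Worked example (assuming a plain IPv4 TCP frame with no IP
         	 * options): offset is ETHER_HDR_LEN (14) and iphl is 20, so
         	 * ipcse = 14 + 20 - 1 = 33, the offset of the last byte of
         	 * the IP header, which is what the IPCSE field expects.
         	 */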
   6738 
   6739 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6740 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6741 	seg = 0;
   6742 	fields = 0;
   6743 
   6744 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6745 		int hlen = offset + iphl;
   6746 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6747 
   6748 		if (__predict_false(m0->m_len <
   6749 				    (hlen + sizeof(struct tcphdr)))) {
   6750 			/*
   6751 			 * TCP/IP headers are not in the first mbuf; we need
   6752 			 * to do this the slow and painful way.  Let's just
   6753 			 * hope this doesn't happen very often.
   6754 			 */
   6755 			struct tcphdr th;
   6756 
   6757 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6758 
   6759 			m_copydata(m0, hlen, sizeof(th), &th);
   6760 			if (v4) {
   6761 				struct ip ip;
   6762 
   6763 				m_copydata(m0, offset, sizeof(ip), &ip);
   6764 				ip.ip_len = 0;
   6765 				m_copyback(m0,
   6766 				    offset + offsetof(struct ip, ip_len),
   6767 				    sizeof(ip.ip_len), &ip.ip_len);
   6768 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6769 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6770 			} else {
   6771 				struct ip6_hdr ip6;
   6772 
   6773 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6774 				ip6.ip6_plen = 0;
   6775 				m_copyback(m0,
   6776 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6777 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6778 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6779 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6780 			}
   6781 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6782 			    sizeof(th.th_sum), &th.th_sum);
   6783 
   6784 			hlen += th.th_off << 2;
   6785 		} else {
   6786 			/*
   6787 			 * TCP/IP headers are in the first mbuf; we can do
   6788 			 * this the easy way.
   6789 			 */
   6790 			struct tcphdr *th;
   6791 
   6792 			if (v4) {
   6793 				struct ip *ip =
   6794 				    (void *)(mtod(m0, char *) + offset);
   6795 				th = (void *)(mtod(m0, char *) + hlen);
   6796 
   6797 				ip->ip_len = 0;
   6798 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6799 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6800 			} else {
   6801 				struct ip6_hdr *ip6 =
   6802 				    (void *)(mtod(m0, char *) + offset);
   6803 				th = (void *)(mtod(m0, char *) + hlen);
   6804 
   6805 				ip6->ip6_plen = 0;
   6806 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6807 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6808 			}
   6809 			hlen += th->th_off << 2;
   6810 		}
   6811 
   6812 		if (v4) {
   6813 			WM_Q_EVCNT_INCR(txq, txtso);
   6814 			cmdlen |= WTX_TCPIP_CMD_IP;
   6815 		} else {
   6816 			WM_Q_EVCNT_INCR(txq, txtso6);
   6817 			ipcse = 0;
   6818 		}
   6819 		cmd |= WTX_TCPIP_CMD_TSE;
   6820 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6821 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6822 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6823 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6824 	}
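         	/*
         	 * Worked example for the TSO fields above (assuming IPv4 with
         	 * no IP or TCP options): hlen = 14 + 20 + 20 = 54, so HDRLEN
         	 * is 54, MSS is taken from m0->m_pkthdr.segsz, and the length
         	 * merged into cmdlen is the TCP payload only, i.e.
         	 * m0->m_pkthdr.len - hlen.
         	 */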
   6825 
   6826 	/*
   6827 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6828 	 * offload feature, if we load the context descriptor, we
   6829 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6830 	 */
   6831 
   6832 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6833 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6834 	    WTX_TCPIP_IPCSE(ipcse);
   6835 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6836 		WM_Q_EVCNT_INCR(txq, txipsum);
   6837 		fields |= WTX_IXSM;
   6838 	}
   6839 
   6840 	offset += iphl;
   6841 
   6842 	if (m0->m_pkthdr.csum_flags &
   6843 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6844 		WM_Q_EVCNT_INCR(txq, txtusum);
   6845 		fields |= WTX_TXSM;
   6846 		tucs = WTX_TCPIP_TUCSS(offset) |
   6847 		    WTX_TCPIP_TUCSO(offset +
   6848 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6849 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6850 	} else if ((m0->m_pkthdr.csum_flags &
   6851 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6852 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6853 		fields |= WTX_TXSM;
   6854 		tucs = WTX_TCPIP_TUCSS(offset) |
   6855 		    WTX_TCPIP_TUCSO(offset +
   6856 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6857 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6858 	} else {
   6859 		/* Just initialize it to a valid TCP context. */
   6860 		tucs = WTX_TCPIP_TUCSS(offset) |
   6861 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6862 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6863 	}
   6864 
   6865 	/*
    6866 	 * We don't have to write a context descriptor for every packet,
    6867 	 * except on the 82574. On the 82574, the context descriptor must
    6868 	 * be written for every packet when two descriptor queues are used.
    6869 	 * Writing a context descriptor for every packet adds overhead,
    6870 	 * but it does not cause problems.
   6871 	 */
   6872 	/* Fill in the context descriptor. */
   6873 	t = (struct livengood_tcpip_ctxdesc *)
   6874 	    &txq->txq_descs[txq->txq_next];
   6875 	t->tcpip_ipcs = htole32(ipcs);
   6876 	t->tcpip_tucs = htole32(tucs);
   6877 	t->tcpip_cmdlen = htole32(cmdlen);
   6878 	t->tcpip_seg = htole32(seg);
   6879 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6880 
   6881 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6882 	txs->txs_ndesc++;
   6883 
   6884 	*cmdp = cmd;
   6885 	*fieldsp = fields;
   6886 
   6887 	return 0;
   6888 }
   6889 
   6890 static inline int
   6891 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6892 {
   6893 	struct wm_softc *sc = ifp->if_softc;
   6894 	u_int cpuid = cpu_index(curcpu());
   6895 
   6896 	/*
    6897 	 * Currently, a simple distribution strategy is used.
    6898 	 * TODO:
    6899 	 * Distribute by flowid (RSS hash value).
   6900 	 */
    6901 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6902 }
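         /*
          * Worked example of the mapping above: with ncpu = 4,
          * sc_affinity_offset = 1 and sc_nqueues = 2, a packet sent from
          * the CPU with index 3 selects queue (3 + 4 - 1) % 2 = 0.
          * Adding ncpu keeps the sum non-negative when sc_affinity_offset
          * exceeds the CPU index.
          *
          * Below is a minimal sketch of the flowid-based distribution
          * mentioned in the TODO above.  It is not compiled;
          * wm_mbuf_rsshash() is a hypothetical helper that would return
          * the RSS hash of the mbuf, which the stack does not provide
          * here as-is.
          */
         #if 0
         static inline int
         wm_select_txqueue_rss(struct ifnet *ifp, struct mbuf *m)
         {
         	struct wm_softc *sc = ifp->if_softc;
         	uint32_t hash;
         
         	if (wm_mbuf_rsshash(m, &hash) != 0)	/* hypothetical helper */
         		return wm_select_txqueue(ifp, m);	/* fall back to CPU mapping */
         	return hash % sc->sc_nqueues;
         }
         #endif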
   6903 
   6904 /*
   6905  * wm_start:		[ifnet interface function]
   6906  *
   6907  *	Start packet transmission on the interface.
   6908  */
   6909 static void
   6910 wm_start(struct ifnet *ifp)
   6911 {
   6912 	struct wm_softc *sc = ifp->if_softc;
   6913 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6914 
   6915 #ifdef WM_MPSAFE
   6916 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6917 #endif
   6918 	/*
   6919 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6920 	 */
   6921 
   6922 	mutex_enter(txq->txq_lock);
   6923 	if (!txq->txq_stopping)
   6924 		wm_start_locked(ifp);
   6925 	mutex_exit(txq->txq_lock);
   6926 }
   6927 
   6928 static void
   6929 wm_start_locked(struct ifnet *ifp)
   6930 {
   6931 	struct wm_softc *sc = ifp->if_softc;
   6932 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6933 
   6934 	wm_send_common_locked(ifp, txq, false);
   6935 }
   6936 
   6937 static int
   6938 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6939 {
   6940 	int qid;
   6941 	struct wm_softc *sc = ifp->if_softc;
   6942 	struct wm_txqueue *txq;
   6943 
   6944 	qid = wm_select_txqueue(ifp, m);
   6945 	txq = &sc->sc_queue[qid].wmq_txq;
   6946 
   6947 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6948 		m_freem(m);
   6949 		WM_Q_EVCNT_INCR(txq, txdrop);
   6950 		return ENOBUFS;
   6951 	}
   6952 
   6953 	/*
   6954 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6955 	 */
   6956 	ifp->if_obytes += m->m_pkthdr.len;
   6957 	if (m->m_flags & M_MCAST)
   6958 		ifp->if_omcasts++;
   6959 
   6960 	if (mutex_tryenter(txq->txq_lock)) {
   6961 		if (!txq->txq_stopping)
   6962 			wm_transmit_locked(ifp, txq);
   6963 		mutex_exit(txq->txq_lock);
   6964 	}
   6965 
   6966 	return 0;
   6967 }
   6968 
   6969 static void
   6970 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6971 {
   6972 
   6973 	wm_send_common_locked(ifp, txq, true);
   6974 }
   6975 
   6976 static void
   6977 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6978     bool is_transmit)
   6979 {
   6980 	struct wm_softc *sc = ifp->if_softc;
   6981 	struct mbuf *m0;
   6982 	struct m_tag *mtag;
   6983 	struct wm_txsoft *txs;
   6984 	bus_dmamap_t dmamap;
   6985 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6986 	bus_addr_t curaddr;
   6987 	bus_size_t seglen, curlen;
   6988 	uint32_t cksumcmd;
   6989 	uint8_t cksumfields;
   6990 
   6991 	KASSERT(mutex_owned(txq->txq_lock));
   6992 
   6993 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6994 		return;
   6995 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6996 		return;
   6997 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6998 		return;
   6999 
   7000 	/* Remember the previous number of free descriptors. */
   7001 	ofree = txq->txq_free;
   7002 
   7003 	/*
   7004 	 * Loop through the send queue, setting up transmit descriptors
   7005 	 * until we drain the queue, or use up all available transmit
   7006 	 * descriptors.
   7007 	 */
   7008 	for (;;) {
   7009 		m0 = NULL;
   7010 
   7011 		/* Get a work queue entry. */
   7012 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7013 			wm_txeof(sc, txq);
   7014 			if (txq->txq_sfree == 0) {
   7015 				DPRINTF(WM_DEBUG_TX,
   7016 				    ("%s: TX: no free job descriptors\n",
   7017 					device_xname(sc->sc_dev)));
   7018 				WM_Q_EVCNT_INCR(txq, txsstall);
   7019 				break;
   7020 			}
   7021 		}
   7022 
   7023 		/* Grab a packet off the queue. */
   7024 		if (is_transmit)
   7025 			m0 = pcq_get(txq->txq_interq);
   7026 		else
   7027 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7028 		if (m0 == NULL)
   7029 			break;
   7030 
   7031 		DPRINTF(WM_DEBUG_TX,
   7032 		    ("%s: TX: have packet to transmit: %p\n",
   7033 		    device_xname(sc->sc_dev), m0));
   7034 
   7035 		txs = &txq->txq_soft[txq->txq_snext];
   7036 		dmamap = txs->txs_dmamap;
   7037 
   7038 		use_tso = (m0->m_pkthdr.csum_flags &
   7039 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7040 
   7041 		/*
   7042 		 * So says the Linux driver:
   7043 		 * The controller does a simple calculation to make sure
   7044 		 * there is enough room in the FIFO before initiating the
   7045 		 * DMA for each buffer.  The calc is:
   7046 		 *	4 = ceil(buffer len / MSS)
   7047 		 * To make sure we don't overrun the FIFO, adjust the max
   7048 		 * buffer len if the MSS drops.
   7049 		 */
   7050 		dmamap->dm_maxsegsz =
   7051 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7052 		    ? m0->m_pkthdr.segsz << 2
   7053 		    : WTX_MAX_LEN;
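         		/*
         		 * Worked example: with an MSS of 1460, dm_maxsegsz
         		 * becomes 1460 << 2 = 5840 bytes, which is used as the
         		 * per-segment limit whenever it is below WTX_MAX_LEN;
         		 * otherwise WTX_MAX_LEN itself is used.
         		 */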
   7054 
   7055 		/*
   7056 		 * Load the DMA map.  If this fails, the packet either
   7057 		 * didn't fit in the allotted number of segments, or we
   7058 		 * were short on resources.  For the too-many-segments
   7059 		 * case, we simply report an error and drop the packet,
   7060 		 * since we can't sanely copy a jumbo packet to a single
   7061 		 * buffer.
   7062 		 */
   7063 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7064 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7065 		if (error) {
   7066 			if (error == EFBIG) {
   7067 				WM_Q_EVCNT_INCR(txq, txdrop);
   7068 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7069 				    "DMA segments, dropping...\n",
   7070 				    device_xname(sc->sc_dev));
   7071 				wm_dump_mbuf_chain(sc, m0);
   7072 				m_freem(m0);
   7073 				continue;
   7074 			}
    7075 			/* Short on resources, just stop for now. */
   7076 			DPRINTF(WM_DEBUG_TX,
   7077 			    ("%s: TX: dmamap load failed: %d\n",
   7078 			    device_xname(sc->sc_dev), error));
   7079 			break;
   7080 		}
   7081 
   7082 		segs_needed = dmamap->dm_nsegs;
   7083 		if (use_tso) {
   7084 			/* For sentinel descriptor; see below. */
   7085 			segs_needed++;
   7086 		}
   7087 
   7088 		/*
   7089 		 * Ensure we have enough descriptors free to describe
   7090 		 * the packet.  Note, we always reserve one descriptor
   7091 		 * at the end of the ring due to the semantics of the
   7092 		 * TDT register, plus one more in the event we need
   7093 		 * to load offload context.
   7094 		 */
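         		/*
         		 * Example: a packet that maps to 8 DMA segments (9
         		 * with the TSO sentinel) requires txq_free >= 11 here:
         		 * the 9 data descriptors, the one reserved by the TDT
         		 * ring semantics, and one for a possible context
         		 * descriptor.
         		 */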
   7095 		if (segs_needed > txq->txq_free - 2) {
   7096 			/*
   7097 			 * Not enough free descriptors to transmit this
   7098 			 * packet.  We haven't committed anything yet,
   7099 			 * so just unload the DMA map, put the packet
    7100 			 * back on the queue, and punt.  Notify the upper
   7101 			 * layer that there are no more slots left.
   7102 			 */
   7103 			DPRINTF(WM_DEBUG_TX,
   7104 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7105 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7106 			    segs_needed, txq->txq_free - 1));
   7107 			if (!is_transmit)
   7108 				ifp->if_flags |= IFF_OACTIVE;
   7109 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7110 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7111 			WM_Q_EVCNT_INCR(txq, txdstall);
   7112 			break;
   7113 		}
   7114 
   7115 		/*
   7116 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7117 		 * once we know we can transmit the packet, since we
   7118 		 * do some internal FIFO space accounting here.
   7119 		 */
   7120 		if (sc->sc_type == WM_T_82547 &&
   7121 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7122 			DPRINTF(WM_DEBUG_TX,
   7123 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7124 			    device_xname(sc->sc_dev)));
   7125 			if (!is_transmit)
   7126 				ifp->if_flags |= IFF_OACTIVE;
   7127 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7128 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7129 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7130 			break;
   7131 		}
   7132 
   7133 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7134 
   7135 		DPRINTF(WM_DEBUG_TX,
   7136 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7137 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7138 
   7139 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7140 
   7141 		/*
   7142 		 * Store a pointer to the packet so that we can free it
   7143 		 * later.
   7144 		 *
   7145 		 * Initially, we consider the number of descriptors the
   7146 		 * packet uses the number of DMA segments.  This may be
   7147 		 * incremented by 1 if we do checksum offload (a descriptor
   7148 		 * is used to set the checksum context).
   7149 		 */
   7150 		txs->txs_mbuf = m0;
   7151 		txs->txs_firstdesc = txq->txq_next;
   7152 		txs->txs_ndesc = segs_needed;
   7153 
   7154 		/* Set up offload parameters for this packet. */
   7155 		if (m0->m_pkthdr.csum_flags &
   7156 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7157 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7158 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7159 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7160 					  &cksumfields) != 0) {
   7161 				/* Error message already displayed. */
   7162 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7163 				continue;
   7164 			}
   7165 		} else {
   7166 			cksumcmd = 0;
   7167 			cksumfields = 0;
   7168 		}
   7169 
   7170 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7171 
   7172 		/* Sync the DMA map. */
   7173 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7174 		    BUS_DMASYNC_PREWRITE);
   7175 
   7176 		/* Initialize the transmit descriptor. */
   7177 		for (nexttx = txq->txq_next, seg = 0;
   7178 		     seg < dmamap->dm_nsegs; seg++) {
   7179 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7180 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7181 			     seglen != 0;
   7182 			     curaddr += curlen, seglen -= curlen,
   7183 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7184 				curlen = seglen;
   7185 
   7186 				/*
   7187 				 * So says the Linux driver:
   7188 				 * Work around for premature descriptor
   7189 				 * write-backs in TSO mode.  Append a
   7190 				 * 4-byte sentinel descriptor.
   7191 				 */
   7192 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7193 				    curlen > 8)
   7194 					curlen -= 4;
   7195 
   7196 				wm_set_dma_addr(
   7197 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7198 				txq->txq_descs[nexttx].wtx_cmdlen
   7199 				    = htole32(cksumcmd | curlen);
   7200 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7201 				    = 0;
   7202 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7203 				    = cksumfields;
   7204 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7205 				lasttx = nexttx;
   7206 
   7207 				DPRINTF(WM_DEBUG_TX,
   7208 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7209 				     "len %#04zx\n",
   7210 				    device_xname(sc->sc_dev), nexttx,
   7211 				    (uint64_t)curaddr, curlen));
   7212 			}
   7213 		}
   7214 
   7215 		KASSERT(lasttx != -1);
   7216 
   7217 		/*
   7218 		 * Set up the command byte on the last descriptor of
   7219 		 * the packet.  If we're in the interrupt delay window,
   7220 		 * delay the interrupt.
   7221 		 */
   7222 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7223 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7224 
   7225 		/*
   7226 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7227 		 * up the descriptor to encapsulate the packet for us.
   7228 		 *
   7229 		 * This is only valid on the last descriptor of the packet.
   7230 		 */
   7231 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7232 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7233 			    htole32(WTX_CMD_VLE);
   7234 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7235 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7236 		}
   7237 
   7238 		txs->txs_lastdesc = lasttx;
   7239 
   7240 		DPRINTF(WM_DEBUG_TX,
   7241 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7242 		    device_xname(sc->sc_dev),
   7243 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7244 
   7245 		/* Sync the descriptors we're using. */
   7246 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7247 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7248 
   7249 		/* Give the packet to the chip. */
   7250 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7251 
   7252 		DPRINTF(WM_DEBUG_TX,
   7253 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7254 
   7255 		DPRINTF(WM_DEBUG_TX,
   7256 		    ("%s: TX: finished transmitting packet, job %d\n",
   7257 		    device_xname(sc->sc_dev), txq->txq_snext));
   7258 
   7259 		/* Advance the tx pointer. */
   7260 		txq->txq_free -= txs->txs_ndesc;
   7261 		txq->txq_next = nexttx;
   7262 
   7263 		txq->txq_sfree--;
   7264 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7265 
   7266 		/* Pass the packet to any BPF listeners. */
   7267 		bpf_mtap(ifp, m0);
   7268 	}
   7269 
   7270 	if (m0 != NULL) {
   7271 		if (!is_transmit)
   7272 			ifp->if_flags |= IFF_OACTIVE;
   7273 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7274 		WM_Q_EVCNT_INCR(txq, txdrop);
   7275 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7276 			__func__));
   7277 		m_freem(m0);
   7278 	}
   7279 
   7280 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7281 		/* No more slots; notify upper layer. */
   7282 		if (!is_transmit)
   7283 			ifp->if_flags |= IFF_OACTIVE;
   7284 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7285 	}
   7286 
   7287 	if (txq->txq_free != ofree) {
   7288 		/* Set a watchdog timer in case the chip flakes out. */
   7289 		ifp->if_timer = 5;
   7290 	}
   7291 }
   7292 
   7293 /*
   7294  * wm_nq_tx_offload:
   7295  *
   7296  *	Set up TCP/IP checksumming parameters for the
   7297  *	specified packet, for NEWQUEUE devices
   7298  */
   7299 static int
   7300 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7301     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7302 {
   7303 	struct mbuf *m0 = txs->txs_mbuf;
   7304 	struct m_tag *mtag;
   7305 	uint32_t vl_len, mssidx, cmdc;
   7306 	struct ether_header *eh;
   7307 	int offset, iphl;
   7308 
   7309 	/*
   7310 	 * XXX It would be nice if the mbuf pkthdr had offset
   7311 	 * fields for the protocol headers.
   7312 	 */
   7313 	*cmdlenp = 0;
   7314 	*fieldsp = 0;
   7315 
   7316 	eh = mtod(m0, struct ether_header *);
   7317 	switch (htons(eh->ether_type)) {
   7318 	case ETHERTYPE_IP:
   7319 	case ETHERTYPE_IPV6:
   7320 		offset = ETHER_HDR_LEN;
   7321 		break;
   7322 
   7323 	case ETHERTYPE_VLAN:
   7324 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7325 		break;
   7326 
   7327 	default:
   7328 		/* Don't support this protocol or encapsulation. */
   7329 		*do_csum = false;
   7330 		return 0;
   7331 	}
   7332 	*do_csum = true;
   7333 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7334 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7335 
   7336 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7337 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7338 
   7339 	if ((m0->m_pkthdr.csum_flags &
   7340 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7341 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7342 	} else {
   7343 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7344 	}
   7345 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7346 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7347 
   7348 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7349 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7350 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7351 		*cmdlenp |= NQTX_CMD_VLE;
   7352 	}
   7353 
   7354 	mssidx = 0;
   7355 
   7356 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7357 		int hlen = offset + iphl;
   7358 		int tcp_hlen;
   7359 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7360 
   7361 		if (__predict_false(m0->m_len <
   7362 				    (hlen + sizeof(struct tcphdr)))) {
   7363 			/*
   7364 			 * TCP/IP headers are not in the first mbuf; we need
   7365 			 * to do this the slow and painful way.  Let's just
   7366 			 * hope this doesn't happen very often.
   7367 			 */
   7368 			struct tcphdr th;
   7369 
   7370 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7371 
   7372 			m_copydata(m0, hlen, sizeof(th), &th);
   7373 			if (v4) {
   7374 				struct ip ip;
   7375 
   7376 				m_copydata(m0, offset, sizeof(ip), &ip);
   7377 				ip.ip_len = 0;
   7378 				m_copyback(m0,
   7379 				    offset + offsetof(struct ip, ip_len),
   7380 				    sizeof(ip.ip_len), &ip.ip_len);
   7381 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7382 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7383 			} else {
   7384 				struct ip6_hdr ip6;
   7385 
   7386 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7387 				ip6.ip6_plen = 0;
   7388 				m_copyback(m0,
   7389 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7390 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7391 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7392 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7393 			}
   7394 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7395 			    sizeof(th.th_sum), &th.th_sum);
   7396 
   7397 			tcp_hlen = th.th_off << 2;
   7398 		} else {
   7399 			/*
   7400 			 * TCP/IP headers are in the first mbuf; we can do
   7401 			 * this the easy way.
   7402 			 */
   7403 			struct tcphdr *th;
   7404 
   7405 			if (v4) {
   7406 				struct ip *ip =
   7407 				    (void *)(mtod(m0, char *) + offset);
   7408 				th = (void *)(mtod(m0, char *) + hlen);
   7409 
   7410 				ip->ip_len = 0;
   7411 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7412 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7413 			} else {
   7414 				struct ip6_hdr *ip6 =
   7415 				    (void *)(mtod(m0, char *) + offset);
   7416 				th = (void *)(mtod(m0, char *) + hlen);
   7417 
   7418 				ip6->ip6_plen = 0;
   7419 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7420 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7421 			}
   7422 			tcp_hlen = th->th_off << 2;
   7423 		}
   7424 		hlen += tcp_hlen;
   7425 		*cmdlenp |= NQTX_CMD_TSE;
   7426 
   7427 		if (v4) {
   7428 			WM_Q_EVCNT_INCR(txq, txtso);
   7429 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7430 		} else {
   7431 			WM_Q_EVCNT_INCR(txq, txtso6);
   7432 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7433 		}
   7434 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7435 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7436 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7437 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7438 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7439 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7440 	} else {
   7441 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7442 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7443 	}
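         	/*
         	 * Worked example for the TSO fields above (assuming an MSS of
         	 * 1460 and a 20-byte TCP header): PAYLEN carries only the TCP
         	 * payload length (m0->m_pkthdr.len - hlen), while mssidx
         	 * carries MSS = 1460 and L4LEN = 20 in their respective bit
         	 * fields.
         	 */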
   7444 
   7445 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7446 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7447 		cmdc |= NQTXC_CMD_IP4;
   7448 	}
   7449 
   7450 	if (m0->m_pkthdr.csum_flags &
   7451 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7452 		WM_Q_EVCNT_INCR(txq, txtusum);
   7453 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7454 			cmdc |= NQTXC_CMD_TCP;
   7455 		} else {
   7456 			cmdc |= NQTXC_CMD_UDP;
   7457 		}
   7458 		cmdc |= NQTXC_CMD_IP4;
   7459 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7460 	}
   7461 	if (m0->m_pkthdr.csum_flags &
   7462 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7463 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7464 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7465 			cmdc |= NQTXC_CMD_TCP;
   7466 		} else {
   7467 			cmdc |= NQTXC_CMD_UDP;
   7468 		}
   7469 		cmdc |= NQTXC_CMD_IP6;
   7470 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7471 	}
   7472 
   7473 	/*
    7474 	 * We don't have to write a context descriptor for every packet on
    7475 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7476 	 * I210 and I211. For these controllers, it is enough to write one
    7477 	 * context descriptor per Tx queue.
    7478 	 * Writing a context descriptor for every packet adds overhead,
    7479 	 * but it does not cause problems.
   7480 	 */
   7481 	/* Fill in the context descriptor. */
   7482 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7483 	    htole32(vl_len);
   7484 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7485 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7486 	    htole32(cmdc);
   7487 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7488 	    htole32(mssidx);
   7489 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7490 	DPRINTF(WM_DEBUG_TX,
   7491 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7492 	    txq->txq_next, 0, vl_len));
   7493 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7494 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7495 	txs->txs_ndesc++;
   7496 	return 0;
   7497 }
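         /*
          * Net effect on the ring, as consumed by wm_nq_send_common_locked()
          * below: when offloading, the advanced context descriptor written
          * here is followed by one advanced data descriptor per DMA segment,
          * with the first data descriptor carrying the cmdlen and fields
          * values computed above.
          */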
   7498 
   7499 /*
   7500  * wm_nq_start:		[ifnet interface function]
   7501  *
   7502  *	Start packet transmission on the interface for NEWQUEUE devices
   7503  */
   7504 static void
   7505 wm_nq_start(struct ifnet *ifp)
   7506 {
   7507 	struct wm_softc *sc = ifp->if_softc;
   7508 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7509 
   7510 #ifdef WM_MPSAFE
   7511 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7512 #endif
   7513 	/*
   7514 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7515 	 */
   7516 
   7517 	mutex_enter(txq->txq_lock);
   7518 	if (!txq->txq_stopping)
   7519 		wm_nq_start_locked(ifp);
   7520 	mutex_exit(txq->txq_lock);
   7521 }
   7522 
   7523 static void
   7524 wm_nq_start_locked(struct ifnet *ifp)
   7525 {
   7526 	struct wm_softc *sc = ifp->if_softc;
   7527 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7528 
   7529 	wm_nq_send_common_locked(ifp, txq, false);
   7530 }
   7531 
   7532 static int
   7533 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7534 {
   7535 	int qid;
   7536 	struct wm_softc *sc = ifp->if_softc;
   7537 	struct wm_txqueue *txq;
   7538 
   7539 	qid = wm_select_txqueue(ifp, m);
   7540 	txq = &sc->sc_queue[qid].wmq_txq;
   7541 
   7542 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7543 		m_freem(m);
   7544 		WM_Q_EVCNT_INCR(txq, txdrop);
   7545 		return ENOBUFS;
   7546 	}
   7547 
   7548 	/*
   7549 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7550 	 */
   7551 	ifp->if_obytes += m->m_pkthdr.len;
   7552 	if (m->m_flags & M_MCAST)
   7553 		ifp->if_omcasts++;
   7554 
   7555 	/*
    7556 	 * There are two situations in which this mutex_tryenter() can
    7557 	 * fail at run time:
    7558 	 *     (1) contention with the interrupt handler
    7559 	 *         (wm_txrxintr_msix())
    7560 	 *     (2) contention with the deferred if_start softint
    7561 	 *         (wm_handle_queue())
    7562 	 * In either case, the last packet enqueued to txq->txq_interq is
    7563 	 * dequeued later by wm_deferred_start_locked(), so it never gets stuck.
   7564 	 */
   7565 	if (mutex_tryenter(txq->txq_lock)) {
   7566 		if (!txq->txq_stopping)
   7567 			wm_nq_transmit_locked(ifp, txq);
   7568 		mutex_exit(txq->txq_lock);
   7569 	}
   7570 
   7571 	return 0;
   7572 }
   7573 
   7574 static void
   7575 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7576 {
   7577 
   7578 	wm_nq_send_common_locked(ifp, txq, true);
   7579 }
   7580 
   7581 static void
   7582 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7583     bool is_transmit)
   7584 {
   7585 	struct wm_softc *sc = ifp->if_softc;
   7586 	struct mbuf *m0;
   7587 	struct m_tag *mtag;
   7588 	struct wm_txsoft *txs;
   7589 	bus_dmamap_t dmamap;
   7590 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7591 	bool do_csum, sent;
   7592 
   7593 	KASSERT(mutex_owned(txq->txq_lock));
   7594 
   7595 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7596 		return;
   7597 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7598 		return;
   7599 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7600 		return;
   7601 
   7602 	sent = false;
   7603 
   7604 	/*
   7605 	 * Loop through the send queue, setting up transmit descriptors
   7606 	 * until we drain the queue, or use up all available transmit
   7607 	 * descriptors.
   7608 	 */
   7609 	for (;;) {
   7610 		m0 = NULL;
   7611 
   7612 		/* Get a work queue entry. */
   7613 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7614 			wm_txeof(sc, txq);
   7615 			if (txq->txq_sfree == 0) {
   7616 				DPRINTF(WM_DEBUG_TX,
   7617 				    ("%s: TX: no free job descriptors\n",
   7618 					device_xname(sc->sc_dev)));
   7619 				WM_Q_EVCNT_INCR(txq, txsstall);
   7620 				break;
   7621 			}
   7622 		}
   7623 
   7624 		/* Grab a packet off the queue. */
   7625 		if (is_transmit)
   7626 			m0 = pcq_get(txq->txq_interq);
   7627 		else
   7628 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7629 		if (m0 == NULL)
   7630 			break;
   7631 
   7632 		DPRINTF(WM_DEBUG_TX,
   7633 		    ("%s: TX: have packet to transmit: %p\n",
   7634 		    device_xname(sc->sc_dev), m0));
   7635 
   7636 		txs = &txq->txq_soft[txq->txq_snext];
   7637 		dmamap = txs->txs_dmamap;
   7638 
   7639 		/*
   7640 		 * Load the DMA map.  If this fails, the packet either
   7641 		 * didn't fit in the allotted number of segments, or we
   7642 		 * were short on resources.  For the too-many-segments
   7643 		 * case, we simply report an error and drop the packet,
   7644 		 * since we can't sanely copy a jumbo packet to a single
   7645 		 * buffer.
   7646 		 */
   7647 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7648 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7649 		if (error) {
   7650 			if (error == EFBIG) {
   7651 				WM_Q_EVCNT_INCR(txq, txdrop);
   7652 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7653 				    "DMA segments, dropping...\n",
   7654 				    device_xname(sc->sc_dev));
   7655 				wm_dump_mbuf_chain(sc, m0);
   7656 				m_freem(m0);
   7657 				continue;
   7658 			}
   7659 			/* Short on resources, just stop for now. */
   7660 			DPRINTF(WM_DEBUG_TX,
   7661 			    ("%s: TX: dmamap load failed: %d\n",
   7662 			    device_xname(sc->sc_dev), error));
   7663 			break;
   7664 		}
   7665 
   7666 		segs_needed = dmamap->dm_nsegs;
   7667 
   7668 		/*
   7669 		 * Ensure we have enough descriptors free to describe
   7670 		 * the packet.  Note, we always reserve one descriptor
   7671 		 * at the end of the ring due to the semantics of the
   7672 		 * TDT register, plus one more in the event we need
   7673 		 * to load offload context.
   7674 		 */
   7675 		if (segs_needed > txq->txq_free - 2) {
   7676 			/*
   7677 			 * Not enough free descriptors to transmit this
   7678 			 * packet.  We haven't committed anything yet,
   7679 			 * so just unload the DMA map, put the packet
    7680 			 * back on the queue, and punt.  Notify the upper
   7681 			 * layer that there are no more slots left.
   7682 			 */
   7683 			DPRINTF(WM_DEBUG_TX,
   7684 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7685 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7686 			    segs_needed, txq->txq_free - 1));
   7687 			if (!is_transmit)
   7688 				ifp->if_flags |= IFF_OACTIVE;
   7689 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7690 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7691 			WM_Q_EVCNT_INCR(txq, txdstall);
   7692 			break;
   7693 		}
   7694 
   7695 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7696 
   7697 		DPRINTF(WM_DEBUG_TX,
   7698 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7699 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7700 
   7701 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7702 
   7703 		/*
   7704 		 * Store a pointer to the packet so that we can free it
   7705 		 * later.
   7706 		 *
   7707 		 * Initially, we consider the number of descriptors the
   7708 		 * packet uses the number of DMA segments.  This may be
   7709 		 * incremented by 1 if we do checksum offload (a descriptor
   7710 		 * is used to set the checksum context).
   7711 		 */
   7712 		txs->txs_mbuf = m0;
   7713 		txs->txs_firstdesc = txq->txq_next;
   7714 		txs->txs_ndesc = segs_needed;
   7715 
   7716 		/* Set up offload parameters for this packet. */
   7717 		uint32_t cmdlen, fields, dcmdlen;
   7718 		if (m0->m_pkthdr.csum_flags &
   7719 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7720 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7721 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7722 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7723 			    &do_csum) != 0) {
   7724 				/* Error message already displayed. */
   7725 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7726 				continue;
   7727 			}
   7728 		} else {
   7729 			do_csum = false;
   7730 			cmdlen = 0;
   7731 			fields = 0;
   7732 		}
   7733 
   7734 		/* Sync the DMA map. */
   7735 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7736 		    BUS_DMASYNC_PREWRITE);
   7737 
   7738 		/* Initialize the first transmit descriptor. */
   7739 		nexttx = txq->txq_next;
   7740 		if (!do_csum) {
   7741 			/* setup a legacy descriptor */
   7742 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7743 			    dmamap->dm_segs[0].ds_addr);
   7744 			txq->txq_descs[nexttx].wtx_cmdlen =
   7745 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7746 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7747 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7748 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7749 			    NULL) {
   7750 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7751 				    htole32(WTX_CMD_VLE);
   7752 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7753 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7754 			} else {
   7755 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7756 			}
   7757 			dcmdlen = 0;
   7758 		} else {
   7759 			/* setup an advanced data descriptor */
   7760 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7761 			    htole64(dmamap->dm_segs[0].ds_addr);
   7762 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7763 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7764 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7765 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7766 			    htole32(fields);
   7767 			DPRINTF(WM_DEBUG_TX,
   7768 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7769 			    device_xname(sc->sc_dev), nexttx,
   7770 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7771 			DPRINTF(WM_DEBUG_TX,
   7772 			    ("\t 0x%08x%08x\n", fields,
   7773 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7774 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7775 		}
   7776 
   7777 		lasttx = nexttx;
   7778 		nexttx = WM_NEXTTX(txq, nexttx);
   7779 		/*
    7780 		 * Fill in the next descriptors.  The legacy and advanced
    7781 		 * formats are the same here.
   7782 		 */
   7783 		for (seg = 1; seg < dmamap->dm_nsegs;
   7784 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7785 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7786 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7787 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7788 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7789 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7790 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7791 			lasttx = nexttx;
   7792 
   7793 			DPRINTF(WM_DEBUG_TX,
   7794 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7795 			     "len %#04zx\n",
   7796 			    device_xname(sc->sc_dev), nexttx,
   7797 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7798 			    dmamap->dm_segs[seg].ds_len));
   7799 		}
   7800 
   7801 		KASSERT(lasttx != -1);
   7802 
   7803 		/*
   7804 		 * Set up the command byte on the last descriptor of
   7805 		 * the packet.  If we're in the interrupt delay window,
   7806 		 * delay the interrupt.
   7807 		 */
   7808 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7809 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7810 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7811 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7812 
   7813 		txs->txs_lastdesc = lasttx;
   7814 
   7815 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7816 		    device_xname(sc->sc_dev),
   7817 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7818 
   7819 		/* Sync the descriptors we're using. */
   7820 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7821 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7822 
   7823 		/* Give the packet to the chip. */
   7824 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7825 		sent = true;
   7826 
   7827 		DPRINTF(WM_DEBUG_TX,
   7828 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7829 
   7830 		DPRINTF(WM_DEBUG_TX,
   7831 		    ("%s: TX: finished transmitting packet, job %d\n",
   7832 		    device_xname(sc->sc_dev), txq->txq_snext));
   7833 
   7834 		/* Advance the tx pointer. */
   7835 		txq->txq_free -= txs->txs_ndesc;
   7836 		txq->txq_next = nexttx;
   7837 
   7838 		txq->txq_sfree--;
   7839 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7840 
   7841 		/* Pass the packet to any BPF listeners. */
   7842 		bpf_mtap(ifp, m0);
   7843 	}
   7844 
   7845 	if (m0 != NULL) {
   7846 		if (!is_transmit)
   7847 			ifp->if_flags |= IFF_OACTIVE;
   7848 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7849 		WM_Q_EVCNT_INCR(txq, txdrop);
   7850 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7851 			__func__));
   7852 		m_freem(m0);
   7853 	}
   7854 
   7855 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7856 		/* No more slots; notify upper layer. */
   7857 		if (!is_transmit)
   7858 			ifp->if_flags |= IFF_OACTIVE;
   7859 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7860 	}
   7861 
   7862 	if (sent) {
   7863 		/* Set a watchdog timer in case the chip flakes out. */
   7864 		ifp->if_timer = 5;
   7865 	}
   7866 }
   7867 
   7868 static void
   7869 wm_deferred_start_locked(struct wm_txqueue *txq)
   7870 {
   7871 	struct wm_softc *sc = txq->txq_sc;
   7872 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7873 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7874 	int qid = wmq->wmq_id;
   7875 
   7876 	KASSERT(mutex_owned(txq->txq_lock));
   7877 
   7878 	if (txq->txq_stopping) {
   7879 		mutex_exit(txq->txq_lock);
   7880 		return;
   7881 	}
   7882 
   7883 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7884 		/* XXX needed for ALTQ or single-CPU systems */
   7885 		if (qid == 0)
   7886 			wm_nq_start_locked(ifp);
   7887 		wm_nq_transmit_locked(ifp, txq);
   7888 	} else {
    7889 		/* XXX needed for ALTQ or single-CPU systems */
   7890 		if (qid == 0)
   7891 			wm_start_locked(ifp);
   7892 		wm_transmit_locked(ifp, txq);
   7893 	}
   7894 }
   7895 
   7896 /* Interrupt */
   7897 
   7898 /*
   7899  * wm_txeof:
   7900  *
   7901  *	Helper; handle transmit interrupts.
   7902  */
   7903 static int
   7904 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7905 {
   7906 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7907 	struct wm_txsoft *txs;
   7908 	bool processed = false;
   7909 	int count = 0;
   7910 	int i;
   7911 	uint8_t status;
   7912 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7913 
   7914 	KASSERT(mutex_owned(txq->txq_lock));
   7915 
   7916 	if (txq->txq_stopping)
   7917 		return 0;
   7918 
   7919 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7920 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7921 	if (wmq->wmq_id == 0)
   7922 		ifp->if_flags &= ~IFF_OACTIVE;
   7923 
   7924 	/*
   7925 	 * Go through the Tx list and free mbufs for those
   7926 	 * frames which have been transmitted.
   7927 	 */
   7928 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7929 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7930 		txs = &txq->txq_soft[i];
   7931 
   7932 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7933 			device_xname(sc->sc_dev), i));
   7934 
   7935 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7936 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7937 
   7938 		status =
   7939 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7940 		if ((status & WTX_ST_DD) == 0) {
   7941 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7942 			    BUS_DMASYNC_PREREAD);
   7943 			break;
   7944 		}
   7945 
   7946 		processed = true;
   7947 		count++;
   7948 		DPRINTF(WM_DEBUG_TX,
   7949 		    ("%s: TX: job %d done: descs %d..%d\n",
   7950 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7951 		    txs->txs_lastdesc));
   7952 
   7953 		/*
   7954 		 * XXX We should probably be using the statistics
   7955 		 * XXX registers, but I don't know if they exist
   7956 		 * XXX on chips before the i82544.
   7957 		 */
   7958 
   7959 #ifdef WM_EVENT_COUNTERS
   7960 		if (status & WTX_ST_TU)
   7961 			WM_Q_EVCNT_INCR(txq, tu);
   7962 #endif /* WM_EVENT_COUNTERS */
   7963 
   7964 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7965 			ifp->if_oerrors++;
   7966 			if (status & WTX_ST_LC)
   7967 				log(LOG_WARNING, "%s: late collision\n",
   7968 				    device_xname(sc->sc_dev));
   7969 			else if (status & WTX_ST_EC) {
   7970 				ifp->if_collisions += 16;
   7971 				log(LOG_WARNING, "%s: excessive collisions\n",
   7972 				    device_xname(sc->sc_dev));
   7973 			}
   7974 		} else
   7975 			ifp->if_opackets++;
   7976 
   7977 		txq->txq_packets++;
   7978 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7979 
   7980 		txq->txq_free += txs->txs_ndesc;
   7981 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7982 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7983 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7984 		m_freem(txs->txs_mbuf);
   7985 		txs->txs_mbuf = NULL;
   7986 	}
   7987 
   7988 	/* Update the dirty transmit buffer pointer. */
   7989 	txq->txq_sdirty = i;
   7990 	DPRINTF(WM_DEBUG_TX,
   7991 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7992 
   7993 	if (count != 0)
   7994 		rnd_add_uint32(&sc->rnd_source, count);
   7995 
   7996 	/*
   7997 	 * If there are no more pending transmissions, cancel the watchdog
   7998 	 * timer.
   7999 	 */
   8000 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8001 		ifp->if_timer = 0;
   8002 
   8003 	return processed;
   8004 }
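
/*
 * Note: wm_txeof() detects completion purely from the DD (descriptor
 * done) status bit that the chip writes back into the last descriptor
 * of each job; the loop stops at the first job whose DD bit is still
 * clear and re-arms the descriptor sync for the next poll.
 */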
   8005 
   8006 static inline uint32_t
   8007 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8008 {
   8009 	struct wm_softc *sc = rxq->rxq_sc;
   8010 
   8011 	if (sc->sc_type == WM_T_82574)
   8012 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8013 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8014 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8015 	else
   8016 		return rxq->rxq_descs[idx].wrx_status;
   8017 }
   8018 
   8019 static inline uint32_t
   8020 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8021 {
   8022 	struct wm_softc *sc = rxq->rxq_sc;
   8023 
   8024 	if (sc->sc_type == WM_T_82574)
   8025 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8026 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8027 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8028 	else
   8029 		return rxq->rxq_descs[idx].wrx_errors;
   8030 }
   8031 
   8032 static inline uint16_t
   8033 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8034 {
   8035 	struct wm_softc *sc = rxq->rxq_sc;
   8036 
   8037 	if (sc->sc_type == WM_T_82574)
   8038 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8039 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8040 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8041 	else
   8042 		return rxq->rxq_descs[idx].wrx_special;
   8043 }
   8044 
   8045 static inline int
   8046 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8047 {
   8048 	struct wm_softc *sc = rxq->rxq_sc;
   8049 
   8050 	if (sc->sc_type == WM_T_82574)
   8051 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8052 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8053 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8054 	else
   8055 		return rxq->rxq_descs[idx].wrx_len;
   8056 }
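
/*
 * Note: the accessors above (and below) hide the three RX descriptor
 * layouts from wm_rxeof(): the legacy format (wrx_*), the 82574-only
 * extended format (erx_*) and the advanced format (nqrx_*) used by
 * WM_F_NEWQUEUE devices.  Dispatch is on sc_type and sc_flags only, so
 * the rest of the receive path can stay format-independent.
 */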
   8057 
   8058 #ifdef WM_DEBUG
   8059 static inline uint32_t
   8060 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8061 {
   8062 	struct wm_softc *sc = rxq->rxq_sc;
   8063 
   8064 	if (sc->sc_type == WM_T_82574)
   8065 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8066 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8067 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8068 	else
   8069 		return 0;
   8070 }
   8071 
   8072 static inline uint8_t
   8073 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8074 {
   8075 	struct wm_softc *sc = rxq->rxq_sc;
   8076 
   8077 	if (sc->sc_type == WM_T_82574)
   8078 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8079 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8080 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8081 	else
   8082 		return 0;
   8083 }
   8084 #endif /* WM_DEBUG */
   8085 
   8086 static inline bool
   8087 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8088     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8089 {
   8090 
   8091 	if (sc->sc_type == WM_T_82574)
   8092 		return (status & ext_bit) != 0;
   8093 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8094 		return (status & nq_bit) != 0;
   8095 	else
   8096 		return (status & legacy_bit) != 0;
   8097 }
   8098 
   8099 static inline bool
   8100 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8101     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8102 {
   8103 
   8104 	if (sc->sc_type == WM_T_82574)
   8105 		return (error & ext_bit) != 0;
   8106 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8107 		return (error & nq_bit) != 0;
   8108 	else
   8109 		return (error & legacy_bit) != 0;
   8110 }
   8111 
   8112 static inline bool
   8113 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8114 {
   8115 
   8116 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8117 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8118 		return true;
   8119 	else
   8120 		return false;
   8121 }
   8122 
   8123 static inline bool
   8124 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8125 {
   8126 	struct wm_softc *sc = rxq->rxq_sc;
   8127 
   8128 	/* XXXX missing error bit for newqueue? */
   8129 	if (wm_rxdesc_is_set_error(sc, errors,
   8130 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8131 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8132 		NQRXC_ERROR_RXE)) {
   8133 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8134 			log(LOG_WARNING, "%s: symbol error\n",
   8135 			    device_xname(sc->sc_dev));
   8136 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8137 			log(LOG_WARNING, "%s: receive sequence error\n",
   8138 			    device_xname(sc->sc_dev));
   8139 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8140 			log(LOG_WARNING, "%s: CRC error\n",
   8141 			    device_xname(sc->sc_dev));
   8142 		return true;
   8143 	}
   8144 
   8145 	return false;
   8146 }
   8147 
   8148 static inline bool
   8149 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8150 {
   8151 	struct wm_softc *sc = rxq->rxq_sc;
   8152 
   8153 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8154 		NQRXC_STATUS_DD)) {
   8155 		/* We have processed all of the receive descriptors. */
   8156 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8157 		return false;
   8158 	}
   8159 
   8160 	return true;
   8161 }
   8162 
   8163 static inline bool
   8164 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8165     struct mbuf *m)
   8166 {
   8167 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8168 
   8169 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8170 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8171 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8172 	}
   8173 
   8174 	return true;
   8175 }
   8176 
   8177 static inline void
   8178 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8179     uint32_t errors, struct mbuf *m)
   8180 {
   8181 	struct wm_softc *sc = rxq->rxq_sc;
   8182 
   8183 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8184 		if (wm_rxdesc_is_set_status(sc, status,
   8185 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8186 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8187 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8188 			if (wm_rxdesc_is_set_error(sc, errors,
   8189 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8190 				m->m_pkthdr.csum_flags |=
   8191 					M_CSUM_IPv4_BAD;
   8192 		}
   8193 		if (wm_rxdesc_is_set_status(sc, status,
   8194 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8195 			/*
   8196 			 * Note: we don't know if this was TCP or UDP,
   8197 			 * so we just set both bits, and expect the
   8198 			 * upper layers to deal.
   8199 			 */
   8200 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8201 			m->m_pkthdr.csum_flags |=
   8202 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8203 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8204 			if (wm_rxdesc_is_set_error(sc, errors,
   8205 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8206 				m->m_pkthdr.csum_flags |=
   8207 					M_CSUM_TCP_UDP_BAD;
   8208 		}
   8209 	}
   8210 }
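
/*
 * Illustrative sketch (not compiled): how an upper layer typically
 * consumes the csum_flags set above.  Since both the TCP and UDP bits
 * are set, the stack checks the *_BAD bit rather than re-verifying:
 */
#if 0
	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD)
			goto bad;	/* hardware saw a bad header sum */
		/* else the IPv4 header checksum is already verified */
	}
#endif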
   8211 
   8212 /*
   8213  * wm_rxeof:
   8214  *
   8215  *	Helper; handle receive interrupts.
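 *
 *	The "limit" argument bounds how many descriptors one call may
 *	process; callers pass sc_rx_intr_process_limit from the MSI-X
 *	interrupt path (UINT_MAX from the legacy path) and
 *	sc_rx_process_limit from softint context.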
   8216  */
   8217 static void
   8218 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8219 {
   8220 	struct wm_softc *sc = rxq->rxq_sc;
   8221 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8222 	struct wm_rxsoft *rxs;
   8223 	struct mbuf *m;
   8224 	int i, len;
   8225 	int count = 0;
   8226 	uint32_t status, errors;
   8227 	uint16_t vlantag;
   8228 
   8229 	KASSERT(mutex_owned(rxq->rxq_lock));
   8230 
   8231 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8232 		if (limit-- == 0) {
   8233 			rxq->rxq_ptr = i;
   8234 			break;
   8235 		}
   8236 
   8237 		rxs = &rxq->rxq_soft[i];
   8238 
   8239 		DPRINTF(WM_DEBUG_RX,
   8240 		    ("%s: RX: checking descriptor %d\n",
   8241 		    device_xname(sc->sc_dev), i));
   8242 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8243 
   8244 		status = wm_rxdesc_get_status(rxq, i);
   8245 		errors = wm_rxdesc_get_errors(rxq, i);
   8246 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8247 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8248 #ifdef WM_DEBUG
   8249 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8250 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8251 #endif
   8252 
   8253 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8254 			/*
    8255 			 * Update the receive pointer while holding rxq_lock,
    8256 			 * consistent with the incremented counter.
   8257 			 */
   8258 			rxq->rxq_ptr = i;
   8259 			break;
   8260 		}
   8261 
   8262 		count++;
   8263 		if (__predict_false(rxq->rxq_discard)) {
   8264 			DPRINTF(WM_DEBUG_RX,
   8265 			    ("%s: RX: discarding contents of descriptor %d\n",
   8266 			    device_xname(sc->sc_dev), i));
   8267 			wm_init_rxdesc(rxq, i);
   8268 			if (wm_rxdesc_is_eop(rxq, status)) {
   8269 				/* Reset our state. */
   8270 				DPRINTF(WM_DEBUG_RX,
   8271 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8272 				    device_xname(sc->sc_dev)));
   8273 				rxq->rxq_discard = 0;
   8274 			}
   8275 			continue;
   8276 		}
   8277 
   8278 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8279 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8280 
   8281 		m = rxs->rxs_mbuf;
   8282 
   8283 		/*
   8284 		 * Add a new receive buffer to the ring, unless of
   8285 		 * course the length is zero. Treat the latter as a
   8286 		 * failed mapping.
   8287 		 */
   8288 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8289 			/*
   8290 			 * Failed, throw away what we've done so
   8291 			 * far, and discard the rest of the packet.
   8292 			 */
   8293 			ifp->if_ierrors++;
   8294 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8295 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8296 			wm_init_rxdesc(rxq, i);
   8297 			if (!wm_rxdesc_is_eop(rxq, status))
   8298 				rxq->rxq_discard = 1;
   8299 			if (rxq->rxq_head != NULL)
   8300 				m_freem(rxq->rxq_head);
   8301 			WM_RXCHAIN_RESET(rxq);
   8302 			DPRINTF(WM_DEBUG_RX,
   8303 			    ("%s: RX: Rx buffer allocation failed, "
   8304 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8305 			    rxq->rxq_discard ? " (discard)" : ""));
   8306 			continue;
   8307 		}
   8308 
   8309 		m->m_len = len;
   8310 		rxq->rxq_len += len;
   8311 		DPRINTF(WM_DEBUG_RX,
   8312 		    ("%s: RX: buffer at %p len %d\n",
   8313 		    device_xname(sc->sc_dev), m->m_data, len));
   8314 
   8315 		/* If this is not the end of the packet, keep looking. */
   8316 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8317 			WM_RXCHAIN_LINK(rxq, m);
   8318 			DPRINTF(WM_DEBUG_RX,
   8319 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8320 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8321 			continue;
   8322 		}
   8323 
   8324 		/*
   8325 		 * Okay, we have the entire packet now.  The chip is
    8326 		 * configured to include the FCS except on I350 and I21[01]
    8327 		 * (not all chips can be configured to strip it),
    8328 		 * so we need to trim it here.
    8329 		 * We may need to adjust the length of the previous mbuf in
    8330 		 * the chain if the current mbuf is too short.
    8331 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8332 		 * is always set on I350, so we don't trim the FCS there.
   8333 		 */
   8334 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8335 		    && (sc->sc_type != WM_T_I210)
   8336 		    && (sc->sc_type != WM_T_I211)) {
   8337 			if (m->m_len < ETHER_CRC_LEN) {
   8338 				rxq->rxq_tail->m_len
   8339 				    -= (ETHER_CRC_LEN - m->m_len);
   8340 				m->m_len = 0;
   8341 			} else
   8342 				m->m_len -= ETHER_CRC_LEN;
   8343 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8344 		} else
   8345 			len = rxq->rxq_len;
   8346 
   8347 		WM_RXCHAIN_LINK(rxq, m);
   8348 
   8349 		*rxq->rxq_tailp = NULL;
   8350 		m = rxq->rxq_head;
   8351 
   8352 		WM_RXCHAIN_RESET(rxq);
   8353 
   8354 		DPRINTF(WM_DEBUG_RX,
   8355 		    ("%s: RX: have entire packet, len -> %d\n",
   8356 		    device_xname(sc->sc_dev), len));
   8357 
   8358 		/* If an error occurred, update stats and drop the packet. */
   8359 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8360 			m_freem(m);
   8361 			continue;
   8362 		}
   8363 
   8364 		/* No errors.  Receive the packet. */
   8365 		m_set_rcvif(m, ifp);
   8366 		m->m_pkthdr.len = len;
   8367 		/*
   8368 		 * TODO
    8369 		 * We should save the rsshash and rsstype in this mbuf.
   8370 		 */
   8371 		DPRINTF(WM_DEBUG_RX,
   8372 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8373 			device_xname(sc->sc_dev), rsstype, rsshash));
   8374 
   8375 		/*
   8376 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8377 		 * for us.  Associate the tag with the packet.
   8378 		 */
   8379 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8380 			continue;
   8381 
   8382 		/* Set up checksum info for this packet. */
   8383 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8384 		/*
    8385 		 * Update the receive pointer while holding rxq_lock,
    8386 		 * consistent with the incremented counter.
   8387 		 */
   8388 		rxq->rxq_ptr = i;
   8389 		rxq->rxq_packets++;
   8390 		rxq->rxq_bytes += len;
   8391 		mutex_exit(rxq->rxq_lock);
   8392 
   8393 		/* Pass it on. */
   8394 		if_percpuq_enqueue(sc->sc_ipq, m);
   8395 
   8396 		mutex_enter(rxq->rxq_lock);
   8397 
   8398 		if (rxq->rxq_stopping)
   8399 			break;
   8400 	}
   8401 
   8402 	if (count != 0)
   8403 		rnd_add_uint32(&sc->rnd_source, count);
   8404 
   8405 	DPRINTF(WM_DEBUG_RX,
   8406 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8407 }
   8408 
   8409 /*
   8410  * wm_linkintr_gmii:
   8411  *
   8412  *	Helper; handle link interrupts for GMII.
   8413  */
   8414 static void
   8415 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8416 {
   8417 
   8418 	KASSERT(WM_CORE_LOCKED(sc));
   8419 
   8420 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8421 		__func__));
   8422 
   8423 	if (icr & ICR_LSC) {
   8424 		uint32_t reg;
   8425 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8426 
   8427 		if ((status & STATUS_LU) != 0) {
   8428 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8429 				device_xname(sc->sc_dev),
   8430 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8431 		} else {
   8432 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8433 				device_xname(sc->sc_dev)));
   8434 		}
   8435 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8436 			wm_gig_downshift_workaround_ich8lan(sc);
   8437 
   8438 		if ((sc->sc_type == WM_T_ICH8)
   8439 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8440 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8441 		}
   8442 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8443 			device_xname(sc->sc_dev)));
   8444 		mii_pollstat(&sc->sc_mii);
   8445 		if (sc->sc_type == WM_T_82543) {
   8446 			int miistatus, active;
   8447 
   8448 			/*
   8449 			 * With 82543, we need to force speed and
   8450 			 * duplex on the MAC equal to what the PHY
   8451 			 * speed and duplex configuration is.
   8452 			 */
   8453 			miistatus = sc->sc_mii.mii_media_status;
   8454 
   8455 			if (miistatus & IFM_ACTIVE) {
   8456 				active = sc->sc_mii.mii_media_active;
   8457 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8458 				switch (IFM_SUBTYPE(active)) {
   8459 				case IFM_10_T:
   8460 					sc->sc_ctrl |= CTRL_SPEED_10;
   8461 					break;
   8462 				case IFM_100_TX:
   8463 					sc->sc_ctrl |= CTRL_SPEED_100;
   8464 					break;
   8465 				case IFM_1000_T:
   8466 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8467 					break;
   8468 				default:
   8469 					/*
    8470 					 * Fiber?
    8471 					 * Should not enter here.
   8472 					 */
   8473 					printf("unknown media (%x)\n", active);
   8474 					break;
   8475 				}
   8476 				if (active & IFM_FDX)
   8477 					sc->sc_ctrl |= CTRL_FD;
   8478 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8479 			}
   8480 		} else if (sc->sc_type == WM_T_PCH) {
   8481 			wm_k1_gig_workaround_hv(sc,
   8482 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8483 		}
   8484 
   8485 		if ((sc->sc_phytype == WMPHY_82578)
   8486 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8487 			== IFM_1000_T)) {
   8488 
   8489 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8490 				delay(200*1000); /* XXX too big */
   8491 
   8492 				/* Link stall fix for link up */
   8493 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8494 				    HV_MUX_DATA_CTRL,
   8495 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8496 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8497 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8498 				    HV_MUX_DATA_CTRL,
   8499 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8500 			}
   8501 		}
   8502 		/*
   8503 		 * I217 Packet Loss issue:
   8504 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8505 		 * on power up.
   8506 		 * Set the Beacon Duration for I217 to 8 usec
   8507 		 */
   8508 		if ((sc->sc_type == WM_T_PCH_LPT)
   8509 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8510 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8511 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8512 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8513 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8514 		}
   8515 
   8516 		/* XXX Work-around I218 hang issue */
   8517 		/* e1000_k1_workaround_lpt_lp() */
   8518 
   8519 		if ((sc->sc_type == WM_T_PCH_LPT)
   8520 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8521 			/*
   8522 			 * Set platform power management values for Latency
   8523 			 * Tolerance Reporting (LTR)
   8524 			 */
   8525 			wm_platform_pm_pch_lpt(sc,
   8526 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8527 				    != 0));
   8528 		}
   8529 
   8530 		/* FEXTNVM6 K1-off workaround */
   8531 		if (sc->sc_type == WM_T_PCH_SPT) {
   8532 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8533 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8534 			    & FEXTNVM6_K1_OFF_ENABLE)
   8535 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8536 			else
   8537 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8538 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8539 		}
   8540 	} else if (icr & ICR_RXSEQ) {
    8541 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8542 			device_xname(sc->sc_dev)));
   8543 	}
   8544 }
   8545 
   8546 /*
   8547  * wm_linkintr_tbi:
   8548  *
   8549  *	Helper; handle link interrupts for TBI mode.
   8550  */
   8551 static void
   8552 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8553 {
   8554 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8555 	uint32_t status;
   8556 
   8557 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8558 		__func__));
   8559 
   8560 	status = CSR_READ(sc, WMREG_STATUS);
   8561 	if (icr & ICR_LSC) {
   8562 		if (status & STATUS_LU) {
   8563 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8564 			    device_xname(sc->sc_dev),
   8565 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8566 			/*
   8567 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8568 			 * so we should update sc->sc_ctrl
   8569 			 */
   8570 
   8571 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8572 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8573 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8574 			if (status & STATUS_FD)
   8575 				sc->sc_tctl |=
   8576 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8577 			else
   8578 				sc->sc_tctl |=
   8579 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8580 			if (sc->sc_ctrl & CTRL_TFCE)
   8581 				sc->sc_fcrtl |= FCRTL_XONE;
   8582 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8583 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8584 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8585 				      sc->sc_fcrtl);
   8586 			sc->sc_tbi_linkup = 1;
   8587 			if_link_state_change(ifp, LINK_STATE_UP);
   8588 		} else {
   8589 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8590 			    device_xname(sc->sc_dev)));
   8591 			sc->sc_tbi_linkup = 0;
   8592 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8593 		}
   8594 		/* Update LED */
   8595 		wm_tbi_serdes_set_linkled(sc);
   8596 	} else if (icr & ICR_RXSEQ) {
   8597 		DPRINTF(WM_DEBUG_LINK,
   8598 		    ("%s: LINK: Receive sequence error\n",
   8599 		    device_xname(sc->sc_dev)));
   8600 	}
   8601 }
   8602 
   8603 /*
   8604  * wm_linkintr_serdes:
   8605  *
    8606  *	Helper; handle link interrupts for SERDES mode.
   8607  */
   8608 static void
   8609 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8610 {
   8611 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8612 	struct mii_data *mii = &sc->sc_mii;
   8613 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8614 	uint32_t pcs_adv, pcs_lpab, reg;
   8615 
   8616 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8617 		__func__));
   8618 
   8619 	if (icr & ICR_LSC) {
   8620 		/* Check PCS */
   8621 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8622 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8623 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8624 				device_xname(sc->sc_dev)));
   8625 			mii->mii_media_status |= IFM_ACTIVE;
   8626 			sc->sc_tbi_linkup = 1;
   8627 			if_link_state_change(ifp, LINK_STATE_UP);
   8628 		} else {
   8629 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8630 				device_xname(sc->sc_dev)));
   8631 			mii->mii_media_status |= IFM_NONE;
   8632 			sc->sc_tbi_linkup = 0;
   8633 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8634 			wm_tbi_serdes_set_linkled(sc);
   8635 			return;
   8636 		}
   8637 		mii->mii_media_active |= IFM_1000_SX;
   8638 		if ((reg & PCS_LSTS_FDX) != 0)
   8639 			mii->mii_media_active |= IFM_FDX;
   8640 		else
   8641 			mii->mii_media_active |= IFM_HDX;
   8642 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8643 			/* Check flow */
   8644 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8645 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8646 				DPRINTF(WM_DEBUG_LINK,
   8647 				    ("XXX LINKOK but not ACOMP\n"));
   8648 				return;
   8649 			}
   8650 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8651 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8652 			DPRINTF(WM_DEBUG_LINK,
   8653 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8654 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8655 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8656 				mii->mii_media_active |= IFM_FLOW
   8657 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8658 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8659 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8660 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8661 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8662 				mii->mii_media_active |= IFM_FLOW
   8663 				    | IFM_ETH_TXPAUSE;
   8664 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8665 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8666 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8667 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8668 				mii->mii_media_active |= IFM_FLOW
   8669 				    | IFM_ETH_RXPAUSE;
   8670 		}
   8671 		/* Update LED */
   8672 		wm_tbi_serdes_set_linkled(sc);
   8673 	} else {
   8674 		DPRINTF(WM_DEBUG_LINK,
   8675 		    ("%s: LINK: Receive sequence error\n",
   8676 		    device_xname(sc->sc_dev)));
   8677 	}
   8678 }
   8679 
   8680 /*
   8681  * wm_linkintr:
   8682  *
   8683  *	Helper; handle link interrupts.
   8684  */
   8685 static void
   8686 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8687 {
   8688 
   8689 	KASSERT(WM_CORE_LOCKED(sc));
   8690 
   8691 	if (sc->sc_flags & WM_F_HAS_MII)
   8692 		wm_linkintr_gmii(sc, icr);
   8693 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8694 	    && (sc->sc_type >= WM_T_82575))
   8695 		wm_linkintr_serdes(sc, icr);
   8696 	else
   8697 		wm_linkintr_tbi(sc, icr);
   8698 }
   8699 
   8700 /*
   8701  * wm_intr_legacy:
   8702  *
   8703  *	Interrupt service routine for INTx and MSI.
   8704  */
   8705 static int
   8706 wm_intr_legacy(void *arg)
   8707 {
   8708 	struct wm_softc *sc = arg;
   8709 	struct wm_queue *wmq = &sc->sc_queue[0];
   8710 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8711 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8712 	uint32_t icr, rndval = 0;
   8713 	int handled = 0;
   8714 
   8715 	while (1 /* CONSTCOND */) {
   8716 		icr = CSR_READ(sc, WMREG_ICR);
   8717 		if ((icr & sc->sc_icr) == 0)
   8718 			break;
   8719 		if (handled == 0) {
   8720 			DPRINTF(WM_DEBUG_TX,
   8721 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8722 		}
   8723 		if (rndval == 0)
   8724 			rndval = icr;
   8725 
   8726 		mutex_enter(rxq->rxq_lock);
   8727 
   8728 		if (rxq->rxq_stopping) {
   8729 			mutex_exit(rxq->rxq_lock);
   8730 			break;
   8731 		}
   8732 
   8733 		handled = 1;
   8734 
   8735 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8736 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8737 			DPRINTF(WM_DEBUG_RX,
   8738 			    ("%s: RX: got Rx intr 0x%08x\n",
   8739 			    device_xname(sc->sc_dev),
   8740 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8741 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8742 		}
   8743 #endif
   8744 		wm_rxeof(rxq, UINT_MAX);
   8745 
   8746 		mutex_exit(rxq->rxq_lock);
   8747 		mutex_enter(txq->txq_lock);
   8748 
   8749 		if (txq->txq_stopping) {
   8750 			mutex_exit(txq->txq_lock);
   8751 			break;
   8752 		}
   8753 
   8754 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8755 		if (icr & ICR_TXDW) {
   8756 			DPRINTF(WM_DEBUG_TX,
   8757 			    ("%s: TX: got TXDW interrupt\n",
   8758 			    device_xname(sc->sc_dev)));
   8759 			WM_Q_EVCNT_INCR(txq, txdw);
   8760 		}
   8761 #endif
   8762 		wm_txeof(sc, txq);
   8763 
   8764 		mutex_exit(txq->txq_lock);
   8765 		WM_CORE_LOCK(sc);
   8766 
   8767 		if (sc->sc_core_stopping) {
   8768 			WM_CORE_UNLOCK(sc);
   8769 			break;
   8770 		}
   8771 
   8772 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8773 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8774 			wm_linkintr(sc, icr);
   8775 		}
   8776 
   8777 		WM_CORE_UNLOCK(sc);
   8778 
   8779 		if (icr & ICR_RXO) {
   8780 #if defined(WM_DEBUG)
   8781 			log(LOG_WARNING, "%s: Receive overrun\n",
   8782 			    device_xname(sc->sc_dev));
   8783 #endif /* defined(WM_DEBUG) */
   8784 		}
   8785 	}
   8786 
   8787 	rnd_add_uint32(&sc->rnd_source, rndval);
   8788 
   8789 	if (handled) {
   8790 		/* Try to get more packets going. */
   8791 		softint_schedule(wmq->wmq_si);
   8792 	}
   8793 
   8794 	return handled;
   8795 }
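
/*
 * Note: on these devices reading ICR also acknowledges (clears) the
 * asserted bits, so the loop above keeps re-reading until no interrupt
 * cause of interest remains, then defers further TX work to the
 * per-queue softint.
 */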
   8796 
   8797 static inline void
   8798 wm_txrxintr_disable(struct wm_queue *wmq)
   8799 {
   8800 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8801 
   8802 	if (sc->sc_type == WM_T_82574)
   8803 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8804 	else if (sc->sc_type == WM_T_82575)
   8805 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8806 	else
   8807 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8808 }
   8809 
   8810 static inline void
   8811 wm_txrxintr_enable(struct wm_queue *wmq)
   8812 {
   8813 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8814 
   8815 	wm_itrs_calculate(sc, wmq);
   8816 
   8817 	if (sc->sc_type == WM_T_82574)
   8818 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8819 	else if (sc->sc_type == WM_T_82575)
   8820 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8821 	else
   8822 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8823 }
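
/*
 * Note: the mask layout above differs per family: the 82574 keeps
 * per-queue TX/RX bits in IMS/IMC (ICR_TXQ/ICR_RXQ), the 82575 keeps
 * them in EIMS/EIMC (EITR_TX_QUEUE/EITR_RX_QUEUE), and newer devices
 * use one EIMS/EIMC bit per MSI-X vector (wmq_intr_idx).
 */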
   8824 
   8825 static int
   8826 wm_txrxintr_msix(void *arg)
   8827 {
   8828 	struct wm_queue *wmq = arg;
   8829 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8830 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8831 	struct wm_softc *sc = txq->txq_sc;
   8832 	u_int limit = sc->sc_rx_intr_process_limit;
   8833 
   8834 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8835 
   8836 	DPRINTF(WM_DEBUG_TX,
   8837 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8838 
   8839 	wm_txrxintr_disable(wmq);
   8840 
   8841 	mutex_enter(txq->txq_lock);
   8842 
   8843 	if (txq->txq_stopping) {
   8844 		mutex_exit(txq->txq_lock);
   8845 		return 0;
   8846 	}
   8847 
   8848 	WM_Q_EVCNT_INCR(txq, txdw);
   8849 	wm_txeof(sc, txq);
   8850 	/* wm_deferred start() is done in wm_handle_queue(). */
   8851 	mutex_exit(txq->txq_lock);
   8852 
   8853 	DPRINTF(WM_DEBUG_RX,
   8854 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8855 	mutex_enter(rxq->rxq_lock);
   8856 
   8857 	if (rxq->rxq_stopping) {
   8858 		mutex_exit(rxq->rxq_lock);
   8859 		return 0;
   8860 	}
   8861 
   8862 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8863 	wm_rxeof(rxq, limit);
   8864 	mutex_exit(rxq->rxq_lock);
   8865 
   8866 	wm_itrs_writereg(sc, wmq);
   8867 
   8868 	softint_schedule(wmq->wmq_si);
   8869 
   8870 	return 1;
   8871 }
   8872 
   8873 static void
   8874 wm_handle_queue(void *arg)
   8875 {
   8876 	struct wm_queue *wmq = arg;
   8877 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8878 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8879 	struct wm_softc *sc = txq->txq_sc;
   8880 	u_int limit = sc->sc_rx_process_limit;
   8881 
   8882 	mutex_enter(txq->txq_lock);
   8883 	if (txq->txq_stopping) {
   8884 		mutex_exit(txq->txq_lock);
   8885 		return;
   8886 	}
   8887 	wm_txeof(sc, txq);
   8888 	wm_deferred_start_locked(txq);
   8889 	mutex_exit(txq->txq_lock);
   8890 
   8891 	mutex_enter(rxq->rxq_lock);
   8892 	if (rxq->rxq_stopping) {
   8893 		mutex_exit(rxq->rxq_lock);
   8894 		return;
   8895 	}
   8896 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8897 	wm_rxeof(rxq, limit);
   8898 	mutex_exit(rxq->rxq_lock);
   8899 
   8900 	wm_txrxintr_enable(wmq);
   8901 }
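
/*
 * Note: wm_txrxintr_msix() masks this queue's interrupt, does a bounded
 * amount of work under the queue locks and schedules wmq_si; this
 * softint handler then finishes the deferred TX/RX work and re-enables
 * the queue interrupt via wm_txrxintr_enable().
 */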
   8902 
   8903 /*
   8904  * wm_linkintr_msix:
   8905  *
   8906  *	Interrupt service routine for link status change for MSI-X.
   8907  */
   8908 static int
   8909 wm_linkintr_msix(void *arg)
   8910 {
   8911 	struct wm_softc *sc = arg;
   8912 	uint32_t reg;
   8913 
   8914 	DPRINTF(WM_DEBUG_LINK,
   8915 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8916 
   8917 	reg = CSR_READ(sc, WMREG_ICR);
   8918 	WM_CORE_LOCK(sc);
   8919 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8920 		goto out;
   8921 
   8922 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8923 	wm_linkintr(sc, ICR_LSC);
   8924 
   8925 out:
   8926 	WM_CORE_UNLOCK(sc);
   8927 
   8928 	if (sc->sc_type == WM_T_82574)
   8929 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8930 	else if (sc->sc_type == WM_T_82575)
   8931 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8932 	else
   8933 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8934 
   8935 	return 1;
   8936 }
   8937 
   8938 /*
   8939  * Media related.
   8940  * GMII, SGMII, TBI (and SERDES)
   8941  */
   8942 
   8943 /* Common */
   8944 
   8945 /*
   8946  * wm_tbi_serdes_set_linkled:
   8947  *
   8948  *	Update the link LED on TBI and SERDES devices.
   8949  */
   8950 static void
   8951 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8952 {
   8953 
   8954 	if (sc->sc_tbi_linkup)
   8955 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8956 	else
   8957 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8958 
   8959 	/* 82540 or newer devices are active low */
   8960 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8961 
   8962 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8963 }
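
/*
 * Worked example for the XOR above: on an active-low (>= 82540) device
 * with link up, CTRL_SWDPIN(0) is first set and then inverted by the
 * XOR, so the pin is driven low and the LED turns on; with link down
 * the pin ends up high and the LED stays off.
 */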
   8964 
   8965 /* GMII related */
   8966 
   8967 /*
   8968  * wm_gmii_reset:
   8969  *
   8970  *	Reset the PHY.
   8971  */
   8972 static void
   8973 wm_gmii_reset(struct wm_softc *sc)
   8974 {
   8975 	uint32_t reg;
   8976 	int rv;
   8977 
   8978 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8979 		device_xname(sc->sc_dev), __func__));
   8980 
   8981 	rv = sc->phy.acquire(sc);
   8982 	if (rv != 0) {
   8983 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8984 		    __func__);
   8985 		return;
   8986 	}
   8987 
   8988 	switch (sc->sc_type) {
   8989 	case WM_T_82542_2_0:
   8990 	case WM_T_82542_2_1:
   8991 		/* null */
   8992 		break;
   8993 	case WM_T_82543:
   8994 		/*
   8995 		 * With 82543, we need to force speed and duplex on the MAC
   8996 		 * equal to what the PHY speed and duplex configuration is.
   8997 		 * In addition, we need to perform a hardware reset on the PHY
   8998 		 * to take it out of reset.
   8999 		 */
   9000 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9001 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9002 
   9003 		/* The PHY reset pin is active-low. */
   9004 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9005 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9006 		    CTRL_EXT_SWDPIN(4));
   9007 		reg |= CTRL_EXT_SWDPIO(4);
   9008 
   9009 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9010 		CSR_WRITE_FLUSH(sc);
   9011 		delay(10*1000);
   9012 
   9013 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9014 		CSR_WRITE_FLUSH(sc);
   9015 		delay(150);
   9016 #if 0
   9017 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9018 #endif
   9019 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9020 		break;
   9021 	case WM_T_82544:	/* reset 10000us */
   9022 	case WM_T_82540:
   9023 	case WM_T_82545:
   9024 	case WM_T_82545_3:
   9025 	case WM_T_82546:
   9026 	case WM_T_82546_3:
   9027 	case WM_T_82541:
   9028 	case WM_T_82541_2:
   9029 	case WM_T_82547:
   9030 	case WM_T_82547_2:
   9031 	case WM_T_82571:	/* reset 100us */
   9032 	case WM_T_82572:
   9033 	case WM_T_82573:
   9034 	case WM_T_82574:
   9035 	case WM_T_82575:
   9036 	case WM_T_82576:
   9037 	case WM_T_82580:
   9038 	case WM_T_I350:
   9039 	case WM_T_I354:
   9040 	case WM_T_I210:
   9041 	case WM_T_I211:
   9042 	case WM_T_82583:
   9043 	case WM_T_80003:
   9044 		/* generic reset */
   9045 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9046 		CSR_WRITE_FLUSH(sc);
   9047 		delay(20000);
   9048 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9049 		CSR_WRITE_FLUSH(sc);
   9050 		delay(20000);
   9051 
   9052 		if ((sc->sc_type == WM_T_82541)
   9053 		    || (sc->sc_type == WM_T_82541_2)
   9054 		    || (sc->sc_type == WM_T_82547)
   9055 		    || (sc->sc_type == WM_T_82547_2)) {
    9056 			/* Workarounds for IGP are done in igp_reset() */
   9057 			/* XXX add code to set LED after phy reset */
   9058 		}
   9059 		break;
   9060 	case WM_T_ICH8:
   9061 	case WM_T_ICH9:
   9062 	case WM_T_ICH10:
   9063 	case WM_T_PCH:
   9064 	case WM_T_PCH2:
   9065 	case WM_T_PCH_LPT:
   9066 	case WM_T_PCH_SPT:
   9067 		/* generic reset */
   9068 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9069 		CSR_WRITE_FLUSH(sc);
   9070 		delay(100);
   9071 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9072 		CSR_WRITE_FLUSH(sc);
   9073 		delay(150);
   9074 		break;
   9075 	default:
   9076 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9077 		    __func__);
   9078 		break;
   9079 	}
   9080 
   9081 	sc->phy.release(sc);
   9082 
   9083 	/* get_cfg_done */
   9084 	wm_get_cfg_done(sc);
   9085 
   9086 	/* extra setup */
   9087 	switch (sc->sc_type) {
   9088 	case WM_T_82542_2_0:
   9089 	case WM_T_82542_2_1:
   9090 	case WM_T_82543:
   9091 	case WM_T_82544:
   9092 	case WM_T_82540:
   9093 	case WM_T_82545:
   9094 	case WM_T_82545_3:
   9095 	case WM_T_82546:
   9096 	case WM_T_82546_3:
   9097 	case WM_T_82541_2:
   9098 	case WM_T_82547_2:
   9099 	case WM_T_82571:
   9100 	case WM_T_82572:
   9101 	case WM_T_82573:
   9102 	case WM_T_82574:
   9103 	case WM_T_82583:
   9104 	case WM_T_82575:
   9105 	case WM_T_82576:
   9106 	case WM_T_82580:
   9107 	case WM_T_I350:
   9108 	case WM_T_I354:
   9109 	case WM_T_I210:
   9110 	case WM_T_I211:
   9111 	case WM_T_80003:
   9112 		/* null */
   9113 		break;
   9114 	case WM_T_82541:
   9115 	case WM_T_82547:
    9116 		/* XXX Actively configure the LED after PHY reset */
   9117 		break;
   9118 	case WM_T_ICH8:
   9119 	case WM_T_ICH9:
   9120 	case WM_T_ICH10:
   9121 	case WM_T_PCH:
   9122 	case WM_T_PCH2:
   9123 	case WM_T_PCH_LPT:
   9124 	case WM_T_PCH_SPT:
   9125 		wm_phy_post_reset(sc);
   9126 		break;
   9127 	default:
   9128 		panic("%s: unknown type\n", __func__);
   9129 		break;
   9130 	}
   9131 }
   9132 
   9133 /*
   9134  * Setup sc_phytype and mii_{read|write}reg.
   9135  *
    9136  *  To identify the PHY type, the correct read/write function must be
    9137  * selected. To do that, the PCI ID or MAC type is required, without
    9138  * accessing any PHY registers.
    9139  *
    9140  *  On the first call of this function, the PHY ID is not known yet.
    9141  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9142  * so the result might be incorrect.
    9143  *
    9144  *  On the second call, the PHY OUI and model are used to identify the
    9145  * PHY type. It might not be perfect because of missing comparison
    9146  * entries, but it should be better than the first call.
    9147  *
    9148  *  If the newly detected result differs from the previous assumption,
    9149  * a diagnostic message is printed.
   9150  */
   9151 static void
   9152 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9153     uint16_t phy_model)
   9154 {
   9155 	device_t dev = sc->sc_dev;
   9156 	struct mii_data *mii = &sc->sc_mii;
   9157 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9158 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9159 	mii_readreg_t new_readreg;
   9160 	mii_writereg_t new_writereg;
   9161 
   9162 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9163 		device_xname(sc->sc_dev), __func__));
   9164 
   9165 	if (mii->mii_readreg == NULL) {
   9166 		/*
   9167 		 *  This is the first call of this function. For ICH and PCH
   9168 		 * variants, it's difficult to determine the PHY access method
   9169 		 * by sc_type, so use the PCI product ID for some devices.
   9170 		 */
   9171 
   9172 		switch (sc->sc_pcidevid) {
   9173 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9174 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9175 			/* 82577 */
   9176 			new_phytype = WMPHY_82577;
   9177 			break;
   9178 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9179 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9180 			/* 82578 */
   9181 			new_phytype = WMPHY_82578;
   9182 			break;
   9183 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9184 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9185 			/* 82579 */
   9186 			new_phytype = WMPHY_82579;
   9187 			break;
   9188 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9189 		case PCI_PRODUCT_INTEL_82801I_BM:
   9190 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9191 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9192 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9193 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9194 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9195 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9196 			/* ICH8, 9, 10 with 82567 */
   9197 			new_phytype = WMPHY_BM;
   9198 			break;
   9199 		default:
   9200 			break;
   9201 		}
   9202 	} else {
   9203 		/* It's not the first call. Use PHY OUI and model */
   9204 		switch (phy_oui) {
   9205 		case MII_OUI_ATHEROS: /* XXX ??? */
   9206 			switch (phy_model) {
   9207 			case 0x0004: /* XXX */
   9208 				new_phytype = WMPHY_82578;
   9209 				break;
   9210 			default:
   9211 				break;
   9212 			}
   9213 			break;
   9214 		case MII_OUI_xxMARVELL:
   9215 			switch (phy_model) {
   9216 			case MII_MODEL_xxMARVELL_I210:
   9217 				new_phytype = WMPHY_I210;
   9218 				break;
   9219 			case MII_MODEL_xxMARVELL_E1011:
   9220 			case MII_MODEL_xxMARVELL_E1000_3:
   9221 			case MII_MODEL_xxMARVELL_E1000_5:
   9222 			case MII_MODEL_xxMARVELL_E1112:
   9223 				new_phytype = WMPHY_M88;
   9224 				break;
   9225 			case MII_MODEL_xxMARVELL_E1149:
   9226 				new_phytype = WMPHY_BM;
   9227 				break;
   9228 			case MII_MODEL_xxMARVELL_E1111:
   9229 			case MII_MODEL_xxMARVELL_I347:
   9230 			case MII_MODEL_xxMARVELL_E1512:
   9231 			case MII_MODEL_xxMARVELL_E1340M:
   9232 			case MII_MODEL_xxMARVELL_E1543:
   9233 				new_phytype = WMPHY_M88;
   9234 				break;
   9235 			case MII_MODEL_xxMARVELL_I82563:
   9236 				new_phytype = WMPHY_GG82563;
   9237 				break;
   9238 			default:
   9239 				break;
   9240 			}
   9241 			break;
   9242 		case MII_OUI_INTEL:
   9243 			switch (phy_model) {
   9244 			case MII_MODEL_INTEL_I82577:
   9245 				new_phytype = WMPHY_82577;
   9246 				break;
   9247 			case MII_MODEL_INTEL_I82579:
   9248 				new_phytype = WMPHY_82579;
   9249 				break;
   9250 			case MII_MODEL_INTEL_I217:
   9251 				new_phytype = WMPHY_I217;
   9252 				break;
   9253 			case MII_MODEL_INTEL_I82580:
   9254 			case MII_MODEL_INTEL_I350:
   9255 				new_phytype = WMPHY_82580;
   9256 				break;
   9257 			default:
   9258 				break;
   9259 			}
   9260 			break;
   9261 		case MII_OUI_yyINTEL:
   9262 			switch (phy_model) {
   9263 			case MII_MODEL_yyINTEL_I82562G:
   9264 			case MII_MODEL_yyINTEL_I82562EM:
   9265 			case MII_MODEL_yyINTEL_I82562ET:
   9266 				new_phytype = WMPHY_IFE;
   9267 				break;
   9268 			case MII_MODEL_yyINTEL_IGP01E1000:
   9269 				new_phytype = WMPHY_IGP;
   9270 				break;
   9271 			case MII_MODEL_yyINTEL_I82566:
   9272 				new_phytype = WMPHY_IGP_3;
   9273 				break;
   9274 			default:
   9275 				break;
   9276 			}
   9277 			break;
   9278 		default:
   9279 			break;
   9280 		}
   9281 		if (new_phytype == WMPHY_UNKNOWN)
   9282 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9283 			    __func__);
   9284 
   9285 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9286 		    && (sc->sc_phytype != new_phytype)) {
    9287 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9288 			    " was incorrect. PHY type from PHY ID = %u\n",
   9289 			    sc->sc_phytype, new_phytype);
   9290 		}
   9291 	}
   9292 
   9293 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9294 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9295 		/* SGMII */
   9296 		new_readreg = wm_sgmii_readreg;
   9297 		new_writereg = wm_sgmii_writereg;
   9298 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9299 		/* BM2 (phyaddr == 1) */
   9300 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9301 		    && (new_phytype != WMPHY_BM)
   9302 		    && (new_phytype != WMPHY_UNKNOWN))
   9303 			doubt_phytype = new_phytype;
   9304 		new_phytype = WMPHY_BM;
   9305 		new_readreg = wm_gmii_bm_readreg;
   9306 		new_writereg = wm_gmii_bm_writereg;
   9307 	} else if (sc->sc_type >= WM_T_PCH) {
   9308 		/* All PCH* use _hv_ */
   9309 		new_readreg = wm_gmii_hv_readreg;
   9310 		new_writereg = wm_gmii_hv_writereg;
   9311 	} else if (sc->sc_type >= WM_T_ICH8) {
   9312 		/* non-82567 ICH8, 9 and 10 */
   9313 		new_readreg = wm_gmii_i82544_readreg;
   9314 		new_writereg = wm_gmii_i82544_writereg;
   9315 	} else if (sc->sc_type >= WM_T_80003) {
   9316 		/* 80003 */
   9317 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9318 		    && (new_phytype != WMPHY_GG82563)
   9319 		    && (new_phytype != WMPHY_UNKNOWN))
   9320 			doubt_phytype = new_phytype;
   9321 		new_phytype = WMPHY_GG82563;
   9322 		new_readreg = wm_gmii_i80003_readreg;
   9323 		new_writereg = wm_gmii_i80003_writereg;
   9324 	} else if (sc->sc_type >= WM_T_I210) {
   9325 		/* I210 and I211 */
   9326 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9327 		    && (new_phytype != WMPHY_I210)
   9328 		    && (new_phytype != WMPHY_UNKNOWN))
   9329 			doubt_phytype = new_phytype;
   9330 		new_phytype = WMPHY_I210;
   9331 		new_readreg = wm_gmii_gs40g_readreg;
   9332 		new_writereg = wm_gmii_gs40g_writereg;
   9333 	} else if (sc->sc_type >= WM_T_82580) {
   9334 		/* 82580, I350 and I354 */
   9335 		new_readreg = wm_gmii_82580_readreg;
   9336 		new_writereg = wm_gmii_82580_writereg;
   9337 	} else if (sc->sc_type >= WM_T_82544) {
    9338 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9339 		new_readreg = wm_gmii_i82544_readreg;
   9340 		new_writereg = wm_gmii_i82544_writereg;
   9341 	} else {
   9342 		new_readreg = wm_gmii_i82543_readreg;
   9343 		new_writereg = wm_gmii_i82543_writereg;
   9344 	}
   9345 
   9346 	if (new_phytype == WMPHY_BM) {
   9347 		/* All BM use _bm_ */
   9348 		new_readreg = wm_gmii_bm_readreg;
   9349 		new_writereg = wm_gmii_bm_writereg;
   9350 	}
   9351 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9352 		/* All PCH* use _hv_ */
   9353 		new_readreg = wm_gmii_hv_readreg;
   9354 		new_writereg = wm_gmii_hv_writereg;
   9355 	}
   9356 
   9357 	/* Diag output */
   9358 	if (doubt_phytype != WMPHY_UNKNOWN)
   9359 		aprint_error_dev(dev, "Assumed new PHY type was "
   9360 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9361 		    new_phytype);
   9362 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9363 	    && (sc->sc_phytype != new_phytype))
    9364 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
    9365 		    " was incorrect. New PHY type = %u\n",
   9366 		    sc->sc_phytype, new_phytype);
   9367 
   9368 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9369 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9370 
   9371 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9372 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9373 		    "function was incorrect.\n");
   9374 
   9375 	/* Update now */
   9376 	sc->sc_phytype = new_phytype;
   9377 	mii->mii_readreg = new_readreg;
   9378 	mii->mii_writereg = new_writereg;
   9379 }
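
/*
 * Illustrative sketch (not compiled) of the two-call protocol described
 * in the comment above; the real calls are made elsewhere in the
 * driver, and the PHY arguments of the first call are not used because
 * mii_readreg is still NULL at that point:
 */
#if 0
	/* 1st call: guess from the PCI ID/MAC type, no PHY access yet. */
	wm_gmii_setup_phytype(sc, 0, 0);
	/* ... mii_attach() probes the PHY using the guessed functions ... */
	/* 2nd call: refine using the OUI and model read from the PHY. */
	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
#endif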
   9380 
   9381 /*
   9382  * wm_get_phy_id_82575:
   9383  *
   9384  * Return PHY ID. Return -1 if it failed.
   9385  */
   9386 static int
   9387 wm_get_phy_id_82575(struct wm_softc *sc)
   9388 {
   9389 	uint32_t reg;
   9390 	int phyid = -1;
   9391 
   9392 	/* XXX */
   9393 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9394 		return -1;
   9395 
   9396 	if (wm_sgmii_uses_mdio(sc)) {
   9397 		switch (sc->sc_type) {
   9398 		case WM_T_82575:
   9399 		case WM_T_82576:
   9400 			reg = CSR_READ(sc, WMREG_MDIC);
   9401 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9402 			break;
   9403 		case WM_T_82580:
   9404 		case WM_T_I350:
   9405 		case WM_T_I354:
   9406 		case WM_T_I210:
   9407 		case WM_T_I211:
   9408 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9409 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9410 			break;
   9411 		default:
   9412 			return -1;
   9413 		}
   9414 	}
   9415 
   9416 	return phyid;
   9417 }
   9418 
   9419 
   9420 /*
   9421  * wm_gmii_mediainit:
   9422  *
   9423  *	Initialize media for use on 1000BASE-T devices.
   9424  */
   9425 static void
   9426 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9427 {
   9428 	device_t dev = sc->sc_dev;
   9429 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9430 	struct mii_data *mii = &sc->sc_mii;
   9431 	uint32_t reg;
   9432 
   9433 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9434 		device_xname(sc->sc_dev), __func__));
   9435 
   9436 	/* We have GMII. */
   9437 	sc->sc_flags |= WM_F_HAS_MII;
   9438 
   9439 	if (sc->sc_type == WM_T_80003)
   9440 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9441 	else
   9442 		sc->sc_tipg = TIPG_1000T_DFLT;
   9443 
   9444 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9445 	if ((sc->sc_type == WM_T_82580)
   9446 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9447 	    || (sc->sc_type == WM_T_I211)) {
   9448 		reg = CSR_READ(sc, WMREG_PHPM);
   9449 		reg &= ~PHPM_GO_LINK_D;
   9450 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9451 	}
   9452 
   9453 	/*
   9454 	 * Let the chip set speed/duplex on its own based on
   9455 	 * signals from the PHY.
   9456 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9457 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9458 	 */
   9459 	sc->sc_ctrl |= CTRL_SLU;
   9460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9461 
   9462 	/* Initialize our media structures and probe the GMII. */
   9463 	mii->mii_ifp = ifp;
   9464 
   9465 	mii->mii_statchg = wm_gmii_statchg;
   9466 
   9467 	/* get PHY control from SMBus to PCIe */
   9468 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9469 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9470 		wm_smbustopci(sc);
   9471 
   9472 	wm_gmii_reset(sc);
   9473 
   9474 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9475 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9476 	    wm_gmii_mediastatus);
   9477 
   9478 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9479 	    || (sc->sc_type == WM_T_82580)
   9480 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9481 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9482 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9483 			/* Attach only one port */
   9484 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9485 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9486 		} else {
   9487 			int i, id;
   9488 			uint32_t ctrl_ext;
   9489 
   9490 			id = wm_get_phy_id_82575(sc);
   9491 			if (id != -1) {
   9492 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9493 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9494 			}
   9495 			if ((id == -1)
   9496 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9497 				/* Power on sgmii phy if it is disabled */
   9498 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9499 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9500 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9501 				CSR_WRITE_FLUSH(sc);
   9502 				delay(300*1000); /* XXX too long */
   9503 
   9504 				/* from 1 to 8 */
   9505 				for (i = 1; i < 8; i++)
   9506 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9507 					    0xffffffff, i, MII_OFFSET_ANY,
   9508 					    MIIF_DOPAUSE);
   9509 
   9510 				/* restore previous sfp cage power state */
   9511 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9512 			}
   9513 		}
   9514 	} else {
   9515 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9516 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9517 	}
   9518 
   9519 	/*
   9520 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9521 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9522 	 */
   9523 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9524 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9525 		wm_set_mdio_slow_mode_hv(sc);
   9526 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9527 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9528 	}
   9529 
   9530 	/*
   9531 	 * (For ICH8 variants)
   9532 	 * If PHY detection failed, use BM's r/w function and retry.
   9533 	 */
   9534 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9535 		/* if failed, retry with *_bm_* */
   9536 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9537 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9538 		    sc->sc_phytype);
   9539 		sc->sc_phytype = WMPHY_BM;
   9540 		mii->mii_readreg = wm_gmii_bm_readreg;
   9541 		mii->mii_writereg = wm_gmii_bm_writereg;
   9542 
   9543 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9544 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9545 	}
   9546 
   9547 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9548 		/* No PHY was found */
   9549 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9550 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9551 		sc->sc_phytype = WMPHY_NONE;
   9552 	} else {
   9553 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9554 
   9555 		/*
    9556 		 * PHY found! Check the PHY type again with the second call
    9557 		 * of wm_gmii_setup_phytype().
   9558 		 */
   9559 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9560 		    child->mii_mpd_model);
   9561 
   9562 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9563 	}
   9564 }
   9565 
   9566 /*
   9567  * wm_gmii_mediachange:	[ifmedia interface function]
   9568  *
   9569  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9570  */
   9571 static int
   9572 wm_gmii_mediachange(struct ifnet *ifp)
   9573 {
   9574 	struct wm_softc *sc = ifp->if_softc;
   9575 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9576 	int rc;
   9577 
   9578 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9579 		device_xname(sc->sc_dev), __func__));
   9580 	if ((ifp->if_flags & IFF_UP) == 0)
   9581 		return 0;
   9582 
   9583 	/* Disable D0 LPLU. */
   9584 	wm_lplu_d0_disable(sc);
   9585 
   9586 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9587 	sc->sc_ctrl |= CTRL_SLU;
   9588 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9589 	    || (sc->sc_type > WM_T_82543)) {
   9590 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9591 	} else {
   9592 		sc->sc_ctrl &= ~CTRL_ASDE;
   9593 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9594 		if (ife->ifm_media & IFM_FDX)
   9595 			sc->sc_ctrl |= CTRL_FD;
   9596 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9597 		case IFM_10_T:
   9598 			sc->sc_ctrl |= CTRL_SPEED_10;
   9599 			break;
   9600 		case IFM_100_TX:
   9601 			sc->sc_ctrl |= CTRL_SPEED_100;
   9602 			break;
   9603 		case IFM_1000_T:
   9604 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9605 			break;
   9606 		default:
   9607 			panic("wm_gmii_mediachange: bad media 0x%x",
   9608 			    ife->ifm_media);
   9609 		}
   9610 	}
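	/*
	 * Commit the new CTRL value; on the oldest (<= 82543) chips, also
	 * reset the PHY so that the forced speed/duplex settings take
	 * effect.
	 */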
   9611 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9612 	CSR_WRITE_FLUSH(sc);
   9613 	if (sc->sc_type <= WM_T_82543)
   9614 		wm_gmii_reset(sc);
   9615 
   9616 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9617 		return 0;
   9618 	return rc;
   9619 }
   9620 
   9621 /*
   9622  * wm_gmii_mediastatus:	[ifmedia interface function]
   9623  *
   9624  *	Get the current interface media status on a 1000BASE-T device.
   9625  */
   9626 static void
   9627 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9628 {
   9629 	struct wm_softc *sc = ifp->if_softc;
   9630 
   9631 	ether_mediastatus(ifp, ifmr);
   9632 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9633 	    | sc->sc_flowflags;
   9634 }
   9635 
   9636 #define	MDI_IO		CTRL_SWDPIN(2)
   9637 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9638 #define	MDI_CLK		CTRL_SWDPIN(3)
   9639 
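/*
 * The i82543 has no MDIC register; MII management frames are bit-banged
 * through the software-definable pins defined above.  A read, as built in
 * wm_gmii_i82543_readreg() below, clocks out a 32-bit preamble of all ones
 * and then a 14-bit header (start, read opcode, 5-bit PHY address, 5-bit
 * register address, MSB first) before turning the bus around and clocking
 * 16 data bits back in; a write sends a single 32-bit frame carrying the
 * turnaround pattern and the data as well.  This is the standard IEEE
 * 802.3 clause 22 management frame format.
 */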
   9640 static void
   9641 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9642 {
   9643 	uint32_t i, v;
   9644 
   9645 	v = CSR_READ(sc, WMREG_CTRL);
   9646 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9647 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9648 
   9649 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9650 		if (data & i)
   9651 			v |= MDI_IO;
   9652 		else
   9653 			v &= ~MDI_IO;
   9654 		CSR_WRITE(sc, WMREG_CTRL, v);
   9655 		CSR_WRITE_FLUSH(sc);
   9656 		delay(10);
   9657 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9658 		CSR_WRITE_FLUSH(sc);
   9659 		delay(10);
   9660 		CSR_WRITE(sc, WMREG_CTRL, v);
   9661 		CSR_WRITE_FLUSH(sc);
   9662 		delay(10);
   9663 	}
   9664 }
   9665 
   9666 static uint32_t
   9667 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9668 {
   9669 	uint32_t v, i, data = 0;
   9670 
   9671 	v = CSR_READ(sc, WMREG_CTRL);
   9672 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9673 	v |= CTRL_SWDPIO(3);
   9674 
   9675 	CSR_WRITE(sc, WMREG_CTRL, v);
   9676 	CSR_WRITE_FLUSH(sc);
   9677 	delay(10);
   9678 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9679 	CSR_WRITE_FLUSH(sc);
   9680 	delay(10);
   9681 	CSR_WRITE(sc, WMREG_CTRL, v);
   9682 	CSR_WRITE_FLUSH(sc);
   9683 	delay(10);
   9684 
   9685 	for (i = 0; i < 16; i++) {
   9686 		data <<= 1;
   9687 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9688 		CSR_WRITE_FLUSH(sc);
   9689 		delay(10);
   9690 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9691 			data |= 1;
   9692 		CSR_WRITE(sc, WMREG_CTRL, v);
   9693 		CSR_WRITE_FLUSH(sc);
   9694 		delay(10);
   9695 	}
   9696 
   9697 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9698 	CSR_WRITE_FLUSH(sc);
   9699 	delay(10);
   9700 	CSR_WRITE(sc, WMREG_CTRL, v);
   9701 	CSR_WRITE_FLUSH(sc);
   9702 	delay(10);
   9703 
   9704 	return data;
   9705 }
   9706 
   9707 #undef MDI_IO
   9708 #undef MDI_DIR
   9709 #undef MDI_CLK
   9710 
   9711 /*
   9712  * wm_gmii_i82543_readreg:	[mii interface function]
   9713  *
   9714  *	Read a PHY register on the GMII (i82543 version).
   9715  */
   9716 static int
   9717 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9718 {
   9719 	struct wm_softc *sc = device_private(dev);
   9720 	int rv;
   9721 
   9722 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9723 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9724 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9725 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9726 
   9727 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9728 	    device_xname(dev), phy, reg, rv));
   9729 
   9730 	return rv;
   9731 }
   9732 
   9733 /*
   9734  * wm_gmii_i82543_writereg:	[mii interface function]
   9735  *
   9736  *	Write a PHY register on the GMII (i82543 version).
   9737  */
   9738 static void
   9739 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9740 {
   9741 	struct wm_softc *sc = device_private(dev);
   9742 
   9743 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9744 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9745 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9746 	    (MII_COMMAND_START << 30), 32);
   9747 }
   9748 
   9749 /*
   9750  * wm_gmii_mdic_readreg:	[mii interface function]
   9751  *
   9752  *	Read a PHY register on the GMII.
   9753  */
   9754 static int
   9755 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9756 {
   9757 	struct wm_softc *sc = device_private(dev);
   9758 	uint32_t mdic = 0;
   9759 	int i, rv;
   9760 
   9761 	if (reg > MII_ADDRMASK) {
   9762 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9763 		    __func__, sc->sc_phytype, reg);
   9764 		reg &= MII_ADDRMASK;
   9765 	}
   9766 
   9767 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9768 	    MDIC_REGADD(reg));
   9769 
   9770 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9771 		mdic = CSR_READ(sc, WMREG_MDIC);
   9772 		if (mdic & MDIC_READY)
   9773 			break;
   9774 		delay(50);
   9775 	}
   9776 
   9777 	if ((mdic & MDIC_READY) == 0) {
   9778 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9779 		    device_xname(dev), phy, reg);
   9780 		rv = 0;
   9781 	} else if (mdic & MDIC_E) {
   9782 #if 0 /* This is normal if no PHY is present. */
   9783 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9784 		    device_xname(dev), phy, reg);
   9785 #endif
   9786 		rv = 0;
   9787 	} else {
   9788 		rv = MDIC_DATA(mdic);
   9789 		if (rv == 0xffff)
   9790 			rv = 0;
   9791 	}
   9792 
   9793 	return rv;
   9794 }
   9795 
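/*
 * On the i82544 and later, management frames are driven by the MDIC
 * register instead: wm_gmii_mdic_readreg() above and wm_gmii_mdic_writereg()
 * below write the opcode, PHY address and register address into MDIC, poll
 * for MDIC_READY (up to WM_GEN_POLL_TIMEOUT * 3 iterations of 50us each)
 * and check MDIC_E, the hardware's error indication (set e.g. when no PHY
 * responds at the given address).
 */
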
   9796 /*
   9797  * wm_gmii_mdic_writereg:	[mii interface function]
   9798  *
   9799  *	Write a PHY register on the GMII.
   9800  */
   9801 static void
   9802 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9803 {
   9804 	struct wm_softc *sc = device_private(dev);
   9805 	uint32_t mdic = 0;
   9806 	int i;
   9807 
   9808 	if (reg > MII_ADDRMASK) {
   9809 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9810 		    __func__, sc->sc_phytype, reg);
   9811 		reg &= MII_ADDRMASK;
   9812 	}
   9813 
   9814 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9815 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9816 
   9817 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9818 		mdic = CSR_READ(sc, WMREG_MDIC);
   9819 		if (mdic & MDIC_READY)
   9820 			break;
   9821 		delay(50);
   9822 	}
   9823 
   9824 	if ((mdic & MDIC_READY) == 0)
   9825 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9826 		    device_xname(dev), phy, reg);
   9827 	else if (mdic & MDIC_E)
   9828 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9829 		    device_xname(dev), phy, reg);
   9830 }
   9831 
   9832 /*
   9833  * wm_gmii_i82544_readreg:	[mii interface function]
   9834  *
   9835  *	Read a PHY register on the GMII.
   9836  */
   9837 static int
   9838 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9839 {
   9840 	struct wm_softc *sc = device_private(dev);
   9841 	int rv;
   9842 
   9843 	if (sc->phy.acquire(sc)) {
   9844 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9845 		return 0;
   9846 	}
   9847 
   9848 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9849 		switch (sc->sc_phytype) {
   9850 		case WMPHY_IGP:
   9851 		case WMPHY_IGP_2:
   9852 		case WMPHY_IGP_3:
   9853 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9854 			break;
   9855 		default:
   9856 #ifdef WM_DEBUG
   9857 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9858 			    __func__, sc->sc_phytype, reg);
   9859 #endif
   9860 			break;
   9861 		}
   9862 	}
   9863 
   9864 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9865 	sc->phy.release(sc);
   9866 
   9867 	return rv;
   9868 }
   9869 
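/*
 * IGP PHYs have more registers than fit in the 5-bit MII address space;
 * for addresses above BME1000_MAX_MULTI_PAGE_REG the full address is
 * written to MII_IGPHY_PAGE_SELECT first and only the low bits
 * (reg & MII_ADDRMASK) are used for the access itself, as in
 * wm_gmii_i82544_readreg() above and the writereg variant below.
 * Reaching this path with any other PHY type is unexpected, hence the
 * WM_DEBUG printf.
 */
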
   9870 /*
   9871  * wm_gmii_i82544_writereg:	[mii interface function]
   9872  *
   9873  *	Write a PHY register on the GMII.
   9874  */
   9875 static void
   9876 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9877 {
   9878 	struct wm_softc *sc = device_private(dev);
   9879 
   9880 	if (sc->phy.acquire(sc)) {
   9881 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9882 		return;
   9883 	}
   9884 
   9885 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9886 		switch (sc->sc_phytype) {
   9887 		case WMPHY_IGP:
   9888 		case WMPHY_IGP_2:
   9889 		case WMPHY_IGP_3:
   9890 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9891 			break;
   9892 		default:
   9893 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9895 			    __func__, sc->sc_phytype, reg);
   9896 #endif
   9897 			break;
   9898 		}
   9899 	}
   9900 
   9901 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9902 	sc->phy.release(sc);
   9903 }
   9904 
   9905 /*
   9906  * wm_gmii_i80003_readreg:	[mii interface function]
   9907  *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9911  */
   9912 static int
   9913 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9914 {
   9915 	struct wm_softc *sc = device_private(dev);
   9916 	int rv;
   9917 
   9918 	if (phy != 1) /* only one PHY on kumeran bus */
   9919 		return 0;
   9920 
   9921 	if (sc->phy.acquire(sc)) {
   9922 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9923 		return 0;
   9924 	}
   9925 
   9926 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9927 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9928 		    reg >> GG82563_PAGE_SHIFT);
   9929 	} else {
   9930 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9931 		    reg >> GG82563_PAGE_SHIFT);
   9932 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9934 	delay(200);
   9935 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9936 	delay(200);
   9937 	sc->phy.release(sc);
   9938 
   9939 	return rv;
   9940 }
   9941 
   9942 /*
   9943  * wm_gmii_i80003_writereg:	[mii interface function]
   9944  *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9948  */
   9949 static void
   9950 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   9951 {
   9952 	struct wm_softc *sc = device_private(dev);
   9953 
   9954 	if (phy != 1) /* only one PHY on kumeran bus */
   9955 		return;
   9956 
   9957 	if (sc->phy.acquire(sc)) {
   9958 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9959 		return;
   9960 	}
   9961 
   9962 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9963 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9964 		    reg >> GG82563_PAGE_SHIFT);
   9965 	} else {
   9966 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9967 		    reg >> GG82563_PAGE_SHIFT);
   9968 	}
	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9970 	delay(200);
   9971 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9972 	delay(200);
   9973 
   9974 	sc->phy.release(sc);
   9975 }
   9976 
   9977 /*
   9978  * wm_gmii_bm_readreg:	[mii interface function]
   9979  *
 *	Read a PHY register on the BM PHY (ICH8 and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9983  */
   9984 static int
   9985 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   9986 {
   9987 	struct wm_softc *sc = device_private(dev);
   9988 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9989 	uint16_t val;
   9990 	int rv;
   9991 
   9992 	if (sc->phy.acquire(sc)) {
   9993 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9994 		return 0;
   9995 	}
   9996 
   9997 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9998 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9999 		    || (reg == 31)) ? 1 : phy;
	/* Page 800 works differently from the rest; it has its own function */
   10001 	if (page == BM_WUC_PAGE) {
   10002 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10003 		rv = val;
   10004 		goto release;
   10005 	}
   10006 
   10007 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10008 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10009 		    && (sc->sc_type != WM_T_82583))
   10010 			wm_gmii_mdic_writereg(dev, phy,
   10011 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10012 		else
   10013 			wm_gmii_mdic_writereg(dev, phy,
   10014 			    BME1000_PHY_PAGE_SELECT, page);
   10015 	}
   10016 
   10017 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10018 
   10019 release:
   10020 	sc->phy.release(sc);
   10021 	return rv;
   10022 }
   10023 
   10024 /*
   10025  * wm_gmii_bm_writereg:	[mii interface function]
   10026  *
 *	Write a PHY register on the BM PHY (ICH8 and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10030  */
   10031 static void
   10032 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10033 {
   10034 	struct wm_softc *sc = device_private(dev);
   10035 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10036 
   10037 	if (sc->phy.acquire(sc)) {
   10038 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10039 		return;
   10040 	}
   10041 
   10042 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10043 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10044 		    || (reg == 31)) ? 1 : phy;
	/* Page 800 works differently from the rest; it has its own function */
   10046 	if (page == BM_WUC_PAGE) {
   10047 		uint16_t tmp;
   10048 
   10049 		tmp = val;
   10050 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10051 		goto release;
   10052 	}
   10053 
   10054 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10055 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10056 		    && (sc->sc_type != WM_T_82583))
   10057 			wm_gmii_mdic_writereg(dev, phy,
   10058 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10059 		else
   10060 			wm_gmii_mdic_writereg(dev, phy,
   10061 			    BME1000_PHY_PAGE_SELECT, page);
   10062 	}
   10063 
   10064 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10065 
   10066 release:
   10067 	sc->phy.release(sc);
   10068 }
   10069 
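/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register on page 800.
 *	The three numbered steps below mirror the e1000 helpers they name:
 *	enable wakeup register access from page 769, perform the access
 *	through the address/data opcode registers on page 800, and finally
 *	restore the saved WUCE value.
 */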
   10070 static void
   10071 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10072 {
   10073 	struct wm_softc *sc = device_private(dev);
   10074 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10075 	uint16_t wuce, reg;
   10076 
   10077 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10078 		device_xname(dev), __func__));
   10079 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10080 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   10082 	}
   10083 
   10084 	/*
   10085 	 * 1) Enable PHY wakeup register first.
   10086 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10087 	 */
   10088 
   10089 	/* Set page 769 */
   10090 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10091 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10092 
   10093 	/* Read WUCE and save it */
   10094 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10095 
   10096 	reg = wuce | BM_WUC_ENABLE_BIT;
   10097 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10098 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10099 
   10100 	/* Select page 800 */
   10101 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10102 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10103 
   10104 	/*
   10105 	 * 2) Access PHY wakeup register.
	 * See e1000_access_phy_wakeup_reg_bm().
   10107 	 */
   10108 
	/* Write the register number within page 800 */
   10110 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10111 
   10112 	if (rd)
   10113 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10114 	else
   10115 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10116 
   10117 	/*
   10118 	 * 3) Disable PHY wakeup register.
   10119 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10120 	 */
   10121 	/* Set page 769 */
   10122 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10123 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10124 
   10125 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10126 }
   10127 
   10128 /*
   10129  * wm_gmii_hv_readreg:	[mii interface function]
   10130  *
 *	Read a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10134  */
   10135 static int
   10136 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10137 {
   10138 	struct wm_softc *sc = device_private(dev);
   10139 	int rv;
   10140 
   10141 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10142 		device_xname(dev), __func__));
   10143 	if (sc->phy.acquire(sc)) {
   10144 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10145 		return 0;
   10146 	}
   10147 
   10148 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10149 	sc->phy.release(sc);
   10150 	return rv;
   10151 }
   10152 
   10153 static int
   10154 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10155 {
   10156 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10157 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10158 	uint16_t val;
   10159 	int rv;
   10160 
   10161 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10162 
	/* Page 800 works differently from the rest; it has its own function */
   10164 	if (page == BM_WUC_PAGE) {
   10165 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10166 		return val;
   10167 	}
   10168 
   10169 	/*
	 * Pages below 768 (other than page 0) work differently from the
	 * rest and are not handled here.
   10172 	 */
   10173 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10174 		printf("gmii_hv_readreg!!!\n");
   10175 		return 0;
   10176 	}
   10177 
   10178 	/*
   10179 	 * XXX I21[789] documents say that the SMBus Address register is at
   10180 	 * PHY address 01, Page 0 (not 768), Register 26.
   10181 	 */
   10182 	if (page == HV_INTC_FC_PAGE_START)
   10183 		page = 0;
   10184 
   10185 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10186 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10187 		    page << BME1000_PAGE_SHIFT);
   10188 	}
   10189 
   10190 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10191 	return rv;
   10192 }
   10193 
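/*
 * For HV (PCH) PHYs the "reg" argument packs both a page and a register
 * number, unpacked with BM_PHY_REG_PAGE() and BM_PHY_REG_NUM().  Pages at
 * or above HV_INTC_FC_PAGE_START live on PHY address 1, which is why the
 * locked helpers override "phy" before accessing the register.
 */
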
   10194 /*
   10195  * wm_gmii_hv_writereg:	[mii interface function]
   10196  *
 *	Write a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10200  */
   10201 static void
   10202 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10203 {
   10204 	struct wm_softc *sc = device_private(dev);
   10205 
   10206 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10207 		device_xname(dev), __func__));
   10208 
   10209 	if (sc->phy.acquire(sc)) {
   10210 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10211 		return;
   10212 	}
   10213 
   10214 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10215 	sc->phy.release(sc);
   10216 }
   10217 
   10218 static void
   10219 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10220 {
   10221 	struct wm_softc *sc = device_private(dev);
   10222 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10223 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10224 
   10225 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10226 
	/* Page 800 works differently from the rest; it has its own function */
   10228 	if (page == BM_WUC_PAGE) {
   10229 		uint16_t tmp;
   10230 
   10231 		tmp = val;
   10232 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10233 		return;
   10234 	}
   10235 
   10236 	/*
 * Pages below 768 (other than page 0) work differently from the rest
 * and are not handled here.
   10239 	 */
   10240 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10241 		printf("gmii_hv_writereg!!!\n");
   10242 		return;
   10243 	}
   10244 
	/*
	 * XXX I21[789] documents say that the SMBus Address register is at
	 * PHY address 01, Page 0 (not 768), Register 26.
	 */
	if (page == HV_INTC_FC_PAGE_START)
		page = 0;

	/*
	 * XXX Workaround MDIO accesses being disabled after entering IEEE
	 * Power Down (whenever bit 11 of the PHY control register is set)
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0)) {
			printf("XXX need workaround\n");
		}
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
	}
   10274 
   10275 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10276 }
   10277 
   10278 /*
   10279  * wm_gmii_82580_readreg:	[mii interface function]
   10280  *
   10281  *	Read a PHY register on the 82580 and I350.
   10282  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10284  */
   10285 static int
   10286 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10287 {
   10288 	struct wm_softc *sc = device_private(dev);
   10289 	int rv;
   10290 
   10291 	if (sc->phy.acquire(sc) != 0) {
   10292 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10293 		return 0;
   10294 	}
   10295 
   10296 #ifdef DIAGNOSTIC
   10297 	if (reg > MII_ADDRMASK) {
   10298 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10299 		    __func__, sc->sc_phytype, reg);
   10300 		reg &= MII_ADDRMASK;
   10301 	}
   10302 #endif
   10303 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10304 
   10305 	sc->phy.release(sc);
   10306 	return rv;
   10307 }
   10308 
   10309 /*
   10310  * wm_gmii_82580_writereg:	[mii interface function]
   10311  *
   10312  *	Write a PHY register on the 82580 and I350.
   10313  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10315  */
   10316 static void
   10317 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10318 {
   10319 	struct wm_softc *sc = device_private(dev);
   10320 
   10321 	if (sc->phy.acquire(sc) != 0) {
   10322 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10323 		return;
   10324 	}
   10325 
   10326 #ifdef DIAGNOSTIC
   10327 	if (reg > MII_ADDRMASK) {
   10328 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10329 		    __func__, sc->sc_phytype, reg);
   10330 		reg &= MII_ADDRMASK;
   10331 	}
   10332 #endif
   10333 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10334 
   10335 	sc->phy.release(sc);
   10336 }
   10337 
   10338 /*
   10339  * wm_gmii_gs40g_readreg:	[mii interface function]
   10340  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10344  */
   10345 static int
   10346 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10347 {
   10348 	struct wm_softc *sc = device_private(dev);
   10349 	int page, offset;
   10350 	int rv;
   10351 
   10352 	/* Acquire semaphore */
   10353 	if (sc->phy.acquire(sc)) {
   10354 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10355 		return 0;
   10356 	}
   10357 
   10358 	/* Page select */
   10359 	page = reg >> GS40G_PAGE_SHIFT;
   10360 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10361 
   10362 	/* Read reg */
   10363 	offset = reg & GS40G_OFFSET_MASK;
   10364 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10365 
   10366 	sc->phy.release(sc);
   10367 	return rv;
   10368 }
   10369 
   10370 /*
   10371  * wm_gmii_gs40g_writereg:	[mii interface function]
   10372  *
   10373  *	Write a PHY register on the I210 and I211.
   10374  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10376  */
   10377 static void
   10378 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10379 {
   10380 	struct wm_softc *sc = device_private(dev);
   10381 	int page, offset;
   10382 
   10383 	/* Acquire semaphore */
   10384 	if (sc->phy.acquire(sc)) {
   10385 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10386 		return;
   10387 	}
   10388 
   10389 	/* Page select */
   10390 	page = reg >> GS40G_PAGE_SHIFT;
   10391 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10392 
   10393 	/* Write reg */
   10394 	offset = reg & GS40G_OFFSET_MASK;
   10395 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10396 
   10397 	/* Release semaphore */
   10398 	sc->phy.release(sc);
   10399 }
   10400 
   10401 /*
   10402  * wm_gmii_statchg:	[mii interface function]
   10403  *
   10404  *	Callback from MII layer when media changes.
   10405  */
   10406 static void
   10407 wm_gmii_statchg(struct ifnet *ifp)
   10408 {
   10409 	struct wm_softc *sc = ifp->if_softc;
   10410 	struct mii_data *mii = &sc->sc_mii;
   10411 
   10412 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10413 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10414 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10415 
   10416 	/*
   10417 	 * Get flow control negotiation result.
   10418 	 */
   10419 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10420 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10421 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10422 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10423 	}
   10424 
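	/*
	 * Map the negotiated pause flags onto the hardware: TX pause
	 * becomes CTRL_TFCE plus the FCRTL XON enable, and RX pause
	 * becomes CTRL_RFCE.
	 */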
   10425 	if (sc->sc_flowflags & IFM_FLOW) {
   10426 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10427 			sc->sc_ctrl |= CTRL_TFCE;
   10428 			sc->sc_fcrtl |= FCRTL_XONE;
   10429 		}
   10430 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10431 			sc->sc_ctrl |= CTRL_RFCE;
   10432 	}
   10433 
   10434 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10435 		DPRINTF(WM_DEBUG_LINK,
   10436 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10437 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10438 	} else {
   10439 		DPRINTF(WM_DEBUG_LINK,
   10440 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10441 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10442 	}
   10443 
   10444 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10445 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10446 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10447 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10448 	if (sc->sc_type == WM_T_80003) {
   10449 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10450 		case IFM_1000_T:
   10451 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10452 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10454 			break;
   10455 		default:
   10456 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10457 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10459 			break;
   10460 		}
   10461 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10462 	}
   10463 }
   10464 
   10465 /* kumeran related (80003, ICH* and PCH*) */
   10466 
   10467 /*
   10468  * wm_kmrn_readreg:
   10469  *
   10470  *	Read a kumeran register
   10471  */
   10472 static int
   10473 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10474 {
   10475 	int rv;
   10476 
   10477 	if (sc->sc_type == WM_T_80003)
   10478 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10479 	else
   10480 		rv = sc->phy.acquire(sc);
   10481 	if (rv != 0) {
   10482 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10483 		    __func__);
   10484 		return 0;
   10485 	}
   10486 
   10487 	rv = wm_kmrn_readreg_locked(sc, reg);
   10488 
   10489 	if (sc->sc_type == WM_T_80003)
   10490 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10491 	else
   10492 		sc->phy.release(sc);
   10493 
   10494 	return rv;
   10495 }
   10496 
   10497 static int
   10498 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10499 {
   10500 	int rv;
   10501 
   10502 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10503 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10504 	    KUMCTRLSTA_REN);
   10505 	CSR_WRITE_FLUSH(sc);
   10506 	delay(2);
   10507 
   10508 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10509 
   10510 	return rv;
   10511 }
   10512 
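/*
 * Kumeran registers are reached through the single KUMCTRLSTA window:
 * a read (above) writes the register offset with KUMCTRLSTA_REN set,
 * waits briefly, and pulls the data back out of the same register,
 * while a write (below) stores the offset and data in one access.
 */
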
   10513 /*
   10514  * wm_kmrn_writereg:
   10515  *
   10516  *	Write a kumeran register
   10517  */
   10518 static void
   10519 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10520 {
   10521 	int rv;
   10522 
   10523 	if (sc->sc_type == WM_T_80003)
   10524 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10525 	else
   10526 		rv = sc->phy.acquire(sc);
   10527 	if (rv != 0) {
   10528 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10529 		    __func__);
   10530 		return;
   10531 	}
   10532 
   10533 	wm_kmrn_writereg_locked(sc, reg, val);
   10534 
   10535 	if (sc->sc_type == WM_T_80003)
   10536 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10537 	else
   10538 		sc->phy.release(sc);
   10539 }
   10540 
   10541 static void
   10542 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10543 {
   10544 
   10545 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10546 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10547 	    (val & KUMCTRLSTA_MASK));
   10548 }
   10549 
   10550 /* SGMII related */
   10551 
   10552 /*
   10553  * wm_sgmii_uses_mdio
   10554  *
   10555  * Check whether the transaction is to the internal PHY or the external
   10556  * MDIO interface. Return true if it's MDIO.
   10557  */
   10558 static bool
   10559 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10560 {
   10561 	uint32_t reg;
   10562 	bool ismdio = false;
   10563 
   10564 	switch (sc->sc_type) {
   10565 	case WM_T_82575:
   10566 	case WM_T_82576:
   10567 		reg = CSR_READ(sc, WMREG_MDIC);
   10568 		ismdio = ((reg & MDIC_DEST) != 0);
   10569 		break;
   10570 	case WM_T_82580:
   10571 	case WM_T_I350:
   10572 	case WM_T_I354:
   10573 	case WM_T_I210:
   10574 	case WM_T_I211:
   10575 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10576 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10577 		break;
   10578 	default:
   10579 		break;
   10580 	}
   10581 
   10582 	return ismdio;
   10583 }
   10584 
   10585 /*
   10586  * wm_sgmii_readreg:	[mii interface function]
   10587  *
 *	Read a PHY register on the SGMII (through the I2C command interface).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10591  */
   10592 static int
   10593 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10594 {
   10595 	struct wm_softc *sc = device_private(dev);
   10596 	uint32_t i2ccmd;
   10597 	int i, rv;
   10598 
   10599 	if (sc->phy.acquire(sc)) {
   10600 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10601 		return 0;
   10602 	}
   10603 
   10604 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10605 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10606 	    | I2CCMD_OPCODE_READ;
   10607 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10608 
   10609 	/* Poll the ready bit */
   10610 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10611 		delay(50);
   10612 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10613 		if (i2ccmd & I2CCMD_READY)
   10614 			break;
   10615 	}
   10616 	if ((i2ccmd & I2CCMD_READY) == 0)
   10617 		device_printf(dev, "I2CCMD Read did not complete\n");
   10618 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10619 		device_printf(dev, "I2CCMD Error bit set\n");
   10620 
   10621 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10622 
   10623 	sc->phy.release(sc);
   10624 	return rv;
   10625 }
   10626 
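/*
 * The 16 data bits travel over the I2C interface in the opposite byte
 * order from their MII representation, so both directions swap bytes:
 * a register value of 0x1234, for example, appears in the low bits of
 * I2CCMD as 0x3412 and is swapped back before being returned (and
 * symmetrically for the write path below).
 */
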
   10627 /*
   10628  * wm_sgmii_writereg:	[mii interface function]
   10629  *
   10630  *	Write a PHY register on the SGMII.
   10631  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10633  */
   10634 static void
   10635 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10636 {
   10637 	struct wm_softc *sc = device_private(dev);
   10638 	uint32_t i2ccmd;
   10639 	int i;
   10640 	int val_swapped;
   10641 
   10642 	if (sc->phy.acquire(sc) != 0) {
   10643 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10644 		return;
   10645 	}
   10646 	/* Swap the data bytes for the I2C interface */
   10647 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10648 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10649 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10650 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10651 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10652 
   10653 	/* Poll the ready bit */
   10654 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10655 		delay(50);
   10656 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10657 		if (i2ccmd & I2CCMD_READY)
   10658 			break;
   10659 	}
   10660 	if ((i2ccmd & I2CCMD_READY) == 0)
   10661 		device_printf(dev, "I2CCMD Write did not complete\n");
   10662 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10663 		device_printf(dev, "I2CCMD Error bit set\n");
   10664 
   10665 	sc->phy.release(sc);
   10666 }
   10667 
   10668 /* TBI related */
   10669 
   10670 /*
   10671  * wm_tbi_mediainit:
   10672  *
   10673  *	Initialize media for use on 1000BASE-X devices.
   10674  */
   10675 static void
   10676 wm_tbi_mediainit(struct wm_softc *sc)
   10677 {
   10678 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10679 	const char *sep = "";
   10680 
   10681 	if (sc->sc_type < WM_T_82543)
   10682 		sc->sc_tipg = TIPG_WM_DFLT;
   10683 	else
   10684 		sc->sc_tipg = TIPG_LG_DFLT;
   10685 
   10686 	sc->sc_tbi_serdes_anegticks = 5;
   10687 
   10688 	/* Initialize our media structures */
   10689 	sc->sc_mii.mii_ifp = ifp;
   10690 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10691 
   10692 	if ((sc->sc_type >= WM_T_82575)
   10693 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10694 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10695 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10696 	else
   10697 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10698 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10699 
   10700 	/*
   10701 	 * SWD Pins:
   10702 	 *
   10703 	 *	0 = Link LED (output)
   10704 	 *	1 = Loss Of Signal (input)
   10705 	 */
   10706 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10707 
   10708 	/* XXX Perhaps this is only for TBI */
   10709 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10710 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10711 
   10712 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10713 		sc->sc_ctrl &= ~CTRL_LRST;
   10714 
   10715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10716 
   10717 #define	ADD(ss, mm, dd)							\
   10718 do {									\
   10719 	aprint_normal("%s%s", sep, ss);					\
   10720 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10721 	sep = ", ";							\
   10722 } while (/*CONSTCOND*/0)
   10723 
   10724 	aprint_normal_dev(sc->sc_dev, "");
   10725 
   10726 	if (sc->sc_type == WM_T_I354) {
   10727 		uint32_t status;
   10728 
   10729 		status = CSR_READ(sc, WMREG_STATUS);
   10730 		if (((status & STATUS_2P5_SKU) != 0)
   10731 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   10733 		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10735 	} else if (sc->sc_type == WM_T_82545) {
   10736 		/* Only 82545 is LX (XXX except SFP) */
   10737 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10738 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10739 	} else {
   10740 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10741 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10742 	}
   10743 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10744 	aprint_normal("\n");
   10745 
   10746 #undef ADD
   10747 
   10748 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10749 }
   10750 
   10751 /*
   10752  * wm_tbi_mediachange:	[ifmedia interface function]
   10753  *
   10754  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10755  */
   10756 static int
   10757 wm_tbi_mediachange(struct ifnet *ifp)
   10758 {
   10759 	struct wm_softc *sc = ifp->if_softc;
   10760 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10761 	uint32_t status;
   10762 	int i;
   10763 
   10764 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10765 		/* XXX need some work for >= 82571 and < 82575 */
   10766 		if (sc->sc_type < WM_T_82575)
   10767 			return 0;
   10768 	}
   10769 
   10770 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10771 	    || (sc->sc_type >= WM_T_82575))
   10772 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10773 
   10774 	sc->sc_ctrl &= ~CTRL_LRST;
   10775 	sc->sc_txcw = TXCW_ANE;
   10776 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10777 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10778 	else if (ife->ifm_media & IFM_FDX)
   10779 		sc->sc_txcw |= TXCW_FD;
   10780 	else
   10781 		sc->sc_txcw |= TXCW_HD;
   10782 
   10783 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10784 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10785 
   10786 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10787 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10788 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10789 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10790 	CSR_WRITE_FLUSH(sc);
   10791 	delay(1000);
   10792 
   10793 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10794 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10795 
   10796 	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal, and clear if they don't.
   10799 	 */
   10800 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10801 		/* Have signal; wait for the link to come up. */
   10802 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10803 			delay(10000);
   10804 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10805 				break;
   10806 		}
   10807 
   10808 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10809 			    device_xname(sc->sc_dev),i));
   10810 
   10811 		status = CSR_READ(sc, WMREG_STATUS);
   10812 		DPRINTF(WM_DEBUG_LINK,
   10813 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10814 			device_xname(sc->sc_dev),status, STATUS_LU));
   10815 		if (status & STATUS_LU) {
   10816 			/* Link is up. */
   10817 			DPRINTF(WM_DEBUG_LINK,
   10818 			    ("%s: LINK: set media -> link up %s\n",
   10819 			    device_xname(sc->sc_dev),
   10820 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10821 
   10822 			/*
			 * NOTE: The hardware updates TFCE and RFCE in
			 * CTRL automatically, so we should re-read CTRL
			 * into sc->sc_ctrl.
   10825 			 */
   10826 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10827 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10828 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10829 			if (status & STATUS_FD)
   10830 				sc->sc_tctl |=
   10831 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10832 			else
   10833 				sc->sc_tctl |=
   10834 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10835 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10836 				sc->sc_fcrtl |= FCRTL_XONE;
   10837 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10838 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10839 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10840 				      sc->sc_fcrtl);
   10841 			sc->sc_tbi_linkup = 1;
   10842 		} else {
   10843 			if (i == WM_LINKUP_TIMEOUT)
   10844 				wm_check_for_link(sc);
   10845 			/* Link is down. */
   10846 			DPRINTF(WM_DEBUG_LINK,
   10847 			    ("%s: LINK: set media -> link down\n",
   10848 			    device_xname(sc->sc_dev)));
   10849 			sc->sc_tbi_linkup = 0;
   10850 		}
   10851 	} else {
   10852 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10853 		    device_xname(sc->sc_dev)));
   10854 		sc->sc_tbi_linkup = 0;
   10855 	}
   10856 
   10857 	wm_tbi_serdes_set_linkled(sc);
   10858 
   10859 	return 0;
   10860 }
   10861 
   10862 /*
   10863  * wm_tbi_mediastatus:	[ifmedia interface function]
   10864  *
   10865  *	Get the current interface media status on a 1000BASE-X device.
   10866  */
   10867 static void
   10868 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10869 {
   10870 	struct wm_softc *sc = ifp->if_softc;
   10871 	uint32_t ctrl, status;
   10872 
   10873 	ifmr->ifm_status = IFM_AVALID;
   10874 	ifmr->ifm_active = IFM_ETHER;
   10875 
   10876 	status = CSR_READ(sc, WMREG_STATUS);
   10877 	if ((status & STATUS_LU) == 0) {
   10878 		ifmr->ifm_active |= IFM_NONE;
   10879 		return;
   10880 	}
   10881 
   10882 	ifmr->ifm_status |= IFM_ACTIVE;
   10883 	/* Only 82545 is LX */
   10884 	if (sc->sc_type == WM_T_82545)
   10885 		ifmr->ifm_active |= IFM_1000_LX;
   10886 	else
   10887 		ifmr->ifm_active |= IFM_1000_SX;
   10888 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10889 		ifmr->ifm_active |= IFM_FDX;
   10890 	else
   10891 		ifmr->ifm_active |= IFM_HDX;
   10892 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10893 	if (ctrl & CTRL_RFCE)
   10894 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10895 	if (ctrl & CTRL_TFCE)
   10896 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10897 }
   10898 
   10899 /* XXX TBI only */
   10900 static int
   10901 wm_check_for_link(struct wm_softc *sc)
   10902 {
   10903 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10904 	uint32_t rxcw;
   10905 	uint32_t ctrl;
   10906 	uint32_t status;
   10907 	uint32_t sig;
   10908 
   10909 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10910 		/* XXX need some work for >= 82571 */
   10911 		if (sc->sc_type >= WM_T_82571) {
   10912 			sc->sc_tbi_linkup = 1;
   10913 			return 0;
   10914 		}
   10915 	}
   10916 
   10917 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10918 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10919 	status = CSR_READ(sc, WMREG_STATUS);
   10920 
   10921 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10922 
   10923 	DPRINTF(WM_DEBUG_LINK,
   10924 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10925 		device_xname(sc->sc_dev), __func__,
   10926 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10927 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10928 
   10929 	/*
   10930 	 * SWDPIN   LU RXCW
   10931 	 *      0    0    0
   10932 	 *      0    0    1	(should not happen)
   10933 	 *      0    1    0	(should not happen)
   10934 	 *      0    1    1	(should not happen)
   10935 	 *      1    0    0	Disable autonego and force linkup
   10936 	 *      1    0    1	got /C/ but not linkup yet
   10937 	 *      1    1    0	(linkup)
   10938 	 *      1    1    1	If IFM_AUTO, back to autonego
   10939 	 *
   10940 	 */
   10941 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10942 	    && ((status & STATUS_LU) == 0)
   10943 	    && ((rxcw & RXCW_C) == 0)) {
   10944 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10945 			__func__));
   10946 		sc->sc_tbi_linkup = 0;
   10947 		/* Disable auto-negotiation in the TXCW register */
   10948 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10949 
   10950 		/*
   10951 		 * Force link-up and also force full-duplex.
   10952 		 *
		 * NOTE: The hardware updates TFCE and RFCE in CTRL
		 * automatically, so we should re-read CTRL into sc->sc_ctrl.
   10955 		 */
   10956 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10957 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10958 	} else if (((status & STATUS_LU) != 0)
   10959 	    && ((rxcw & RXCW_C) != 0)
   10960 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10961 		sc->sc_tbi_linkup = 1;
   10962 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10963 			__func__));
   10964 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10965 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10966 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10967 	    && ((rxcw & RXCW_C) != 0)) {
   10968 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10969 	} else {
   10970 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10971 			status));
   10972 	}
   10973 
   10974 	return 0;
   10975 }
   10976 
   10977 /*
   10978  * wm_tbi_tick:
   10979  *
   10980  *	Check the link on TBI devices.
   10981  *	This function acts as mii_tick().
   10982  */
   10983 static void
   10984 wm_tbi_tick(struct wm_softc *sc)
   10985 {
   10986 	struct mii_data *mii = &sc->sc_mii;
   10987 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10988 	uint32_t status;
   10989 
   10990 	KASSERT(WM_CORE_LOCKED(sc));
   10991 
   10992 	status = CSR_READ(sc, WMREG_STATUS);
   10993 
   10994 	/* XXX is this needed? */
   10995 	(void)CSR_READ(sc, WMREG_RXCW);
   10996 	(void)CSR_READ(sc, WMREG_CTRL);
   10997 
   10998 	/* set link status */
   10999 	if ((status & STATUS_LU) == 0) {
   11000 		DPRINTF(WM_DEBUG_LINK,
   11001 		    ("%s: LINK: checklink -> down\n",
   11002 			device_xname(sc->sc_dev)));
   11003 		sc->sc_tbi_linkup = 0;
   11004 	} else if (sc->sc_tbi_linkup == 0) {
   11005 		DPRINTF(WM_DEBUG_LINK,
   11006 		    ("%s: LINK: checklink -> up %s\n",
   11007 			device_xname(sc->sc_dev),
   11008 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11009 		sc->sc_tbi_linkup = 1;
   11010 		sc->sc_tbi_serdes_ticks = 0;
   11011 	}
   11012 
   11013 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11014 		goto setled;
   11015 
   11016 	if ((status & STATUS_LU) == 0) {
   11017 		sc->sc_tbi_linkup = 0;
   11018 		/* If the timer expired, retry autonegotiation */
   11019 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11020 		    && (++sc->sc_tbi_serdes_ticks
   11021 			>= sc->sc_tbi_serdes_anegticks)) {
   11022 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11023 			sc->sc_tbi_serdes_ticks = 0;
   11024 			/*
   11025 			 * Reset the link, and let autonegotiation do
   11026 			 * its thing
   11027 			 */
   11028 			sc->sc_ctrl |= CTRL_LRST;
   11029 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11030 			CSR_WRITE_FLUSH(sc);
   11031 			delay(1000);
   11032 			sc->sc_ctrl &= ~CTRL_LRST;
   11033 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11034 			CSR_WRITE_FLUSH(sc);
   11035 			delay(1000);
   11036 			CSR_WRITE(sc, WMREG_TXCW,
   11037 			    sc->sc_txcw & ~TXCW_ANE);
   11038 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11039 		}
   11040 	}
   11041 
   11042 setled:
   11043 	wm_tbi_serdes_set_linkled(sc);
   11044 }
   11045 
   11046 /* SERDES related */
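/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SERDES link: enable the PCS and drive SWDPIN(3) low,
 *	which powers up the SFP cage / SGMII PHY (cf. the SGMII PHY
 *	power-up in the GMII attach path above).
 */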
   11047 static void
   11048 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11049 {
   11050 	uint32_t reg;
   11051 
   11052 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11053 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11054 		return;
   11055 
   11056 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11057 	reg |= PCS_CFG_PCS_EN;
   11058 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11059 
   11060 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11061 	reg &= ~CTRL_EXT_SWDPIN(3);
   11062 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11063 	CSR_WRITE_FLUSH(sc);
   11064 }
   11065 
   11066 static int
   11067 wm_serdes_mediachange(struct ifnet *ifp)
   11068 {
   11069 	struct wm_softc *sc = ifp->if_softc;
   11070 	bool pcs_autoneg = true; /* XXX */
   11071 	uint32_t ctrl_ext, pcs_lctl, reg;
   11072 
   11073 	/* XXX Currently, this function is not called on 8257[12] */
   11074 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11075 	    || (sc->sc_type >= WM_T_82575))
   11076 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11077 
   11078 	wm_serdes_power_up_link_82575(sc);
   11079 
   11080 	sc->sc_ctrl |= CTRL_SLU;
   11081 
   11082 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11083 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11084 
   11085 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11086 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11087 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11088 	case CTRL_EXT_LINK_MODE_SGMII:
   11089 		pcs_autoneg = true;
   11090 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11091 		break;
   11092 	case CTRL_EXT_LINK_MODE_1000KX:
   11093 		pcs_autoneg = false;
   11094 		/* FALLTHROUGH */
   11095 	default:
   11096 		if ((sc->sc_type == WM_T_82575)
   11097 		    || (sc->sc_type == WM_T_82576)) {
   11098 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11099 				pcs_autoneg = false;
   11100 		}
   11101 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11102 		    | CTRL_FRCFDX;
   11103 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11104 	}
   11105 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11106 
   11107 	if (pcs_autoneg) {
   11108 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11109 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11110 
   11111 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11112 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11113 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11114 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11115 	} else
   11116 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11117 
   11118 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11119 
   11121 	return 0;
   11122 }
   11123 
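/*
 * Note that wm_serdes_mediachange() above and wm_serdes_mediastatus()
 * below apply the TXCW_SYM_PAUSE and TXCW_ASYM_PAUSE masks to the
 * PCS_ANADV and PCS_LPAB values, relying on those registers sharing the
 * TXCW pause-bit layout.
 */
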
   11124 static void
   11125 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11126 {
   11127 	struct wm_softc *sc = ifp->if_softc;
   11128 	struct mii_data *mii = &sc->sc_mii;
   11129 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11130 	uint32_t pcs_adv, pcs_lpab, reg;
   11131 
   11132 	ifmr->ifm_status = IFM_AVALID;
   11133 	ifmr->ifm_active = IFM_ETHER;
   11134 
   11135 	/* Check PCS */
   11136 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11137 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11138 		ifmr->ifm_active |= IFM_NONE;
   11139 		sc->sc_tbi_linkup = 0;
   11140 		goto setled;
   11141 	}
   11142 
   11143 	sc->sc_tbi_linkup = 1;
   11144 	ifmr->ifm_status |= IFM_ACTIVE;
   11145 	if (sc->sc_type == WM_T_I354) {
   11146 		uint32_t status;
   11147 
   11148 		status = CSR_READ(sc, WMREG_STATUS);
   11149 		if (((status & STATUS_2P5_SKU) != 0)
   11150 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11151 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11152 		} else
   11153 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11154 	} else {
   11155 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11156 		case PCS_LSTS_SPEED_10:
   11157 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11158 			break;
   11159 		case PCS_LSTS_SPEED_100:
   11160 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11161 			break;
   11162 		case PCS_LSTS_SPEED_1000:
   11163 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11164 			break;
   11165 		default:
   11166 			device_printf(sc->sc_dev, "Unknown speed\n");
   11167 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11168 			break;
   11169 		}
   11170 	}
   11171 	if ((reg & PCS_LSTS_FDX) != 0)
   11172 		ifmr->ifm_active |= IFM_FDX;
   11173 	else
   11174 		ifmr->ifm_active |= IFM_HDX;
   11175 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11176 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11177 		/* Check flow */
   11178 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11179 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11180 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11181 			goto setled;
   11182 		}
   11183 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11184 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11185 		DPRINTF(WM_DEBUG_LINK,
   11186 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11187 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11188 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11189 			mii->mii_media_active |= IFM_FLOW
   11190 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11191 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11192 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11193 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11194 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11195 			mii->mii_media_active |= IFM_FLOW
   11196 			    | IFM_ETH_TXPAUSE;
   11197 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11198 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11199 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11200 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11201 			mii->mii_media_active |= IFM_FLOW
   11202 			    | IFM_ETH_RXPAUSE;
   11203 		}
   11204 	}
   11205 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11206 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11207 setled:
   11208 	wm_tbi_serdes_set_linkled(sc);
   11209 }
   11210 
   11211 /*
   11212  * wm_serdes_tick:
   11213  *
   11214  *	Check the link on serdes devices.
   11215  */
   11216 static void
   11217 wm_serdes_tick(struct wm_softc *sc)
   11218 {
   11219 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11220 	struct mii_data *mii = &sc->sc_mii;
   11221 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11222 	uint32_t reg;
   11223 
   11224 	KASSERT(WM_CORE_LOCKED(sc));
   11225 
   11226 	mii->mii_media_status = IFM_AVALID;
   11227 	mii->mii_media_active = IFM_ETHER;
   11228 
   11229 	/* Check PCS */
   11230 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11231 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11232 		mii->mii_media_status |= IFM_ACTIVE;
   11233 		sc->sc_tbi_linkup = 1;
   11234 		sc->sc_tbi_serdes_ticks = 0;
   11235 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11236 		if ((reg & PCS_LSTS_FDX) != 0)
   11237 			mii->mii_media_active |= IFM_FDX;
   11238 		else
   11239 			mii->mii_media_active |= IFM_HDX;
   11240 	} else {
		mii->mii_media_active |= IFM_NONE;
   11242 		sc->sc_tbi_linkup = 0;
   11243 		/* If the timer expired, retry autonegotiation */
   11244 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11245 		    && (++sc->sc_tbi_serdes_ticks
   11246 			>= sc->sc_tbi_serdes_anegticks)) {
   11247 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11248 			sc->sc_tbi_serdes_ticks = 0;
   11249 			/* XXX */
   11250 			wm_serdes_mediachange(ifp);
   11251 		}
   11252 	}
   11253 
   11254 	wm_tbi_serdes_set_linkled(sc);
   11255 }
   11256 
   11257 /* SFP related */
   11258 
   11259 static int
   11260 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11261 {
   11262 	uint32_t i2ccmd;
   11263 	int i;
   11264 
   11265 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11266 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11267 
   11268 	/* Poll the ready bit */
   11269 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11270 		delay(50);
   11271 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11272 		if (i2ccmd & I2CCMD_READY)
   11273 			break;
   11274 	}
   11275 	if ((i2ccmd & I2CCMD_READY) == 0)
   11276 		return -1;
   11277 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11278 		return -1;
   11279 
   11280 	*data = i2ccmd & 0x00ff;
   11281 
   11282 	return 0;
   11283 }
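
/*
 * A minimal usage sketch (assuming the I2C interface has already been
 * enabled via CTRL_EXT_I2C_ENA, as wm_sfp_get_media_type() below does):
 *
 *	uint8_t id;
 *
 *	if (wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &id) == 0)
 *		printf("SFF identifier = 0x%02x\n", id);
 *
 * A nonzero return means the I2CCMD ready bit never came up or the error
 * bit was set, so callers are expected to retry (see the loop below).
 */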
   11284 
   11285 static uint32_t
   11286 wm_sfp_get_media_type(struct wm_softc *sc)
   11287 {
   11288 	uint32_t ctrl_ext;
   11289 	uint8_t val = 0;
   11290 	int timeout = 3;
   11291 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11292 	int rv = -1;
   11293 
   11294 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11295 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11296 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11297 	CSR_WRITE_FLUSH(sc);
   11298 
   11299 	/* Read SFP module data */
   11300 	while (timeout) {
   11301 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11302 		if (rv == 0)
   11303 			break;
   11304 		delay(100*1000); /* XXX too big */
   11305 		timeout--;
   11306 	}
   11307 	if (rv != 0)
   11308 		goto out;
   11309 	switch (val) {
   11310 	case SFF_SFP_ID_SFF:
   11311 		aprint_normal_dev(sc->sc_dev,
   11312 		    "Module/Connector soldered to board\n");
   11313 		break;
   11314 	case SFF_SFP_ID_SFP:
   11315 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11316 		break;
   11317 	case SFF_SFP_ID_UNKNOWN:
   11318 		goto out;
   11319 	default:
   11320 		break;
   11321 	}
   11322 
   11323 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11324 	if (rv != 0) {
   11325 		goto out;
   11326 	}
   11327 
   11328 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11329 		mediatype = WM_MEDIATYPE_SERDES;
    11330 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11331 		sc->sc_flags |= WM_F_SGMII;
   11332 		mediatype = WM_MEDIATYPE_COPPER;
    11333 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11334 		sc->sc_flags |= WM_F_SGMII;
   11335 		mediatype = WM_MEDIATYPE_SERDES;
   11336 	}
   11337 
   11338 out:
   11339 	/* Restore I2C interface setting */
   11340 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11341 
   11342 	return mediatype;
   11343 }
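
/*
 * Summary of the mapping above, from the SFP Ethernet compliance-code
 * byte: 1000BASE-SX/LX modules attach as SERDES media, 1000BASE-T
 * modules as copper behind SGMII, and 100BASE-FX as SERDES behind
 * SGMII.  Anything else is left as WM_MEDIATYPE_UNKNOWN.
 */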
   11344 
   11345 /*
   11346  * NVM related.
   11347  * Microwire, SPI (w/wo EERD) and Flash.
   11348  */
   11349 
    11350 /* Both SPI and Microwire */
   11351 
   11352 /*
   11353  * wm_eeprom_sendbits:
   11354  *
   11355  *	Send a series of bits to the EEPROM.
   11356  */
   11357 static void
   11358 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11359 {
   11360 	uint32_t reg;
   11361 	int x;
   11362 
   11363 	reg = CSR_READ(sc, WMREG_EECD);
   11364 
   11365 	for (x = nbits; x > 0; x--) {
   11366 		if (bits & (1U << (x - 1)))
   11367 			reg |= EECD_DI;
   11368 		else
   11369 			reg &= ~EECD_DI;
   11370 		CSR_WRITE(sc, WMREG_EECD, reg);
   11371 		CSR_WRITE_FLUSH(sc);
   11372 		delay(2);
   11373 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11374 		CSR_WRITE_FLUSH(sc);
   11375 		delay(2);
   11376 		CSR_WRITE(sc, WMREG_EECD, reg);
   11377 		CSR_WRITE_FLUSH(sc);
   11378 		delay(2);
   11379 	}
   11380 }
   11381 
   11382 /*
   11383  * wm_eeprom_recvbits:
   11384  *
   11385  *	Receive a series of bits from the EEPROM.
   11386  */
   11387 static void
   11388 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11389 {
   11390 	uint32_t reg, val;
   11391 	int x;
   11392 
   11393 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11394 
   11395 	val = 0;
   11396 	for (x = nbits; x > 0; x--) {
   11397 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11398 		CSR_WRITE_FLUSH(sc);
   11399 		delay(2);
   11400 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11401 			val |= (1U << (x - 1));
   11402 		CSR_WRITE(sc, WMREG_EECD, reg);
   11403 		CSR_WRITE_FLUSH(sc);
   11404 		delay(2);
   11405 	}
   11406 	*valp = val;
   11407 }
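
/*
 * The two helpers above bit-bang the serial EEPROM: each bit is placed
 * on (or sampled from) the data line around a software-generated SK
 * clock pulse.  A whole Microwire READ, for example, is composed from
 * them like this (a sketch of what wm_nvm_read_uwire() below does, with
 * chip select raised around the transaction):
 *
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
 *	wm_eeprom_sendbits(sc, word, sc->sc_nvm_addrbits);
 *	wm_eeprom_recvbits(sc, &val, 16);
 */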
   11408 
   11409 /* Microwire */
   11410 
   11411 /*
   11412  * wm_nvm_read_uwire:
   11413  *
   11414  *	Read a word from the EEPROM using the MicroWire protocol.
   11415  */
   11416 static int
   11417 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11418 {
   11419 	uint32_t reg, val;
   11420 	int i;
   11421 
   11422 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11423 		device_xname(sc->sc_dev), __func__));
   11424 
   11425 	for (i = 0; i < wordcnt; i++) {
   11426 		/* Clear SK and DI. */
   11427 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11428 		CSR_WRITE(sc, WMREG_EECD, reg);
   11429 
   11430 		/*
   11431 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11432 		 * and Xen.
   11433 		 *
   11434 		 * We use this workaround only for 82540 because qemu's
    11435 		 * e1000 acts as an 82540.
   11436 		 */
   11437 		if (sc->sc_type == WM_T_82540) {
   11438 			reg |= EECD_SK;
   11439 			CSR_WRITE(sc, WMREG_EECD, reg);
   11440 			reg &= ~EECD_SK;
   11441 			CSR_WRITE(sc, WMREG_EECD, reg);
   11442 			CSR_WRITE_FLUSH(sc);
   11443 			delay(2);
   11444 		}
   11445 		/* XXX: end of workaround */
   11446 
   11447 		/* Set CHIP SELECT. */
   11448 		reg |= EECD_CS;
   11449 		CSR_WRITE(sc, WMREG_EECD, reg);
   11450 		CSR_WRITE_FLUSH(sc);
   11451 		delay(2);
   11452 
   11453 		/* Shift in the READ command. */
   11454 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11455 
   11456 		/* Shift in address. */
   11457 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11458 
   11459 		/* Shift out the data. */
   11460 		wm_eeprom_recvbits(sc, &val, 16);
   11461 		data[i] = val & 0xffff;
   11462 
   11463 		/* Clear CHIP SELECT. */
   11464 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11465 		CSR_WRITE(sc, WMREG_EECD, reg);
   11466 		CSR_WRITE_FLUSH(sc);
   11467 		delay(2);
   11468 	}
   11469 
   11470 	return 0;
   11471 }
   11472 
   11473 /* SPI */
   11474 
   11475 /*
   11476  * Set SPI and FLASH related information from the EECD register.
   11477  * For 82541 and 82547, the word size is taken from EEPROM.
   11478  */
   11479 static int
   11480 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11481 {
   11482 	int size;
   11483 	uint32_t reg;
   11484 	uint16_t data;
   11485 
   11486 	reg = CSR_READ(sc, WMREG_EECD);
   11487 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11488 
   11489 	/* Read the size of NVM from EECD by default */
   11490 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11491 	switch (sc->sc_type) {
   11492 	case WM_T_82541:
   11493 	case WM_T_82541_2:
   11494 	case WM_T_82547:
   11495 	case WM_T_82547_2:
    11496 		/* Set a dummy word size so the size word can be read */
   11497 		sc->sc_nvm_wordsize = 64;
   11498 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11499 		reg = data;
   11500 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11501 		if (size == 0)
   11502 			size = 6; /* 64 word size */
   11503 		else
   11504 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11505 		break;
   11506 	case WM_T_80003:
   11507 	case WM_T_82571:
   11508 	case WM_T_82572:
   11509 	case WM_T_82573: /* SPI case */
   11510 	case WM_T_82574: /* SPI case */
   11511 	case WM_T_82583: /* SPI case */
   11512 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11513 		if (size > 14)
   11514 			size = 14;
   11515 		break;
   11516 	case WM_T_82575:
   11517 	case WM_T_82576:
   11518 	case WM_T_82580:
   11519 	case WM_T_I350:
   11520 	case WM_T_I354:
   11521 	case WM_T_I210:
   11522 	case WM_T_I211:
   11523 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11524 		if (size > 15)
   11525 			size = 15;
   11526 		break;
   11527 	default:
   11528 		aprint_error_dev(sc->sc_dev,
   11529 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11530 		return -1;
   11531 		break;
   11532 	}
   11533 
   11534 	sc->sc_nvm_wordsize = 1 << size;
   11535 
   11536 	return 0;
   11537 }
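
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
 * Intel shared code): on an 82571 whose EECD_EE_SIZE_EX field reads 2,
 * the exponent becomes 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256
 * words.  On 82541/82547 the field is taken from the EEPROM itself and
 * a value of 0 falls back to 2^6 = 64 words.
 */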
   11538 
   11539 /*
   11540  * wm_nvm_ready_spi:
   11541  *
   11542  *	Wait for a SPI EEPROM to be ready for commands.
   11543  */
   11544 static int
   11545 wm_nvm_ready_spi(struct wm_softc *sc)
   11546 {
   11547 	uint32_t val;
   11548 	int usec;
   11549 
   11550 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11551 		device_xname(sc->sc_dev), __func__));
   11552 
   11553 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11554 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11555 		wm_eeprom_recvbits(sc, &val, 8);
   11556 		if ((val & SPI_SR_RDY) == 0)
   11557 			break;
   11558 	}
   11559 	if (usec >= SPI_MAX_RETRIES) {
   11560 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11561 		return 1;
   11562 	}
   11563 	return 0;
   11564 }
   11565 
   11566 /*
   11567  * wm_nvm_read_spi:
   11568  *
    11569  *	Read a word from the EEPROM using the SPI protocol.
   11570  */
   11571 static int
   11572 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11573 {
   11574 	uint32_t reg, val;
   11575 	int i;
   11576 	uint8_t opc;
   11577 
   11578 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11579 		device_xname(sc->sc_dev), __func__));
   11580 
   11581 	/* Clear SK and CS. */
   11582 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11583 	CSR_WRITE(sc, WMREG_EECD, reg);
   11584 	CSR_WRITE_FLUSH(sc);
   11585 	delay(2);
   11586 
   11587 	if (wm_nvm_ready_spi(sc))
   11588 		return 1;
   11589 
   11590 	/* Toggle CS to flush commands. */
   11591 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11592 	CSR_WRITE_FLUSH(sc);
   11593 	delay(2);
   11594 	CSR_WRITE(sc, WMREG_EECD, reg);
   11595 	CSR_WRITE_FLUSH(sc);
   11596 	delay(2);
   11597 
   11598 	opc = SPI_OPC_READ;
   11599 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11600 		opc |= SPI_OPC_A8;
   11601 
   11602 	wm_eeprom_sendbits(sc, opc, 8);
   11603 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11604 
   11605 	for (i = 0; i < wordcnt; i++) {
   11606 		wm_eeprom_recvbits(sc, &val, 16);
   11607 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11608 	}
   11609 
   11610 	/* Raise CS and clear SK. */
   11611 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11612 	CSR_WRITE(sc, WMREG_EECD, reg);
   11613 	CSR_WRITE_FLUSH(sc);
   11614 	delay(2);
   11615 
   11616 	return 0;
   11617 }
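
/*
 * A note on the addressing above: SPI parts are byte addressed, so the
 * word index is shifted left by one before being sent.  A part with
 * only 8 address bits can still hold 256 words; for words 128-255 the
 * ninth address bit travels in the opcode as SPI_OPC_A8.
 */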
   11618 
    11619 /* Reading via the EERD register */
   11620 
   11621 static int
   11622 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11623 {
   11624 	uint32_t attempts = 100000;
   11625 	uint32_t i, reg = 0;
   11626 	int32_t done = -1;
   11627 
   11628 	for (i = 0; i < attempts; i++) {
   11629 		reg = CSR_READ(sc, rw);
   11630 
   11631 		if (reg & EERD_DONE) {
   11632 			done = 0;
   11633 			break;
   11634 		}
   11635 		delay(5);
   11636 	}
   11637 
   11638 	return done;
   11639 }
   11640 
   11641 static int
   11642 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11643     uint16_t *data)
   11644 {
   11645 	int i, eerd = 0;
   11646 	int error = 0;
   11647 
   11648 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11649 		device_xname(sc->sc_dev), __func__));
   11650 
   11651 	for (i = 0; i < wordcnt; i++) {
   11652 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11653 
   11654 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11655 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11656 		if (error != 0)
   11657 			break;
   11658 
   11659 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11660 	}
   11661 
   11662 	return error;
   11663 }
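
/*
 * Worked example for the EERD path: to read word 3, the driver writes
 * (3 << EERD_ADDR_SHIFT) | EERD_START, polls until EERD_DONE is set,
 * and takes CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT as the 16-bit
 * result.  No bit-banging is involved; the hardware runs the cycle.
 */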
   11664 
   11665 /* Flash */
   11666 
   11667 static int
   11668 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11669 {
   11670 	uint32_t eecd;
   11671 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11672 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11673 	uint8_t sig_byte = 0;
   11674 
   11675 	switch (sc->sc_type) {
   11676 	case WM_T_PCH_SPT:
   11677 		/*
   11678 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11679 		 * sector valid bits from the NVM.
   11680 		 */
   11681 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11682 		if ((*bank == 0) || (*bank == 1)) {
   11683 			aprint_error_dev(sc->sc_dev,
   11684 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11685 				*bank);
   11686 			return -1;
   11687 		} else {
   11688 			*bank = *bank - 2;
   11689 			return 0;
   11690 		}
   11691 	case WM_T_ICH8:
   11692 	case WM_T_ICH9:
   11693 		eecd = CSR_READ(sc, WMREG_EECD);
   11694 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11695 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11696 			return 0;
   11697 		}
   11698 		/* FALLTHROUGH */
   11699 	default:
   11700 		/* Default to 0 */
   11701 		*bank = 0;
   11702 
   11703 		/* Check bank 0 */
   11704 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11705 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11706 			*bank = 0;
   11707 			return 0;
   11708 		}
   11709 
   11710 		/* Check bank 1 */
   11711 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11712 		    &sig_byte);
   11713 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11714 			*bank = 1;
   11715 			return 0;
   11716 		}
   11717 	}
   11718 
   11719 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11720 		device_xname(sc->sc_dev)));
   11721 	return -1;
   11722 }
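
/*
 * The signature probe above works in byte offsets: each bank is
 * sc_ich8_flash_bank_size words long, so bank 1 starts bank_size *
 * sizeof(uint16_t) bytes after bank 0, and the signature byte lives in
 * the high byte of word ICH_NVM_SIG_WORD (hence the "* 2 + 1").
 */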
   11723 
   11724 /******************************************************************************
   11725  * This function does initial flash setup so that a new read/write/erase cycle
   11726  * can be started.
   11727  *
   11728  * sc - The pointer to the hw structure
   11729  ****************************************************************************/
   11730 static int32_t
   11731 wm_ich8_cycle_init(struct wm_softc *sc)
   11732 {
   11733 	uint16_t hsfsts;
   11734 	int32_t error = 1;
   11735 	int32_t i     = 0;
   11736 
   11737 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11738 
    11739 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
   11740 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11741 		return error;
   11742 	}
   11743 
    11744 	/* Clear FCERR and DAEL in Hw status by writing a 1 to each */
   11746 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11747 
   11748 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11749 
    11750 	/*
    11751 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11752 	 * check against in order to start a new cycle, or the FDONE bit
    11753 	 * should be changed in the hardware so that it reads 1 after a
    11754 	 * hardware reset; it could then indicate whether a cycle is in
    11755 	 * progress or has completed.  We should also have some software
    11756 	 * semaphore mechanism guarding FDONE or the cycle-in-progress
    11757 	 * bit, so that two threads' accesses are serialized and two
    11758 	 * threads don't start a cycle at the same time.
    11759 	 */
   11760 
   11761 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11762 		/*
   11763 		 * There is no cycle running at present, so we can start a
   11764 		 * cycle
   11765 		 */
   11766 
   11767 		/* Begin by setting Flash Cycle Done. */
   11768 		hsfsts |= HSFSTS_DONE;
   11769 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11770 		error = 0;
   11771 	} else {
   11772 		/*
    11773 		 * Otherwise poll for some time so the current cycle has a
   11774 		 * chance to end before giving up.
   11775 		 */
   11776 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11777 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11778 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11779 				error = 0;
   11780 				break;
   11781 			}
   11782 			delay(1);
   11783 		}
   11784 		if (error == 0) {
   11785 			/*
    11786 			 * The previous cycle completed within the timeout,
    11787 			 * so now set the Flash Cycle Done bit.
   11788 			 */
   11789 			hsfsts |= HSFSTS_DONE;
   11790 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11791 		}
   11792 	}
   11793 	return error;
   11794 }
   11795 
   11796 /******************************************************************************
   11797  * This function starts a flash cycle and waits for its completion
   11798  *
   11799  * sc - The pointer to the hw structure
   11800  ****************************************************************************/
   11801 static int32_t
   11802 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11803 {
   11804 	uint16_t hsflctl;
   11805 	uint16_t hsfsts;
   11806 	int32_t error = 1;
   11807 	uint32_t i = 0;
   11808 
   11809 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11810 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11811 	hsflctl |= HSFCTL_GO;
   11812 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11813 
   11814 	/* Wait till FDONE bit is set to 1 */
   11815 	do {
   11816 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11817 		if (hsfsts & HSFSTS_DONE)
   11818 			break;
   11819 		delay(1);
   11820 		i++;
   11821 	} while (i < timeout);
    11822 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11823 		error = 0;
   11824 
   11825 	return error;
   11826 }
   11827 
   11828 /******************************************************************************
   11829  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11830  *
   11831  * sc - The pointer to the hw structure
   11832  * index - The index of the byte or word to read.
   11833  * size - Size of data to read, 1=byte 2=word, 4=dword
   11834  * data - Pointer to the word to store the value read.
   11835  *****************************************************************************/
   11836 static int32_t
   11837 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11838     uint32_t size, uint32_t *data)
   11839 {
   11840 	uint16_t hsfsts;
   11841 	uint16_t hsflctl;
   11842 	uint32_t flash_linear_address;
   11843 	uint32_t flash_data = 0;
   11844 	int32_t error = 1;
   11845 	int32_t count = 0;
   11846 
    11847 	if (size < 1 || size > 4 || data == NULL ||
   11848 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11849 		return error;
   11850 
   11851 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11852 	    sc->sc_ich8_flash_base;
   11853 
   11854 	do {
   11855 		delay(1);
   11856 		/* Steps */
   11857 		error = wm_ich8_cycle_init(sc);
   11858 		if (error)
   11859 			break;
   11860 
   11861 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11862 		/* The byte count field is zero-based: 0b = 1 byte, 1b = 2 bytes. */
   11863 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11864 		    & HSFCTL_BCOUNT_MASK;
   11865 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11866 		if (sc->sc_type == WM_T_PCH_SPT) {
   11867 			/*
    11868 			 * In SPT, this register is in the LAN memory space,
    11869 			 * not flash, so only 32-bit access is supported.
   11870 			 */
   11871 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11872 			    (uint32_t)hsflctl);
   11873 		} else
   11874 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11875 
   11876 		/*
   11877 		 * Write the last 24 bits of index into Flash Linear address
   11878 		 * field in Flash Address
   11879 		 */
   11880 		/* TODO: TBD maybe check the index against the size of flash */
   11881 
   11882 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11883 
   11884 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11885 
   11886 		/*
    11887 		 * If FCERR is set, clear it and retry the whole
    11888 		 * sequence a few more times; otherwise read out (shift
    11889 		 * in) the Flash Data0 register, least significant byte
    11890 		 * first.
   11891 		 */
   11892 		if (error == 0) {
   11893 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11894 			if (size == 1)
   11895 				*data = (uint8_t)(flash_data & 0x000000FF);
   11896 			else if (size == 2)
   11897 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11898 			else if (size == 4)
   11899 				*data = (uint32_t)flash_data;
   11900 			break;
   11901 		} else {
   11902 			/*
   11903 			 * If we've gotten here, then things are probably
   11904 			 * completely hosed, but if the error condition is
   11905 			 * detected, it won't hurt to give it another try...
   11906 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11907 			 */
   11908 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11909 			if (hsfsts & HSFSTS_ERR) {
   11910 				/* Repeat for some time before giving up. */
   11911 				continue;
   11912 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11913 				break;
   11914 		}
   11915 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11916 
   11917 	return error;
   11918 }
   11919 
   11920 /******************************************************************************
   11921  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11922  *
   11923  * sc - pointer to wm_hw structure
   11924  * index - The index of the byte to read.
   11925  * data - Pointer to a byte to store the value read.
   11926  *****************************************************************************/
   11927 static int32_t
   11928 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11929 {
   11930 	int32_t status;
   11931 	uint32_t word = 0;
   11932 
   11933 	status = wm_read_ich8_data(sc, index, 1, &word);
   11934 	if (status == 0)
   11935 		*data = (uint8_t)word;
   11936 	else
   11937 		*data = 0;
   11938 
   11939 	return status;
   11940 }
   11941 
   11942 /******************************************************************************
   11943  * Reads a word from the NVM using the ICH8 flash access registers.
   11944  *
   11945  * sc - pointer to wm_hw structure
   11946  * index - The starting byte index of the word to read.
   11947  * data - Pointer to a word to store the value read.
   11948  *****************************************************************************/
   11949 static int32_t
   11950 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11951 {
   11952 	int32_t status;
   11953 	uint32_t word = 0;
   11954 
   11955 	status = wm_read_ich8_data(sc, index, 2, &word);
   11956 	if (status == 0)
   11957 		*data = (uint16_t)word;
   11958 	else
   11959 		*data = 0;
   11960 
   11961 	return status;
   11962 }
   11963 
   11964 /******************************************************************************
   11965  * Reads a dword from the NVM using the ICH8 flash access registers.
   11966  *
   11967  * sc - pointer to wm_hw structure
   11968  * index - The starting byte index of the word to read.
   11969  * data - Pointer to a word to store the value read.
   11970  *****************************************************************************/
   11971 static int32_t
   11972 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11973 {
   11974 	int32_t status;
   11975 
   11976 	status = wm_read_ich8_data(sc, index, 4, data);
   11977 	return status;
   11978 }
   11979 
   11980 /******************************************************************************
   11981  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11982  * register.
   11983  *
   11984  * sc - Struct containing variables accessed by shared code
   11985  * offset - offset of word in the EEPROM to read
   11986  * data - word read from the EEPROM
   11987  * words - number of words to read
   11988  *****************************************************************************/
   11989 static int
   11990 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11991 {
   11992 	int32_t  error = 0;
   11993 	uint32_t flash_bank = 0;
   11994 	uint32_t act_offset = 0;
   11995 	uint32_t bank_offset = 0;
   11996 	uint16_t word = 0;
   11997 	uint16_t i = 0;
   11998 
   11999 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12000 		device_xname(sc->sc_dev), __func__));
   12001 
   12002 	/*
   12003 	 * We need to know which is the valid flash bank.  In the event
   12004 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12005 	 * managing flash_bank.  So it cannot be trusted and needs
   12006 	 * to be updated with each read.
   12007 	 */
   12008 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12009 	if (error) {
   12010 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12011 			device_xname(sc->sc_dev)));
   12012 		flash_bank = 0;
   12013 	}
   12014 
   12015 	/*
    12016 	 * If we're on bank 1, adjust the offset: the bank size is in
    12017 	 * words, hence the * 2 to convert to a byte offset
   12018 	 */
   12019 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12020 
   12021 	for (i = 0; i < words; i++) {
   12022 		/* The NVM part needs a byte offset, hence * 2 */
   12023 		act_offset = bank_offset + ((offset + i) * 2);
   12024 		error = wm_read_ich8_word(sc, act_offset, &word);
   12025 		if (error) {
   12026 			aprint_error_dev(sc->sc_dev,
   12027 			    "%s: failed to read NVM\n", __func__);
   12028 			break;
   12029 		}
   12030 		data[i] = word;
   12031 	}
   12032 
   12033 	return error;
   12034 }
   12035 
   12036 /******************************************************************************
   12037  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12038  * register.
   12039  *
   12040  * sc - Struct containing variables accessed by shared code
   12041  * offset - offset of word in the EEPROM to read
   12042  * data - word read from the EEPROM
   12043  * words - number of words to read
   12044  *****************************************************************************/
   12045 static int
   12046 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12047 {
   12048 	int32_t  error = 0;
   12049 	uint32_t flash_bank = 0;
   12050 	uint32_t act_offset = 0;
   12051 	uint32_t bank_offset = 0;
   12052 	uint32_t dword = 0;
   12053 	uint16_t i = 0;
   12054 
   12055 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12056 		device_xname(sc->sc_dev), __func__));
   12057 
   12058 	/*
   12059 	 * We need to know which is the valid flash bank.  In the event
   12060 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12061 	 * managing flash_bank.  So it cannot be trusted and needs
   12062 	 * to be updated with each read.
   12063 	 */
   12064 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12065 	if (error) {
   12066 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12067 			device_xname(sc->sc_dev)));
   12068 		flash_bank = 0;
   12069 	}
   12070 
   12071 	/*
    12072 	 * If we're on bank 1, adjust the offset: the bank size is in
    12073 	 * words, hence the * 2 to convert to a byte offset
   12074 	 */
   12075 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12076 
   12077 	for (i = 0; i < words; i++) {
   12078 		/* The NVM part needs a byte offset, hence * 2 */
   12079 		act_offset = bank_offset + ((offset + i) * 2);
   12080 		/* but we must read dword aligned, so mask ... */
   12081 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12082 		if (error) {
   12083 			aprint_error_dev(sc->sc_dev,
   12084 			    "%s: failed to read NVM\n", __func__);
   12085 			break;
   12086 		}
   12087 		/* ... and pick out low or high word */
   12088 		if ((act_offset & 0x2) == 0)
   12089 			data[i] = (uint16_t)(dword & 0xFFFF);
   12090 		else
   12091 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12092 	}
   12093 
   12094 	return error;
   12095 }
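
/*
 * Worked example for the dword-aligned access above: fetching the word
 * at byte offset 0x106 reads the dword at 0x104 and returns its high
 * half (act_offset & 0x2 is set); offset 0x104 would return the low
 * half.
 */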
   12096 
   12097 /* iNVM */
   12098 
   12099 static int
   12100 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12101 {
    12102 	int32_t  rv = -1;	/* -1 means "word not found" */
   12103 	uint32_t invm_dword;
   12104 	uint16_t i;
   12105 	uint8_t record_type, word_address;
   12106 
   12107 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12108 		device_xname(sc->sc_dev), __func__));
   12109 
   12110 	for (i = 0; i < INVM_SIZE; i++) {
   12111 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12112 		/* Get record type */
   12113 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12114 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12115 			break;
   12116 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12117 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12118 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12119 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12120 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12121 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12122 			if (word_address == address) {
   12123 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12124 				rv = 0;
   12125 				break;
   12126 			}
   12127 		}
   12128 	}
   12129 
   12130 	return rv;
   12131 }
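
/*
 * The iNVM is scanned as a sequence of 32-bit records, each carrying
 * its own type.  Word-autoload records embed a word address plus the
 * 16-bit data; CSR-autoload and RSA-key records are skipped by
 * advancing the index past their fixed payload sizes, as above.
 */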
   12132 
   12133 static int
   12134 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12135 {
   12136 	int rv = 0;
   12137 	int i;
   12138 
   12139 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12140 		device_xname(sc->sc_dev), __func__));
   12141 
   12142 	for (i = 0; i < words; i++) {
   12143 		switch (offset + i) {
   12144 		case NVM_OFF_MACADDR:
   12145 		case NVM_OFF_MACADDR1:
   12146 		case NVM_OFF_MACADDR2:
   12147 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12148 			if (rv != 0) {
   12149 				data[i] = 0xffff;
   12150 				rv = -1;
   12151 			}
   12152 			break;
   12153 		case NVM_OFF_CFG2:
   12154 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12155 			if (rv != 0) {
   12156 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12157 				rv = 0;
   12158 			}
   12159 			break;
   12160 		case NVM_OFF_CFG4:
   12161 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12162 			if (rv != 0) {
   12163 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12164 				rv = 0;
   12165 			}
   12166 			break;
   12167 		case NVM_OFF_LED_1_CFG:
   12168 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12169 			if (rv != 0) {
   12170 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12171 				rv = 0;
   12172 			}
   12173 			break;
   12174 		case NVM_OFF_LED_0_2_CFG:
   12175 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12176 			if (rv != 0) {
   12177 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12178 				rv = 0;
   12179 			}
   12180 			break;
   12181 		case NVM_OFF_ID_LED_SETTINGS:
   12182 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12183 			if (rv != 0) {
   12184 				*data = ID_LED_RESERVED_FFFF;
   12185 				rv = 0;
   12186 			}
   12187 			break;
   12188 		default:
   12189 			DPRINTF(WM_DEBUG_NVM,
    12190 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    12191 			data[i] = NVM_RESERVED_WORD;
   12192 			break;
   12193 		}
   12194 	}
   12195 
   12196 	return rv;
   12197 }
   12198 
    12199 /* Locking, NVM type detection, checksum validation, version and read */
   12200 
   12201 /*
   12202  * wm_nvm_acquire:
   12203  *
   12204  *	Perform the EEPROM handshake required on some chips.
   12205  */
   12206 static int
   12207 wm_nvm_acquire(struct wm_softc *sc)
   12208 {
   12209 	uint32_t reg;
   12210 	int x;
   12211 	int ret = 0;
   12212 
   12213 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12214 		device_xname(sc->sc_dev), __func__));
   12215 
   12216 	if (sc->sc_type >= WM_T_ICH8) {
   12217 		ret = wm_get_nvm_ich8lan(sc);
   12218 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12219 		ret = wm_get_swfwhw_semaphore(sc);
   12220 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12221 		/* This will also do wm_get_swsm_semaphore() if needed */
   12222 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12223 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12224 		ret = wm_get_swsm_semaphore(sc);
   12225 	}
   12226 
   12227 	if (ret) {
   12228 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12229 			__func__);
   12230 		return 1;
   12231 	}
   12232 
   12233 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12234 		reg = CSR_READ(sc, WMREG_EECD);
   12235 
   12236 		/* Request EEPROM access. */
   12237 		reg |= EECD_EE_REQ;
   12238 		CSR_WRITE(sc, WMREG_EECD, reg);
   12239 
   12240 		/* ..and wait for it to be granted. */
   12241 		for (x = 0; x < 1000; x++) {
   12242 			reg = CSR_READ(sc, WMREG_EECD);
   12243 			if (reg & EECD_EE_GNT)
   12244 				break;
   12245 			delay(5);
   12246 		}
   12247 		if ((reg & EECD_EE_GNT) == 0) {
   12248 			aprint_error_dev(sc->sc_dev,
   12249 			    "could not acquire EEPROM GNT\n");
   12250 			reg &= ~EECD_EE_REQ;
   12251 			CSR_WRITE(sc, WMREG_EECD, reg);
   12252 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12253 				wm_put_swfwhw_semaphore(sc);
   12254 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12255 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12256 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12257 				wm_put_swsm_semaphore(sc);
   12258 			return 1;
   12259 		}
   12260 	}
   12261 
   12262 	return 0;
   12263 }
   12264 
   12265 /*
   12266  * wm_nvm_release:
   12267  *
   12268  *	Release the EEPROM mutex.
   12269  */
   12270 static void
   12271 wm_nvm_release(struct wm_softc *sc)
   12272 {
   12273 	uint32_t reg;
   12274 
   12275 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12276 		device_xname(sc->sc_dev), __func__));
   12277 
   12278 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12279 		reg = CSR_READ(sc, WMREG_EECD);
   12280 		reg &= ~EECD_EE_REQ;
   12281 		CSR_WRITE(sc, WMREG_EECD, reg);
   12282 	}
   12283 
   12284 	if (sc->sc_type >= WM_T_ICH8) {
   12285 		wm_put_nvm_ich8lan(sc);
   12286 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12287 		wm_put_swfwhw_semaphore(sc);
   12288 	else if (sc->sc_flags & WM_F_LOCK_SWFW)
   12289 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12290 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12291 		wm_put_swsm_semaphore(sc);
   12292 }
   12293 
   12294 static int
   12295 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12296 {
   12297 	uint32_t eecd = 0;
   12298 
   12299 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12300 	    || sc->sc_type == WM_T_82583) {
   12301 		eecd = CSR_READ(sc, WMREG_EECD);
   12302 
   12303 		/* Isolate bits 15 & 16 */
   12304 		eecd = ((eecd >> 15) & 0x03);
   12305 
   12306 		/* If both bits are set, device is Flash type */
   12307 		if (eecd == 0x03)
   12308 			return 0;
   12309 	}
   12310 	return 1;
   12311 }
   12312 
   12313 static int
   12314 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12315 {
   12316 	uint32_t eec;
   12317 
   12318 	eec = CSR_READ(sc, WMREG_EEC);
   12319 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12320 		return 1;
   12321 
   12322 	return 0;
   12323 }
   12324 
   12325 /*
   12326  * wm_nvm_validate_checksum
   12327  *
   12328  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12329  */
   12330 static int
   12331 wm_nvm_validate_checksum(struct wm_softc *sc)
   12332 {
   12333 	uint16_t checksum;
   12334 	uint16_t eeprom_data;
   12335 #ifdef WM_DEBUG
   12336 	uint16_t csum_wordaddr, valid_checksum;
   12337 #endif
   12338 	int i;
   12339 
   12340 	checksum = 0;
   12341 
   12342 	/* Don't check for I211 */
   12343 	if (sc->sc_type == WM_T_I211)
   12344 		return 0;
   12345 
   12346 #ifdef WM_DEBUG
   12347 	if (sc->sc_type == WM_T_PCH_LPT) {
   12348 		csum_wordaddr = NVM_OFF_COMPAT;
   12349 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12350 	} else {
   12351 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12352 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12353 	}
   12354 
   12355 	/* Dump EEPROM image for debug */
   12356 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12357 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12358 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12359 		/* XXX PCH_SPT? */
   12360 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12361 		if ((eeprom_data & valid_checksum) == 0) {
   12362 			DPRINTF(WM_DEBUG_NVM,
   12363 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12364 				device_xname(sc->sc_dev), eeprom_data,
   12365 				    valid_checksum));
   12366 		}
   12367 	}
   12368 
   12369 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12370 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12371 		for (i = 0; i < NVM_SIZE; i++) {
   12372 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12373 				printf("XXXX ");
   12374 			else
   12375 				printf("%04hx ", eeprom_data);
   12376 			if (i % 8 == 7)
   12377 				printf("\n");
   12378 		}
   12379 	}
   12380 
   12381 #endif /* WM_DEBUG */
   12382 
   12383 	for (i = 0; i < NVM_SIZE; i++) {
   12384 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12385 			return 1;
   12386 		checksum += eeprom_data;
   12387 	}
   12388 
   12389 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12390 #ifdef WM_DEBUG
   12391 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12392 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12393 #endif
   12394 	}
   12395 
   12396 	return 0;
   12397 }
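
/*
 * A sketch of the invariant checked above: for a valid image, the
 * 16-bit sum of words 0 .. NVM_SIZE - 1 must equal NVM_CHECKSUM
 * (0xbaba in the Intel shared code); the checksum word is chosen so
 * that
 *
 *	(uint16_t)(word[0] + word[1] + ... + word[NVM_SIZE - 1])
 *	    == NVM_CHECKSUM
 *
 * Note that, as written, a mismatch is only reported under WM_DEBUG;
 * the function still returns 0.
 */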
   12398 
   12399 static void
   12400 wm_nvm_version_invm(struct wm_softc *sc)
   12401 {
   12402 	uint32_t dword;
   12403 
   12404 	/*
    12405 	 * Linux's code to decode the version is very strange, so we don't
    12406 	 * follow that algorithm and just use word 61 as the document
    12407 	 * describes.  Perhaps it's not perfect though...
   12408 	 *
   12409 	 * Example:
   12410 	 *
   12411 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12412 	 */
   12413 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12414 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12415 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12416 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12417 }
   12418 
   12419 static void
   12420 wm_nvm_version(struct wm_softc *sc)
   12421 {
   12422 	uint16_t major, minor, build, patch;
   12423 	uint16_t uid0, uid1;
   12424 	uint16_t nvm_data;
   12425 	uint16_t off;
   12426 	bool check_version = false;
   12427 	bool check_optionrom = false;
   12428 	bool have_build = false;
   12429 	bool have_uid = true;
   12430 
   12431 	/*
   12432 	 * Version format:
   12433 	 *
   12434 	 * XYYZ
   12435 	 * X0YZ
   12436 	 * X0YY
   12437 	 *
   12438 	 * Example:
   12439 	 *
   12440 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12441 	 *	82571	0x50a6	5.10.6?
   12442 	 *	82572	0x506a	5.6.10?
   12443 	 *	82572EI	0x5069	5.6.9?
   12444 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12445 	 *		0x2013	2.1.3?
    12446 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12447 	 */
   12448 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12449 	switch (sc->sc_type) {
   12450 	case WM_T_82571:
   12451 	case WM_T_82572:
   12452 	case WM_T_82574:
   12453 	case WM_T_82583:
   12454 		check_version = true;
   12455 		check_optionrom = true;
   12456 		have_build = true;
   12457 		break;
   12458 	case WM_T_82575:
   12459 	case WM_T_82576:
   12460 	case WM_T_82580:
   12461 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12462 			check_version = true;
   12463 		break;
   12464 	case WM_T_I211:
   12465 		wm_nvm_version_invm(sc);
   12466 		have_uid = false;
   12467 		goto printver;
   12468 	case WM_T_I210:
   12469 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12470 			wm_nvm_version_invm(sc);
   12471 			have_uid = false;
   12472 			goto printver;
   12473 		}
   12474 		/* FALLTHROUGH */
   12475 	case WM_T_I350:
   12476 	case WM_T_I354:
   12477 		check_version = true;
   12478 		check_optionrom = true;
   12479 		break;
   12480 	default:
   12481 		return;
   12482 	}
   12483 	if (check_version) {
   12484 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12485 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12486 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12487 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12488 			build = nvm_data & NVM_BUILD_MASK;
   12489 			have_build = true;
   12490 		} else
   12491 			minor = nvm_data & 0x00ff;
   12492 
    12493 		/* Convert the BCD minor (e.g. 0x10) to decimal (10) */
   12494 		minor = (minor / 16) * 10 + (minor % 16);
   12495 		sc->sc_nvm_ver_major = major;
   12496 		sc->sc_nvm_ver_minor = minor;
   12497 
   12498 printver:
   12499 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12500 		    sc->sc_nvm_ver_minor);
   12501 		if (have_build) {
   12502 			sc->sc_nvm_ver_build = build;
   12503 			aprint_verbose(".%d", build);
   12504 		}
   12505 	}
   12506 	if (check_optionrom) {
   12507 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12508 		/* Option ROM Version */
   12509 		if ((off != 0x0000) && (off != 0xffff)) {
   12510 			off += NVM_COMBO_VER_OFF;
   12511 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12512 			wm_nvm_read(sc, off, 1, &uid0);
   12513 			if ((uid0 != 0) && (uid0 != 0xffff)
   12514 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12515 				/* 16bits */
   12516 				major = uid0 >> 8;
   12517 				build = (uid0 << 8) | (uid1 >> 8);
   12518 				patch = uid1 & 0x00ff;
   12519 				aprint_verbose(", option ROM Version %d.%d.%d",
   12520 				    major, build, patch);
   12521 			}
   12522 		}
   12523 	}
   12524 
    12525 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) == 0)
    12526 	    && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0)) {
   12527 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12528 	}
   12529 }
   12530 
   12531 /*
   12532  * wm_nvm_read:
   12533  *
   12534  *	Read data from the serial EEPROM.
   12535  */
   12536 static int
   12537 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12538 {
   12539 	int rv;
   12540 
   12541 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12542 		device_xname(sc->sc_dev), __func__));
   12543 
   12544 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12545 		return 1;
   12546 
   12547 	if (wm_nvm_acquire(sc))
   12548 		return 1;
   12549 
   12550 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12551 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12552 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12553 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12554 	else if (sc->sc_type == WM_T_PCH_SPT)
   12555 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12556 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12557 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12558 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12559 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12560 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12561 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12562 	else
   12563 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12564 
   12565 	wm_nvm_release(sc);
   12566 	return rv;
   12567 }
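
/*
 * wm_nvm_read() is the entry point the rest of the driver uses; it
 * takes the appropriate lock and dispatches on the NVM type.  A
 * typical call (sketch):
 *
 *	uint16_t cfg2;
 *
 *	if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2) != 0)
 *		aprint_error_dev(sc->sc_dev, "NVM read failed\n");
 *
 * A nonzero return covers both lock failures and read errors.
 */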
   12568 
   12569 /*
   12570  * Hardware semaphores.
    12571  * Very complex...
   12572  */
   12573 
   12574 static int
   12575 wm_get_null(struct wm_softc *sc)
   12576 {
   12577 
   12578 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12579 		device_xname(sc->sc_dev), __func__));
   12580 	return 0;
   12581 }
   12582 
   12583 static void
   12584 wm_put_null(struct wm_softc *sc)
   12585 {
   12586 
   12587 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12588 		device_xname(sc->sc_dev), __func__));
   12589 	return;
   12590 }
   12591 
   12592 /*
   12593  * Get hardware semaphore.
   12594  * Same as e1000_get_hw_semaphore_generic()
   12595  */
   12596 static int
   12597 wm_get_swsm_semaphore(struct wm_softc *sc)
   12598 {
   12599 	int32_t timeout;
   12600 	uint32_t swsm;
   12601 
   12602 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12603 		device_xname(sc->sc_dev), __func__));
   12604 	KASSERT(sc->sc_nvm_wordsize > 0);
   12605 
   12606 	/* Get the SW semaphore. */
   12607 	timeout = sc->sc_nvm_wordsize + 1;
   12608 	while (timeout) {
   12609 		swsm = CSR_READ(sc, WMREG_SWSM);
   12610 
   12611 		if ((swsm & SWSM_SMBI) == 0)
   12612 			break;
   12613 
   12614 		delay(50);
   12615 		timeout--;
   12616 	}
   12617 
   12618 	if (timeout == 0) {
   12619 		aprint_error_dev(sc->sc_dev,
   12620 		    "could not acquire SWSM SMBI\n");
   12621 		return 1;
   12622 	}
   12623 
   12624 	/* Get the FW semaphore. */
   12625 	timeout = sc->sc_nvm_wordsize + 1;
   12626 	while (timeout) {
   12627 		swsm = CSR_READ(sc, WMREG_SWSM);
   12628 		swsm |= SWSM_SWESMBI;
   12629 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12630 		/* If we managed to set the bit we got the semaphore. */
   12631 		swsm = CSR_READ(sc, WMREG_SWSM);
   12632 		if (swsm & SWSM_SWESMBI)
   12633 			break;
   12634 
   12635 		delay(50);
   12636 		timeout--;
   12637 	}
   12638 
   12639 	if (timeout == 0) {
   12640 		aprint_error_dev(sc->sc_dev,
   12641 		    "could not acquire SWSM SWESMBI\n");
   12642 		/* Release semaphores */
   12643 		wm_put_swsm_semaphore(sc);
   12644 		return 1;
   12645 	}
   12646 	return 0;
   12647 }
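
/*
 * The handshake above is two-staged: SWSM_SMBI arbitrates among
 * software agents (the hardware sets the bit as a side effect of the
 * read, so it reads back clear for only one winner), and SWSM_SWESMBI
 * then arbitrates with firmware; the write only sticks when firmware
 * is not holding it, which is why the code writes it and reads it
 * back.
 */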
   12648 
   12649 /*
   12650  * Put hardware semaphore.
   12651  * Same as e1000_put_hw_semaphore_generic()
   12652  */
   12653 static void
   12654 wm_put_swsm_semaphore(struct wm_softc *sc)
   12655 {
   12656 	uint32_t swsm;
   12657 
   12658 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12659 		device_xname(sc->sc_dev), __func__));
   12660 
   12661 	swsm = CSR_READ(sc, WMREG_SWSM);
   12662 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12663 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12664 }
   12665 
   12666 /*
   12667  * Get SW/FW semaphore.
   12668  * Same as e1000_acquire_swfw_sync_82575().
   12669  */
   12670 static int
   12671 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12672 {
   12673 	uint32_t swfw_sync;
   12674 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12675 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12676 	int timeout = 200;
   12677 
   12678 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12679 		device_xname(sc->sc_dev), __func__));
   12680 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12681 
   12682 	for (timeout = 0; timeout < 200; timeout++) {
   12683 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12684 			if (wm_get_swsm_semaphore(sc)) {
   12685 				aprint_error_dev(sc->sc_dev,
   12686 				    "%s: failed to get semaphore\n",
   12687 				    __func__);
   12688 				return 1;
   12689 			}
   12690 		}
   12691 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12692 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12693 			swfw_sync |= swmask;
   12694 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12695 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12696 				wm_put_swsm_semaphore(sc);
   12697 			return 0;
   12698 		}
   12699 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12700 			wm_put_swsm_semaphore(sc);
   12701 		delay(5000);
   12702 	}
   12703 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12704 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12705 	return 1;
   12706 }
   12707 
   12708 static void
   12709 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12710 {
   12711 	uint32_t swfw_sync;
   12712 
   12713 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12714 		device_xname(sc->sc_dev), __func__));
   12715 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12716 
   12717 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12718 		while (wm_get_swsm_semaphore(sc) != 0)
   12719 			continue;
   12720 	}
   12721 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12722 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12723 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12724 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12725 		wm_put_swsm_semaphore(sc);
   12726 }
   12727 
   12728 static int
   12729 wm_get_phy_82575(struct wm_softc *sc)
   12730 {
   12731 
   12732 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12733 		device_xname(sc->sc_dev), __func__));
   12734 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12735 }
   12736 
   12737 static void
   12738 wm_put_phy_82575(struct wm_softc *sc)
   12739 {
   12740 
   12741 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12742 		device_xname(sc->sc_dev), __func__));
   12743 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12744 }
   12745 
   12746 static int
   12747 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12748 {
   12749 	uint32_t ext_ctrl;
   12750 	int timeout = 200;
   12751 
   12752 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12753 		device_xname(sc->sc_dev), __func__));
   12754 
   12755 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12756 	for (timeout = 0; timeout < 200; timeout++) {
   12757 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12758 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12759 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12760 
   12761 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12762 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12763 			return 0;
   12764 		delay(5000);
   12765 	}
   12766 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12767 	    device_xname(sc->sc_dev), ext_ctrl);
   12768 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12769 	return 1;
   12770 }
   12771 
   12772 static void
   12773 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12774 {
   12775 	uint32_t ext_ctrl;
   12776 
   12777 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12778 		device_xname(sc->sc_dev), __func__));
   12779 
   12780 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12781 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12782 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12783 
   12784 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12785 }
   12786 
   12787 static int
   12788 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12789 {
   12790 	uint32_t ext_ctrl;
   12791 	int timeout;
   12792 
   12793 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12794 		device_xname(sc->sc_dev), __func__));
   12795 	mutex_enter(sc->sc_ich_phymtx);
   12796 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12797 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12798 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12799 			break;
   12800 		delay(1000);
   12801 	}
   12802 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12803 		printf("%s: SW has already locked the resource\n",
   12804 		    device_xname(sc->sc_dev));
   12805 		goto out;
   12806 	}
   12807 
   12808 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12809 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12810 	for (timeout = 0; timeout < 1000; timeout++) {
   12811 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12812 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12813 			break;
   12814 		delay(1000);
   12815 	}
   12816 	if (timeout >= 1000) {
   12817 		printf("%s: failed to acquire semaphore\n",
   12818 		    device_xname(sc->sc_dev));
   12819 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12820 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12821 		goto out;
   12822 	}
   12823 	return 0;
   12824 
   12825 out:
   12826 	mutex_exit(sc->sc_ich_phymtx);
   12827 	return 1;
   12828 }
   12829 
   12830 static void
   12831 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12832 {
   12833 	uint32_t ext_ctrl;
   12834 
   12835 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12836 		device_xname(sc->sc_dev), __func__));
   12837 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12838 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12839 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12840 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12841 	} else {
   12842 		printf("%s: Semaphore unexpectedly released\n",
   12843 		    device_xname(sc->sc_dev));
   12844 	}
   12845 
   12846 	mutex_exit(sc->sc_ich_phymtx);
   12847 }
   12848 
   12849 static int
   12850 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12851 {
   12852 
   12853 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12854 		device_xname(sc->sc_dev), __func__));
   12855 	mutex_enter(sc->sc_ich_nvmmtx);
   12856 
   12857 	return 0;
   12858 }
   12859 
   12860 static void
   12861 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12862 {
   12863 
   12864 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12865 		device_xname(sc->sc_dev), __func__));
   12866 	mutex_exit(sc->sc_ich_nvmmtx);
   12867 }
   12868 
   12869 static int
   12870 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12871 {
   12872 	int i = 0;
   12873 	uint32_t reg;
   12874 
   12875 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12876 		device_xname(sc->sc_dev), __func__));
   12877 
   12878 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12879 	do {
   12880 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12881 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12882 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12883 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12884 			break;
   12885 		delay(2*1000);
   12886 		i++;
   12887 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12888 
   12889 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12890 		wm_put_hw_semaphore_82573(sc);
   12891 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12892 		    device_xname(sc->sc_dev));
   12893 		return -1;
   12894 	}
   12895 
   12896 	return 0;
   12897 }
   12898 
   12899 static void
   12900 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12901 {
   12902 	uint32_t reg;
   12903 
   12904 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12905 		device_xname(sc->sc_dev), __func__));
   12906 
   12907 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12908 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12909 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12910 }
   12911 
   12912 /*
   12913  * Management mode and power management related subroutines.
   12914  * BMC, AMT, suspend/resume and EEE.
   12915  */
   12916 
   12917 #ifdef WM_WOL
   12918 static int
   12919 wm_check_mng_mode(struct wm_softc *sc)
   12920 {
   12921 	int rv;
   12922 
   12923 	switch (sc->sc_type) {
   12924 	case WM_T_ICH8:
   12925 	case WM_T_ICH9:
   12926 	case WM_T_ICH10:
   12927 	case WM_T_PCH:
   12928 	case WM_T_PCH2:
   12929 	case WM_T_PCH_LPT:
   12930 	case WM_T_PCH_SPT:
   12931 		rv = wm_check_mng_mode_ich8lan(sc);
   12932 		break;
   12933 	case WM_T_82574:
   12934 	case WM_T_82583:
   12935 		rv = wm_check_mng_mode_82574(sc);
   12936 		break;
   12937 	case WM_T_82571:
   12938 	case WM_T_82572:
   12939 	case WM_T_82573:
   12940 	case WM_T_80003:
   12941 		rv = wm_check_mng_mode_generic(sc);
   12942 		break;
   12943 	default:
    12944 		/* Nothing to do */
   12945 		rv = 0;
   12946 		break;
   12947 	}
   12948 
   12949 	return rv;
   12950 }
   12951 
   12952 static int
   12953 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12954 {
   12955 	uint32_t fwsm;
   12956 
   12957 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12958 
   12959 	if (((fwsm & FWSM_FW_VALID) != 0)
   12960 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12961 		return 1;
   12962 
   12963 	return 0;
   12964 }
   12965 
   12966 static int
   12967 wm_check_mng_mode_82574(struct wm_softc *sc)
   12968 {
   12969 	uint16_t data;
   12970 
   12971 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12972 
   12973 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12974 		return 1;
   12975 
   12976 	return 0;
   12977 }
   12978 
   12979 static int
   12980 wm_check_mng_mode_generic(struct wm_softc *sc)
   12981 {
   12982 	uint32_t fwsm;
   12983 
   12984 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12985 
   12986 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12987 		return 1;
   12988 
   12989 	return 0;
   12990 }
   12991 #endif /* WM_WOL */
   12992 
   12993 static int
   12994 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12995 {
   12996 	uint32_t manc, fwsm, factps;
   12997 
   12998 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12999 		return 0;
   13000 
   13001 	manc = CSR_READ(sc, WMREG_MANC);
   13002 
   13003 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13004 		device_xname(sc->sc_dev), manc));
   13005 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13006 		return 0;
   13007 
   13008 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13009 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13010 		factps = CSR_READ(sc, WMREG_FACTPS);
   13011 		if (((factps & FACTPS_MNGCG) == 0)
   13012 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13013 			return 1;
    13014 	} else if ((sc->sc_type == WM_T_82574)
          	    || (sc->sc_type == WM_T_82583)) {
   13015 		uint16_t data;
   13016 
   13017 		factps = CSR_READ(sc, WMREG_FACTPS);
   13018 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13019 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13020 			device_xname(sc->sc_dev), factps, data));
   13021 		if (((factps & FACTPS_MNGCG) == 0)
   13022 		    && ((data & NVM_CFG2_MNGM_MASK)
   13023 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13024 			return 1;
   13025 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13026 	    && ((manc & MANC_ASF_EN) == 0))
   13027 		return 1;
   13028 
   13029 	return 0;
   13030 }
   13031 
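          /*
           * Check whether a PHY reset is currently blocked, e.g. because
           * the management firmware is using the PHY.
           */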
   13032 static bool
   13033 wm_phy_resetisblocked(struct wm_softc *sc)
   13034 {
   13035 	bool blocked = false;
   13036 	uint32_t reg;
   13037 	int i = 0;
   13038 
   13039 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13040 		device_xname(sc->sc_dev), __func__));
   13041 
   13042 	switch (sc->sc_type) {
   13043 	case WM_T_ICH8:
   13044 	case WM_T_ICH9:
   13045 	case WM_T_ICH10:
   13046 	case WM_T_PCH:
   13047 	case WM_T_PCH2:
   13048 	case WM_T_PCH_LPT:
   13049 	case WM_T_PCH_SPT:
   13050 		do {
   13051 			reg = CSR_READ(sc, WMREG_FWSM);
   13052 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13053 				blocked = true;
   13054 				delay(10*1000);
   13055 				continue;
   13056 			}
   13057 			blocked = false;
   13058 		} while (blocked && (i++ < 30));
   13059 		return blocked;
   13061 	case WM_T_82571:
   13062 	case WM_T_82572:
   13063 	case WM_T_82573:
   13064 	case WM_T_82574:
   13065 	case WM_T_82583:
   13066 	case WM_T_80003:
   13067 		reg = CSR_READ(sc, WMREG_MANC);
   13068 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13069 			return true;
   13070 		else
   13071 			return false;
   13073 	default:
   13074 		/* no problem */
   13075 		break;
   13076 	}
   13077 
   13078 	return false;
   13079 }
   13080 
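          /*
           * Set the DRV_LOAD bit to tell the management firmware that the
           * driver has taken control of the hardware.
           */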
   13081 static void
   13082 wm_get_hw_control(struct wm_softc *sc)
   13083 {
   13084 	uint32_t reg;
   13085 
   13086 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13087 		device_xname(sc->sc_dev), __func__));
   13088 
   13089 	if (sc->sc_type == WM_T_82573) {
   13090 		reg = CSR_READ(sc, WMREG_SWSM);
   13091 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13092 	} else if (sc->sc_type >= WM_T_82571) {
   13093 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13094 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13095 	}
   13096 }
   13097 
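          /*
           * Clear the DRV_LOAD bit to tell the management firmware that
           * the driver has released control of the hardware.
           */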
   13098 static void
   13099 wm_release_hw_control(struct wm_softc *sc)
   13100 {
   13101 	uint32_t reg;
   13102 
   13103 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13104 		device_xname(sc->sc_dev), __func__));
   13105 
   13106 	if (sc->sc_type == WM_T_82573) {
   13107 		reg = CSR_READ(sc, WMREG_SWSM);
   13108 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13109 	} else if (sc->sc_type >= WM_T_82571) {
   13110 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13111 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13112 	}
   13113 }
   13114 
   13115 static void
   13116 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13117 {
   13118 	uint32_t reg;
   13119 
   13120 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13121 		device_xname(sc->sc_dev), __func__));
   13122 
   13123 	if (sc->sc_type < WM_T_PCH2)
   13124 		return;
   13125 
   13126 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13127 
   13128 	if (gate)
   13129 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13130 	else
   13131 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13132 
   13133 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13134 }
   13135 
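          /*
           * Switch the PHY's access method from SMBus back to PCIe/MDIO so
           * that the driver can access the PHY registers again.
           */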
   13136 static void
   13137 wm_smbustopci(struct wm_softc *sc)
   13138 {
   13139 	uint32_t fwsm, reg;
   13140 	int rv = 0;
   13141 
   13142 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13143 		device_xname(sc->sc_dev), __func__));
   13144 
   13145 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13146 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13147 
   13148 	/* Disable ULP */
   13149 	wm_ulp_disable(sc);
   13150 
   13151 	/* Acquire PHY semaphore */
   13152 	sc->phy.acquire(sc);
   13153 
   13154 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13155 	switch (sc->sc_type) {
   13156 	case WM_T_PCH_LPT:
   13157 	case WM_T_PCH_SPT:
   13158 		if (wm_phy_is_accessible_pchlan(sc))
   13159 			break;
   13160 
   13161 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13162 		reg |= CTRL_EXT_FORCE_SMBUS;
   13163 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13164 #if 0
   13165 		/* XXX Isn't this required??? */
   13166 		CSR_WRITE_FLUSH(sc);
   13167 #endif
   13168 		delay(50 * 1000);
   13169 		/* FALLTHROUGH */
   13170 	case WM_T_PCH2:
   13171 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13172 			break;
   13173 		/* FALLTHROUGH */
   13174 	case WM_T_PCH:
   13175 		if (sc->sc_type == WM_T_PCH)
   13176 			if ((fwsm & FWSM_FW_VALID) != 0)
   13177 				break;
   13178 
   13179 		if (wm_phy_resetisblocked(sc) == true) {
   13180 			printf("XXX reset is blocked(3)\n");
   13181 			break;
   13182 		}
   13183 
   13184 		wm_toggle_lanphypc_pch_lpt(sc);
   13185 
   13186 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13187 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13188 				break;
   13189 
   13190 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13191 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13192 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13193 
   13194 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13195 				break;
   13196 			rv = -1;
   13197 		}
   13198 		break;
   13199 	default:
   13200 		break;
   13201 	}
   13202 
   13203 	/* Release semaphore */
   13204 	sc->phy.release(sc);
   13205 
   13206 	if (rv == 0) {
   13207 		if (wm_phy_resetisblocked(sc)) {
   13208 			printf("XXX reset is blocked(4)\n");
   13209 			goto out;
   13210 		}
   13211 		wm_reset_phy(sc);
   13212 		if (wm_phy_resetisblocked(sc))
   13213 			printf("XXX reset is blocked(4)\n");
   13214 	}
   13215 
   13216 out:
   13217 	/*
   13218 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13219 	 */
   13220 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13221 		delay(10*1000);
   13222 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13223 	}
   13224 }
   13225 
   13226 static void
   13227 wm_init_manageability(struct wm_softc *sc)
   13228 {
   13229 
   13230 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13231 		device_xname(sc->sc_dev), __func__));
   13232 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13233 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13234 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13235 
   13236 		/* Disable hardware interception of ARP */
   13237 		manc &= ~MANC_ARP_EN;
   13238 
   13239 		/* Enable receiving management packets to the host */
   13240 		if (sc->sc_type >= WM_T_82571) {
   13241 			manc |= MANC_EN_MNG2HOST;
    13242 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13243 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13244 		}
   13245 
   13246 		CSR_WRITE(sc, WMREG_MANC, manc);
   13247 	}
   13248 }
   13249 
   13250 static void
   13251 wm_release_manageability(struct wm_softc *sc)
   13252 {
   13253 
   13254 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13255 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13256 
   13257 		manc |= MANC_ARP_EN;
   13258 		if (sc->sc_type >= WM_T_82571)
   13259 			manc &= ~MANC_EN_MNG2HOST;
   13260 
   13261 		CSR_WRITE(sc, WMREG_MANC, manc);
   13262 	}
   13263 }
   13264 
   13265 static void
   13266 wm_get_wakeup(struct wm_softc *sc)
   13267 {
   13268 
    13269 	/* Step 0: HAS_AMT, ARC_SUBSYS_VALID and ASF_FIRMWARE_PRES flags */
   13270 	switch (sc->sc_type) {
   13271 	case WM_T_82573:
   13272 	case WM_T_82583:
   13273 		sc->sc_flags |= WM_F_HAS_AMT;
   13274 		/* FALLTHROUGH */
   13275 	case WM_T_80003:
   13276 	case WM_T_82575:
   13277 	case WM_T_82576:
   13278 	case WM_T_82580:
   13279 	case WM_T_I350:
   13280 	case WM_T_I354:
   13281 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13282 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13283 		/* FALLTHROUGH */
   13284 	case WM_T_82541:
   13285 	case WM_T_82541_2:
   13286 	case WM_T_82547:
   13287 	case WM_T_82547_2:
   13288 	case WM_T_82571:
   13289 	case WM_T_82572:
   13290 	case WM_T_82574:
   13291 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13292 		break;
   13293 	case WM_T_ICH8:
   13294 	case WM_T_ICH9:
   13295 	case WM_T_ICH10:
   13296 	case WM_T_PCH:
   13297 	case WM_T_PCH2:
   13298 	case WM_T_PCH_LPT:
   13299 	case WM_T_PCH_SPT:
   13300 		sc->sc_flags |= WM_F_HAS_AMT;
   13301 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13302 		break;
   13303 	default:
   13304 		break;
   13305 	}
   13306 
    13307 	/* Step 1: the HAS_MANAGE flag */
   13308 	if (wm_enable_mng_pass_thru(sc) != 0)
   13309 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13310 
   13311 	/*
    13312 	 * Note that the WOL flags are set later, after the EEPROM reset
    13313 	 * code has run.
   13314 	 */
   13315 }
   13316 
   13317 /*
    13318  * Unconfigure Ultra Low Power (ULP) mode.
    13319  * Only for devices which support ULP (see the check below).
   13320  */
   13321 static void
   13322 wm_ulp_disable(struct wm_softc *sc)
   13323 {
   13324 	uint32_t reg;
   13325 	int i = 0;
   13326 
   13327 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13328 		device_xname(sc->sc_dev), __func__));
    13329 	/* Exclude devices which don't support ULP */
   13330 	if ((sc->sc_type < WM_T_PCH_LPT)
   13331 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13332 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13333 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13334 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13335 		return;
   13336 
   13337 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13338 		/* Request ME un-configure ULP mode in the PHY */
   13339 		reg = CSR_READ(sc, WMREG_H2ME);
   13340 		reg &= ~H2ME_ULP;
   13341 		reg |= H2ME_ENFORCE_SETTINGS;
   13342 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13343 
   13344 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13345 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13346 			if (i++ == 30) {
    13347 				printf("%s: %s timed out\n",
          				    device_xname(sc->sc_dev), __func__);
   13348 				return;
   13349 			}
   13350 			delay(10 * 1000);
   13351 		}
   13352 		reg = CSR_READ(sc, WMREG_H2ME);
   13353 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13354 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13355 
   13356 		return;
   13357 	}
   13358 
   13359 	/* Acquire semaphore */
   13360 	sc->phy.acquire(sc);
   13361 
   13362 	/* Toggle LANPHYPC */
   13363 	wm_toggle_lanphypc_pch_lpt(sc);
   13364 
   13365 	/* Unforce SMBus mode in PHY */
   13366 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13367 	if (reg == 0x0000 || reg == 0xffff) {
   13368 		uint32_t reg2;
   13369 
   13370 		printf("%s: Force SMBus first.\n", __func__);
   13371 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13372 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13373 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13374 		delay(50 * 1000);
   13375 
   13376 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13377 	}
   13378 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13379 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13380 
   13381 	/* Unforce SMBus mode in MAC */
   13382 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13383 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13384 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13385 
   13386 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13387 	reg |= HV_PM_CTRL_K1_ENA;
   13388 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13389 
   13390 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13391 	reg &= ~(I218_ULP_CONFIG1_IND
   13392 	    | I218_ULP_CONFIG1_STICKY_ULP
   13393 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13394 	    | I218_ULP_CONFIG1_WOL_HOST
   13395 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13396 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13397 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13398 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13399 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13400 	reg |= I218_ULP_CONFIG1_START;
   13401 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13402 
   13403 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13404 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13405 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13406 
   13407 	/* Release semaphore */
   13408 	sc->phy.release(sc);
   13409 	wm_gmii_reset(sc);
   13410 	delay(50 * 1000);
   13411 }
   13412 
   13413 /* WOL in the newer chipset interfaces (pchlan) */
   13414 static void
   13415 wm_enable_phy_wakeup(struct wm_softc *sc)
   13416 {
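          	/* XXX Not implemented yet; the intended steps are outlined below. */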
   13417 #if 0
   13418 	uint16_t preg;
   13419 
   13420 	/* Copy MAC RARs to PHY RARs */
   13421 
   13422 	/* Copy MAC MTA to PHY MTA */
   13423 
   13424 	/* Configure PHY Rx Control register */
   13425 
   13426 	/* Enable PHY wakeup in MAC register */
   13427 
   13428 	/* Configure and enable PHY wakeup in PHY registers */
   13429 
   13430 	/* Activate PHY wakeup */
   13431 
   13432 	/* XXX */
   13433 #endif
   13434 }
   13435 
   13436 /* Power down workaround on D3 */
   13437 static void
   13438 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13439 {
   13440 	uint32_t reg;
   13441 	int i;
   13442 
   13443 	for (i = 0; i < 2; i++) {
   13444 		/* Disable link */
   13445 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13446 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13447 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13448 
   13449 		/*
   13450 		 * Call gig speed drop workaround on Gig disable before
   13451 		 * accessing any PHY registers
   13452 		 */
   13453 		if (sc->sc_type == WM_T_ICH8)
   13454 			wm_gig_downshift_workaround_ich8lan(sc);
   13455 
   13456 		/* Write VR power-down enable */
   13457 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13458 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13459 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13460 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13461 
   13462 		/* Read it back and test */
   13463 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13464 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13465 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13466 			break;
   13467 
   13468 		/* Issue PHY reset and repeat at most one more time */
   13469 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13470 	}
   13471 }
   13472 
   13473 static void
   13474 wm_enable_wakeup(struct wm_softc *sc)
   13475 {
   13476 	uint32_t reg, pmreg;
   13477 	pcireg_t pmode;
   13478 
   13479 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13480 		device_xname(sc->sc_dev), __func__));
   13481 
   13482 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13483 		&pmreg, NULL) == 0)
   13484 		return;
   13485 
   13486 	/* Advertise the wakeup capability */
   13487 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13488 	    | CTRL_SWDPIN(3));
   13489 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13490 
   13491 	/* ICH workaround */
   13492 	switch (sc->sc_type) {
   13493 	case WM_T_ICH8:
   13494 	case WM_T_ICH9:
   13495 	case WM_T_ICH10:
   13496 	case WM_T_PCH:
   13497 	case WM_T_PCH2:
   13498 	case WM_T_PCH_LPT:
   13499 	case WM_T_PCH_SPT:
   13500 		/* Disable gig during WOL */
   13501 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13502 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13503 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13504 		if (sc->sc_type == WM_T_PCH)
   13505 			wm_gmii_reset(sc);
   13506 
   13507 		/* Power down workaround */
   13508 		if (sc->sc_phytype == WMPHY_82577) {
   13509 			struct mii_softc *child;
   13510 
   13511 			/* Assume that the PHY is copper */
   13512 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13513 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13514 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13515 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
   13516 		}
   13517 		break;
   13518 	default:
   13519 		break;
   13520 	}
   13521 
   13522 	/* Keep the laser running on fiber adapters */
   13523 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13524 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13525 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13526 		reg |= CTRL_EXT_SWDPIN(3);
   13527 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13528 	}
   13529 
   13530 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13531 #if 0	/* for the multicast packet */
   13532 	reg |= WUFC_MC;
   13533 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13534 #endif
   13535 
   13536 	if (sc->sc_type >= WM_T_PCH)
   13537 		wm_enable_phy_wakeup(sc);
   13538 	else {
   13539 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13540 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13541 	}
   13542 
   13543 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13544 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13545 		|| (sc->sc_type == WM_T_PCH2))
   13546 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13547 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13548 
   13549 	/* Request PME */
   13550 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13551 #if 0
   13552 	/* Disable WOL */
   13553 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13554 #else
   13555 	/* For WOL */
   13556 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13557 #endif
   13558 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13559 }
   13560 
    13561 /* LPLU (Low Power Link Up) */
   13562 
   13563 static void
   13564 wm_lplu_d0_disable(struct wm_softc *sc)
   13565 {
   13566 	struct mii_data *mii = &sc->sc_mii;
   13567 	uint32_t reg;
   13568 
   13569 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13570 		device_xname(sc->sc_dev), __func__));
   13571 
   13572 	if (sc->sc_phytype == WMPHY_IFE)
   13573 		return;
   13574 
   13575 	switch (sc->sc_type) {
   13576 	case WM_T_82571:
   13577 	case WM_T_82572:
   13578 	case WM_T_82573:
   13579 	case WM_T_82575:
   13580 	case WM_T_82576:
   13581 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13582 		reg &= ~PMR_D0_LPLU;
   13583 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13584 		break;
   13585 	case WM_T_82580:
   13586 	case WM_T_I350:
   13587 	case WM_T_I210:
   13588 	case WM_T_I211:
   13589 		reg = CSR_READ(sc, WMREG_PHPM);
   13590 		reg &= ~PHPM_D0A_LPLU;
   13591 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13592 		break;
   13593 	case WM_T_82574:
   13594 	case WM_T_82583:
   13595 	case WM_T_ICH8:
   13596 	case WM_T_ICH9:
   13597 	case WM_T_ICH10:
   13598 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13599 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13600 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13601 		CSR_WRITE_FLUSH(sc);
   13602 		break;
   13603 	case WM_T_PCH:
   13604 	case WM_T_PCH2:
   13605 	case WM_T_PCH_LPT:
   13606 	case WM_T_PCH_SPT:
   13607 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13608 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13609 		if (wm_phy_resetisblocked(sc) == false)
   13610 			reg |= HV_OEM_BITS_ANEGNOW;
   13611 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13612 		break;
   13613 	default:
   13614 		break;
   13615 	}
   13616 }
   13617 
    13618 /* EEE (Energy Efficient Ethernet) */
   13619 
   13620 static void
   13621 wm_set_eee_i350(struct wm_softc *sc)
   13622 {
   13623 	uint32_t ipcnfg, eeer;
   13624 
   13625 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13626 	eeer = CSR_READ(sc, WMREG_EEER);
   13627 
   13628 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13629 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13630 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13631 		    | EEER_LPI_FC);
   13632 	} else {
   13633 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13634 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13635 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13636 		    | EEER_LPI_FC);
   13637 	}
   13638 
   13639 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13640 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13641 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13642 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13643 }
   13644 
   13645 /*
   13646  * Workarounds (mainly PHY related).
    13647  * Basically, PHY workarounds belong in the PHY drivers.
   13648  */
   13649 
   13650 /* Work-around for 82566 Kumeran PCS lock loss */
   13651 static void
   13652 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13653 {
   13654 	struct mii_data *mii = &sc->sc_mii;
   13655 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13656 	int i;
   13657 	int reg;
   13658 
   13659 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13660 		device_xname(sc->sc_dev), __func__));
   13661 
   13662 	/* If the link is not up, do nothing */
   13663 	if ((status & STATUS_LU) == 0)
   13664 		return;
   13665 
    13666 	/* Nothing to do if the link speed is not 1Gbps */
   13667 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13668 		return;
   13669 
   13670 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13671 	for (i = 0; i < 10; i++) {
   13672 		/* read twice */
   13673 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13674 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13675 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13676 			goto out;	/* GOOD! */
   13677 
   13678 		/* Reset the PHY */
   13679 		wm_reset_phy(sc);
   13680 		delay(5*1000);
   13681 	}
   13682 
   13683 	/* Disable GigE link negotiation */
   13684 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13685 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13686 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13687 
   13688 	/*
   13689 	 * Call gig speed drop workaround on Gig disable before accessing
   13690 	 * any PHY registers.
   13691 	 */
   13692 	wm_gig_downshift_workaround_ich8lan(sc);
   13693 
   13694 out:
   13695 	return;
   13696 }
   13697 
    13698 /* Workaround: without this, WOL from S5 stops working */
   13699 static void
   13700 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13701 {
   13702 	uint16_t kmrn_reg;
   13703 
   13704 	/* Only for igp3 */
   13705 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13706 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13707 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13708 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13709 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13710 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13711 	}
   13712 }
   13713 
   13714 /*
    13715  * Workarounds for the PCH PHYs (82577/82578).
    13716  * XXX Should these be moved to a new PHY driver?
   13717  */
   13718 static void
   13719 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13720 {
   13721 
   13722 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13723 		device_xname(sc->sc_dev), __func__));
   13724 	KASSERT(sc->sc_type == WM_T_PCH);
   13725 
   13726 	if (sc->sc_phytype == WMPHY_82577)
   13727 		wm_set_mdio_slow_mode_hv(sc);
   13728 
    13729 	/* XXX Not yet implemented: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
    13730 
    13731 	/* XXX Not yet implemented: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13732 
   13733 	/* 82578 */
   13734 	if (sc->sc_phytype == WMPHY_82578) {
   13735 		struct mii_softc *child;
   13736 
   13737 		/*
   13738 		 * Return registers to default by doing a soft reset then
   13739 		 * writing 0x3140 to the control register
   13740 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13741 		 */
   13742 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13743 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13744 			PHY_RESET(child);
   13745 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13746 			    0x3140);
   13747 		}
   13748 	}
   13749 
   13750 	/* Select page 0 */
   13751 	sc->phy.acquire(sc);
   13752 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13753 	sc->phy.release(sc);
   13754 
   13755 	/*
   13756 	 * Configure the K1 Si workaround during phy reset assuming there is
   13757 	 * link so that it disables K1 if link is in 1Gbps.
   13758 	 */
   13759 	wm_k1_gig_workaround_hv(sc, 1);
   13760 }
   13761 
   13762 static void
   13763 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13764 {
   13765 
   13766 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13767 		device_xname(sc->sc_dev), __func__));
   13768 	KASSERT(sc->sc_type == WM_T_PCH2);
   13769 
   13770 	wm_set_mdio_slow_mode_hv(sc);
   13771 }
   13772 
   13773 static int
   13774 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13775 {
   13776 	int k1_enable = sc->sc_nvm_k1_enabled;
   13777 
   13778 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13779 		device_xname(sc->sc_dev), __func__));
   13780 
   13781 	if (sc->phy.acquire(sc) != 0)
   13782 		return -1;
   13783 
   13784 	if (link) {
   13785 		k1_enable = 0;
   13786 
   13787 		/* Link stall fix for link up */
   13788 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13789 	} else {
   13790 		/* Link stall fix for link down */
   13791 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13792 	}
   13793 
   13794 	wm_configure_k1_ich8lan(sc, k1_enable);
   13795 	sc->phy.release(sc);
   13796 
   13797 	return 0;
   13798 }
   13799 
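          /* Put the PHY's MDIO interface into slow mode. */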
   13800 static void
   13801 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13802 {
   13803 	uint32_t reg;
   13804 
   13805 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13806 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13807 	    reg | HV_KMRN_MDIO_SLOW);
   13808 }
   13809 
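          /*
           * Enable or disable the K1 power state.  K1 is a low power state
           * of the MAC-PHY interconnect.
           */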
   13810 static void
   13811 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13812 {
   13813 	uint32_t ctrl, ctrl_ext, tmp;
   13814 	uint16_t kmrn_reg;
   13815 
   13816 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13817 
   13818 	if (k1_enable)
   13819 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13820 	else
   13821 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13822 
   13823 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13824 
   13825 	delay(20);
   13826 
   13827 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13828 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13829 
   13830 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13831 	tmp |= CTRL_FRCSPD;
   13832 
   13833 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13834 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13835 	CSR_WRITE_FLUSH(sc);
   13836 	delay(20);
   13837 
   13838 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13839 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13840 	CSR_WRITE_FLUSH(sc);
   13841 	delay(20);
   13842 }
   13843 
    13844 /* Special case: the 82575 needs manual initialization ... */
   13845 static void
   13846 wm_reset_init_script_82575(struct wm_softc *sc)
   13847 {
   13848 	/*
    13849 	 * Remark: this is untested code - we have no board without EEPROM.
    13850 	 * It is the same setup as mentioned in the FreeBSD driver for
          	 * the i82575.
   13851 	 */
   13852 
   13853 	/* SerDes configuration via SERDESCTRL */
   13854 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13855 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13856 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13857 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13858 
   13859 	/* CCM configuration via CCMCTL register */
   13860 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13861 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13862 
   13863 	/* PCIe lanes configuration */
   13864 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13865 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13866 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13867 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13868 
   13869 	/* PCIe PLL Configuration */
   13870 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13871 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13872 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13873 }
   13874 
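          /*
           * Set the MDIO access destination bits in MDICNFG from the NVM
           * (CFG3 word of the port).  Only needed when SGMII is in use.
           */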
   13875 static void
   13876 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13877 {
   13878 	uint32_t reg;
   13879 	uint16_t nvmword;
   13880 	int rv;
   13881 
   13882 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13883 		return;
   13884 
   13885 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13886 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13887 	if (rv != 0) {
   13888 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13889 		    __func__);
   13890 		return;
   13891 	}
   13892 
   13893 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13894 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13895 		reg |= MDICNFG_DEST;
   13896 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13897 		reg |= MDICNFG_COM_MDIO;
   13898 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13899 }
   13900 
   13901 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13902 
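          /*
           * Check whether the PHY responds with valid PHY ID registers.
           * Must be called with the PHY semaphore held.
           */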
   13903 static bool
   13904 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13905 {
   13906 	int i;
   13907 	uint32_t reg;
   13908 	uint16_t id1, id2;
   13909 
   13910 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13911 		device_xname(sc->sc_dev), __func__));
   13912 	id1 = id2 = 0xffff;
   13913 	for (i = 0; i < 2; i++) {
   13914 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13915 		if (MII_INVALIDID(id1))
   13916 			continue;
   13917 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13918 		if (MII_INVALIDID(id2))
   13919 			continue;
   13920 		break;
   13921 	}
    13922 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    13923 		goto out;
   13925 
   13926 	if (sc->sc_type < WM_T_PCH_LPT) {
   13927 		sc->phy.release(sc);
   13928 		wm_set_mdio_slow_mode_hv(sc);
   13929 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13930 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13931 		sc->phy.acquire(sc);
   13932 	}
   13933 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13934 		printf("XXX return with false\n");
   13935 		return false;
   13936 	}
   13937 out:
   13938 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13939 		/* Only unforce SMBus if ME is not active */
   13940 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13941 			/* Unforce SMBus mode in PHY */
   13942 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13943 			    CV_SMB_CTRL);
   13944 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13945 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13946 			    CV_SMB_CTRL, reg);
   13947 
   13948 			/* Unforce SMBus mode in MAC */
   13949 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13950 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13951 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13952 		}
   13953 	}
   13954 	return true;
   13955 }
   13956 
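          /*
           * Toggle the LANPHYPC pin value to power cycle the PHY and bring
           * it into a known state.
           */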
   13957 static void
   13958 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13959 {
   13960 	uint32_t reg;
   13961 	int i;
   13962 
   13963 	/* Set PHY Config Counter to 50msec */
   13964 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13965 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13966 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13967 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13968 
   13969 	/* Toggle LANPHYPC */
   13970 	reg = CSR_READ(sc, WMREG_CTRL);
   13971 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13972 	reg &= ~CTRL_LANPHYPC_VALUE;
   13973 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13974 	CSR_WRITE_FLUSH(sc);
   13975 	delay(1000);
   13976 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13977 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13978 	CSR_WRITE_FLUSH(sc);
   13979 
   13980 	if (sc->sc_type < WM_T_PCH_LPT)
   13981 		delay(50 * 1000);
   13982 	else {
   13983 		i = 20;
   13984 
   13985 		do {
   13986 			delay(5 * 1000);
   13987 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13988 		    && i--);
   13989 
   13990 		delay(30 * 1000);
   13991 	}
   13992 }
   13993 
   13994 static int
   13995 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13996 {
   13997 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13998 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13999 	uint32_t rxa;
   14000 	uint16_t scale = 0, lat_enc = 0;
   14001 	int32_t obff_hwm = 0;
   14002 	int64_t lat_ns, value;
   14003 
   14004 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14005 		device_xname(sc->sc_dev), __func__));
   14006 
   14007 	if (link) {
   14008 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14009 		uint32_t status;
   14010 		uint16_t speed;
   14011 		pcireg_t preg;
   14012 
   14013 		status = CSR_READ(sc, WMREG_STATUS);
   14014 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14015 		case STATUS_SPEED_10:
   14016 			speed = 10;
   14017 			break;
   14018 		case STATUS_SPEED_100:
   14019 			speed = 100;
   14020 			break;
   14021 		case STATUS_SPEED_1000:
   14022 			speed = 1000;
   14023 			break;
   14024 		default:
   14025 			device_printf(sc->sc_dev, "Unknown speed "
   14026 			    "(status = %08x)\n", status);
   14027 			return -1;
   14028 		}
   14029 
   14030 		/* Rx Packet Buffer Allocation size (KB) */
   14031 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14032 
   14033 		/*
   14034 		 * Determine the maximum latency tolerated by the device.
   14035 		 *
   14036 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14037 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14038 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14039 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14040 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14041 		 */
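          		/*
          		 * For example, scale = 2 (units of 2^10 ns) and
          		 * value = 1000 encode 1000 * 1024 ns, i.e. roughly 1 ms.
          		 *
          		 * The latency the device can tolerate is the time it
          		 * takes the Rx packet buffer (minus two maximum sized
          		 * frames) to drain at the current link speed:
          		 * bits * 1000 / (speed in Mb/s) yields nanoseconds.
          		 */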
   14042 		lat_ns = ((int64_t)rxa * 1024 -
   14043 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14044 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14045 		if (lat_ns < 0)
   14046 			lat_ns = 0;
   14047 		else
   14048 			lat_ns /= speed;
   14049 		value = lat_ns;
   14050 
   14051 		while (value > LTRV_VALUE) {
    14052 			scale++;
   14053 			value = howmany(value, __BIT(5));
   14054 		}
   14055 		if (scale > LTRV_SCALE_MAX) {
   14056 			printf("%s: Invalid LTR latency scale %d\n",
   14057 			    device_xname(sc->sc_dev), scale);
   14058 			return -1;
   14059 		}
   14060 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14061 
   14062 		/* Determine the maximum latency tolerated by the platform */
   14063 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14064 		    WM_PCI_LTR_CAP_LPT);
   14065 		max_snoop = preg & 0xffff;
   14066 		max_nosnoop = preg >> 16;
   14067 
   14068 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14069 
   14070 		if (lat_enc > max_ltr_enc) {
   14071 			lat_enc = max_ltr_enc;
   14072 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14073 			    * PCI_LTR_SCALETONS(
   14074 				    __SHIFTOUT(lat_enc,
   14075 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14076 		}
   14077 
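          		/*
          		 * Convert the latency back into the (approximate) amount
          		 * of Rx buffer, in KB, that drains during that time; what
          		 * is left over is used as the OBFF high water mark.
          		 */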
   14078 		if (lat_ns) {
   14079 			lat_ns *= speed * 1000;
   14080 			lat_ns /= 8;
   14081 			lat_ns /= 1000000000;
   14082 			obff_hwm = (int32_t)(rxa - lat_ns);
   14083 		}
   14084 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14085 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    14086 			    " (rxa = %d, lat_ns = %d)\n",
   14087 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14088 			return -1;
   14089 		}
   14090 	}
    14091 	/* Snoop and No-Snoop latencies are set to the same value */
   14092 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14093 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14094 
   14095 	/* Set OBFF high water mark */
   14096 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14097 	reg |= obff_hwm;
   14098 	CSR_WRITE(sc, WMREG_SVT, reg);
   14099 
   14100 	/* Enable OBFF */
   14101 	reg = CSR_READ(sc, WMREG_SVCR);
   14102 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14103 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14104 
   14105 	return 0;
   14106 }
   14107 
   14108 /*
   14109  * I210 Errata 25 and I211 Errata 10
   14110  * Slow System Clock.
   14111  */
   14112 static void
   14113 wm_pll_workaround_i210(struct wm_softc *sc)
   14114 {
   14115 	uint32_t mdicnfg, wuc;
   14116 	uint32_t reg;
   14117 	pcireg_t pcireg;
   14118 	uint32_t pmreg;
   14119 	uint16_t nvmword, tmp_nvmword;
   14120 	int phyval;
   14121 	bool wa_done = false;
   14122 	int i;
   14123 
   14124 	/* Save WUC and MDICNFG registers */
   14125 	wuc = CSR_READ(sc, WMREG_WUC);
   14126 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14127 
   14128 	reg = mdicnfg & ~MDICNFG_DEST;
   14129 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14130 
   14131 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14132 		nvmword = INVM_DEFAULT_AL;
   14133 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14134 
   14135 	/* Get Power Management cap offset */
   14136 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14137 		&pmreg, NULL) == 0)
   14138 		return;
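          	/*
          	 * Check whether the PHY PLL frequency is properly configured;
          	 * if not, reset the internal PHY with the workaround value in
          	 * the autoload register and check again, at most
          	 * WM_MAX_PLL_TRIES times.
          	 */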
   14139 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14140 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14141 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14142 
   14143 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14144 			break; /* OK */
   14145 		}
   14146 
   14147 		wa_done = true;
   14148 		/* Directly reset the internal PHY */
   14149 		reg = CSR_READ(sc, WMREG_CTRL);
   14150 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14151 
   14152 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14153 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14154 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14155 
   14156 		CSR_WRITE(sc, WMREG_WUC, 0);
   14157 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14158 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14159 
   14160 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14161 		    pmreg + PCI_PMCSR);
   14162 		pcireg |= PCI_PMCSR_STATE_D3;
   14163 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14164 		    pmreg + PCI_PMCSR, pcireg);
   14165 		delay(1000);
   14166 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14167 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14168 		    pmreg + PCI_PMCSR, pcireg);
   14169 
   14170 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14171 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14172 
   14173 		/* Restore WUC register */
   14174 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14175 	}
   14176 
   14177 	/* Restore MDICNFG setting */
   14178 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14179 	if (wa_done)
   14180 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14181 }
   14182 
   14183 static void
   14184 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14185 {
   14186 	uint32_t reg;
   14187 
   14188 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14189 		device_xname(sc->sc_dev), __func__));
   14190 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14191 
   14192 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14193 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14194 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14195 
   14196 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14197 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14198 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14199 }
   14200