      1 /*	$NetBSD: if_wm.c,v 1.514 2017/06/26 04:09:02 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue improvement (refine queue selection logic)
     78  *	- Split header buffer for newer descriptors
     79  *	- EEE (Energy Efficiency Ethernet)
     80  *	- Virtual Function
     81  *	- Set LED correctly (based on contents in EEPROM)
     82  *	- Rework how parameters are loaded from the EEPROM.
     83  *	- Image Unique ID
     84  */
     85 
     86 #include <sys/cdefs.h>
     87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.514 2017/06/26 04:09:02 msaitoh Exp $");
     88 
     89 #ifdef _KERNEL_OPT
     90 #include "opt_net_mpsafe.h"
     91 #include "opt_if_wm.h"
     92 #endif
     93 
     94 #include <sys/param.h>
     95 #include <sys/systm.h>
     96 #include <sys/callout.h>
     97 #include <sys/mbuf.h>
     98 #include <sys/malloc.h>
     99 #include <sys/kmem.h>
    100 #include <sys/kernel.h>
    101 #include <sys/socket.h>
    102 #include <sys/ioctl.h>
    103 #include <sys/errno.h>
    104 #include <sys/device.h>
    105 #include <sys/queue.h>
    106 #include <sys/syslog.h>
    107 #include <sys/interrupt.h>
    108 #include <sys/cpu.h>
    109 #include <sys/pcq.h>
    110 
    111 #include <sys/rndsource.h>
    112 
    113 #include <net/if.h>
    114 #include <net/if_dl.h>
    115 #include <net/if_media.h>
    116 #include <net/if_ether.h>
    117 
    118 #include <net/bpf.h>
    119 
    120 #include <netinet/in.h>			/* XXX for struct ip */
    121 #include <netinet/in_systm.h>		/* XXX for struct ip */
    122 #include <netinet/ip.h>			/* XXX for struct ip */
    123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    125 
    126 #include <sys/bus.h>
    127 #include <sys/intr.h>
    128 #include <machine/endian.h>
    129 
    130 #include <dev/mii/mii.h>
    131 #include <dev/mii/miivar.h>
    132 #include <dev/mii/miidevs.h>
    133 #include <dev/mii/mii_bitbang.h>
    134 #include <dev/mii/ikphyreg.h>
    135 #include <dev/mii/igphyreg.h>
    136 #include <dev/mii/igphyvar.h>
    137 #include <dev/mii/inbmphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
     158 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
     170 /*
     171  * Maximum number of interrupts this driver uses (queue intrs plus link).
     172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
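
/*
 * Illustrative sketch (not compiled in): because WM_NTXDESC(txq) is a
 * power of two, the mask macros above make advancing a ring index a
 * branch-free AND.  For example, with WM_NTXDESC(txq) == 4096:
 */
#if 0
	int last = WM_NTXDESC(txq) - 1;		/* 4095 */
	int next = WM_NEXTTX(txq, last);	/* (4095 + 1) & 0xfff == 0 */
	KASSERT(next == 0);			/* wrapped back to ring start */
#endif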
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
     217  * Receive descriptor list size.  We have one Rx buffer for normal-
     218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
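
/*
 * Worked numbers for the comment above (illustrative; assumes a ~9k jumbo
 * frame): such a frame spans 5 MCLBYTES (2k) clusters, so the 256
 * descriptors hold floor(256 / 5) = 51 in-flight jumbo packets -- hence
 * "room for 50".
 */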
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
     297 	} while (0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
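
/*
 * Hand-expanded illustration (not compiled in): WM_Q_EVCNT_DEFINE(txq, txdw)
 * in struct wm_txqueue below expands to roughly the following.  Note that
 * "##" is not pasted inside the string literal, so every name buffer has
 * the same fixed size, just large enough for the longest name built by
 * WM_Q_EVCNT_ATTACH()'s snprintf("%s%02d%s", ...) (e.g. "txq00txfifo_stall").
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif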
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
     320 	size_t txq_descsize;		/* size of a Tx descriptor */
     321 	txdescs_t *txq_descs_u;
     322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
     323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     346 	 * CPUs. This queue mediates between them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
     351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     370 	WM_Q_EVCNT_DEFINE(txq, txtusum)	/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
     376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
     395 	size_t rxq_descsize;		/* size of an Rx descriptor */
     396 	rxdescs_t *rxq_descs_u;
     397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
     398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     399 	int rxq_desc_rseg;		/* real number of control segments */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
     428 	int wmq_intr_idx;		/* index into the MSI-X table */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 /*
    446  * Software state per device.
    447  */
    448 struct wm_softc {
    449 	device_t sc_dev;		/* generic device information */
    450 	bus_space_tag_t sc_st;		/* bus space tag */
    451 	bus_space_handle_t sc_sh;	/* bus space handle */
    452 	bus_size_t sc_ss;		/* bus space size */
    453 	bus_space_tag_t sc_iot;		/* I/O space tag */
    454 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    455 	bus_size_t sc_ios;		/* I/O space size */
    456 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    457 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    458 	bus_size_t sc_flashs;		/* flash registers space size */
    459 	off_t sc_flashreg_offset;	/*
    460 					 * offset to flash registers from
    461 					 * start of BAR
    462 					 */
    463 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    464 
    465 	struct ethercom sc_ethercom;	/* ethernet common data */
    466 	struct mii_data sc_mii;		/* MII/media information */
    467 
    468 	pci_chipset_tag_t sc_pc;
    469 	pcitag_t sc_pcitag;
    470 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    471 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    472 
    473 	uint16_t sc_pcidevid;		/* PCI device ID */
    474 	wm_chip_type sc_type;		/* MAC type */
    475 	int sc_rev;			/* MAC revision */
    476 	wm_phy_type sc_phytype;		/* PHY type */
    477 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    478 #define	WM_MEDIATYPE_UNKNOWN		0x00
    479 #define	WM_MEDIATYPE_FIBER		0x01
    480 #define	WM_MEDIATYPE_COPPER		0x02
    481 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    482 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    483 	int sc_flags;			/* flags; see below */
    484 	int sc_if_flags;		/* last if_flags */
    485 	int sc_flowflags;		/* 802.3x flow control flags */
    486 	int sc_align_tweak;
    487 
    488 	void *sc_ihs[WM_MAX_NINTR];	/*
    489 					 * interrupt cookie.
    490 					 * - legacy and msi use sc_ihs[0] only
    491 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    492 					 */
    493 	pci_intr_handle_t *sc_intrs;	/*
    494 					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    496 					 */
    497 	int sc_nintrs;			/* number of interrupts */
    498 
     499 	int sc_link_intr_idx;		/* index into the MSI-X table */
    500 
    501 	callout_t sc_tick_ch;		/* tick callout */
    502 	bool sc_core_stopping;
    503 
    504 	int sc_nvm_ver_major;
    505 	int sc_nvm_ver_minor;
    506 	int sc_nvm_ver_build;
    507 	int sc_nvm_addrbits;		/* NVM address bits */
    508 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    509 	int sc_ich8_flash_base;
    510 	int sc_ich8_flash_bank_size;
    511 	int sc_nvm_k1_enabled;
    512 
    513 	int sc_nqueues;
    514 	struct wm_queue *sc_queue;
    515 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    516 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    517 
    518 	int sc_affinity_offset;
    519 
    520 #ifdef WM_EVENT_COUNTERS
    521 	/* Event counters. */
    522 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    523 
     524 	/* WM_T_82542_2_1 only */
    525 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    526 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    527 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    528 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    529 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    530 #endif /* WM_EVENT_COUNTERS */
    531 
     532 	/* This variable is used only on the 82547. */
    533 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    534 
    535 	uint32_t sc_ctrl;		/* prototype CTRL register */
    536 #if 0
    537 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    538 #endif
    539 	uint32_t sc_icr;		/* prototype interrupt bits */
    540 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    541 	uint32_t sc_tctl;		/* prototype TCTL register */
    542 	uint32_t sc_rctl;		/* prototype RCTL register */
    543 	uint32_t sc_txcw;		/* prototype TXCW register */
    544 	uint32_t sc_tipg;		/* prototype TIPG register */
    545 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    546 	uint32_t sc_pba;		/* prototype PBA register */
    547 
    548 	int sc_tbi_linkup;		/* TBI link status */
    549 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    550 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    551 
    552 	int sc_mchash_type;		/* multicast filter offset */
    553 
    554 	krndsource_t rnd_source;	/* random source */
    555 
    556 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    557 
    558 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    559 	kmutex_t *sc_ich_phymtx;	/*
    560 					 * 82574/82583/ICH/PCH specific PHY
    561 					 * mutex. For 82574/82583, the mutex
    562 					 * is used for both PHY and NVM.
    563 					 */
    564 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    565 
    566 	struct wm_phyop phy;
    567 };
    568 
    569 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    570 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    571 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
    572 
    573 #define	WM_RXCHAIN_RESET(rxq)						\
    574 do {									\
    575 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    576 	*(rxq)->rxq_tailp = NULL;					\
    577 	(rxq)->rxq_len = 0;						\
    578 } while (/*CONSTCOND*/0)
    579 
    580 #define	WM_RXCHAIN_LINK(rxq, m)						\
    581 do {									\
    582 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    583 	(rxq)->rxq_tailp = &(m)->m_next;				\
    584 } while (/*CONSTCOND*/0)
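
/*
 * Sketch (illustrative only): rxq_tailp always points at the slot that will
 * receive the next mbuf pointer -- &rxq_head while the chain is empty,
 * otherwise the last mbuf's m_next -- so each link is O(1), no list walk:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head = m0, tailp = &m0->m_next */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1, tailp = &m1->m_next */
#endif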
    585 
    586 #ifdef WM_EVENT_COUNTERS
    587 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    588 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    589 
    590 #define WM_Q_EVCNT_INCR(qname, evname)			\
    591 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    592 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    593 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    594 #else /* !WM_EVENT_COUNTERS */
    595 #define	WM_EVCNT_INCR(ev)	/* nothing */
    596 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    597 
    598 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    600 #endif /* !WM_EVENT_COUNTERS */
    601 
    602 #define	CSR_READ(sc, reg)						\
    603 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    604 #define	CSR_WRITE(sc, reg, val)						\
    605 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    606 #define	CSR_WRITE_FLUSH(sc)						\
    607 	(void) CSR_READ((sc), WMREG_STATUS)
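
/*
 * Illustrative note: CSR_WRITE_FLUSH() reads STATUS back, which forces any
 * posted PCI(e) writes to actually reach the chip -- the usual "write
 * flush" idiom, e.g.:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);	/* make sure the CTRL write has landed */
#endif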
    608 
    609 #define ICH8_FLASH_READ32(sc, reg)					\
    610 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    611 	    (reg) + sc->sc_flashreg_offset)
    612 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    613 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    614 	    (reg) + sc->sc_flashreg_offset, (data))
    615 
    616 #define ICH8_FLASH_READ16(sc, reg)					\
    617 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    620 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    624 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    625 
    626 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    627 #define	WM_CDTXADDR_HI(txq, x)						\
    628 	(sizeof(bus_addr_t) == 8 ?					\
    629 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    630 
    631 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    632 #define	WM_CDRXADDR_HI(rxq, x)						\
    633 	(sizeof(bus_addr_t) == 8 ?					\
    634 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
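
/*
 * Sketch (illustrative; the actual ring setup lives elsewhere in this
 * file): the _LO/_HI pairs split a descriptor ring base address into the
 * two 32-bit halves that the chip's base-address registers take.  On a
 * platform with a 32-bit bus_addr_t the high half is constant 0 and the
 * conditional folds away at compile time.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* low 32 bits of ring base */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* high 32 bits, or 0 */
#endif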
    635 
    636 /*
    637  * Register read/write functions.
    638  * Other than CSR_{READ|WRITE}().
    639  */
    640 #if 0
    641 static inline uint32_t wm_io_read(struct wm_softc *, int);
    642 #endif
    643 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    644 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    645 	uint32_t, uint32_t);
    646 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    647 
    648 /*
    649  * Descriptor sync/init functions.
    650  */
    651 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    652 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    653 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    654 
    655 /*
    656  * Device driver interface functions and commonly used functions.
    657  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    658  */
    659 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    660 static int	wm_match(device_t, cfdata_t, void *);
    661 static void	wm_attach(device_t, device_t, void *);
    662 static int	wm_detach(device_t, int);
    663 static bool	wm_suspend(device_t, const pmf_qual_t *);
    664 static bool	wm_resume(device_t, const pmf_qual_t *);
    665 static void	wm_watchdog(struct ifnet *);
    666 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    667 static void	wm_tick(void *);
    668 static int	wm_ifflags_cb(struct ethercom *);
    669 static int	wm_ioctl(struct ifnet *, u_long, void *);
    670 /* MAC address related */
    671 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    672 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    673 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    674 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    675 static void	wm_set_filter(struct wm_softc *);
    676 /* Reset and init related */
    677 static void	wm_set_vlan(struct wm_softc *);
    678 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    679 static void	wm_get_auto_rd_done(struct wm_softc *);
    680 static void	wm_lan_init_done(struct wm_softc *);
    681 static void	wm_get_cfg_done(struct wm_softc *);
    682 static void	wm_initialize_hardware_bits(struct wm_softc *);
    683 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    684 static void	wm_reset_phy(struct wm_softc *);
    685 static void	wm_flush_desc_rings(struct wm_softc *);
    686 static void	wm_reset(struct wm_softc *);
    687 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    688 static void	wm_rxdrain(struct wm_rxqueue *);
    689 static void	wm_rss_getkey(uint8_t *);
    690 static void	wm_init_rss(struct wm_softc *);
    691 static void	wm_adjust_qnum(struct wm_softc *, int);
    692 static inline bool	wm_is_using_msix(struct wm_softc *);
    693 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    694 static int	wm_softint_establish(struct wm_softc *, int, int);
    695 static int	wm_setup_legacy(struct wm_softc *);
    696 static int	wm_setup_msix(struct wm_softc *);
    697 static int	wm_init(struct ifnet *);
    698 static int	wm_init_locked(struct ifnet *);
    699 static void	wm_turnon(struct wm_softc *);
    700 static void	wm_turnoff(struct wm_softc *);
    701 static void	wm_stop(struct ifnet *, int);
    702 static void	wm_stop_locked(struct ifnet *, int);
    703 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    704 static void	wm_82547_txfifo_stall(void *);
    705 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    706 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    707 /* DMA related */
    708 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    709 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    710 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    711 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    712     struct wm_txqueue *);
    713 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    714 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    715 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    716     struct wm_rxqueue *);
    717 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    718 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    720 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    721 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    722 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    723 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    724     struct wm_txqueue *);
    725 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_txrx_queues(struct wm_softc *);
    728 static void	wm_free_txrx_queues(struct wm_softc *);
    729 static int	wm_init_txrx_queues(struct wm_softc *);
    730 /* Start */
    731 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    732     struct wm_txsoft *, uint32_t *, uint8_t *);
    733 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    734 static void	wm_start(struct ifnet *);
    735 static void	wm_start_locked(struct ifnet *);
    736 static int	wm_transmit(struct ifnet *, struct mbuf *);
    737 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    738 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    739 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    740     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    741 static void	wm_nq_start(struct ifnet *);
    742 static void	wm_nq_start_locked(struct ifnet *);
    743 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    744 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    745 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    746 static void	wm_deferred_start_locked(struct wm_txqueue *);
    747 static void	wm_handle_queue(void *);
    748 /* Interrupt */
    749 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    751 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    752 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    753 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    754 static void	wm_linkintr(struct wm_softc *, uint32_t);
    755 static int	wm_intr_legacy(void *);
    756 static inline void	wm_txrxintr_disable(struct wm_queue *);
    757 static inline void	wm_txrxintr_enable(struct wm_queue *);
    758 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    759 static int	wm_txrxintr_msix(void *);
    760 static int	wm_linkintr_msix(void *);
    761 
    762 /*
    763  * Media related.
    764  * GMII, SGMII, TBI, SERDES and SFP.
    765  */
    766 /* Common */
    767 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    768 /* GMII related */
    769 static void	wm_gmii_reset(struct wm_softc *);
    770 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    771 static int	wm_get_phy_id_82575(struct wm_softc *);
    772 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    773 static int	wm_gmii_mediachange(struct ifnet *);
    774 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    775 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    776 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    777 static int	wm_gmii_i82543_readreg(device_t, int, int);
    778 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    779 static int	wm_gmii_mdic_readreg(device_t, int, int);
    780 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    781 static int	wm_gmii_i82544_readreg(device_t, int, int);
    782 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    783 static int	wm_gmii_i80003_readreg(device_t, int, int);
    784 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    785 static int	wm_gmii_bm_readreg(device_t, int, int);
    786 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    787 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    788 static int	wm_gmii_hv_readreg(device_t, int, int);
    789 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    790 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    791 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    792 static int	wm_gmii_82580_readreg(device_t, int, int);
    793 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    794 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    795 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    796 static void	wm_gmii_statchg(struct ifnet *);
    797 /*
     798  * Kumeran related (80003, ICH* and PCH*).
     799  * These functions are not for accessing MII registers but for accessing
     800  * Kumeran-specific registers.
    801  */
    802 static int	wm_kmrn_readreg(struct wm_softc *, int);
    803 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
    804 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    805 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
    806 /* SGMII */
    807 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    808 static int	wm_sgmii_readreg(device_t, int, int);
    809 static void	wm_sgmii_writereg(device_t, int, int, int);
    810 /* TBI related */
    811 static void	wm_tbi_mediainit(struct wm_softc *);
    812 static int	wm_tbi_mediachange(struct ifnet *);
    813 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    814 static int	wm_check_for_link(struct wm_softc *);
    815 static void	wm_tbi_tick(struct wm_softc *);
    816 /* SERDES related */
    817 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    818 static int	wm_serdes_mediachange(struct ifnet *);
    819 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    820 static void	wm_serdes_tick(struct wm_softc *);
    821 /* SFP related */
    822 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    823 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    824 
    825 /*
    826  * NVM related.
    827  * Microwire, SPI (w/wo EERD) and Flash.
    828  */
    829 /* Misc functions */
    830 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    831 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    832 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    833 /* Microwire */
    834 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    835 /* SPI */
    836 static int	wm_nvm_ready_spi(struct wm_softc *);
    837 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     838 /* Read using the EERD register */
    839 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    840 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    841 /* Flash */
    842 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    843     unsigned int *);
    844 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    845 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    846 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    847 	uint32_t *);
    848 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    849 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    850 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    851 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    852 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    853 /* iNVM */
    854 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    855 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     856 /* Lock, detect NVM type, validate checksum and read */
    857 static int	wm_nvm_acquire(struct wm_softc *);
    858 static void	wm_nvm_release(struct wm_softc *);
    859 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    860 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    861 static int	wm_nvm_validate_checksum(struct wm_softc *);
    862 static void	wm_nvm_version_invm(struct wm_softc *);
    863 static void	wm_nvm_version(struct wm_softc *);
    864 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    865 
    866 /*
    867  * Hardware semaphores.
     868  * Very complex...
    869  */
    870 static int	wm_get_null(struct wm_softc *);
    871 static void	wm_put_null(struct wm_softc *);
    872 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    873 static void	wm_put_swsm_semaphore(struct wm_softc *);
    874 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    875 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    876 static int	wm_get_phy_82575(struct wm_softc *);
    877 static void	wm_put_phy_82575(struct wm_softc *);
    878 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    879 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    880 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    881 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    882 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
    883 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    884 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    885 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    886 
    887 /*
    888  * Management mode and power management related subroutines.
    889  * BMC, AMT, suspend/resume and EEE.
    890  */
    891 #if 0
    892 static int	wm_check_mng_mode(struct wm_softc *);
    893 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    894 static int	wm_check_mng_mode_82574(struct wm_softc *);
    895 static int	wm_check_mng_mode_generic(struct wm_softc *);
    896 #endif
    897 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    898 static bool	wm_phy_resetisblocked(struct wm_softc *);
    899 static void	wm_get_hw_control(struct wm_softc *);
    900 static void	wm_release_hw_control(struct wm_softc *);
    901 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    902 static void	wm_smbustopci(struct wm_softc *);
    903 static void	wm_init_manageability(struct wm_softc *);
    904 static void	wm_release_manageability(struct wm_softc *);
    905 static void	wm_get_wakeup(struct wm_softc *);
    906 static void	wm_ulp_disable(struct wm_softc *);
    907 static void	wm_enable_phy_wakeup(struct wm_softc *);
    908 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    909 static void	wm_enable_wakeup(struct wm_softc *);
    910 /* LPLU (Low Power Link Up) */
    911 static void	wm_lplu_d0_disable(struct wm_softc *);
    912 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    913 /* EEE */
    914 static void	wm_set_eee_i350(struct wm_softc *);
    915 
    916 /*
    917  * Workarounds (mainly PHY related).
     918  * Basically, PHY workarounds live in the PHY drivers.
    919  */
    920 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    921 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    922 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    924 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    925 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    926 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    927 static void	wm_reset_init_script_82575(struct wm_softc *);
    928 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    929 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    930 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    931 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    932 static void	wm_pll_workaround_i210(struct wm_softc *);
    933 
    934 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    935     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    936 
    937 /*
    938  * Devices supported by this driver.
    939  */
    940 static const struct wm_product {
    941 	pci_vendor_id_t		wmp_vendor;
    942 	pci_product_id_t	wmp_product;
    943 	const char		*wmp_name;
    944 	wm_chip_type		wmp_type;
    945 	uint32_t		wmp_flags;
    946 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    947 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    948 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    949 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    950 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    951 } wm_products[] = {
    952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    953 	  "Intel i82542 1000BASE-X Ethernet",
    954 	  WM_T_82542_2_1,	WMP_F_FIBER },
    955 
    956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    957 	  "Intel i82543GC 1000BASE-X Ethernet",
    958 	  WM_T_82543,		WMP_F_FIBER },
    959 
    960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    961 	  "Intel i82543GC 1000BASE-T Ethernet",
    962 	  WM_T_82543,		WMP_F_COPPER },
    963 
    964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    965 	  "Intel i82544EI 1000BASE-T Ethernet",
    966 	  WM_T_82544,		WMP_F_COPPER },
    967 
    968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    969 	  "Intel i82544EI 1000BASE-X Ethernet",
    970 	  WM_T_82544,		WMP_F_FIBER },
    971 
    972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    973 	  "Intel i82544GC 1000BASE-T Ethernet",
    974 	  WM_T_82544,		WMP_F_COPPER },
    975 
    976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    977 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    978 	  WM_T_82544,		WMP_F_COPPER },
    979 
    980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    981 	  "Intel i82540EM 1000BASE-T Ethernet",
    982 	  WM_T_82540,		WMP_F_COPPER },
    983 
    984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    985 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    986 	  WM_T_82540,		WMP_F_COPPER },
    987 
    988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    989 	  "Intel i82540EP 1000BASE-T Ethernet",
    990 	  WM_T_82540,		WMP_F_COPPER },
    991 
    992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    993 	  "Intel i82540EP 1000BASE-T Ethernet",
    994 	  WM_T_82540,		WMP_F_COPPER },
    995 
    996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    997 	  "Intel i82540EP 1000BASE-T Ethernet",
    998 	  WM_T_82540,		WMP_F_COPPER },
    999 
   1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1001 	  "Intel i82545EM 1000BASE-T Ethernet",
   1002 	  WM_T_82545,		WMP_F_COPPER },
   1003 
   1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1005 	  "Intel i82545GM 1000BASE-T Ethernet",
   1006 	  WM_T_82545_3,		WMP_F_COPPER },
   1007 
   1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1009 	  "Intel i82545GM 1000BASE-X Ethernet",
   1010 	  WM_T_82545_3,		WMP_F_FIBER },
   1011 
   1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1013 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1014 	  WM_T_82545_3,		WMP_F_SERDES },
   1015 
   1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1017 	  "Intel i82546EB 1000BASE-T Ethernet",
   1018 	  WM_T_82546,		WMP_F_COPPER },
   1019 
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1021 	  "Intel i82546EB 1000BASE-T Ethernet",
   1022 	  WM_T_82546,		WMP_F_COPPER },
   1023 
   1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1025 	  "Intel i82545EM 1000BASE-X Ethernet",
   1026 	  WM_T_82545,		WMP_F_FIBER },
   1027 
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1029 	  "Intel i82546EB 1000BASE-X Ethernet",
   1030 	  WM_T_82546,		WMP_F_FIBER },
   1031 
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1033 	  "Intel i82546GB 1000BASE-T Ethernet",
   1034 	  WM_T_82546_3,		WMP_F_COPPER },
   1035 
   1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1037 	  "Intel i82546GB 1000BASE-X Ethernet",
   1038 	  WM_T_82546_3,		WMP_F_FIBER },
   1039 
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1041 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1042 	  WM_T_82546_3,		WMP_F_SERDES },
   1043 
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1045 	  "i82546GB quad-port Gigabit Ethernet",
   1046 	  WM_T_82546_3,		WMP_F_COPPER },
   1047 
   1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1049 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1050 	  WM_T_82546_3,		WMP_F_COPPER },
   1051 
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1053 	  "Intel PRO/1000MT (82546GB)",
   1054 	  WM_T_82546_3,		WMP_F_COPPER },
   1055 
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1057 	  "Intel i82541EI 1000BASE-T Ethernet",
   1058 	  WM_T_82541,		WMP_F_COPPER },
   1059 
   1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1061 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1062 	  WM_T_82541,		WMP_F_COPPER },
   1063 
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1065 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1066 	  WM_T_82541,		WMP_F_COPPER },
   1067 
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1069 	  "Intel i82541ER 1000BASE-T Ethernet",
   1070 	  WM_T_82541_2,		WMP_F_COPPER },
   1071 
   1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1073 	  "Intel i82541GI 1000BASE-T Ethernet",
   1074 	  WM_T_82541_2,		WMP_F_COPPER },
   1075 
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1077 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1078 	  WM_T_82541_2,		WMP_F_COPPER },
   1079 
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1081 	  "Intel i82541PI 1000BASE-T Ethernet",
   1082 	  WM_T_82541_2,		WMP_F_COPPER },
   1083 
   1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1085 	  "Intel i82547EI 1000BASE-T Ethernet",
   1086 	  WM_T_82547,		WMP_F_COPPER },
   1087 
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1089 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1090 	  WM_T_82547,		WMP_F_COPPER },
   1091 
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1093 	  "Intel i82547GI 1000BASE-T Ethernet",
   1094 	  WM_T_82547_2,		WMP_F_COPPER },
   1095 
   1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1097 	  "Intel PRO/1000 PT (82571EB)",
   1098 	  WM_T_82571,		WMP_F_COPPER },
   1099 
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1101 	  "Intel PRO/1000 PF (82571EB)",
   1102 	  WM_T_82571,		WMP_F_FIBER },
   1103 
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1105 	  "Intel PRO/1000 PB (82571EB)",
   1106 	  WM_T_82571,		WMP_F_SERDES },
   1107 
   1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1109 	  "Intel PRO/1000 QT (82571EB)",
   1110 	  WM_T_82571,		WMP_F_COPPER },
   1111 
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1113 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1114 	  WM_T_82571,		WMP_F_COPPER, },
   1115 
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1117 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1118 	  WM_T_82571,		WMP_F_COPPER, },
   1119 
   1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1121 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1122 	  WM_T_82571,		WMP_F_SERDES, },
   1123 
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1125 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1126 	  WM_T_82571,		WMP_F_SERDES, },
   1127 
   1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1129 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1130 	  WM_T_82571,		WMP_F_FIBER, },
   1131 
   1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1133 	  "Intel i82572EI 1000baseT Ethernet",
   1134 	  WM_T_82572,		WMP_F_COPPER },
   1135 
   1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1137 	  "Intel i82572EI 1000baseX Ethernet",
   1138 	  WM_T_82572,		WMP_F_FIBER },
   1139 
   1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1141 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1142 	  WM_T_82572,		WMP_F_SERDES },
   1143 
   1144 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1145 	  "Intel i82572EI 1000baseT Ethernet",
   1146 	  WM_T_82572,		WMP_F_COPPER },
   1147 
   1148 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1149 	  "Intel i82573E",
   1150 	  WM_T_82573,		WMP_F_COPPER },
   1151 
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1153 	  "Intel i82573E IAMT",
   1154 	  WM_T_82573,		WMP_F_COPPER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1157 	  "Intel i82573L Gigabit Ethernet",
   1158 	  WM_T_82573,		WMP_F_COPPER },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1161 	  "Intel i82574L",
   1162 	  WM_T_82574,		WMP_F_COPPER },
   1163 
   1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1165 	  "Intel i82574L",
   1166 	  WM_T_82574,		WMP_F_COPPER },
   1167 
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1169 	  "Intel i82583V",
   1170 	  WM_T_82583,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1173 	  "i80003 dual 1000baseT Ethernet",
   1174 	  WM_T_80003,		WMP_F_COPPER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1177 	  "i80003 dual 1000baseX Ethernet",
   1178 	  WM_T_80003,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1181 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1182 	  WM_T_80003,		WMP_F_SERDES },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1185 	  "Intel i80003 1000baseT Ethernet",
   1186 	  WM_T_80003,		WMP_F_COPPER },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1189 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1190 	  WM_T_80003,		WMP_F_SERDES },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1193 	  "Intel i82801H (M_AMT) LAN Controller",
   1194 	  WM_T_ICH8,		WMP_F_COPPER },
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1196 	  "Intel i82801H (AMT) LAN Controller",
   1197 	  WM_T_ICH8,		WMP_F_COPPER },
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1199 	  "Intel i82801H LAN Controller",
   1200 	  WM_T_ICH8,		WMP_F_COPPER },
   1201 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1202 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1203 	  WM_T_ICH8,		WMP_F_COPPER },
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1205 	  "Intel i82801H (M) LAN Controller",
   1206 	  WM_T_ICH8,		WMP_F_COPPER },
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1208 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1209 	  WM_T_ICH8,		WMP_F_COPPER },
   1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1211 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1212 	  WM_T_ICH8,		WMP_F_COPPER },
   1213 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1214 	  "82567V-3 LAN Controller",
   1215 	  WM_T_ICH8,		WMP_F_COPPER },
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1217 	  "82801I (AMT) LAN Controller",
   1218 	  WM_T_ICH9,		WMP_F_COPPER },
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1220 	  "82801I 10/100 LAN Controller",
   1221 	  WM_T_ICH9,		WMP_F_COPPER },
   1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1223 	  "82801I (G) 10/100 LAN Controller",
   1224 	  WM_T_ICH9,		WMP_F_COPPER },
   1225 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1226 	  "82801I (GT) 10/100 LAN Controller",
   1227 	  WM_T_ICH9,		WMP_F_COPPER },
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1229 	  "82801I (C) LAN Controller",
   1230 	  WM_T_ICH9,		WMP_F_COPPER },
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1232 	  "82801I mobile LAN Controller",
   1233 	  WM_T_ICH9,		WMP_F_COPPER },
   1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1235 	  "82801I mobile (V) LAN Controller",
   1236 	  WM_T_ICH9,		WMP_F_COPPER },
   1237 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1238 	  "82801I mobile (AMT) LAN Controller",
   1239 	  WM_T_ICH9,		WMP_F_COPPER },
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1241 	  "82567LM-4 LAN Controller",
   1242 	  WM_T_ICH9,		WMP_F_COPPER },
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1244 	  "82567LM-2 LAN Controller",
   1245 	  WM_T_ICH10,		WMP_F_COPPER },
   1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1247 	  "82567LF-2 LAN Controller",
   1248 	  WM_T_ICH10,		WMP_F_COPPER },
   1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1250 	  "82567LM-3 LAN Controller",
   1251 	  WM_T_ICH10,		WMP_F_COPPER },
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1253 	  "82567LF-3 LAN Controller",
   1254 	  WM_T_ICH10,		WMP_F_COPPER },
   1255 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1256 	  "82567V-2 LAN Controller",
   1257 	  WM_T_ICH10,		WMP_F_COPPER },
   1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1259 	  "82567V-3? LAN Controller",
   1260 	  WM_T_ICH10,		WMP_F_COPPER },
   1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1262 	  "HANKSVILLE LAN Controller",
   1263 	  WM_T_ICH10,		WMP_F_COPPER },
   1264 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1265 	  "PCH LAN (82577LM) Controller",
   1266 	  WM_T_PCH,		WMP_F_COPPER },
   1267 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1268 	  "PCH LAN (82577LC) Controller",
   1269 	  WM_T_PCH,		WMP_F_COPPER },
   1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1271 	  "PCH LAN (82578DM) Controller",
   1272 	  WM_T_PCH,		WMP_F_COPPER },
   1273 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1274 	  "PCH LAN (82578DC) Controller",
   1275 	  WM_T_PCH,		WMP_F_COPPER },
   1276 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1277 	  "PCH2 LAN (82579LM) Controller",
   1278 	  WM_T_PCH2,		WMP_F_COPPER },
   1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1280 	  "PCH2 LAN (82579V) Controller",
   1281 	  WM_T_PCH2,		WMP_F_COPPER },
   1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1283 	  "82575EB dual-1000baseT Ethernet",
   1284 	  WM_T_82575,		WMP_F_COPPER },
   1285 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1286 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1287 	  WM_T_82575,		WMP_F_SERDES },
   1288 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1289 	  "82575GB quad-1000baseT Ethernet",
   1290 	  WM_T_82575,		WMP_F_COPPER },
   1291 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1292 	  "82575GB quad-1000baseT Ethernet (PM)",
   1293 	  WM_T_82575,		WMP_F_COPPER },
   1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1295 	  "82576 1000BaseT Ethernet",
   1296 	  WM_T_82576,		WMP_F_COPPER },
   1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1298 	  "82576 1000BaseX Ethernet",
   1299 	  WM_T_82576,		WMP_F_FIBER },
   1300 
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1302 	  "82576 gigabit Ethernet (SERDES)",
   1303 	  WM_T_82576,		WMP_F_SERDES },
   1304 
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1306 	  "82576 quad-1000BaseT Ethernet",
   1307 	  WM_T_82576,		WMP_F_COPPER },
   1308 
   1309 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1310 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1311 	  WM_T_82576,		WMP_F_COPPER },
   1312 
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1314 	  "82576 gigabit Ethernet",
   1315 	  WM_T_82576,		WMP_F_COPPER },
   1316 
   1317 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1318 	  "82576 gigabit Ethernet (SERDES)",
   1319 	  WM_T_82576,		WMP_F_SERDES },
   1320 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1321 	  "82576 quad-gigabit Ethernet (SERDES)",
   1322 	  WM_T_82576,		WMP_F_SERDES },
   1323 
   1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1325 	  "82580 1000BaseT Ethernet",
   1326 	  WM_T_82580,		WMP_F_COPPER },
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1328 	  "82580 1000BaseX Ethernet",
   1329 	  WM_T_82580,		WMP_F_FIBER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1332 	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
   1487 /*
   1488  * Register read/write functions.
   1489  * Other than CSR_{READ|WRITE}().
   1490  */
   1491 
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
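        /*
         * wm_io_write:
         *
         *	Write a chip register through the I/O-mapped indirect
         *	access window: the register offset goes to the address
         *	port at offset 0 and the value to the data port at
         *	offset 4.
         */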
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
   1509 
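        /*
         * wm_82575_write_8bit_ctlr_reg:
         *
         *	Write an 8-bit value to offset "off" behind an indirect
         *	control register (e.g. SCTL), then poll in 5us steps
         *	until the chip sets SCTL_CTL_READY or the poll times out.
         */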
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
   1532 
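        /*
         * wm_set_dma_addr:
         *
         *	Store a DMA address in the two little-endian 32-bit
         *	halves of a wiseman descriptor address field; the high
         *	half is zero when bus_addr_t is only 32 bits wide.
         */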
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
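        /*
         * wm_cdtxsync:
         *
         *	Sync "num" transmit descriptors starting at "start",
         *	splitting the bus_dmamap_sync() in two when the range
         *	wraps past the end of the descriptor ring.
         */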
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
   1564 
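        /*
         * wm_cdrxsync:
         *
         *	Sync a single receive descriptor.
         */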
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
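        /*
         * wm_init_rxdesc:
         *
         *	(Re)initialize a receive descriptor with the DMA address
         *	of its mbuf and hand it back to the hardware by writing
         *	the ring's RDT register.
         */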
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
   1584 	 * to a 4-byte boundary.
   1585 
   1586 	 *
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
   1630 /* Lookup supported device table */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	char buf[256];
   1678 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1679 	pcireg_t preg, memtype;
   1680 	uint16_t eeprom_data, apme_mask;
   1681 	bool force_clear_smbi;
   1682 	uint32_t link_mode;
   1683 	uint32_t reg;
   1684 
   1685 	sc->sc_dev = self;
   1686 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1687 	sc->sc_core_stopping = false;
   1688 
   1689 	wmp = wm_lookup(pa);
   1690 #ifdef DIAGNOSTIC
   1691 	if (wmp == NULL) {
   1692 		printf("\n");
   1693 		panic("wm_attach: impossible");
   1694 	}
   1695 #endif
   1696 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1697 
   1698 	sc->sc_pc = pa->pa_pc;
   1699 	sc->sc_pcitag = pa->pa_tag;
   1700 
   1701 	if (pci_dma64_available(pa))
   1702 		sc->sc_dmat = pa->pa_dmat64;
   1703 	else
   1704 		sc->sc_dmat = pa->pa_dmat;
   1705 
   1706 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1707 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1708 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1709 
   1710 	sc->sc_type = wmp->wmp_type;
   1711 
   1712 	/* Set default function pointers */
   1713 	sc->phy.acquire = wm_get_null;
   1714 	sc->phy.release = wm_put_null;
   1715 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1716 
   1717 	if (sc->sc_type < WM_T_82543) {
   1718 		if (sc->sc_rev < 2) {
   1719 			aprint_error_dev(sc->sc_dev,
   1720 			    "i82542 must be at least rev. 2\n");
   1721 			return;
   1722 		}
   1723 		if (sc->sc_rev < 3)
   1724 			sc->sc_type = WM_T_82542_2_0;
   1725 	}
   1726 
   1727 	/*
   1728 	 * Disable MSI for Errata:
   1729 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1730 	 *
   1731 	 *  82544: Errata 25
   1732 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1733 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1734 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1735 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1736 	 *
   1737 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1738 	 *
   1739 	 *  82571 & 82572: Errata 63
   1740 	 */
   1741 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1742 	    || (sc->sc_type == WM_T_82572))
   1743 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1744 
   1745 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1746 	    || (sc->sc_type == WM_T_82580)
   1747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1748 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1749 		sc->sc_flags |= WM_F_NEWQUEUE;
   1750 
   1751 	/* Set device properties (mactype) */
   1752 	dict = device_properties(sc->sc_dev);
   1753 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1754 
   1755 	/*
   1756 	 * Map the device.  All devices support memory-mapped access,
   1757 	 * and it is really required for normal operation.
   1758 	 */
   1759 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1760 	switch (memtype) {
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1763 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1764 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1765 		break;
   1766 	default:
   1767 		memh_valid = 0;
   1768 		break;
   1769 	}
   1770 
   1771 	if (memh_valid) {
   1772 		sc->sc_st = memt;
   1773 		sc->sc_sh = memh;
   1774 		sc->sc_ss = memsize;
   1775 	} else {
   1776 		aprint_error_dev(sc->sc_dev,
   1777 		    "unable to map device registers\n");
   1778 		return;
   1779 	}
   1780 
   1781 	/*
   1782 	 * In addition, i82544 and later support I/O mapped indirect
   1783 	 * register access.  It is not desirable (nor supported in
   1784 	 * this driver) to use it for normal operation, though it is
   1785 	 * required to work around bugs in some chip versions.
   1786 	 */
   1787 	if (sc->sc_type >= WM_T_82544) {
   1788 		/* First we have to find the I/O BAR. */
   1789 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1790 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1791 			if (memtype == PCI_MAPREG_TYPE_IO)
   1792 				break;
   1793 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1794 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1795 				i += 4;	/* skip high bits, too */
   1796 		}
   1797 		if (i < PCI_MAPREG_END) {
   1798 			/*
   1799 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
   1800 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
   1801 			 * That is no problem, because newer chips don't have
   1802 			 * this bug.
   1803 			 *
   1804 			 * The i8254x apparently doesn't respond when the
   1805 			 * I/O BAR is 0, which looks somewhat like it hasn't
   1806 			 * been configured.
   1807 			 */
   1808 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1809 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1810 				aprint_error_dev(sc->sc_dev,
   1811 				    "WARNING: I/O BAR at zero.\n");
   1812 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1813 					0, &sc->sc_iot, &sc->sc_ioh,
   1814 					NULL, &sc->sc_ios) == 0) {
   1815 				sc->sc_flags |= WM_F_IOH_VALID;
   1816 			} else {
   1817 				aprint_error_dev(sc->sc_dev,
   1818 				    "WARNING: unable to map I/O space\n");
   1819 			}
   1820 		}
   1821 
   1822 	}
   1823 
   1824 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1825 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1826 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1827 	if (sc->sc_type < WM_T_82542_2_1)
   1828 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1829 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1830 
   1831 	/* power up chip */
   1832 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1833 	    NULL)) && error != EOPNOTSUPP) {
   1834 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1835 		return;
   1836 	}
   1837 
   1838 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1839 
   1840 	/* Allocation settings */
   1841 	max_type = PCI_INTR_TYPE_MSIX;
   1842 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1843 	counts[PCI_INTR_TYPE_MSI] = 1;
   1844 	counts[PCI_INTR_TYPE_INTX] = 1;
   1845 	/* overridden by disable flags */
   1846 	if (wm_disable_msi != 0) {
   1847 		counts[PCI_INTR_TYPE_MSI] = 0;
   1848 		if (wm_disable_msix != 0) {
   1849 			max_type = PCI_INTR_TYPE_INTX;
   1850 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1851 		}
   1852 	} else if (wm_disable_msix != 0) {
   1853 		max_type = PCI_INTR_TYPE_MSI;
   1854 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1855 	}
   1856 
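        	/*
        	 * Interrupt allocation falls back from MSI-X to MSI to
        	 * INTx: when an establish below fails, the allocated
        	 * vectors are released, max_type is lowered and we retry
        	 * from here.
        	 */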
   1857 alloc_retry:
   1858 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1859 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1860 		return;
   1861 	}
   1862 
   1863 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1864 		error = wm_setup_msix(sc);
   1865 		if (error) {
   1866 			pci_intr_release(pc, sc->sc_intrs,
   1867 			    counts[PCI_INTR_TYPE_MSIX]);
   1868 
   1869 			/* Setup for MSI: Disable MSI-X */
   1870 			max_type = PCI_INTR_TYPE_MSI;
   1871 			counts[PCI_INTR_TYPE_MSI] = 1;
   1872 			counts[PCI_INTR_TYPE_INTX] = 1;
   1873 			goto alloc_retry;
   1874 		}
   1875 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1876 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1877 		error = wm_setup_legacy(sc);
   1878 		if (error) {
   1879 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1880 			    counts[PCI_INTR_TYPE_MSI]);
   1881 
   1882 			/* The next try is for INTx: Disable MSI */
   1883 			max_type = PCI_INTR_TYPE_INTX;
   1884 			counts[PCI_INTR_TYPE_INTX] = 1;
   1885 			goto alloc_retry;
   1886 		}
   1887 	} else {
   1888 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1889 		error = wm_setup_legacy(sc);
   1890 		if (error) {
   1891 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1892 			    counts[PCI_INTR_TYPE_INTX]);
   1893 			return;
   1894 		}
   1895 	}
   1896 
   1897 	/*
   1898 	 * Check the function ID (unit number of the chip).
   1899 	 */
   1900 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1901 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1902 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1903 	    || (sc->sc_type == WM_T_82580)
   1904 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1905 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1906 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1907 	else
   1908 		sc->sc_funcid = 0;
   1909 
   1910 	/*
   1911 	 * Determine a few things about the bus we're connected to.
   1912 	 */
   1913 	if (sc->sc_type < WM_T_82543) {
   1914 		/* We don't really know the bus characteristics here. */
   1915 		sc->sc_bus_speed = 33;
   1916 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1917 		/*
   1918 		 * CSA (Communication Streaming Architecture) is about as
   1919 		 * fast as a 32-bit 66MHz PCI bus.
   1920 		 */
   1921 		sc->sc_flags |= WM_F_CSA;
   1922 		sc->sc_bus_speed = 66;
   1923 		aprint_verbose_dev(sc->sc_dev,
   1924 		    "Communication Streaming Architecture\n");
   1925 		if (sc->sc_type == WM_T_82547) {
   1926 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1927 			callout_setfunc(&sc->sc_txfifo_ch,
   1928 					wm_82547_txfifo_stall, sc);
   1929 			aprint_verbose_dev(sc->sc_dev,
   1930 			    "using 82547 Tx FIFO stall work-around\n");
   1931 		}
   1932 	} else if (sc->sc_type >= WM_T_82571) {
   1933 		sc->sc_flags |= WM_F_PCIE;
   1934 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1935 		    && (sc->sc_type != WM_T_ICH10)
   1936 		    && (sc->sc_type != WM_T_PCH)
   1937 		    && (sc->sc_type != WM_T_PCH2)
   1938 		    && (sc->sc_type != WM_T_PCH_LPT)
   1939 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1940 			/* ICH* and PCH* have no PCIe capability registers */
   1941 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1942 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1943 				NULL) == 0)
   1944 				aprint_error_dev(sc->sc_dev,
   1945 				    "unable to find PCIe capability\n");
   1946 		}
   1947 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1948 	} else {
   1949 		reg = CSR_READ(sc, WMREG_STATUS);
   1950 		if (reg & STATUS_BUS64)
   1951 			sc->sc_flags |= WM_F_BUS64;
   1952 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1953 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1954 
   1955 			sc->sc_flags |= WM_F_PCIX;
   1956 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1957 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIX capability\n");
   1960 			else if (sc->sc_type != WM_T_82545_3 &&
   1961 				 sc->sc_type != WM_T_82546_3) {
   1962 				/*
   1963 				 * Work around a problem caused by the BIOS
   1964 				 * setting the max memory read byte count
   1965 				 * incorrectly.
   1966 				 */
   1967 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1968 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1969 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1970 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1971 
   1972 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1973 				    PCIX_CMD_BYTECNT_SHIFT;
   1974 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1975 				    PCIX_STATUS_MAXB_SHIFT;
   1976 				if (bytecnt > maxb) {
   1977 					aprint_verbose_dev(sc->sc_dev,
   1978 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1979 					    512 << bytecnt, 512 << maxb);
   1980 					pcix_cmd = (pcix_cmd &
   1981 					    ~PCIX_CMD_BYTECNT_MASK) |
   1982 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1983 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1984 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1985 					    pcix_cmd);
   1986 				}
   1987 			}
   1988 		}
   1989 		/*
   1990 		 * The quad port adapter is special; it has a PCIX-PCIX
   1991 		 * bridge on the board, and can run the secondary bus at
   1992 		 * a higher speed.
   1993 		 */
   1994 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1995 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1996 								      : 66;
   1997 		} else if (sc->sc_flags & WM_F_PCIX) {
   1998 			switch (reg & STATUS_PCIXSPD_MASK) {
   1999 			case STATUS_PCIXSPD_50_66:
   2000 				sc->sc_bus_speed = 66;
   2001 				break;
   2002 			case STATUS_PCIXSPD_66_100:
   2003 				sc->sc_bus_speed = 100;
   2004 				break;
   2005 			case STATUS_PCIXSPD_100_133:
   2006 				sc->sc_bus_speed = 133;
   2007 				break;
   2008 			default:
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2011 				    reg & STATUS_PCIXSPD_MASK);
   2012 				sc->sc_bus_speed = 66;
   2013 				break;
   2014 			}
   2015 		} else
   2016 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2017 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2018 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2019 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2020 	}
   2021 
   2022 	/* clear interesting stat counters */
   2023 	CSR_READ(sc, WMREG_COLC);
   2024 	CSR_READ(sc, WMREG_RXERRC);
   2025 
   2026 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2027 	    || (sc->sc_type >= WM_T_ICH8))
   2028 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2029 	if (sc->sc_type >= WM_T_ICH8)
   2030 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2031 
   2032 	/* Set NVM type/size and PHY/NVM semaphore functions */
   2033 	switch (sc->sc_type) {
   2034 	case WM_T_82542_2_0:
   2035 	case WM_T_82542_2_1:
   2036 	case WM_T_82543:
   2037 	case WM_T_82544:
   2038 		/* Microwire */
   2039 		sc->sc_nvm_wordsize = 64;
   2040 		sc->sc_nvm_addrbits = 6;
   2041 		break;
   2042 	case WM_T_82540:
   2043 	case WM_T_82545:
   2044 	case WM_T_82545_3:
   2045 	case WM_T_82546:
   2046 	case WM_T_82546_3:
   2047 		/* Microwire */
   2048 		reg = CSR_READ(sc, WMREG_EECD);
   2049 		if (reg & EECD_EE_SIZE) {
   2050 			sc->sc_nvm_wordsize = 256;
   2051 			sc->sc_nvm_addrbits = 8;
   2052 		} else {
   2053 			sc->sc_nvm_wordsize = 64;
   2054 			sc->sc_nvm_addrbits = 6;
   2055 		}
   2056 		sc->sc_flags |= WM_F_LOCK_EECD;
   2057 		break;
   2058 	case WM_T_82541:
   2059 	case WM_T_82541_2:
   2060 	case WM_T_82547:
   2061 	case WM_T_82547_2:
   2062 		sc->sc_flags |= WM_F_LOCK_EECD;
   2063 		reg = CSR_READ(sc, WMREG_EECD);
   2064 		if (reg & EECD_EE_TYPE) {
   2065 			/* SPI */
   2066 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2067 			wm_nvm_set_addrbits_size_eecd(sc);
   2068 		} else {
   2069 			/* Microwire */
   2070 			if ((reg & EECD_EE_ABITS) != 0) {
   2071 				sc->sc_nvm_wordsize = 256;
   2072 				sc->sc_nvm_addrbits = 8;
   2073 			} else {
   2074 				sc->sc_nvm_wordsize = 64;
   2075 				sc->sc_nvm_addrbits = 6;
   2076 			}
   2077 		}
   2078 		break;
   2079 	case WM_T_82571:
   2080 	case WM_T_82572:
   2081 		/* SPI */
   2082 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2083 		wm_nvm_set_addrbits_size_eecd(sc);
   2084 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2085 		sc->phy.acquire = wm_get_swsm_semaphore;
   2086 		sc->phy.release = wm_put_swsm_semaphore;
   2087 		break;
   2088 	case WM_T_82573:
   2089 	case WM_T_82574:
   2090 	case WM_T_82583:
   2091 		if (sc->sc_type == WM_T_82573) {
   2092 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2093 			sc->phy.acquire = wm_get_swsm_semaphore;
   2094 			sc->phy.release = wm_put_swsm_semaphore;
   2095 		} else {
   2096 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2097 			/* Both PHY and NVM use the same semaphore. */
   2098 			sc->phy.acquire
   2099 			    = wm_get_swfwhw_semaphore;
   2100 			sc->phy.release
   2101 			    = wm_put_swfwhw_semaphore;
   2102 		}
   2103 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2104 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2105 			sc->sc_nvm_wordsize = 2048;
   2106 		} else {
   2107 			/* SPI */
   2108 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2109 			wm_nvm_set_addrbits_size_eecd(sc);
   2110 		}
   2111 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2112 		break;
   2113 	case WM_T_82575:
   2114 	case WM_T_82576:
   2115 	case WM_T_82580:
   2116 	case WM_T_I350:
   2117 	case WM_T_I354:
   2118 	case WM_T_80003:
   2119 		/* SPI */
   2120 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2121 		wm_nvm_set_addrbits_size_eecd(sc);
   2122 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2123 		    | WM_F_LOCK_SWSM;
   2124 		sc->phy.acquire = wm_get_phy_82575;
   2125 		sc->phy.release = wm_put_phy_82575;
   2126 		break;
   2127 	case WM_T_ICH8:
   2128 	case WM_T_ICH9:
   2129 	case WM_T_ICH10:
   2130 	case WM_T_PCH:
   2131 	case WM_T_PCH2:
   2132 	case WM_T_PCH_LPT:
   2133 		/* FLASH */
   2134 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2135 		sc->sc_nvm_wordsize = 2048;
   2136 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2137 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2138 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2139 			aprint_error_dev(sc->sc_dev,
   2140 			    "can't map FLASH registers\n");
   2141 			goto out;
   2142 		}
   2143 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2144 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2145 		    ICH_FLASH_SECTOR_SIZE;
   2146 		sc->sc_ich8_flash_bank_size =
   2147 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2148 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2149 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2150 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2151 		sc->sc_flashreg_offset = 0;
   2152 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2153 		sc->phy.release = wm_put_swflag_ich8lan;
   2154 		break;
   2155 	case WM_T_PCH_SPT:
   2156 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2157 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2158 		sc->sc_flasht = sc->sc_st;
   2159 		sc->sc_flashh = sc->sc_sh;
   2160 		sc->sc_ich8_flash_base = 0;
   2161 		sc->sc_nvm_wordsize =
   2162 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2163 			* NVM_SIZE_MULTIPLIER;
   2164 		/* This is the size in bytes; we want words */
   2165 		sc->sc_nvm_wordsize /= 2;
   2166 		/* assume 2 banks */
   2167 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2168 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2169 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2170 		sc->phy.release = wm_put_swflag_ich8lan;
   2171 		break;
   2172 	case WM_T_I210:
   2173 	case WM_T_I211:
   2174 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2177 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2178 		} else {
   2179 			sc->sc_nvm_wordsize = INVM_SIZE;
   2180 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2181 		}
   2182 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2183 		sc->phy.acquire = wm_get_phy_82575;
   2184 		sc->phy.release = wm_put_phy_82575;
   2185 		break;
   2186 	default:
   2187 		break;
   2188 	}
   2189 
   2190 	/* Reset the chip to a known state. */
   2191 	wm_reset(sc);
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
   2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
   2231 		/*
   2232 		 * Validate again, because some PCI-e parts fail the first
   2233 		 * check due to the link being in a sleep state.
   2234 		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	/* Set device properties (macflags) */
   2240 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2241 
   2242 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2243 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2244 	else {
   2245 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2246 		    sc->sc_nvm_wordsize);
   2247 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2248 			aprint_verbose("iNVM");
   2249 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2250 			aprint_verbose("FLASH(HW)");
   2251 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2252 			aprint_verbose("FLASH");
   2253 		else {
   2254 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2255 				eetype = "SPI";
   2256 			else
   2257 				eetype = "MicroWire";
   2258 			aprint_verbose("(%d address bits) %s EEPROM",
   2259 			    sc->sc_nvm_addrbits, eetype);
   2260 		}
   2261 	}
   2262 	wm_nvm_version(sc);
   2263 	aprint_verbose("\n");
   2264 
   2265 	/* Check for I21[01] PLL workaround */
   2266 	if (sc->sc_type == WM_T_I210)
   2267 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2268 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2269 		/* NVM image release 3.25 has a workaround */
   2270 		if ((sc->sc_nvm_ver_major < 3)
   2271 		    || ((sc->sc_nvm_ver_major == 3)
   2272 			&& (sc->sc_nvm_ver_minor < 25))) {
   2273 			aprint_verbose_dev(sc->sc_dev,
   2274 			    "ROM image version %d.%d is older than 3.25\n",
   2275 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2276 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2277 		}
   2278 	}
   2279 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2280 		wm_pll_workaround_i210(sc);
   2281 
   2282 	wm_get_wakeup(sc);
   2283 
   2284 	/* Non-AMT based hardware can now take control from firmware */
   2285 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2286 		wm_get_hw_control(sc);
   2287 
   2288 	/*
   2289 	 * Read the Ethernet address from the EEPROM, if not first found
   2290 	 * in device properties.
   2291 	 */
   2292 	ea = prop_dictionary_get(dict, "mac-address");
   2293 	if (ea != NULL) {
   2294 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2295 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2296 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2297 	} else {
   2298 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2299 			aprint_error_dev(sc->sc_dev,
   2300 			    "unable to read Ethernet address\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2306 	    ether_sprintf(enaddr));
   2307 
   2308 	/*
   2309 	 * Read the config info from the EEPROM, and set up various
   2310 	 * bits in the control registers based on their contents.
   2311 	 */
   2312 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2313 	if (pn != NULL) {
   2314 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2315 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2316 	} else {
   2317 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2318 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2319 			goto out;
   2320 		}
   2321 	}
   2322 
   2323 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2324 	if (pn != NULL) {
   2325 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2326 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2327 	} else {
   2328 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2329 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2330 			goto out;
   2331 		}
   2332 	}
   2333 
   2334 	/* check for WM_F_WOL */
   2335 	switch (sc->sc_type) {
   2336 	case WM_T_82542_2_0:
   2337 	case WM_T_82542_2_1:
   2338 	case WM_T_82543:
   2339 		/* dummy? */
   2340 		eeprom_data = 0;
   2341 		apme_mask = NVM_CFG3_APME;
   2342 		break;
   2343 	case WM_T_82544:
   2344 		apme_mask = NVM_CFG2_82544_APM_EN;
   2345 		eeprom_data = cfg2;
   2346 		break;
   2347 	case WM_T_82546:
   2348 	case WM_T_82546_3:
   2349 	case WM_T_82571:
   2350 	case WM_T_82572:
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 	case WM_T_80003:
   2355 	default:
   2356 		apme_mask = NVM_CFG3_APME;
   2357 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2358 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2359 		break;
   2360 	case WM_T_82575:
   2361 	case WM_T_82576:
   2362 	case WM_T_82580:
   2363 	case WM_T_I350:
   2364 	case WM_T_I354: /* XXX ok? */
   2365 	case WM_T_ICH8:
   2366 	case WM_T_ICH9:
   2367 	case WM_T_ICH10:
   2368 	case WM_T_PCH:
   2369 	case WM_T_PCH2:
   2370 	case WM_T_PCH_LPT:
   2371 	case WM_T_PCH_SPT:
   2372 		/* XXX The funcid should be checked on some devices */
   2373 		apme_mask = WUC_APME;
   2374 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2375 		break;
   2376 	}
   2377 
   2378 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2379 	if ((eeprom_data & apme_mask) != 0)
   2380 		sc->sc_flags |= WM_F_WOL;
   2381 
   2382 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2383 		/* Check NVM for autonegotiation */
   2384 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2385 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2386 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2387 		}
   2388 	}
   2389 
   2390 	/*
   2391 	 * XXX need special handling for some multiple-port cards
   2392 	 * to disable a particular port.
   2393 	 */
   2394 
   2395 	if (sc->sc_type >= WM_T_82544) {
   2396 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2397 		if (pn != NULL) {
   2398 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2399 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2400 		} else {
   2401 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2402 				aprint_error_dev(sc->sc_dev,
   2403 				    "unable to read SWDPIN\n");
   2404 				goto out;
   2405 			}
   2406 		}
   2407 	}
   2408 
   2409 	if (cfg1 & NVM_CFG1_ILOS)
   2410 		sc->sc_ctrl |= CTRL_ILOS;
   2411 
   2412 	/*
   2413 	 * XXX
   2414 	 * This code isn't correct because pin 2 and 3 are located
   2415 	 * This code isn't correct because pins 2 and 3 are located
   2416 	 * at different positions on newer chips. Check all datasheets.
   2417 	 *
   2418 	 * Until this problem is resolved, only do this on chips <= 82580.
   2419 	if (sc->sc_type <= WM_T_82580) {
   2420 		if (sc->sc_type >= WM_T_82544) {
   2421 			sc->sc_ctrl |=
   2422 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2423 			    CTRL_SWDPIO_SHIFT;
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPINS_SHIFT;
   2427 		} else {
   2428 			sc->sc_ctrl |=
   2429 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2430 			    CTRL_SWDPIO_SHIFT;
   2431 		}
   2432 	}
   2433 
   2434 	/* XXX For other than 82580? */
   2435 	if (sc->sc_type == WM_T_82580) {
   2436 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2437 		if (nvmword & __BIT(13))
   2438 			sc->sc_ctrl |= CTRL_ILOS;
   2439 	}
   2440 
   2441 #if 0
   2442 	if (sc->sc_type >= WM_T_82544) {
   2443 		if (cfg1 & NVM_CFG1_IPS0)
   2444 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2445 		if (cfg1 & NVM_CFG1_IPS1)
   2446 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2447 		sc->sc_ctrl_ext |=
   2448 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2449 		    CTRL_EXT_SWDPIO_SHIFT;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPINS_SHIFT;
   2453 	} else {
   2454 		sc->sc_ctrl_ext |=
   2455 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2456 		    CTRL_EXT_SWDPIO_SHIFT;
   2457 	}
   2458 #endif
   2459 
   2460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2461 #if 0
   2462 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2463 #endif
   2464 
   2465 	if (sc->sc_type == WM_T_PCH) {
   2466 		uint16_t val;
   2467 
   2468 		/* Save the NVM K1 bit setting */
   2469 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2470 
   2471 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2472 			sc->sc_nvm_k1_enabled = 1;
   2473 		else
   2474 			sc->sc_nvm_k1_enabled = 0;
   2475 	}
   2476 
   2477 	/*
   2478 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2479 	 * media structures accordingly.
   2480 	 */
   2481 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2482 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2483 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2484 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2485 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2486 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2487 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2488 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   2489 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
   2490 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
   2491 	    || (sc->sc_type == WM_T_I211)) {
   2492 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2493 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2494 		switch (link_mode) {
   2495 		case CTRL_EXT_LINK_MODE_1000KX:
   2496 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2497 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2498 			break;
   2499 		case CTRL_EXT_LINK_MODE_SGMII:
   2500 			if (wm_sgmii_uses_mdio(sc)) {
   2501 				aprint_verbose_dev(sc->sc_dev,
   2502 				    "SGMII(MDIO)\n");
   2503 				sc->sc_flags |= WM_F_SGMII;
   2504 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2505 				break;
   2506 			}
   2507 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2508 			/*FALLTHROUGH*/
   2509 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2510 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2511 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2512 				if (link_mode
   2513 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2514 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2515 					sc->sc_flags |= WM_F_SGMII;
   2516 				} else {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2518 					aprint_verbose_dev(sc->sc_dev,
   2519 					    "SERDES\n");
   2520 				}
   2521 				break;
   2522 			}
   2523 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2524 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2525 
   2526 			/* Change current link mode setting */
   2527 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2528 			switch (sc->sc_mediatype) {
   2529 			case WM_MEDIATYPE_COPPER:
   2530 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2531 				break;
   2532 			case WM_MEDIATYPE_SERDES:
   2533 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2534 				break;
   2535 			default:
   2536 				break;
   2537 			}
   2538 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2539 			break;
   2540 		case CTRL_EXT_LINK_MODE_GMII:
   2541 		default:
   2542 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2543 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2544 			break;
   2545 		}
   2546 
   2548 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2549 			reg |= CTRL_EXT_I2C_ENA;
   2550 		else
   2551 			reg &= ~CTRL_EXT_I2C_ENA;
   2552 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2553 
   2554 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2555 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2556 		else
   2557 			wm_tbi_mediainit(sc);
   2558 	} else if (sc->sc_type < WM_T_82543 ||
   2559 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2560 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2561 			aprint_error_dev(sc->sc_dev,
   2562 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2563 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2564 		}
   2565 		wm_tbi_mediainit(sc);
   2566 	} else {
   2567 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2568 			aprint_error_dev(sc->sc_dev,
   2569 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2570 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2571 		}
   2572 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2573 	}
   2574 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2575 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2576 
   2577 	ifp = &sc->sc_ethercom.ec_if;
   2578 	xname = device_xname(sc->sc_dev);
   2579 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2580 	ifp->if_softc = sc;
   2581 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2582 #ifdef WM_MPSAFE
   2583 	ifp->if_extflags = IFEF_START_MPSAFE;
   2584 #endif
   2585 	ifp->if_ioctl = wm_ioctl;
   2586 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2587 		ifp->if_start = wm_nq_start;
   2588 		/*
   2589 		 * When the number of CPUs is one and the controller can use
   2590 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
   2591 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
   2592 		 * other for link status changes.
   2593 		 * In this situation, wm_nq_transmit() is disadvantageous
   2594 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2595 		 */
   2596 		if (wm_is_using_multiqueue(sc))
   2597 			ifp->if_transmit = wm_nq_transmit;
   2598 	} else {
   2599 		ifp->if_start = wm_start;
   2600 		/*
   2601 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2602 		 */
   2603 		if (wm_is_using_multiqueue(sc))
   2604 			ifp->if_transmit = wm_transmit;
   2605 	}
   2606 	ifp->if_watchdog = wm_watchdog;
   2607 	ifp->if_init = wm_init;
   2608 	ifp->if_stop = wm_stop;
   2609 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2610 	IFQ_SET_READY(&ifp->if_snd);
   2611 
   2612 	/* Check for jumbo frame */
   2613 	switch (sc->sc_type) {
   2614 	case WM_T_82573:
   2615 		/* XXX limited to 9234 if ASPM is disabled */
   2616 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2617 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2618 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2619 		break;
   2620 	case WM_T_82571:
   2621 	case WM_T_82572:
   2622 	case WM_T_82574:
   2623 	case WM_T_82575:
   2624 	case WM_T_82576:
   2625 	case WM_T_82580:
   2626 	case WM_T_I350:
   2627 	case WM_T_I354: /* XXX ok? */
   2628 	case WM_T_I210:
   2629 	case WM_T_I211:
   2630 	case WM_T_80003:
   2631 	case WM_T_ICH9:
   2632 	case WM_T_ICH10:
   2633 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2634 	case WM_T_PCH_LPT:
   2635 	case WM_T_PCH_SPT:
   2636 		/* XXX limited to 9234 */
   2637 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2638 		break;
   2639 	case WM_T_PCH:
   2640 		/* XXX limited to 4096 */
   2641 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2642 		break;
   2643 	case WM_T_82542_2_0:
   2644 	case WM_T_82542_2_1:
   2645 	case WM_T_82583:
   2646 	case WM_T_ICH8:
   2647 		/* No support for jumbo frame */
   2648 		break;
   2649 	default:
   2650 		/* ETHER_MAX_LEN_JUMBO */
   2651 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2652 		break;
   2653 	}
   2654 
   2655 	/* If we're an i82543 or greater, we can support VLANs. */
   2656 	if (sc->sc_type >= WM_T_82543)
   2657 		sc->sc_ethercom.ec_capabilities |=
   2658 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2659 
   2660 	/*
   2661 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2662 	 * on i82543 and later.
   2663 	 */
   2664 	if (sc->sc_type >= WM_T_82543) {
   2665 		ifp->if_capabilities |=
   2666 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2667 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2668 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2669 		    IFCAP_CSUM_TCPv6_Tx |
   2670 		    IFCAP_CSUM_UDPv6_Tx;
   2671 	}
   2672 
   2673 	/*
   2674 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2675 	 *
   2676 	 *	82541GI (8086:1076) ... no
   2677 	 *	82572EI (8086:10b9) ... yes
   2678 	 */
   2679 	if (sc->sc_type >= WM_T_82571) {
   2680 		ifp->if_capabilities |=
   2681 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2682 	}
   2683 
   2684 	/*
   2685 	 * If we're an i82544 or greater (except i82547), we can do
   2686 	 * TCP segmentation offload.
   2687 	 */
   2688 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2689 		ifp->if_capabilities |= IFCAP_TSOv4;
   2690 	}
   2691 
   2692 	if (sc->sc_type >= WM_T_82571) {
   2693 		ifp->if_capabilities |= IFCAP_TSOv6;
   2694 	}
   2695 
   2696 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2697 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2698 
   2699 #ifdef WM_MPSAFE
   2700 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2701 #else
   2702 	sc->sc_core_lock = NULL;
   2703 #endif
   2704 
   2705 	/* Attach the interface. */
   2706 	if_initialize(ifp);
   2707 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2708 	ether_ifattach(ifp, enaddr);
   2709 	if_register(ifp);
   2710 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2711 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2712 			  RND_FLAG_DEFAULT);
   2713 
   2714 #ifdef WM_EVENT_COUNTERS
   2715 	/* Attach event counters. */
   2716 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2717 	    NULL, xname, "linkintr");
   2718 
   2719 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2720 	    NULL, xname, "tx_xoff");
   2721 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2722 	    NULL, xname, "tx_xon");
   2723 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "rx_xoff");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "rx_xon");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_macctl");
   2729 #endif /* WM_EVENT_COUNTERS */
   2730 
   2731 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2732 		pmf_class_network_register(self, ifp);
   2733 	else
   2734 		aprint_error_dev(self, "couldn't establish power handler\n");
   2735 
   2736 	sc->sc_flags |= WM_F_ATTACHED;
   2737  out:
   2738 	return;
   2739 }
   2740 
   2741 /* The detach function (ca_detach) */
   2742 static int
   2743 wm_detach(device_t self, int flags __unused)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2747 	int i;
   2748 
   2749 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2750 		return 0;
   2751 
   2752 	/* Stop the interface. Callouts are stopped in it. */
   2753 	wm_stop(ifp, 1);
   2754 
   2755 	pmf_device_deregister(self);
   2756 
   2757 #ifdef WM_EVENT_COUNTERS
   2758 	evcnt_detach(&sc->sc_ev_linkintr);
   2759 
   2760 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2761 	evcnt_detach(&sc->sc_ev_tx_xon);
   2762 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2763 	evcnt_detach(&sc->sc_ev_rx_xon);
   2764 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2765 #endif /* WM_EVENT_COUNTERS */
   2766 
   2767 	/* Tell the firmware about the release */
   2768 	WM_CORE_LOCK(sc);
   2769 	wm_release_manageability(sc);
   2770 	wm_release_hw_control(sc);
   2771 	wm_enable_wakeup(sc);
   2772 	WM_CORE_UNLOCK(sc);
   2773 
   2774 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2775 
   2776 	/* Delete all remaining media. */
   2777 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2778 
   2779 	ether_ifdetach(ifp);
   2780 	if_detach(ifp);
   2781 	if_percpuq_destroy(sc->sc_ipq);
   2782 
   2783 	/* Unload RX dmamaps and free mbufs */
   2784 	for (i = 0; i < sc->sc_nqueues; i++) {
   2785 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2786 		mutex_enter(rxq->rxq_lock);
   2787 		wm_rxdrain(rxq);
   2788 		mutex_exit(rxq->rxq_lock);
   2789 	}
    2790 	/* All rxq locks must have been released at this point */
   2791 
   2792 	/* Disestablish the interrupt handler */
   2793 	for (i = 0; i < sc->sc_nintrs; i++) {
   2794 		if (sc->sc_ihs[i] != NULL) {
   2795 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2796 			sc->sc_ihs[i] = NULL;
   2797 		}
   2798 	}
   2799 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2800 
   2801 	wm_free_txrx_queues(sc);
   2802 
   2803 	/* Unmap the registers */
   2804 	if (sc->sc_ss) {
   2805 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2806 		sc->sc_ss = 0;
   2807 	}
   2808 	if (sc->sc_ios) {
   2809 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2810 		sc->sc_ios = 0;
   2811 	}
   2812 	if (sc->sc_flashs) {
   2813 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2814 		sc->sc_flashs = 0;
   2815 	}
   2816 
   2817 	if (sc->sc_core_lock)
   2818 		mutex_obj_free(sc->sc_core_lock);
   2819 	if (sc->sc_ich_phymtx)
   2820 		mutex_obj_free(sc->sc_ich_phymtx);
   2821 	if (sc->sc_ich_nvmmtx)
   2822 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2823 
   2824 	return 0;
   2825 }
   2826 
   2827 static bool
   2828 wm_suspend(device_t self, const pmf_qual_t *qual)
   2829 {
   2830 	struct wm_softc *sc = device_private(self);
   2831 
   2832 	wm_release_manageability(sc);
   2833 	wm_release_hw_control(sc);
   2834 	wm_enable_wakeup(sc);
   2835 
   2836 	return true;
   2837 }
   2838 
   2839 static bool
   2840 wm_resume(device_t self, const pmf_qual_t *qual)
   2841 {
   2842 	struct wm_softc *sc = device_private(self);
   2843 
   2844 	wm_init_manageability(sc);
   2845 
   2846 	return true;
   2847 }
   2848 
   2849 /*
   2850  * wm_watchdog:		[ifnet interface function]
   2851  *
   2852  *	Watchdog timer handler.
   2853  */
   2854 static void
   2855 wm_watchdog(struct ifnet *ifp)
   2856 {
   2857 	int qid;
   2858 	struct wm_softc *sc = ifp->if_softc;
   2859 
   2860 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2861 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2862 
   2863 		wm_watchdog_txq(ifp, txq);
   2864 	}
   2865 
   2866 	/* Reset the interface. */
   2867 	(void) wm_init(ifp);
   2868 
   2869 	/*
    2870 	 * Some upper layer processing, e.g. ALTQ or a single-CPU
    2871 	 * system, may still call ifp->if_start() directly.
   2872 	 */
   2873 	/* Try to get more packets going. */
   2874 	ifp->if_start(ifp);
   2875 }
   2876 
   2877 static void
   2878 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2879 {
   2880 	struct wm_softc *sc = ifp->if_softc;
   2881 
   2882 	/*
   2883 	 * Since we're using delayed interrupts, sweep up
   2884 	 * before we report an error.
   2885 	 */
   2886 	mutex_enter(txq->txq_lock);
   2887 	wm_txeof(sc, txq);
   2888 	mutex_exit(txq->txq_lock);
   2889 
   2890 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2891 #ifdef WM_DEBUG
   2892 		int i, j;
   2893 		struct wm_txsoft *txs;
   2894 #endif
   2895 		log(LOG_ERR,
   2896 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2897 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2898 		    txq->txq_next);
   2899 		ifp->if_oerrors++;
   2900 #ifdef WM_DEBUG
    2901 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2902 		    i = WM_NEXTTXS(txq, i)) {
    2903 			txs = &txq->txq_soft[i];
    2904 			printf("txs %d tx %d -> %d\n",
    2905 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2906 			for (j = txs->txs_firstdesc; ;
    2907 			    j = WM_NEXTTX(txq, j)) {
    2908 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2909 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2910 				printf("\t %#08x%08x\n",
    2911 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2912 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2913 				if (j == txs->txs_lastdesc)
    2914 					break;
    2915 			}
    2916 		}
   2917 #endif
   2918 	}
   2919 }
   2920 
   2921 /*
   2922  * wm_tick:
   2923  *
   2924  *	One second timer, used to check link status, sweep up
   2925  *	completed transmit jobs, etc.
   2926  */
   2927 static void
   2928 wm_tick(void *arg)
   2929 {
   2930 	struct wm_softc *sc = arg;
   2931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2932 #ifndef WM_MPSAFE
   2933 	int s = splnet();
   2934 #endif
   2935 
   2936 	WM_CORE_LOCK(sc);
   2937 
   2938 	if (sc->sc_core_stopping)
   2939 		goto out;
   2940 
   2941 	if (sc->sc_type >= WM_T_82542_2_1) {
   2942 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2943 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2947 	}
   2948 
   2949 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2950 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2951 	    + CSR_READ(sc, WMREG_CRCERRS)
   2952 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2953 	    + CSR_READ(sc, WMREG_SYMERRC)
   2954 	    + CSR_READ(sc, WMREG_RXERRC)
   2955 	    + CSR_READ(sc, WMREG_SEC)
   2956 	    + CSR_READ(sc, WMREG_CEXTERR)
   2957 	    + CSR_READ(sc, WMREG_RLEC);
   2958 	/*
    2959 	 * WMREG_RNBC is incremented when there are no available buffers
    2960 	 * in host memory. It does not count dropped packets, because the
    2961 	 * Ethernet controller can still receive packets in that case as
    2962 	 * long as there is space in the PHY's FIFO.
    2963 	 *
    2964 	 * To know the WMREG_RNBC count, use your own EVCNT instead of
    2965 	 * if_iqdrops.
   2966 	 */
   2967 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2968 
   2969 	if (sc->sc_flags & WM_F_HAS_MII)
   2970 		mii_tick(&sc->sc_mii);
   2971 	else if ((sc->sc_type >= WM_T_82575)
   2972 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2973 		wm_serdes_tick(sc);
   2974 	else
   2975 		wm_tbi_tick(sc);
   2976 
   2977 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2978 out:
   2979 	WM_CORE_UNLOCK(sc);
   2980 #ifndef WM_MPSAFE
   2981 	splx(s);
   2982 #endif
   2983 }
   2984 
   2985 static int
   2986 wm_ifflags_cb(struct ethercom *ec)
   2987 {
   2988 	struct ifnet *ifp = &ec->ec_if;
   2989 	struct wm_softc *sc = ifp->if_softc;
   2990 	int rc = 0;
   2991 
   2992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2993 		device_xname(sc->sc_dev), __func__));
   2994 
   2995 	WM_CORE_LOCK(sc);
   2996 
   2997 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2998 	sc->sc_if_flags = ifp->if_flags;
   2999 
   3000 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3001 		rc = ENETRESET;
   3002 		goto out;
   3003 	}
   3004 
   3005 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3006 		wm_set_filter(sc);
   3007 
   3008 	wm_set_vlan(sc);
   3009 
   3010 out:
   3011 	WM_CORE_UNLOCK(sc);
   3012 
   3013 	return rc;
   3014 }
   3015 
   3016 /*
   3017  * wm_ioctl:		[ifnet interface function]
   3018  *
   3019  *	Handle control requests from the operator.
   3020  */
   3021 static int
   3022 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct ifreq *ifr = (struct ifreq *) data;
   3026 	struct ifaddr *ifa = (struct ifaddr *)data;
   3027 	struct sockaddr_dl *sdl;
   3028 	int s, error;
   3029 
   3030 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3031 		device_xname(sc->sc_dev), __func__));
   3032 
   3033 #ifndef WM_MPSAFE
   3034 	s = splnet();
   3035 #endif
   3036 	switch (cmd) {
   3037 	case SIOCSIFMEDIA:
   3038 	case SIOCGIFMEDIA:
   3039 		WM_CORE_LOCK(sc);
   3040 		/* Flow control requires full-duplex mode. */
   3041 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3042 		    (ifr->ifr_media & IFM_FDX) == 0)
   3043 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3044 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3045 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3046 				/* We can do both TXPAUSE and RXPAUSE. */
   3047 				ifr->ifr_media |=
   3048 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3049 			}
   3050 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3051 		}
   3052 		WM_CORE_UNLOCK(sc);
   3053 #ifdef WM_MPSAFE
   3054 		s = splnet();
   3055 #endif
   3056 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3057 #ifdef WM_MPSAFE
   3058 		splx(s);
   3059 #endif
   3060 		break;
   3061 	case SIOCINITIFADDR:
   3062 		WM_CORE_LOCK(sc);
   3063 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3064 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3065 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3066 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3067 			/* unicast address is first multicast entry */
   3068 			wm_set_filter(sc);
   3069 			error = 0;
   3070 			WM_CORE_UNLOCK(sc);
   3071 			break;
   3072 		}
   3073 		WM_CORE_UNLOCK(sc);
   3074 		/*FALLTHROUGH*/
   3075 	default:
   3076 #ifdef WM_MPSAFE
   3077 		s = splnet();
   3078 #endif
   3079 		/* It may call wm_start, so unlock here */
   3080 		error = ether_ioctl(ifp, cmd, data);
   3081 #ifdef WM_MPSAFE
   3082 		splx(s);
   3083 #endif
   3084 		if (error != ENETRESET)
   3085 			break;
   3086 
   3087 		error = 0;
   3088 
   3089 		if (cmd == SIOCSIFCAP) {
   3090 			error = (*ifp->if_init)(ifp);
   3091 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3092 			;
   3093 		else if (ifp->if_flags & IFF_RUNNING) {
   3094 			/*
   3095 			 * Multicast list has changed; set the hardware filter
   3096 			 * accordingly.
   3097 			 */
   3098 			WM_CORE_LOCK(sc);
   3099 			wm_set_filter(sc);
   3100 			WM_CORE_UNLOCK(sc);
   3101 		}
   3102 		break;
   3103 	}
   3104 
   3105 #ifndef WM_MPSAFE
   3106 	splx(s);
   3107 #endif
   3108 	return error;
   3109 }
   3110 
   3111 /* MAC address related */
   3112 
   3113 /*
    3114  * Get the offset of the MAC address in the NVM and return it.
    3115  * If an error occurs, return offset 0 (the standard location).
   3116  */
   3117 static uint16_t
   3118 wm_check_alt_mac_addr(struct wm_softc *sc)
   3119 {
   3120 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3121 	uint16_t offset = NVM_OFF_MACADDR;
   3122 
   3123 	/* Try to read alternative MAC address pointer */
   3124 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3125 		return 0;
   3126 
    3127 	/* Check whether the pointer is valid. */
   3128 	if ((offset == 0x0000) || (offset == 0xffff))
   3129 		return 0;
   3130 
   3131 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3132 	/*
    3133 	 * Check whether the alternative MAC address is valid.
    3134 	 * Some cards have a non-0xffff pointer but don't actually use
    3135 	 * an alternative MAC address.
    3136 	 *
    3137 	 * To tell, check that the multicast (group) bit is not set.
   3138 	 */
   3139 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3140 		if (((myea[0] & 0xff) & 0x01) == 0)
   3141 			return offset; /* Found */
   3142 
   3143 	/* Not found */
   3144 	return 0;
   3145 }
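
         /*
          * Illustration (derived from the code above, not from a datasheet):
          * each port's address occupies ETHER_ADDR_LEN / 2 = 3 NVM words, so
          * the alternative-address pointer is advanced by
          * NVM_OFF_MACADDR_82571(sc->sc_funcid) before the candidate address
          * is read and its multicast bit checked.
          */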
   3146 
   3147 static int
   3148 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3149 {
   3150 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3151 	uint16_t offset = NVM_OFF_MACADDR;
   3152 	int do_invert = 0;
   3153 
   3154 	switch (sc->sc_type) {
   3155 	case WM_T_82580:
   3156 	case WM_T_I350:
   3157 	case WM_T_I354:
   3158 		/* EEPROM Top Level Partitioning */
   3159 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3160 		break;
   3161 	case WM_T_82571:
   3162 	case WM_T_82575:
   3163 	case WM_T_82576:
   3164 	case WM_T_80003:
   3165 	case WM_T_I210:
   3166 	case WM_T_I211:
   3167 		offset = wm_check_alt_mac_addr(sc);
   3168 		if (offset == 0)
   3169 			if ((sc->sc_funcid & 0x01) == 1)
   3170 				do_invert = 1;
   3171 		break;
   3172 	default:
   3173 		if ((sc->sc_funcid & 0x01) == 1)
   3174 			do_invert = 1;
   3175 		break;
   3176 	}
   3177 
   3178 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3179 		goto bad;
   3180 
   3181 	enaddr[0] = myea[0] & 0xff;
   3182 	enaddr[1] = myea[0] >> 8;
   3183 	enaddr[2] = myea[1] & 0xff;
   3184 	enaddr[3] = myea[1] >> 8;
   3185 	enaddr[4] = myea[2] & 0xff;
   3186 	enaddr[5] = myea[2] >> 8;
   3187 
   3188 	/*
   3189 	 * Toggle the LSB of the MAC address on the second port
   3190 	 * of some dual port cards.
   3191 	 */
   3192 	if (do_invert != 0)
   3193 		enaddr[5] ^= 1;
   3194 
   3195 	return 0;
   3196 
   3197  bad:
   3198 	return -1;
   3199 }
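
         /*
          * A worked example of the unpacking above (illustration only): the
          * NVM words are little-endian with respect to the address octets,
          * so words 0x2211, 0x4433, 0x6655 yield the station address
          * 11:22:33:44:55:66; with do_invert set, the last octet would
          * become 0x67.
          */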
   3200 
   3201 /*
   3202  * wm_set_ral:
   3203  *
    3204  *	Set an entry in the receive address list.
   3205  */
   3206 static void
   3207 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3208 {
   3209 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3210 	uint32_t wlock_mac;
   3211 	int rv;
   3212 
   3213 	if (enaddr != NULL) {
   3214 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3215 		    (enaddr[3] << 24);
   3216 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3217 		ral_hi |= RAL_AV;
   3218 	} else {
   3219 		ral_lo = 0;
   3220 		ral_hi = 0;
   3221 	}
   3222 
   3223 	switch (sc->sc_type) {
   3224 	case WM_T_82542_2_0:
   3225 	case WM_T_82542_2_1:
   3226 	case WM_T_82543:
   3227 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3228 		CSR_WRITE_FLUSH(sc);
   3229 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3230 		CSR_WRITE_FLUSH(sc);
   3231 		break;
   3232 	case WM_T_PCH2:
   3233 	case WM_T_PCH_LPT:
   3234 	case WM_T_PCH_SPT:
   3235 		if (idx == 0) {
   3236 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3237 			CSR_WRITE_FLUSH(sc);
   3238 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3239 			CSR_WRITE_FLUSH(sc);
   3240 			return;
   3241 		}
   3242 		if (sc->sc_type != WM_T_PCH2) {
   3243 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3244 			    FWSM_WLOCK_MAC);
   3245 			addrl = WMREG_SHRAL(idx - 1);
   3246 			addrh = WMREG_SHRAH(idx - 1);
   3247 		} else {
   3248 			wlock_mac = 0;
   3249 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3250 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3251 		}
   3252 
   3253 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3254 			rv = wm_get_swflag_ich8lan(sc);
   3255 			if (rv != 0)
   3256 				return;
   3257 			CSR_WRITE(sc, addrl, ral_lo);
   3258 			CSR_WRITE_FLUSH(sc);
   3259 			CSR_WRITE(sc, addrh, ral_hi);
   3260 			CSR_WRITE_FLUSH(sc);
   3261 			wm_put_swflag_ich8lan(sc);
   3262 		}
   3263 
   3264 		break;
   3265 	default:
   3266 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3267 		CSR_WRITE_FLUSH(sc);
   3268 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3269 		CSR_WRITE_FLUSH(sc);
   3270 		break;
   3271 	}
   3272 }
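
         /*
          * For illustration (derived from the packing above): the address
          * 00:11:22:33:44:55 is programmed as ral_lo = 0x33221100 and
          * ral_hi = 0x00005544 | RAL_AV, i.e. the low register holds the
          * first four octets in little-endian order and the high register
          * holds the remaining two plus the Address Valid bit.
          */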
   3273 
   3274 /*
   3275  * wm_mchash:
   3276  *
   3277  *	Compute the hash of the multicast address for the 4096-bit
   3278  *	multicast filter.
   3279  */
   3280 static uint32_t
   3281 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3282 {
   3283 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3284 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3285 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3286 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3287 	uint32_t hash;
   3288 
   3289 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3290 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3291 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3292 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3293 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3294 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3295 		return (hash & 0x3ff);
   3296 	}
   3297 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3298 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3299 
   3300 	return (hash & 0xfff);
   3301 }
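
         /*
          * A worked example of the hash above, assuming sc_mchash_type == 0
          * on a non-ICH/PCH chip: for an address ending in ...:a4:5b,
          * hash = (0xa4 >> 4) | (0x5b << 4) = 0x5ba.  wm_set_filter() then
          * uses bits [11:5] (0x2d) to pick the MTA register and bits [4:0]
          * (0x1a) to pick the bit within it.
          */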
   3302 
   3303 /*
   3304  * wm_set_filter:
   3305  *
   3306  *	Set up the receive filter.
   3307  */
   3308 static void
   3309 wm_set_filter(struct wm_softc *sc)
   3310 {
   3311 	struct ethercom *ec = &sc->sc_ethercom;
   3312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3313 	struct ether_multi *enm;
   3314 	struct ether_multistep step;
   3315 	bus_addr_t mta_reg;
   3316 	uint32_t hash, reg, bit;
   3317 	int i, size, ralmax;
   3318 
   3319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3320 		device_xname(sc->sc_dev), __func__));
   3321 
   3322 	if (sc->sc_type >= WM_T_82544)
   3323 		mta_reg = WMREG_CORDOVA_MTA;
   3324 	else
   3325 		mta_reg = WMREG_MTA;
   3326 
   3327 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3328 
   3329 	if (ifp->if_flags & IFF_BROADCAST)
   3330 		sc->sc_rctl |= RCTL_BAM;
   3331 	if (ifp->if_flags & IFF_PROMISC) {
   3332 		sc->sc_rctl |= RCTL_UPE;
   3333 		goto allmulti;
   3334 	}
   3335 
   3336 	/*
   3337 	 * Set the station address in the first RAL slot, and
   3338 	 * clear the remaining slots.
   3339 	 */
   3340 	if (sc->sc_type == WM_T_ICH8)
    3341 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3342 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3343 	    || (sc->sc_type == WM_T_PCH))
   3344 		size = WM_RAL_TABSIZE_ICH8;
   3345 	else if (sc->sc_type == WM_T_PCH2)
   3346 		size = WM_RAL_TABSIZE_PCH2;
   3347 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3348 		size = WM_RAL_TABSIZE_PCH_LPT;
   3349 	else if (sc->sc_type == WM_T_82575)
   3350 		size = WM_RAL_TABSIZE_82575;
   3351 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3352 		size = WM_RAL_TABSIZE_82576;
   3353 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3354 		size = WM_RAL_TABSIZE_I350;
   3355 	else
   3356 		size = WM_RAL_TABSIZE;
   3357 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3358 
   3359 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3360 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3361 		switch (i) {
   3362 		case 0:
   3363 			/* We can use all entries */
   3364 			ralmax = size;
   3365 			break;
   3366 		case 1:
   3367 			/* Only RAR[0] */
   3368 			ralmax = 1;
   3369 			break;
   3370 		default:
   3371 			/* available SHRA + RAR[0] */
   3372 			ralmax = i + 1;
   3373 		}
   3374 	} else
   3375 		ralmax = size;
   3376 	for (i = 1; i < size; i++) {
   3377 		if (i < ralmax)
   3378 			wm_set_ral(sc, NULL, i);
   3379 	}
   3380 
   3381 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3382 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3383 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3384 	    || (sc->sc_type == WM_T_PCH_SPT))
   3385 		size = WM_ICH8_MC_TABSIZE;
   3386 	else
   3387 		size = WM_MC_TABSIZE;
   3388 	/* Clear out the multicast table. */
   3389 	for (i = 0; i < size; i++)
   3390 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3391 
   3392 	ETHER_LOCK(ec);
   3393 	ETHER_FIRST_MULTI(step, ec, enm);
   3394 	while (enm != NULL) {
   3395 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3396 			ETHER_UNLOCK(ec);
   3397 			/*
   3398 			 * We must listen to a range of multicast addresses.
   3399 			 * For now, just accept all multicasts, rather than
   3400 			 * trying to set only those filter bits needed to match
   3401 			 * the range.  (At this time, the only use of address
   3402 			 * ranges is for IP multicast routing, for which the
   3403 			 * range is big enough to require all bits set.)
   3404 			 */
   3405 			goto allmulti;
   3406 		}
   3407 
   3408 		hash = wm_mchash(sc, enm->enm_addrlo);
   3409 
   3410 		reg = (hash >> 5);
   3411 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3412 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3413 		    || (sc->sc_type == WM_T_PCH2)
   3414 		    || (sc->sc_type == WM_T_PCH_LPT)
   3415 		    || (sc->sc_type == WM_T_PCH_SPT))
   3416 			reg &= 0x1f;
   3417 		else
   3418 			reg &= 0x7f;
   3419 		bit = hash & 0x1f;
   3420 
   3421 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3422 		hash |= 1U << bit;
   3423 
   3424 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3425 			/*
    3426 			 * 82544 Errata 9: Certain registers (FCAH, MTA and
    3427 			 * VFTA) cannot be written with particular alignments
    3428 			 * in PCI-X bus operation.
   3429 			 */
   3430 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3431 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3432 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3433 		} else
   3434 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3435 
   3436 		ETHER_NEXT_MULTI(step, enm);
   3437 	}
   3438 	ETHER_UNLOCK(ec);
   3439 
   3440 	ifp->if_flags &= ~IFF_ALLMULTI;
   3441 	goto setit;
   3442 
   3443  allmulti:
   3444 	ifp->if_flags |= IFF_ALLMULTI;
   3445 	sc->sc_rctl |= RCTL_MPE;
   3446 
   3447  setit:
   3448 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3449 }
   3450 
   3451 /* Reset and init related */
   3452 
   3453 static void
   3454 wm_set_vlan(struct wm_softc *sc)
   3455 {
   3456 
   3457 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3458 		device_xname(sc->sc_dev), __func__));
   3459 
   3460 	/* Deal with VLAN enables. */
   3461 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3462 		sc->sc_ctrl |= CTRL_VME;
   3463 	else
   3464 		sc->sc_ctrl &= ~CTRL_VME;
   3465 
   3466 	/* Write the control registers. */
   3467 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3468 }
   3469 
   3470 static void
   3471 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3472 {
   3473 	uint32_t gcr;
   3474 	pcireg_t ctrl2;
   3475 
   3476 	gcr = CSR_READ(sc, WMREG_GCR);
   3477 
    3478 	/* Only take action if the timeout value is at its default of 0 */
   3479 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3480 		goto out;
   3481 
   3482 	if ((gcr & GCR_CAP_VER2) == 0) {
   3483 		gcr |= GCR_CMPL_TMOUT_10MS;
   3484 		goto out;
   3485 	}
   3486 
   3487 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3488 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3489 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3490 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3491 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3492 
   3493 out:
   3494 	/* Disable completion timeout resend */
   3495 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3496 
   3497 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3498 }
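
         /*
          * In short (an interpretation of the code above): if the GCR
          * completion timeout field is still at its power-on default of 0,
          * program a 10ms timeout through GCR on devices without the
          * version-2 capability, or a 16ms timeout through the PCIe Device
          * Control 2 register otherwise; completion timeout resend is
          * disabled in either case.
          */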
   3499 
   3500 void
   3501 wm_get_auto_rd_done(struct wm_softc *sc)
   3502 {
   3503 	int i;
   3504 
   3505 	/* wait for eeprom to reload */
   3506 	switch (sc->sc_type) {
   3507 	case WM_T_82571:
   3508 	case WM_T_82572:
   3509 	case WM_T_82573:
   3510 	case WM_T_82574:
   3511 	case WM_T_82583:
   3512 	case WM_T_82575:
   3513 	case WM_T_82576:
   3514 	case WM_T_82580:
   3515 	case WM_T_I350:
   3516 	case WM_T_I354:
   3517 	case WM_T_I210:
   3518 	case WM_T_I211:
   3519 	case WM_T_80003:
   3520 	case WM_T_ICH8:
   3521 	case WM_T_ICH9:
   3522 		for (i = 0; i < 10; i++) {
   3523 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3524 				break;
   3525 			delay(1000);
   3526 		}
   3527 		if (i == 10) {
   3528 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3529 			    "complete\n", device_xname(sc->sc_dev));
   3530 		}
   3531 		break;
   3532 	default:
   3533 		break;
   3534 	}
   3535 }
   3536 
   3537 void
   3538 wm_lan_init_done(struct wm_softc *sc)
   3539 {
   3540 	uint32_t reg = 0;
   3541 	int i;
   3542 
   3543 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3544 		device_xname(sc->sc_dev), __func__));
   3545 
   3546 	/* Wait for eeprom to reload */
   3547 	switch (sc->sc_type) {
   3548 	case WM_T_ICH10:
   3549 	case WM_T_PCH:
   3550 	case WM_T_PCH2:
   3551 	case WM_T_PCH_LPT:
   3552 	case WM_T_PCH_SPT:
   3553 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3554 			reg = CSR_READ(sc, WMREG_STATUS);
   3555 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3556 				break;
   3557 			delay(100);
   3558 		}
   3559 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3560 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3561 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3562 		}
   3563 		break;
   3564 	default:
   3565 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3566 		    __func__);
   3567 		break;
   3568 	}
   3569 
   3570 	reg &= ~STATUS_LAN_INIT_DONE;
   3571 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3572 }
   3573 
   3574 void
   3575 wm_get_cfg_done(struct wm_softc *sc)
   3576 {
   3577 	int mask;
   3578 	uint32_t reg;
   3579 	int i;
   3580 
   3581 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3582 		device_xname(sc->sc_dev), __func__));
   3583 
   3584 	/* Wait for eeprom to reload */
   3585 	switch (sc->sc_type) {
   3586 	case WM_T_82542_2_0:
   3587 	case WM_T_82542_2_1:
   3588 		/* null */
   3589 		break;
   3590 	case WM_T_82543:
   3591 	case WM_T_82544:
   3592 	case WM_T_82540:
   3593 	case WM_T_82545:
   3594 	case WM_T_82545_3:
   3595 	case WM_T_82546:
   3596 	case WM_T_82546_3:
   3597 	case WM_T_82541:
   3598 	case WM_T_82541_2:
   3599 	case WM_T_82547:
   3600 	case WM_T_82547_2:
   3601 	case WM_T_82573:
   3602 	case WM_T_82574:
   3603 	case WM_T_82583:
   3604 		/* generic */
   3605 		delay(10*1000);
   3606 		break;
   3607 	case WM_T_80003:
   3608 	case WM_T_82571:
   3609 	case WM_T_82572:
   3610 	case WM_T_82575:
   3611 	case WM_T_82576:
   3612 	case WM_T_82580:
   3613 	case WM_T_I350:
   3614 	case WM_T_I354:
   3615 	case WM_T_I210:
   3616 	case WM_T_I211:
   3617 		if (sc->sc_type == WM_T_82571) {
   3618 			/* Only 82571 shares port 0 */
   3619 			mask = EEMNGCTL_CFGDONE_0;
   3620 		} else
   3621 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3622 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3623 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3624 				break;
   3625 			delay(1000);
   3626 		}
   3627 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3628 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3629 				device_xname(sc->sc_dev), __func__));
   3630 		}
   3631 		break;
   3632 	case WM_T_ICH8:
   3633 	case WM_T_ICH9:
   3634 	case WM_T_ICH10:
   3635 	case WM_T_PCH:
   3636 	case WM_T_PCH2:
   3637 	case WM_T_PCH_LPT:
   3638 	case WM_T_PCH_SPT:
   3639 		delay(10*1000);
   3640 		if (sc->sc_type >= WM_T_ICH10)
   3641 			wm_lan_init_done(sc);
   3642 		else
   3643 			wm_get_auto_rd_done(sc);
   3644 
   3645 		reg = CSR_READ(sc, WMREG_STATUS);
   3646 		if ((reg & STATUS_PHYRA) != 0)
   3647 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3648 		break;
   3649 	default:
   3650 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3651 		    __func__);
   3652 		break;
   3653 	}
   3654 }
   3655 
   3656 /* Init hardware bits */
   3657 void
   3658 wm_initialize_hardware_bits(struct wm_softc *sc)
   3659 {
   3660 	uint32_t tarc0, tarc1, reg;
   3661 
   3662 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3663 		device_xname(sc->sc_dev), __func__));
   3664 
   3665 	/* For 82571 variant, 80003 and ICHs */
   3666 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3667 	    || (sc->sc_type >= WM_T_80003)) {
   3668 
   3669 		/* Transmit Descriptor Control 0 */
   3670 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3671 		reg |= TXDCTL_COUNT_DESC;
   3672 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3673 
   3674 		/* Transmit Descriptor Control 1 */
   3675 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3676 		reg |= TXDCTL_COUNT_DESC;
   3677 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3678 
   3679 		/* TARC0 */
   3680 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3681 		switch (sc->sc_type) {
   3682 		case WM_T_82571:
   3683 		case WM_T_82572:
   3684 		case WM_T_82573:
   3685 		case WM_T_82574:
   3686 		case WM_T_82583:
   3687 		case WM_T_80003:
   3688 			/* Clear bits 30..27 */
   3689 			tarc0 &= ~__BITS(30, 27);
   3690 			break;
   3691 		default:
   3692 			break;
   3693 		}
   3694 
   3695 		switch (sc->sc_type) {
   3696 		case WM_T_82571:
   3697 		case WM_T_82572:
   3698 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3699 
   3700 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3701 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3702 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3703 			/* 8257[12] Errata No.7 */
    3704 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3705 
   3706 			/* TARC1 bit 28 */
   3707 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3708 				tarc1 &= ~__BIT(28);
   3709 			else
   3710 				tarc1 |= __BIT(28);
   3711 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3712 
   3713 			/*
   3714 			 * 8257[12] Errata No.13
    3715 			 * Disable Dynamic Clock Gating.
   3716 			 */
   3717 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3718 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3719 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3720 			break;
   3721 		case WM_T_82573:
   3722 		case WM_T_82574:
   3723 		case WM_T_82583:
   3724 			if ((sc->sc_type == WM_T_82574)
   3725 			    || (sc->sc_type == WM_T_82583))
   3726 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3727 
   3728 			/* Extended Device Control */
   3729 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3730 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3731 			reg |= __BIT(22);	/* Set bit 22 */
   3732 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3733 
   3734 			/* Device Control */
   3735 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3736 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3737 
   3738 			/* PCIe Control Register */
   3739 			/*
   3740 			 * 82573 Errata (unknown).
   3741 			 *
   3742 			 * 82574 Errata 25 and 82583 Errata 12
   3743 			 * "Dropped Rx Packets":
    3744 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3745 			 */
   3746 			reg = CSR_READ(sc, WMREG_GCR);
   3747 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3748 			CSR_WRITE(sc, WMREG_GCR, reg);
   3749 
   3750 			if ((sc->sc_type == WM_T_82574)
   3751 			    || (sc->sc_type == WM_T_82583)) {
   3752 				/*
   3753 				 * Document says this bit must be set for
   3754 				 * proper operation.
   3755 				 */
   3756 				reg = CSR_READ(sc, WMREG_GCR);
   3757 				reg |= __BIT(22);
   3758 				CSR_WRITE(sc, WMREG_GCR, reg);
   3759 
   3760 				/*
    3761 				 * Apply a workaround for a hardware erratum
    3762 				 * documented in the errata docs: error-prone
    3763 				 * or unreliable PCIe completions can occur,
    3764 				 * particularly with ASPM enabled.
    3765 				 * Without the fix, the issue can cause
    3766 				 * Tx timeouts.
   3767 				 */
   3768 				reg = CSR_READ(sc, WMREG_GCR2);
   3769 				reg |= __BIT(0);
   3770 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3771 			}
   3772 			break;
   3773 		case WM_T_80003:
   3774 			/* TARC0 */
   3775 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3776 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3777 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3778 
   3779 			/* TARC1 bit 28 */
   3780 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3781 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3782 				tarc1 &= ~__BIT(28);
   3783 			else
   3784 				tarc1 |= __BIT(28);
   3785 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3786 			break;
   3787 		case WM_T_ICH8:
   3788 		case WM_T_ICH9:
   3789 		case WM_T_ICH10:
   3790 		case WM_T_PCH:
   3791 		case WM_T_PCH2:
   3792 		case WM_T_PCH_LPT:
   3793 		case WM_T_PCH_SPT:
   3794 			/* TARC0 */
   3795 			if ((sc->sc_type == WM_T_ICH8)
   3796 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3797 				/* Set TARC0 bits 29 and 28 */
   3798 				tarc0 |= __BITS(29, 28);
   3799 			}
   3800 			/* Set TARC0 bits 23,24,26,27 */
   3801 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3802 
   3803 			/* CTRL_EXT */
   3804 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3805 			reg |= __BIT(22);	/* Set bit 22 */
   3806 			/*
   3807 			 * Enable PHY low-power state when MAC is at D3
   3808 			 * w/o WoL
   3809 			 */
   3810 			if (sc->sc_type >= WM_T_PCH)
   3811 				reg |= CTRL_EXT_PHYPDEN;
   3812 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3813 
   3814 			/* TARC1 */
   3815 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3816 			/* bit 28 */
   3817 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3818 				tarc1 &= ~__BIT(28);
   3819 			else
   3820 				tarc1 |= __BIT(28);
   3821 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3822 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3823 
   3824 			/* Device Status */
   3825 			if (sc->sc_type == WM_T_ICH8) {
   3826 				reg = CSR_READ(sc, WMREG_STATUS);
   3827 				reg &= ~__BIT(31);
   3828 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3829 
   3830 			}
   3831 
   3832 			/* IOSFPC */
   3833 			if (sc->sc_type == WM_T_PCH_SPT) {
   3834 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3835 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3836 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3837 			}
   3838 			/*
   3839 			 * Work-around descriptor data corruption issue during
   3840 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3841 			 * capability.
   3842 			 */
   3843 			reg = CSR_READ(sc, WMREG_RFCTL);
   3844 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3845 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3846 			break;
   3847 		default:
   3848 			break;
   3849 		}
   3850 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3851 
   3852 		switch (sc->sc_type) {
   3853 		/*
   3854 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3855 		 * Avoid RSS Hash Value bug.
   3856 		 */
   3857 		case WM_T_82571:
   3858 		case WM_T_82572:
   3859 		case WM_T_82573:
   3860 		case WM_T_80003:
   3861 		case WM_T_ICH8:
   3862 			reg = CSR_READ(sc, WMREG_RFCTL);
   3863 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3864 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3865 			break;
   3866 		case WM_T_82574:
    3867 			/* Use extended Rx descriptors. */
   3868 			reg = CSR_READ(sc, WMREG_RFCTL);
   3869 			reg |= WMREG_RFCTL_EXSTEN;
   3870 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3871 			break;
   3872 		default:
   3873 			break;
   3874 		}
   3875 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3876 		/*
   3877 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3878 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3879 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3880 		 * Correctly by the Device"
   3881 		 *
   3882 		 * I354(C2000) Errata AVR53:
   3883 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3884 		 * Hang"
   3885 		 */
   3886 		reg = CSR_READ(sc, WMREG_RFCTL);
   3887 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3888 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3889 	}
   3890 }
   3891 
   3892 static uint32_t
   3893 wm_rxpbs_adjust_82580(uint32_t val)
   3894 {
   3895 	uint32_t rv = 0;
   3896 
   3897 	if (val < __arraycount(wm_82580_rxpbs_table))
   3898 		rv = wm_82580_rxpbs_table[val];
   3899 
   3900 	return rv;
   3901 }
   3902 
   3903 /*
   3904  * wm_reset_phy:
   3905  *
    3906  *	Generic PHY reset function.
    3907  *	Same as e1000_phy_hw_reset_generic().
   3908  */
   3909 static void
   3910 wm_reset_phy(struct wm_softc *sc)
   3911 {
   3912 	uint32_t reg;
   3913 
   3914 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3915 		device_xname(sc->sc_dev), __func__));
   3916 	if (wm_phy_resetisblocked(sc))
   3917 		return;
   3918 
   3919 	sc->phy.acquire(sc);
   3920 
   3921 	reg = CSR_READ(sc, WMREG_CTRL);
   3922 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3923 	CSR_WRITE_FLUSH(sc);
   3924 
   3925 	delay(sc->phy.reset_delay_us);
   3926 
   3927 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3928 	CSR_WRITE_FLUSH(sc);
   3929 
   3930 	delay(150);
   3931 
   3932 	sc->phy.release(sc);
   3933 
   3934 	wm_get_cfg_done(sc);
   3935 }
   3936 
   3937 static void
   3938 wm_flush_desc_rings(struct wm_softc *sc)
   3939 {
   3940 	pcireg_t preg;
   3941 	uint32_t reg;
   3942 	int nexttx;
   3943 
   3944 	/* First, disable MULR fix in FEXTNVM11 */
   3945 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3946 	reg |= FEXTNVM11_DIS_MULRFIX;
   3947 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3948 
   3949 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3950 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3951 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3952 		struct wm_txqueue *txq;
   3953 		wiseman_txdesc_t *txd;
   3954 
   3955 		/* TX */
   3956 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3957 		    device_xname(sc->sc_dev), preg, reg);
   3958 		reg = CSR_READ(sc, WMREG_TCTL);
   3959 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3960 
   3961 		txq = &sc->sc_queue[0].wmq_txq;
   3962 		nexttx = txq->txq_next;
   3963 		txd = &txq->txq_descs[nexttx];
   3964 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3965 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3966 		txd->wtx_fields.wtxu_status = 0;
   3967 		txd->wtx_fields.wtxu_options = 0;
   3968 		txd->wtx_fields.wtxu_vlan = 0;
   3969 
   3970 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3971 			BUS_SPACE_BARRIER_WRITE);
   3972 
   3973 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3974 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3975 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3976 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3977 		delay(250);
   3978 	}
   3979 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3980 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3981 		uint32_t rctl;
   3982 
   3983 		/* RX */
   3984 		printf("%s: Need RX flush (reg = %08x)\n",
   3985 		    device_xname(sc->sc_dev), preg);
   3986 		rctl = CSR_READ(sc, WMREG_RCTL);
   3987 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3988 		CSR_WRITE_FLUSH(sc);
   3989 		delay(150);
   3990 
   3991 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3992 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3993 		reg &= 0xffffc000;
   3994 		/*
   3995 		 * update thresholds: prefetch threshold to 31, host threshold
   3996 		 * to 1 and make sure the granularity is "descriptors" and not
   3997 		 * "cache lines"
   3998 		 */
   3999 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4000 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4001 
   4002 		/*
   4003 		 * momentarily enable the RX ring for the changes to take
   4004 		 * effect
   4005 		 */
   4006 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4007 		CSR_WRITE_FLUSH(sc);
   4008 		delay(150);
   4009 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4010 	}
   4011 }
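
         /*
          * Note (a reading of the workaround above, not from a datasheet):
          * the TX flush enables the transmitter and posts a single dummy
          * 512-byte descriptor so the hardware drains its internal
          * descriptor cache, and the RX flush briefly re-enables the
          * receiver with adjusted prefetch/host thresholds for the same
          * purpose.
          */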
   4012 
   4013 /*
   4014  * wm_reset:
   4015  *
   4016  *	Reset the i82542 chip.
   4017  */
   4018 static void
   4019 wm_reset(struct wm_softc *sc)
   4020 {
   4021 	int phy_reset = 0;
   4022 	int i, error = 0;
   4023 	uint32_t reg;
   4024 
   4025 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4026 		device_xname(sc->sc_dev), __func__));
   4027 	KASSERT(sc->sc_type != 0);
   4028 
   4029 	/*
   4030 	 * Allocate on-chip memory according to the MTU size.
   4031 	 * The Packet Buffer Allocation register must be written
   4032 	 * before the chip is reset.
   4033 	 */
   4034 	switch (sc->sc_type) {
   4035 	case WM_T_82547:
   4036 	case WM_T_82547_2:
   4037 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4038 		    PBA_22K : PBA_30K;
   4039 		for (i = 0; i < sc->sc_nqueues; i++) {
   4040 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4041 			txq->txq_fifo_head = 0;
   4042 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4043 			txq->txq_fifo_size =
   4044 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4045 			txq->txq_fifo_stall = 0;
   4046 		}
   4047 		break;
   4048 	case WM_T_82571:
   4049 	case WM_T_82572:
    4050 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4051 	case WM_T_80003:
   4052 		sc->sc_pba = PBA_32K;
   4053 		break;
   4054 	case WM_T_82573:
   4055 		sc->sc_pba = PBA_12K;
   4056 		break;
   4057 	case WM_T_82574:
   4058 	case WM_T_82583:
   4059 		sc->sc_pba = PBA_20K;
   4060 		break;
   4061 	case WM_T_82576:
   4062 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4063 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4064 		break;
   4065 	case WM_T_82580:
   4066 	case WM_T_I350:
   4067 	case WM_T_I354:
   4068 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4069 		break;
   4070 	case WM_T_I210:
   4071 	case WM_T_I211:
   4072 		sc->sc_pba = PBA_34K;
   4073 		break;
   4074 	case WM_T_ICH8:
   4075 		/* Workaround for a bit corruption issue in FIFO memory */
   4076 		sc->sc_pba = PBA_8K;
   4077 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4078 		break;
   4079 	case WM_T_ICH9:
   4080 	case WM_T_ICH10:
   4081 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4082 		    PBA_14K : PBA_10K;
   4083 		break;
   4084 	case WM_T_PCH:
   4085 	case WM_T_PCH2:
   4086 	case WM_T_PCH_LPT:
   4087 	case WM_T_PCH_SPT:
   4088 		sc->sc_pba = PBA_26K;
   4089 		break;
   4090 	default:
   4091 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4092 		    PBA_40K : PBA_48K;
   4093 		break;
   4094 	}
   4095 	/*
   4096 	 * Only old or non-multiqueue devices have the PBA register
   4097 	 * XXX Need special handling for 82575.
   4098 	 */
   4099 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4100 	    || (sc->sc_type == WM_T_82575))
   4101 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4102 
   4103 	/* Prevent the PCI-E bus from sticking */
   4104 	if (sc->sc_flags & WM_F_PCIE) {
   4105 		int timeout = 800;
   4106 
   4107 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4108 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4109 
   4110 		while (timeout--) {
   4111 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4112 			    == 0)
   4113 				break;
   4114 			delay(100);
   4115 		}
   4116 		if (timeout == 0)
   4117 			device_printf(sc->sc_dev,
   4118 			    "failed to disable busmastering\n");
   4119 	}
   4120 
   4121 	/* Set the completion timeout for interface */
   4122 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4123 	    || (sc->sc_type == WM_T_82580)
   4124 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4125 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4126 		wm_set_pcie_completion_timeout(sc);
   4127 
   4128 	/* Clear interrupt */
   4129 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4130 	if (wm_is_using_msix(sc)) {
   4131 		if (sc->sc_type != WM_T_82574) {
   4132 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4133 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4134 		} else {
   4135 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4136 		}
   4137 	}
   4138 
   4139 	/* Stop the transmit and receive processes. */
   4140 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4141 	sc->sc_rctl &= ~RCTL_EN;
   4142 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4143 	CSR_WRITE_FLUSH(sc);
   4144 
   4145 	/* XXX set_tbi_sbp_82543() */
   4146 
   4147 	delay(10*1000);
   4148 
   4149 	/* Must acquire the MDIO ownership before MAC reset */
   4150 	switch (sc->sc_type) {
   4151 	case WM_T_82573:
   4152 	case WM_T_82574:
   4153 	case WM_T_82583:
   4154 		error = wm_get_hw_semaphore_82573(sc);
   4155 		break;
   4156 	default:
   4157 		break;
   4158 	}
   4159 
   4160 	/*
   4161 	 * 82541 Errata 29? & 82547 Errata 28?
   4162 	 * See also the description about PHY_RST bit in CTRL register
   4163 	 * in 8254x_GBe_SDM.pdf.
   4164 	 */
   4165 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4166 		CSR_WRITE(sc, WMREG_CTRL,
   4167 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4168 		CSR_WRITE_FLUSH(sc);
   4169 		delay(5000);
   4170 	}
   4171 
   4172 	switch (sc->sc_type) {
   4173 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4174 	case WM_T_82541:
   4175 	case WM_T_82541_2:
   4176 	case WM_T_82547:
   4177 	case WM_T_82547_2:
   4178 		/*
   4179 		 * On some chipsets, a reset through a memory-mapped write
   4180 		 * cycle can cause the chip to reset before completing the
   4181 		 * write cycle.  This causes major headache that can be
   4182 		 * avoided by issuing the reset via indirect register writes
   4183 		 * through I/O space.
   4184 		 *
   4185 		 * So, if we successfully mapped the I/O BAR at attach time,
   4186 		 * use that.  Otherwise, try our luck with a memory-mapped
   4187 		 * reset.
   4188 		 */
   4189 		if (sc->sc_flags & WM_F_IOH_VALID)
   4190 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4191 		else
   4192 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4193 		break;
   4194 	case WM_T_82545_3:
   4195 	case WM_T_82546_3:
   4196 		/* Use the shadow control register on these chips. */
   4197 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4198 		break;
   4199 	case WM_T_80003:
   4200 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4201 		sc->phy.acquire(sc);
   4202 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4203 		sc->phy.release(sc);
   4204 		break;
   4205 	case WM_T_ICH8:
   4206 	case WM_T_ICH9:
   4207 	case WM_T_ICH10:
   4208 	case WM_T_PCH:
   4209 	case WM_T_PCH2:
   4210 	case WM_T_PCH_LPT:
   4211 	case WM_T_PCH_SPT:
   4212 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4213 		if (wm_phy_resetisblocked(sc) == false) {
   4214 			/*
   4215 			 * Gate automatic PHY configuration by hardware on
   4216 			 * non-managed 82579
   4217 			 */
   4218 			if ((sc->sc_type == WM_T_PCH2)
   4219 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4220 				== 0))
   4221 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4222 
   4223 			reg |= CTRL_PHY_RESET;
   4224 			phy_reset = 1;
   4225 		} else
   4226 			printf("XXX reset is blocked!!!\n");
   4227 		sc->phy.acquire(sc);
   4228 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4229 		/* Don't insert a completion barrier when reset */
   4230 		delay(20*1000);
   4231 		mutex_exit(sc->sc_ich_phymtx);
   4232 		break;
   4233 	case WM_T_82580:
   4234 	case WM_T_I350:
   4235 	case WM_T_I354:
   4236 	case WM_T_I210:
   4237 	case WM_T_I211:
   4238 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4239 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4240 			CSR_WRITE_FLUSH(sc);
   4241 		delay(5000);
   4242 		break;
   4243 	case WM_T_82542_2_0:
   4244 	case WM_T_82542_2_1:
   4245 	case WM_T_82543:
   4246 	case WM_T_82540:
   4247 	case WM_T_82545:
   4248 	case WM_T_82546:
   4249 	case WM_T_82571:
   4250 	case WM_T_82572:
   4251 	case WM_T_82573:
   4252 	case WM_T_82574:
   4253 	case WM_T_82575:
   4254 	case WM_T_82576:
   4255 	case WM_T_82583:
   4256 	default:
   4257 		/* Everything else can safely use the documented method. */
   4258 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4259 		break;
   4260 	}
   4261 
   4262 	/* Must release the MDIO ownership after MAC reset */
   4263 	switch (sc->sc_type) {
   4264 	case WM_T_82573:
   4265 	case WM_T_82574:
   4266 	case WM_T_82583:
   4267 		if (error == 0)
   4268 			wm_put_hw_semaphore_82573(sc);
   4269 		break;
   4270 	default:
   4271 		break;
   4272 	}
   4273 
   4274 	if (phy_reset != 0)
   4275 		wm_get_cfg_done(sc);
   4276 
   4277 	/* reload EEPROM */
   4278 	switch (sc->sc_type) {
   4279 	case WM_T_82542_2_0:
   4280 	case WM_T_82542_2_1:
   4281 	case WM_T_82543:
   4282 	case WM_T_82544:
   4283 		delay(10);
   4284 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4285 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4286 		CSR_WRITE_FLUSH(sc);
   4287 		delay(2000);
   4288 		break;
   4289 	case WM_T_82540:
   4290 	case WM_T_82545:
   4291 	case WM_T_82545_3:
   4292 	case WM_T_82546:
   4293 	case WM_T_82546_3:
   4294 		delay(5*1000);
   4295 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4296 		break;
   4297 	case WM_T_82541:
   4298 	case WM_T_82541_2:
   4299 	case WM_T_82547:
   4300 	case WM_T_82547_2:
   4301 		delay(20000);
   4302 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4303 		break;
   4304 	case WM_T_82571:
   4305 	case WM_T_82572:
   4306 	case WM_T_82573:
   4307 	case WM_T_82574:
   4308 	case WM_T_82583:
   4309 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4310 			delay(10);
   4311 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4312 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4313 			CSR_WRITE_FLUSH(sc);
   4314 		}
   4315 		/* check EECD_EE_AUTORD */
   4316 		wm_get_auto_rd_done(sc);
   4317 		/*
    4318 		 * PHY configuration from the NVM starts just after
    4319 		 * EECD_EE_AUTORD is set.
   4320 		 */
   4321 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4322 		    || (sc->sc_type == WM_T_82583))
   4323 			delay(25*1000);
   4324 		break;
   4325 	case WM_T_82575:
   4326 	case WM_T_82576:
   4327 	case WM_T_82580:
   4328 	case WM_T_I350:
   4329 	case WM_T_I354:
   4330 	case WM_T_I210:
   4331 	case WM_T_I211:
   4332 	case WM_T_80003:
   4333 		/* check EECD_EE_AUTORD */
   4334 		wm_get_auto_rd_done(sc);
   4335 		break;
   4336 	case WM_T_ICH8:
   4337 	case WM_T_ICH9:
   4338 	case WM_T_ICH10:
   4339 	case WM_T_PCH:
   4340 	case WM_T_PCH2:
   4341 	case WM_T_PCH_LPT:
   4342 	case WM_T_PCH_SPT:
   4343 		break;
   4344 	default:
   4345 		panic("%s: unknown type\n", __func__);
   4346 	}
   4347 
   4348 	/* Check whether EEPROM is present or not */
   4349 	switch (sc->sc_type) {
   4350 	case WM_T_82575:
   4351 	case WM_T_82576:
   4352 	case WM_T_82580:
   4353 	case WM_T_I350:
   4354 	case WM_T_I354:
   4355 	case WM_T_ICH8:
   4356 	case WM_T_ICH9:
   4357 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4358 			/* Not found */
   4359 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4360 			if (sc->sc_type == WM_T_82575)
   4361 				wm_reset_init_script_82575(sc);
   4362 		}
   4363 		break;
   4364 	default:
   4365 		break;
   4366 	}
   4367 
   4368 	if ((sc->sc_type == WM_T_82580)
   4369 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4370 		/* clear global device reset status bit */
   4371 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4372 	}
   4373 
   4374 	/* Clear any pending interrupt events. */
   4375 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4376 	reg = CSR_READ(sc, WMREG_ICR);
   4377 	if (wm_is_using_msix(sc)) {
   4378 		if (sc->sc_type != WM_T_82574) {
   4379 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4380 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4381 		} else
   4382 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4383 	}
   4384 
   4385 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4386 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4387 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4388 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4389 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4390 		reg |= KABGTXD_BGSQLBIAS;
   4391 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4392 	}
   4393 
   4394 	/* reload sc_ctrl */
   4395 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4396 
   4397 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4398 		wm_set_eee_i350(sc);
   4399 
    4400 	/* Clear the host wakeup bit after LCD reset */
   4401 	if (sc->sc_type >= WM_T_PCH) {
   4402 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4403 		    BM_PORT_GEN_CFG);
   4404 		reg &= ~BM_WUC_HOST_WU_BIT;
   4405 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4406 		    BM_PORT_GEN_CFG, reg);
   4407 	}
   4408 
   4409 	/*
   4410 	 * For PCH, this write will make sure that any noise will be detected
   4411 	 * as a CRC error and be dropped rather than show up as a bad packet
   4412 	 * to the DMA engine
   4413 	 */
   4414 	if (sc->sc_type == WM_T_PCH)
   4415 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4416 
   4417 	if (sc->sc_type >= WM_T_82544)
   4418 		CSR_WRITE(sc, WMREG_WUC, 0);
   4419 
   4420 	wm_reset_mdicnfg_82580(sc);
   4421 
   4422 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4423 		wm_pll_workaround_i210(sc);
   4424 }
   4425 
   4426 /*
   4427  * wm_add_rxbuf:
   4428  *
    4429  *	Add a receive buffer to the indicated descriptor.
   4430  */
   4431 static int
   4432 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4433 {
   4434 	struct wm_softc *sc = rxq->rxq_sc;
   4435 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4436 	struct mbuf *m;
   4437 	int error;
   4438 
   4439 	KASSERT(mutex_owned(rxq->rxq_lock));
   4440 
   4441 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4442 	if (m == NULL)
   4443 		return ENOBUFS;
   4444 
   4445 	MCLGET(m, M_DONTWAIT);
   4446 	if ((m->m_flags & M_EXT) == 0) {
   4447 		m_freem(m);
   4448 		return ENOBUFS;
   4449 	}
   4450 
   4451 	if (rxs->rxs_mbuf != NULL)
   4452 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4453 
   4454 	rxs->rxs_mbuf = m;
   4455 
   4456 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4457 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4458 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4459 	if (error) {
   4460 		/* XXX XXX XXX */
   4461 		aprint_error_dev(sc->sc_dev,
   4462 		    "unable to load rx DMA map %d, error = %d\n",
   4463 		    idx, error);
   4464 		panic("wm_add_rxbuf");
   4465 	}
   4466 
   4467 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4468 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4469 
   4470 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4471 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4472 			wm_init_rxdesc(rxq, idx);
   4473 	} else
   4474 		wm_init_rxdesc(rxq, idx);
   4475 
   4476 	return 0;
   4477 }
   4478 
   4479 /*
   4480  * wm_rxdrain:
   4481  *
   4482  *	Drain the receive queue.
   4483  */
   4484 static void
   4485 wm_rxdrain(struct wm_rxqueue *rxq)
   4486 {
   4487 	struct wm_softc *sc = rxq->rxq_sc;
   4488 	struct wm_rxsoft *rxs;
   4489 	int i;
   4490 
   4491 	KASSERT(mutex_owned(rxq->rxq_lock));
   4492 
   4493 	for (i = 0; i < WM_NRXDESC; i++) {
   4494 		rxs = &rxq->rxq_soft[i];
   4495 		if (rxs->rxs_mbuf != NULL) {
   4496 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4497 			m_freem(rxs->rxs_mbuf);
   4498 			rxs->rxs_mbuf = NULL;
   4499 		}
   4500 	}
   4501 }
   4502 
   4503 
   4504 /*
   4505  * XXX copy from FreeBSD's sys/net/rss_config.c
   4506  */
   4507 /*
   4508  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4509  * effectiveness may be limited by algorithm choice and available entropy
   4510  * during the boot.
   4511  *
   4512  * XXXRW: And that we don't randomize it yet!
   4513  *
   4514  * This is the default Microsoft RSS specification key which is also
   4515  * the Chelsio T5 firmware default key.
   4516  */
   4517 #define RSS_KEYSIZE 40
   4518 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4519 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4520 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4521 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4522 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4523 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4524 };
   4525 
   4526 /*
    4527  * The caller must pass an array of size sizeof(wm_rss_key).
    4528  *
    4529  * XXX
    4530  * Since if_ixgbe may use this function, it should not be an
    4531  * if_wm specific function.
   4532  */
   4533 static void
   4534 wm_rss_getkey(uint8_t *key)
   4535 {
   4536 
   4537 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4538 }
   4539 
   4540 /*
    4541  * Set up registers for RSS.
    4542  *
    4543  * XXX VMDq is not yet supported.
   4544  */
   4545 static void
   4546 wm_init_rss(struct wm_softc *sc)
   4547 {
   4548 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4549 	int i;
   4550 
   4551 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4552 
   4553 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4554 		int qid, reta_ent;
   4555 
   4556 		qid  = i % sc->sc_nqueues;
    4557 		switch (sc->sc_type) {
   4558 		case WM_T_82574:
   4559 			reta_ent = __SHIFTIN(qid,
   4560 			    RETA_ENT_QINDEX_MASK_82574);
   4561 			break;
   4562 		case WM_T_82575:
   4563 			reta_ent = __SHIFTIN(qid,
   4564 			    RETA_ENT_QINDEX1_MASK_82575);
   4565 			break;
   4566 		default:
   4567 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4568 			break;
   4569 		}
   4570 
   4571 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4572 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4573 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4574 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4575 	}
   4576 
   4577 	wm_rss_getkey((uint8_t *)rss_key);
   4578 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4579 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4580 
   4581 	if (sc->sc_type == WM_T_82574)
   4582 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4583 	else
   4584 		mrqc = MRQC_ENABLE_RSS_MQ;
   4585 
   4586 	/*
   4587 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4588 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4589 	 */
   4590 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4591 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4592 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4593 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4594 
   4595 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4596 }
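
         /*
          * Illustrative sketch (not driver code): with the redirection table
          * programmed as above, the hardware indexes RETA with the low bits
          * of the RSS (Toeplitz) hash to pick a queue.  Assuming a 128-entry
          * table and sc->sc_nqueues == 4, entry i holds (i % 4), so a packet
          * whose hash is H lands on:
          *
          *	qid = reta[H % RETA_NUM_ENTRIES];	/* (H % 128) % 4 here */
          */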
   4597 
   4598 /*
    4599  * Adjust the TX and RX queue numbers which the system actually uses.
    4600  *
    4601  * The numbers are affected by the following parameters:
    4602  *     - The number of hardware queues
    4603  *     - The number of MSI-X vectors (= "nvectors" argument)
    4604  *     - ncpu
   4605  */
   4606 static void
   4607 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4608 {
   4609 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4610 
   4611 	if (nvectors < 2) {
   4612 		sc->sc_nqueues = 1;
   4613 		return;
   4614 	}
   4615 
    4616 	switch (sc->sc_type) {
   4617 	case WM_T_82572:
   4618 		hw_ntxqueues = 2;
   4619 		hw_nrxqueues = 2;
   4620 		break;
   4621 	case WM_T_82574:
   4622 		hw_ntxqueues = 2;
   4623 		hw_nrxqueues = 2;
   4624 		break;
   4625 	case WM_T_82575:
   4626 		hw_ntxqueues = 4;
   4627 		hw_nrxqueues = 4;
   4628 		break;
   4629 	case WM_T_82576:
   4630 		hw_ntxqueues = 16;
   4631 		hw_nrxqueues = 16;
   4632 		break;
   4633 	case WM_T_82580:
   4634 	case WM_T_I350:
   4635 	case WM_T_I354:
   4636 		hw_ntxqueues = 8;
   4637 		hw_nrxqueues = 8;
   4638 		break;
   4639 	case WM_T_I210:
   4640 		hw_ntxqueues = 4;
   4641 		hw_nrxqueues = 4;
   4642 		break;
   4643 	case WM_T_I211:
   4644 		hw_ntxqueues = 2;
   4645 		hw_nrxqueues = 2;
   4646 		break;
   4647 		/*
    4648 		 * As the following Ethernet controllers do not support
    4649 		 * MSI-X, this driver does not use multiqueue on them:
   4650 		 *     - WM_T_80003
   4651 		 *     - WM_T_ICH8
   4652 		 *     - WM_T_ICH9
   4653 		 *     - WM_T_ICH10
   4654 		 *     - WM_T_PCH
   4655 		 *     - WM_T_PCH2
   4656 		 *     - WM_T_PCH_LPT
   4657 		 */
   4658 	default:
   4659 		hw_ntxqueues = 1;
   4660 		hw_nrxqueues = 1;
   4661 		break;
   4662 	}
   4663 
   4664 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4665 
   4666 	/*
    4667 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4668 	 * limit the number of queues actually used.
   4669 	 */
   4670 	if (nvectors < hw_nqueues + 1) {
   4671 		sc->sc_nqueues = nvectors - 1;
   4672 	} else {
   4673 		sc->sc_nqueues = hw_nqueues;
   4674 	}
   4675 
   4676 	/*
    4677 	 * As more queues than CPUs cannot improve scaling, we limit
    4678 	 * the number of queues actually used.
   4679 	 */
   4680 	if (ncpu < sc->sc_nqueues)
   4681 		sc->sc_nqueues = ncpu;
   4682 }
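
         /*
          * Worked example of the logic above (a sketch): an 82576 has
          * hw_nqueues == 16; probed with nvectors == 5 on an 8-CPU machine,
          * one vector is reserved for the link interrupt, so
          * sc_nqueues = min(16, 5 - 1) = 4, which is already <= ncpu.  With
          * nvectors < 2 the driver always falls back to a single queue.
          */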
   4683 
   4684 static inline bool
   4685 wm_is_using_msix(struct wm_softc *sc)
   4686 {
   4687 
   4688 	return (sc->sc_nintrs > 1);
   4689 }
   4690 
   4691 static inline bool
   4692 wm_is_using_multiqueue(struct wm_softc *sc)
   4693 {
   4694 
   4695 	return (sc->sc_nqueues > 1);
   4696 }
   4697 
   4698 static int
   4699 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4700 {
   4701 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4702 	wmq->wmq_id = qidx;
   4703 	wmq->wmq_intr_idx = intr_idx;
   4704 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4705 #ifdef WM_MPSAFE
   4706 	    | SOFTINT_MPSAFE
   4707 #endif
   4708 	    , wm_handle_queue, wmq);
   4709 	if (wmq->wmq_si != NULL)
   4710 		return 0;
   4711 
   4712 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4713 	    wmq->wmq_id);
   4714 
   4715 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4716 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4717 	return ENOMEM;
   4718 }
   4719 
   4720 /*
    4721  * Both single-interrupt MSI and INTx can use this function.
   4722  */
   4723 static int
   4724 wm_setup_legacy(struct wm_softc *sc)
   4725 {
   4726 	pci_chipset_tag_t pc = sc->sc_pc;
   4727 	const char *intrstr = NULL;
   4728 	char intrbuf[PCI_INTRSTR_LEN];
   4729 	int error;
   4730 
   4731 	error = wm_alloc_txrx_queues(sc);
   4732 	if (error) {
   4733 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4734 		    error);
   4735 		return ENOMEM;
   4736 	}
   4737 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4738 	    sizeof(intrbuf));
   4739 #ifdef WM_MPSAFE
   4740 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4741 #endif
   4742 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4743 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4744 	if (sc->sc_ihs[0] == NULL) {
    4745 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4746 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4747 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4748 		return ENOMEM;
   4749 	}
   4750 
   4751 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4752 	sc->sc_nintrs = 1;
   4753 
   4754 	return wm_softint_establish(sc, 0, 0);
   4755 }
   4756 
   4757 static int
   4758 wm_setup_msix(struct wm_softc *sc)
   4759 {
   4760 	void *vih;
   4761 	kcpuset_t *affinity;
   4762 	int qidx, error, intr_idx, txrx_established;
   4763 	pci_chipset_tag_t pc = sc->sc_pc;
   4764 	const char *intrstr = NULL;
   4765 	char intrbuf[PCI_INTRSTR_LEN];
   4766 	char intr_xname[INTRDEVNAMEBUF];
   4767 
   4768 	if (sc->sc_nqueues < ncpu) {
   4769 		/*
    4770 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4771 		 * interrupts starts from CPU#1.
   4772 		 */
   4773 		sc->sc_affinity_offset = 1;
   4774 	} else {
   4775 		/*
    4776 		 * In this case, this device uses all CPUs, so we unify the
    4777 		 * affinity cpu_index with the MSI-X vector number for readability.
   4778 		 */
   4779 		sc->sc_affinity_offset = 0;
   4780 	}
   4781 
   4782 	error = wm_alloc_txrx_queues(sc);
   4783 	if (error) {
   4784 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4785 		    error);
   4786 		return ENOMEM;
   4787 	}
   4788 
   4789 	kcpuset_create(&affinity, false);
   4790 	intr_idx = 0;
   4791 
   4792 	/*
   4793 	 * TX and RX
   4794 	 */
   4795 	txrx_established = 0;
   4796 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4797 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4798 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4799 
   4800 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4801 		    sizeof(intrbuf));
   4802 #ifdef WM_MPSAFE
   4803 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4804 		    PCI_INTR_MPSAFE, true);
   4805 #endif
   4806 		memset(intr_xname, 0, sizeof(intr_xname));
   4807 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4808 		    device_xname(sc->sc_dev), qidx);
   4809 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4810 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4811 		if (vih == NULL) {
   4812 			aprint_error_dev(sc->sc_dev,
    4813 			    "unable to establish MSI-X (for TX and RX)%s%s\n",
   4814 			    intrstr ? " at " : "",
   4815 			    intrstr ? intrstr : "");
   4816 
   4817 			goto fail;
   4818 		}
   4819 		kcpuset_zero(affinity);
   4820 		/* Round-robin affinity */
   4821 		kcpuset_set(affinity, affinity_to);
   4822 		error = interrupt_distribute(vih, affinity, NULL);
   4823 		if (error == 0) {
   4824 			aprint_normal_dev(sc->sc_dev,
   4825 			    "for TX and RX interrupting at %s affinity to %u\n",
   4826 			    intrstr, affinity_to);
   4827 		} else {
   4828 			aprint_normal_dev(sc->sc_dev,
   4829 			    "for TX and RX interrupting at %s\n", intrstr);
   4830 		}
   4831 		sc->sc_ihs[intr_idx] = vih;
   4832 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4833 			goto fail;
   4834 		txrx_established++;
   4835 		intr_idx++;
   4836 	}
   4837 
   4838 	/*
   4839 	 * LINK
   4840 	 */
   4841 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4842 	    sizeof(intrbuf));
   4843 #ifdef WM_MPSAFE
   4844 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4845 #endif
   4846 	memset(intr_xname, 0, sizeof(intr_xname));
   4847 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4848 	    device_xname(sc->sc_dev));
   4849 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4850 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4851 	if (vih == NULL) {
   4852 		aprint_error_dev(sc->sc_dev,
    4853 		    "unable to establish MSI-X (for LINK)%s%s\n",
   4854 		    intrstr ? " at " : "",
   4855 		    intrstr ? intrstr : "");
   4856 
   4857 		goto fail;
   4858 	}
    4859 	/* Keep the default affinity for the LINK interrupt */
   4860 	aprint_normal_dev(sc->sc_dev,
   4861 	    "for LINK interrupting at %s\n", intrstr);
   4862 	sc->sc_ihs[intr_idx] = vih;
   4863 	sc->sc_link_intr_idx = intr_idx;
   4864 
   4865 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4866 	kcpuset_destroy(affinity);
   4867 	return 0;
   4868 
   4869  fail:
   4870 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4871 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4872 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4873 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4874 	}
   4875 
   4876 	kcpuset_destroy(affinity);
   4877 	return ENOMEM;
   4878 }
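
         /*
          * Resulting MSI-X layout (a sketch, assuming the setup above
          * succeeded): vectors 0 .. sc_nqueues - 1 service the TX/RX queue
          * pairs, distributed round-robin starting at CPU#1 when the queues
          * don't cover every CPU, and vector sc_nqueues services the LINK
          * interrupt with the system default affinity.
          */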
   4879 
   4880 static void
   4881 wm_turnon(struct wm_softc *sc)
   4882 {
   4883 	int i;
   4884 
   4885 	KASSERT(WM_CORE_LOCKED(sc));
   4886 
   4887 	/*
    4888 	 * Must unset the stopping flags in ascending order.
   4889 	 */
    4890 	for (i = 0; i < sc->sc_nqueues; i++) {
   4891 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4892 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4893 
   4894 		mutex_enter(txq->txq_lock);
   4895 		txq->txq_stopping = false;
   4896 		mutex_exit(txq->txq_lock);
   4897 
   4898 		mutex_enter(rxq->rxq_lock);
   4899 		rxq->rxq_stopping = false;
   4900 		mutex_exit(rxq->rxq_lock);
   4901 	}
   4902 
   4903 	sc->sc_core_stopping = false;
   4904 }
   4905 
   4906 static void
   4907 wm_turnoff(struct wm_softc *sc)
   4908 {
   4909 	int i;
   4910 
   4911 	KASSERT(WM_CORE_LOCKED(sc));
   4912 
   4913 	sc->sc_core_stopping = true;
   4914 
   4915 	/*
    4916 	 * Must set the stopping flags in ascending order.
   4917 	 */
    4918 	for (i = 0; i < sc->sc_nqueues; i++) {
   4919 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4920 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4921 
   4922 		mutex_enter(rxq->rxq_lock);
   4923 		rxq->rxq_stopping = true;
   4924 		mutex_exit(rxq->rxq_lock);
   4925 
   4926 		mutex_enter(txq->txq_lock);
   4927 		txq->txq_stopping = true;
   4928 		mutex_exit(txq->txq_lock);
   4929 	}
   4930 }
   4931 
   4932 /*
    4933  * Write the interrupt interval value to the ITR or EITR register.
   4934  */
   4935 static void
   4936 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4937 {
   4938 
   4939 	if (!wmq->wmq_set_itr)
   4940 		return;
   4941 
   4942 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4943 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4944 
   4945 		/*
    4946 		 * The 82575 doesn't have the CNT_INGR field, so
    4947 		 * overwrite the counter field in software.
   4948 		 */
   4949 		if (sc->sc_type == WM_T_82575)
   4950 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4951 		else
   4952 			eitr |= EITR_CNT_INGR;
   4953 
   4954 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4955 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   4956 		/*
    4957 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4958 		 * the multiqueue function with MSI-X.
   4959 		 */
   4960 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4961 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4962 	} else {
   4963 		KASSERT(wmq->wmq_id == 0);
   4964 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4965 	}
   4966 
   4967 	wmq->wmq_set_itr = false;
   4968 }
   4969 
   4970 /*
   4971  * TODO
    4972  * The dynamic ITR calculation below is almost the same as Linux's
    4973  * igb, but it does not fit wm(4), so we keep AIM disabled until we
    4974  * find an appropriate ITR calculation.
   4975  */
   4976 /*
    4977  * Calculate the interrupt interval value to be written to the register
    4978  * by wm_itrs_writereg(). This function does not write the register itself.
   4979  */
   4980 static void
   4981 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4982 {
   4983 #ifdef NOTYET
   4984 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4985 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4986 	uint32_t avg_size = 0;
   4987 	uint32_t new_itr;
   4988 
   4989 	if (rxq->rxq_packets)
   4990 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4991 	if (txq->txq_packets)
   4992 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4993 
   4994 	if (avg_size == 0) {
   4995 		new_itr = 450; /* restore default value */
   4996 		goto out;
   4997 	}
   4998 
   4999 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5000 	avg_size += 24;
   5001 
   5002 	/* Don't starve jumbo frames */
   5003 	avg_size = min(avg_size, 3000);
   5004 
   5005 	/* Give a little boost to mid-size frames */
   5006 	if ((avg_size > 300) && (avg_size < 1200))
   5007 		new_itr = avg_size / 3;
   5008 	else
   5009 		new_itr = avg_size / 2;
   5010 
   5011 out:
   5012 	/*
    5013 	 * The usage of the 82574 and 82575 EITR differs from that of
    5014 	 * other NEWQUEUE controllers. See sc->sc_itr_init in
         	 * wm_init_locked().
   5015 	 */
   5016 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5017 		new_itr *= 4;
   5018 
   5019 	if (new_itr != wmq->wmq_itr) {
   5020 		wmq->wmq_itr = new_itr;
   5021 		wmq->wmq_set_itr = true;
   5022 	} else
   5023 		wmq->wmq_set_itr = false;
   5024 
   5025 	rxq->rxq_packets = 0;
   5026 	rxq->rxq_bytes = 0;
   5027 	txq->txq_packets = 0;
   5028 	txq->txq_bytes = 0;
   5029 #endif
   5030 }
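
         /*
          * Worked example of the (disabled) AIM path above: an average packet
          * of 800 bytes becomes avg_size = 824 after the 24-byte
          * CRC/preamble/gap adjustment; 824 is in the mid-size window
          * (300..1200), so new_itr = 824 / 3 = 274, then scaled by 4 to 1096
          * on every controller except the NEWQUEUE-style 82575.
          */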
   5031 
   5032 /*
   5033  * wm_init:		[ifnet interface function]
   5034  *
   5035  *	Initialize the interface.
   5036  */
   5037 static int
   5038 wm_init(struct ifnet *ifp)
   5039 {
   5040 	struct wm_softc *sc = ifp->if_softc;
   5041 	int ret;
   5042 
   5043 	WM_CORE_LOCK(sc);
   5044 	ret = wm_init_locked(ifp);
   5045 	WM_CORE_UNLOCK(sc);
   5046 
   5047 	return ret;
   5048 }
   5049 
   5050 static int
   5051 wm_init_locked(struct ifnet *ifp)
   5052 {
   5053 	struct wm_softc *sc = ifp->if_softc;
   5054 	int i, j, trynum, error = 0;
   5055 	uint32_t reg;
   5056 
   5057 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5058 		device_xname(sc->sc_dev), __func__));
   5059 	KASSERT(WM_CORE_LOCKED(sc));
   5060 
   5061 	/*
    5062 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5063 	 * There is a small but measurable benefit to avoiding the adjustment
   5064 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5065 	 * on such platforms.  One possibility is that the DMA itself is
   5066 	 * slightly more efficient if the front of the entire packet (instead
   5067 	 * of the front of the headers) is aligned.
   5068 	 *
   5069 	 * Note we must always set align_tweak to 0 if we are using
   5070 	 * jumbo frames.
   5071 	 */
   5072 #ifdef __NO_STRICT_ALIGNMENT
   5073 	sc->sc_align_tweak = 0;
   5074 #else
   5075 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5076 		sc->sc_align_tweak = 0;
   5077 	else
   5078 		sc->sc_align_tweak = 2;
   5079 #endif /* __NO_STRICT_ALIGNMENT */
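
         	/*
         	 * Example of the tweak (a sketch): with sc_align_tweak == 2, the
         	 * 14-byte Ethernet header starts at a 2-byte offset, which puts
         	 * the IP header that follows it on a 4-byte boundary for
         	 * strict-alignment platforms.
         	 */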
   5080 
   5081 	/* Cancel any pending I/O. */
   5082 	wm_stop_locked(ifp, 0);
   5083 
   5084 	/* update statistics before reset */
   5085 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5086 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5087 
   5088 	/* PCH_SPT hardware workaround */
   5089 	if (sc->sc_type == WM_T_PCH_SPT)
   5090 		wm_flush_desc_rings(sc);
   5091 
   5092 	/* Reset the chip to a known state. */
   5093 	wm_reset(sc);
   5094 
   5095 	/* AMT based hardware can now take control from firmware */
   5096 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5097 		wm_get_hw_control(sc);
   5098 
   5099 	/* Init hardware bits */
   5100 	wm_initialize_hardware_bits(sc);
   5101 
   5102 	/* Reset the PHY. */
   5103 	if (sc->sc_flags & WM_F_HAS_MII)
   5104 		wm_gmii_reset(sc);
   5105 
   5106 	/* Calculate (E)ITR value */
   5107 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5108 		/*
    5109 		 * For NEWQUEUE's EITR (except for the 82575).
    5110 		 * The 82575's EITR should be set to the same throttling
    5111 		 * value as other old controllers' ITR because the
    5112 		 * interrupts/sec calculation is the same, that is,
    5113 		 * 1,000,000,000 / (N * 256).
    5114 		 *
    5115 		 * The 82574's EITR should be set to the same throttling
    5116 		 * value as the ITR.
    5117 		 *
         		 * For N interrupts/sec, set this value to 1,000,000 / N,
         		 * in contrast to the ITR throttling value.
   5118 		 */
   5119 		sc->sc_itr_init = 450;
   5120 	} else if (sc->sc_type >= WM_T_82543) {
   5121 		/*
   5122 		 * Set up the interrupt throttling register (units of 256ns)
   5123 		 * Note that a footnote in Intel's documentation says this
   5124 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5125 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5126 		 * that that is also true for the 1024ns units of the other
   5127 		 * interrupt-related timer registers -- so, really, we ought
   5128 		 * to divide this value by 4 when the link speed is low.
   5129 		 *
   5130 		 * XXX implement this division at link speed change!
   5131 		 */
   5132 
   5133 		/*
   5134 		 * For N interrupts/sec, set this value to:
   5135 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5136 		 * absolute and packet timer values to this value
   5137 		 * divided by 4 to get "simple timer" behavior.
   5138 		 */
   5139 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5140 	}
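
         	/*
         	 * A quick check of the arithmetic above (a sketch): with the
         	 * legacy ITR, 1,000,000,000 / (1500 * 256) is about 2604
         	 * interrupts/sec, as noted; with the NEWQUEUE-style EITR, 450
         	 * corresponds to roughly 1,000,000 / 450, about 2222
         	 * interrupts/sec, i.e. a comparable default interrupt rate.
         	 */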
   5141 
   5142 	error = wm_init_txrx_queues(sc);
   5143 	if (error)
   5144 		goto out;
   5145 
   5146 	/*
   5147 	 * Clear out the VLAN table -- we don't use it (yet).
   5148 	 */
   5149 	CSR_WRITE(sc, WMREG_VET, 0);
   5150 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5151 		trynum = 10; /* Due to hw errata */
   5152 	else
   5153 		trynum = 1;
   5154 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5155 		for (j = 0; j < trynum; j++)
   5156 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5157 
   5158 	/*
   5159 	 * Set up flow-control parameters.
   5160 	 *
   5161 	 * XXX Values could probably stand some tuning.
   5162 	 */
   5163 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5164 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5165 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5166 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5167 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5168 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5169 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5170 	}
   5171 
   5172 	sc->sc_fcrtl = FCRTL_DFLT;
   5173 	if (sc->sc_type < WM_T_82543) {
   5174 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5175 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5176 	} else {
   5177 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5178 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5179 	}
   5180 
   5181 	if (sc->sc_type == WM_T_80003)
   5182 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5183 	else
   5184 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5185 
   5186 	/* Writes the control register. */
   5187 	wm_set_vlan(sc);
   5188 
   5189 	if (sc->sc_flags & WM_F_HAS_MII) {
   5190 		int val;
   5191 
   5192 		switch (sc->sc_type) {
   5193 		case WM_T_80003:
   5194 		case WM_T_ICH8:
   5195 		case WM_T_ICH9:
   5196 		case WM_T_ICH10:
   5197 		case WM_T_PCH:
   5198 		case WM_T_PCH2:
   5199 		case WM_T_PCH_LPT:
   5200 		case WM_T_PCH_SPT:
   5201 			/*
    5202 			 * Set the MAC to wait the maximum time between each
    5203 			 * iteration and to increase the maximum number of
    5204 			 * iterations when polling the PHY; this fixes
    5205 			 * erroneous timeouts at 10Mbps.
   5206 			 */
   5207 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5208 			    0xFFFF);
   5209 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5210 			val |= 0x3F;
   5211 			wm_kmrn_writereg(sc,
   5212 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5213 			break;
   5214 		default:
   5215 			break;
   5216 		}
   5217 
   5218 		if (sc->sc_type == WM_T_80003) {
   5219 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5220 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5221 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5222 
   5223 			/* Bypass RX and TX FIFO's */
   5224 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5225 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5226 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5227 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5228 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5229 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5230 		}
   5231 	}
   5232 #if 0
   5233 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5234 #endif
   5235 
   5236 	/* Set up checksum offload parameters. */
   5237 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5238 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5239 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5240 		reg |= RXCSUM_IPOFL;
   5241 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5242 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5243 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5244 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5245 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5246 
    5247 	/* Set up MSI-X related registers */
   5248 	if (wm_is_using_msix(sc)) {
   5249 		uint32_t ivar;
   5250 		struct wm_queue *wmq;
   5251 		int qid, qintr_idx;
   5252 
   5253 		if (sc->sc_type == WM_T_82575) {
   5254 			/* Interrupt control */
   5255 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5256 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5257 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5258 
   5259 			/* TX and RX */
   5260 			for (i = 0; i < sc->sc_nqueues; i++) {
   5261 				wmq = &sc->sc_queue[i];
   5262 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5263 				    EITR_TX_QUEUE(wmq->wmq_id)
   5264 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5265 			}
   5266 			/* Link status */
   5267 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5268 			    EITR_OTHER);
   5269 		} else if (sc->sc_type == WM_T_82574) {
   5270 			/* Interrupt control */
   5271 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5272 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5273 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5274 
   5275 			/*
    5276 			 * Work around an issue with spurious interrupts
    5277 			 * in MSI-X mode.
    5278 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5279 			 * been initialized yet, so re-init WMREG_RFCTL here.
   5280 			 */
   5281 			reg = CSR_READ(sc, WMREG_RFCTL);
   5282 			reg |= WMREG_RFCTL_ACKDIS;
   5283 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5284 
   5285 			ivar = 0;
   5286 			/* TX and RX */
   5287 			for (i = 0; i < sc->sc_nqueues; i++) {
   5288 				wmq = &sc->sc_queue[i];
   5289 				qid = wmq->wmq_id;
   5290 				qintr_idx = wmq->wmq_intr_idx;
   5291 
   5292 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5293 				    IVAR_TX_MASK_Q_82574(qid));
   5294 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5295 				    IVAR_RX_MASK_Q_82574(qid));
   5296 			}
   5297 			/* Link status */
   5298 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5299 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5300 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5301 		} else {
   5302 			/* Interrupt control */
   5303 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5304 			    | GPIE_EIAME | GPIE_PBA);
   5305 
   5306 			switch (sc->sc_type) {
   5307 			case WM_T_82580:
   5308 			case WM_T_I350:
   5309 			case WM_T_I354:
   5310 			case WM_T_I210:
   5311 			case WM_T_I211:
   5312 				/* TX and RX */
   5313 				for (i = 0; i < sc->sc_nqueues; i++) {
   5314 					wmq = &sc->sc_queue[i];
   5315 					qid = wmq->wmq_id;
   5316 					qintr_idx = wmq->wmq_intr_idx;
   5317 
   5318 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5319 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5320 					ivar |= __SHIFTIN((qintr_idx
   5321 						| IVAR_VALID),
   5322 					    IVAR_TX_MASK_Q(qid));
   5323 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5324 					ivar |= __SHIFTIN((qintr_idx
   5325 						| IVAR_VALID),
   5326 					    IVAR_RX_MASK_Q(qid));
   5327 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5328 				}
   5329 				break;
   5330 			case WM_T_82576:
   5331 				/* TX and RX */
   5332 				for (i = 0; i < sc->sc_nqueues; i++) {
   5333 					wmq = &sc->sc_queue[i];
   5334 					qid = wmq->wmq_id;
   5335 					qintr_idx = wmq->wmq_intr_idx;
   5336 
   5337 					ivar = CSR_READ(sc,
   5338 					    WMREG_IVAR_Q_82576(qid));
   5339 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5340 					ivar |= __SHIFTIN((qintr_idx
   5341 						| IVAR_VALID),
   5342 					    IVAR_TX_MASK_Q_82576(qid));
   5343 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5344 					ivar |= __SHIFTIN((qintr_idx
   5345 						| IVAR_VALID),
   5346 					    IVAR_RX_MASK_Q_82576(qid));
   5347 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5348 					    ivar);
   5349 				}
   5350 				break;
   5351 			default:
   5352 				break;
   5353 			}
   5354 
   5355 			/* Link status */
   5356 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5357 			    IVAR_MISC_OTHER);
   5358 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5359 		}
   5360 
   5361 		if (wm_is_using_multiqueue(sc)) {
   5362 			wm_init_rss(sc);
   5363 
    5364 			/*
    5365 			 * NOTE: Receive Full-Packet Checksum Offload
    5366 			 * is mutually exclusive with Multiqueue. However,
    5367 			 * this is not the same as the TCP/IP checksums,
    5368 			 * which still work.
    5369 			 */
   5370 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5371 			reg |= RXCSUM_PCSD;
   5372 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5373 		}
   5374 	}
   5375 
   5376 	/* Set up the interrupt registers. */
   5377 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5378 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5379 	    ICR_RXO | ICR_RXT0;
   5380 	if (wm_is_using_msix(sc)) {
   5381 		uint32_t mask;
   5382 		struct wm_queue *wmq;
   5383 
   5384 		switch (sc->sc_type) {
   5385 		case WM_T_82574:
   5386 			mask = 0;
   5387 			for (i = 0; i < sc->sc_nqueues; i++) {
   5388 				wmq = &sc->sc_queue[i];
   5389 				mask |= ICR_TXQ(wmq->wmq_id);
   5390 				mask |= ICR_RXQ(wmq->wmq_id);
   5391 			}
   5392 			mask |= ICR_OTHER;
   5393 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5394 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5395 			break;
   5396 		default:
   5397 			if (sc->sc_type == WM_T_82575) {
   5398 				mask = 0;
   5399 				for (i = 0; i < sc->sc_nqueues; i++) {
   5400 					wmq = &sc->sc_queue[i];
   5401 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5402 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5403 				}
   5404 				mask |= EITR_OTHER;
   5405 			} else {
   5406 				mask = 0;
   5407 				for (i = 0; i < sc->sc_nqueues; i++) {
   5408 					wmq = &sc->sc_queue[i];
   5409 					mask |= 1 << wmq->wmq_intr_idx;
   5410 				}
   5411 				mask |= 1 << sc->sc_link_intr_idx;
   5412 			}
   5413 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5414 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5415 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5416 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5417 			break;
   5418 		}
   5419 	} else
   5420 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5421 
   5422 	/* Set up the inter-packet gap. */
   5423 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5424 
   5425 	if (sc->sc_type >= WM_T_82543) {
   5426 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5427 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5428 			wm_itrs_writereg(sc, wmq);
   5429 		}
   5430 		/*
    5431 		 * Link interrupts occur much less frequently than TX and
    5432 		 * RX interrupts, so we don't tune the
    5433 		 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's if_igb
    5434 		 * does.
   5435 		 */
   5436 	}
   5437 
    5438 	/* Set the VLAN ethertype. */
   5439 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5440 
   5441 	/*
   5442 	 * Set up the transmit control register; we start out with
    5443 	 * a collision distance suitable for FDX, but update it when
   5444 	 * we resolve the media type.
   5445 	 */
   5446 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5447 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5448 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5449 	if (sc->sc_type >= WM_T_82571)
   5450 		sc->sc_tctl |= TCTL_MULR;
   5451 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5452 
   5453 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5454 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5455 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5456 	}
   5457 
   5458 	if (sc->sc_type == WM_T_80003) {
   5459 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5460 		reg &= ~TCTL_EXT_GCEX_MASK;
   5461 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5462 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5463 	}
   5464 
   5465 	/* Set the media. */
   5466 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5467 		goto out;
   5468 
   5469 	/* Configure for OS presence */
   5470 	wm_init_manageability(sc);
   5471 
   5472 	/*
   5473 	 * Set up the receive control register; we actually program
   5474 	 * the register when we set the receive filter.  Use multicast
   5475 	 * address offset type 0.
   5476 	 *
   5477 	 * Only the i82544 has the ability to strip the incoming
   5478 	 * CRC, so we don't enable that feature.
   5479 	 */
   5480 	sc->sc_mchash_type = 0;
   5481 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5482 	    | RCTL_MO(sc->sc_mchash_type);
   5483 
   5484 	/*
    5485 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5486 	 */
   5487 	if (sc->sc_type == WM_T_82574)
   5488 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5489 
   5490 	/*
    5491 	 * The I350 has a bug where it always strips the CRC whether asked
    5492 	 * to or not, so ask for the stripped CRC here and cope in rxeof.
   5493 	 */
   5494 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5495 	    || (sc->sc_type == WM_T_I210))
   5496 		sc->sc_rctl |= RCTL_SECRC;
   5497 
   5498 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5499 	    && (ifp->if_mtu > ETHERMTU)) {
   5500 		sc->sc_rctl |= RCTL_LPE;
   5501 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5502 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5503 	}
   5504 
   5505 	if (MCLBYTES == 2048) {
   5506 		sc->sc_rctl |= RCTL_2k;
   5507 	} else {
   5508 		if (sc->sc_type >= WM_T_82543) {
   5509 			switch (MCLBYTES) {
   5510 			case 4096:
   5511 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5512 				break;
   5513 			case 8192:
   5514 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5515 				break;
   5516 			case 16384:
   5517 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5518 				break;
   5519 			default:
   5520 				panic("wm_init: MCLBYTES %d unsupported",
   5521 				    MCLBYTES);
   5522 				break;
   5523 			}
    5524 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5525 	}
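
         	/*
         	 * For example (a sketch): a kernel built with MCLBYTES == 4096
         	 * takes the RCTL_BSEX | RCTL_BSEX_4k branch above, telling the
         	 * chip to expect 4k receive buffers, which matches the cluster
         	 * size used for the Rx DMA maps.
         	 */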
   5526 
   5527 	/* Set the receive filter. */
   5528 	wm_set_filter(sc);
   5529 
   5530 	/* Enable ECC */
   5531 	switch (sc->sc_type) {
   5532 	case WM_T_82571:
   5533 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5534 		reg |= PBA_ECC_CORR_EN;
   5535 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5536 		break;
   5537 	case WM_T_PCH_LPT:
   5538 	case WM_T_PCH_SPT:
   5539 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5540 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5541 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5542 
   5543 		sc->sc_ctrl |= CTRL_MEHE;
   5544 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5545 		break;
   5546 	default:
   5547 		break;
   5548 	}
   5549 
    5550 	/* On the 82575 and later, set RDT only if RX is enabled */
   5551 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5552 		int qidx;
   5553 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5554 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5555 			for (i = 0; i < WM_NRXDESC; i++) {
   5556 				mutex_enter(rxq->rxq_lock);
   5557 				wm_init_rxdesc(rxq, i);
   5558 				mutex_exit(rxq->rxq_lock);
   5560 			}
   5561 		}
   5562 	}
   5563 
   5564 	wm_turnon(sc);
   5565 
   5566 	/* Start the one second link check clock. */
   5567 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5568 
   5569 	/* ...all done! */
   5570 	ifp->if_flags |= IFF_RUNNING;
   5571 	ifp->if_flags &= ~IFF_OACTIVE;
   5572 
   5573  out:
   5574 	sc->sc_if_flags = ifp->if_flags;
   5575 	if (error)
   5576 		log(LOG_ERR, "%s: interface not running\n",
   5577 		    device_xname(sc->sc_dev));
   5578 	return error;
   5579 }
   5580 
   5581 /*
   5582  * wm_stop:		[ifnet interface function]
   5583  *
   5584  *	Stop transmission on the interface.
   5585  */
   5586 static void
   5587 wm_stop(struct ifnet *ifp, int disable)
   5588 {
   5589 	struct wm_softc *sc = ifp->if_softc;
   5590 
   5591 	WM_CORE_LOCK(sc);
   5592 	wm_stop_locked(ifp, disable);
   5593 	WM_CORE_UNLOCK(sc);
   5594 }
   5595 
   5596 static void
   5597 wm_stop_locked(struct ifnet *ifp, int disable)
   5598 {
   5599 	struct wm_softc *sc = ifp->if_softc;
   5600 	struct wm_txsoft *txs;
   5601 	int i, qidx;
   5602 
   5603 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5604 		device_xname(sc->sc_dev), __func__));
   5605 	KASSERT(WM_CORE_LOCKED(sc));
   5606 
   5607 	wm_turnoff(sc);
   5608 
   5609 	/* Stop the one second clock. */
   5610 	callout_stop(&sc->sc_tick_ch);
   5611 
   5612 	/* Stop the 82547 Tx FIFO stall check timer. */
   5613 	if (sc->sc_type == WM_T_82547)
   5614 		callout_stop(&sc->sc_txfifo_ch);
   5615 
   5616 	if (sc->sc_flags & WM_F_HAS_MII) {
   5617 		/* Down the MII. */
   5618 		mii_down(&sc->sc_mii);
   5619 	} else {
   5620 #if 0
   5621 		/* Should we clear PHY's status properly? */
   5622 		wm_reset(sc);
   5623 #endif
   5624 	}
   5625 
   5626 	/* Stop the transmit and receive processes. */
   5627 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5628 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5629 	sc->sc_rctl &= ~RCTL_EN;
   5630 
   5631 	/*
   5632 	 * Clear the interrupt mask to ensure the device cannot assert its
   5633 	 * interrupt line.
   5634 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5635 	 * service any currently pending or shared interrupt.
   5636 	 */
   5637 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5638 	sc->sc_icr = 0;
   5639 	if (wm_is_using_msix(sc)) {
   5640 		if (sc->sc_type != WM_T_82574) {
   5641 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5642 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5643 		} else
   5644 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5645 	}
   5646 
   5647 	/* Release any queued transmit buffers. */
   5648 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5649 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5650 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5651 		mutex_enter(txq->txq_lock);
   5652 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5653 			txs = &txq->txq_soft[i];
   5654 			if (txs->txs_mbuf != NULL) {
    5655 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5656 				m_freem(txs->txs_mbuf);
   5657 				txs->txs_mbuf = NULL;
   5658 			}
   5659 		}
   5660 		mutex_exit(txq->txq_lock);
   5661 	}
   5662 
   5663 	/* Mark the interface as down and cancel the watchdog timer. */
   5664 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5665 	ifp->if_timer = 0;
   5666 
   5667 	if (disable) {
   5668 		for (i = 0; i < sc->sc_nqueues; i++) {
   5669 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5670 			mutex_enter(rxq->rxq_lock);
   5671 			wm_rxdrain(rxq);
   5672 			mutex_exit(rxq->rxq_lock);
   5673 		}
   5674 	}
   5675 
   5676 #if 0 /* notyet */
   5677 	if (sc->sc_type >= WM_T_82544)
   5678 		CSR_WRITE(sc, WMREG_WUC, 0);
   5679 #endif
   5680 }
   5681 
   5682 static void
   5683 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5684 {
   5685 	struct mbuf *m;
   5686 	int i;
   5687 
   5688 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5689 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5690 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5691 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5692 		    m->m_data, m->m_len, m->m_flags);
   5693 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5694 	    i, i == 1 ? "" : "s");
   5695 }
   5696 
   5697 /*
   5698  * wm_82547_txfifo_stall:
   5699  *
   5700  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5701  *	reset the FIFO pointers, and restart packet transmission.
   5702  */
   5703 static void
   5704 wm_82547_txfifo_stall(void *arg)
   5705 {
   5706 	struct wm_softc *sc = arg;
   5707 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5708 
   5709 	mutex_enter(txq->txq_lock);
   5710 
   5711 	if (txq->txq_stopping)
   5712 		goto out;
   5713 
   5714 	if (txq->txq_fifo_stall) {
   5715 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5716 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5717 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5718 			/*
   5719 			 * Packets have drained.  Stop transmitter, reset
   5720 			 * FIFO pointers, restart transmitter, and kick
   5721 			 * the packet queue.
   5722 			 */
   5723 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5724 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5725 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5726 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5727 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5728 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5729 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5730 			CSR_WRITE_FLUSH(sc);
   5731 
   5732 			txq->txq_fifo_head = 0;
   5733 			txq->txq_fifo_stall = 0;
   5734 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5735 		} else {
   5736 			/*
   5737 			 * Still waiting for packets to drain; try again in
   5738 			 * another tick.
   5739 			 */
   5740 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5741 		}
   5742 	}
   5743 
   5744 out:
   5745 	mutex_exit(txq->txq_lock);
   5746 }
   5747 
   5748 /*
   5749  * wm_82547_txfifo_bugchk:
   5750  *
   5751  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5752  *	prevent enqueueing a packet that would wrap around the end
    5753  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5754  *
   5755  *	We do this by checking the amount of space before the end
   5756  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5757  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5758  *	the internal FIFO pointers to the beginning, and restart
   5759  *	transmission on the interface.
   5760  */
   5761 #define	WM_FIFO_HDR		0x10
   5762 #define	WM_82547_PAD_LEN	0x3e0
   5763 static int
   5764 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5765 {
   5766 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5767 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5768 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5769 
   5770 	/* Just return if already stalled. */
   5771 	if (txq->txq_fifo_stall)
   5772 		return 1;
   5773 
   5774 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5775 		/* Stall only occurs in half-duplex mode. */
   5776 		goto send_packet;
   5777 	}
   5778 
   5779 	if (len >= WM_82547_PAD_LEN + space) {
   5780 		txq->txq_fifo_stall = 1;
   5781 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5782 		return 1;
   5783 	}
   5784 
   5785  send_packet:
   5786 	txq->txq_fifo_head += len;
   5787 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5788 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5789 
   5790 	return 0;
   5791 }
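
         /*
          * Worked example of the check above (a sketch): a 1500-byte packet
          * needs len = roundup(1500 + WM_FIFO_HDR, WM_FIFO_HDR) = 1520 bytes.
          * With WM_82547_PAD_LEN == 0x3e0 (992), the stall triggers when the
          * space left before the end of the FIFO is 528 bytes or less
          * (1520 >= 992 + space); otherwise the head simply advances,
          * wrapping modulo the FIFO size.
          */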
   5792 
   5793 static int
   5794 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5795 {
   5796 	int error;
   5797 
   5798 	/*
   5799 	 * Allocate the control data structures, and create and load the
   5800 	 * DMA map for it.
   5801 	 *
   5802 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5803 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5804 	 * both sets within the same 4G segment.
   5805 	 */
   5806 	if (sc->sc_type < WM_T_82544)
   5807 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5808 	else
   5809 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5810 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5811 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5812 	else
   5813 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5814 
   5815 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5816 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5817 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5818 		aprint_error_dev(sc->sc_dev,
   5819 		    "unable to allocate TX control data, error = %d\n",
   5820 		    error);
   5821 		goto fail_0;
   5822 	}
   5823 
   5824 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5825 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5826 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5827 		aprint_error_dev(sc->sc_dev,
   5828 		    "unable to map TX control data, error = %d\n", error);
   5829 		goto fail_1;
   5830 	}
   5831 
   5832 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5833 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5834 		aprint_error_dev(sc->sc_dev,
   5835 		    "unable to create TX control data DMA map, error = %d\n",
   5836 		    error);
   5837 		goto fail_2;
   5838 	}
   5839 
   5840 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5841 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5842 		aprint_error_dev(sc->sc_dev,
   5843 		    "unable to load TX control data DMA map, error = %d\n",
   5844 		    error);
   5845 		goto fail_3;
   5846 	}
   5847 
   5848 	return 0;
   5849 
   5850  fail_3:
   5851 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5852  fail_2:
   5853 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5854 	    WM_TXDESCS_SIZE(txq));
   5855  fail_1:
   5856 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5857  fail_0:
   5858 	return error;
   5859 }
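
         /*
          * A note on the allocation above: the (bus_size_t)0x100000000ULL
          * argument is bus_dmamem_alloc()'s "boundary" parameter, so the
          * returned segment never crosses a 4G boundary.  That keeps the
          * whole descriptor ring reachable from a single TDBAH/TDBAL
          * (high/low 32-bit) base address pair.
          */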
   5860 
   5861 static void
   5862 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5863 {
   5864 
   5865 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5866 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5867 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5868 	    WM_TXDESCS_SIZE(txq));
   5869 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5870 }
   5871 
   5872 static int
   5873 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5874 {
   5875 	int error;
   5876 	size_t rxq_descs_size;
   5877 
   5878 	/*
   5879 	 * Allocate the control data structures, and create and load the
   5880 	 * DMA map for it.
   5881 	 *
   5882 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5883 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5884 	 * both sets within the same 4G segment.
   5885 	 */
   5886 	rxq->rxq_ndesc = WM_NRXDESC;
   5887 	if (sc->sc_type == WM_T_82574)
   5888 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5889 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5890 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5891 	else
   5892 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5893 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5894 
   5895 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5896 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5897 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5898 		aprint_error_dev(sc->sc_dev,
   5899 		    "unable to allocate RX control data, error = %d\n",
   5900 		    error);
   5901 		goto fail_0;
   5902 	}
   5903 
   5904 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5905 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5906 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5907 		aprint_error_dev(sc->sc_dev,
   5908 		    "unable to map RX control data, error = %d\n", error);
   5909 		goto fail_1;
   5910 	}
   5911 
   5912 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5913 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5914 		aprint_error_dev(sc->sc_dev,
   5915 		    "unable to create RX control data DMA map, error = %d\n",
   5916 		    error);
   5917 		goto fail_2;
   5918 	}
   5919 
   5920 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5921 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5922 		aprint_error_dev(sc->sc_dev,
   5923 		    "unable to load RX control data DMA map, error = %d\n",
   5924 		    error);
   5925 		goto fail_3;
   5926 	}
   5927 
   5928 	return 0;
   5929 
   5930  fail_3:
   5931 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5932  fail_2:
   5933 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5934 	    rxq_descs_size);
   5935  fail_1:
   5936 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5937  fail_0:
   5938 	return error;
   5939 }
   5940 
   5941 static void
   5942 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5943 {
   5944 
   5945 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5946 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5947 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5948 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5949 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5950 }
   5951 
   5952 
   5953 static int
   5954 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5955 {
   5956 	int i, error;
   5957 
   5958 	/* Create the transmit buffer DMA maps. */
   5959 	WM_TXQUEUELEN(txq) =
   5960 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5961 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5962 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5963 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5964 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5965 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5966 			aprint_error_dev(sc->sc_dev,
   5967 			    "unable to create Tx DMA map %d, error = %d\n",
   5968 			    i, error);
   5969 			goto fail;
   5970 		}
   5971 	}
   5972 
   5973 	return 0;
   5974 
   5975  fail:
   5976 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5977 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5978 			bus_dmamap_destroy(sc->sc_dmat,
   5979 			    txq->txq_soft[i].txs_dmamap);
   5980 	}
   5981 	return error;
   5982 }
   5983 
   5984 static void
   5985 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5986 {
   5987 	int i;
   5988 
   5989 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5990 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5991 			bus_dmamap_destroy(sc->sc_dmat,
   5992 			    txq->txq_soft[i].txs_dmamap);
   5993 	}
   5994 }
   5995 
   5996 static int
   5997 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5998 {
   5999 	int i, error;
   6000 
   6001 	/* Create the receive buffer DMA maps. */
   6002 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6003 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6004 			    MCLBYTES, 0, 0,
   6005 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6006 			aprint_error_dev(sc->sc_dev,
    6007 			    "unable to create Rx DMA map %d, error = %d\n",
   6008 			    i, error);
   6009 			goto fail;
   6010 		}
   6011 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6012 	}
   6013 
   6014 	return 0;
   6015 
   6016  fail:
   6017 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6018 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6019 			bus_dmamap_destroy(sc->sc_dmat,
   6020 			    rxq->rxq_soft[i].rxs_dmamap);
   6021 	}
   6022 	return error;
   6023 }
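
         /*
          * A note on the maps above: each receive buffer is a single mbuf
          * cluster, so every map is created with one MCLBYTES-sized segment;
          * wm_add_rxbuf() later loads one cluster per descriptor into these
          * maps.
          */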
   6024 
   6025 static void
   6026 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6027 {
   6028 	int i;
   6029 
   6030 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6031 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6032 			bus_dmamap_destroy(sc->sc_dmat,
   6033 			    rxq->rxq_soft[i].rxs_dmamap);
   6034 	}
   6035 }
   6036 
   6037 /*
    6038  * wm_alloc_txrx_queues:
    6039  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6040  */
   6041 static int
   6042 wm_alloc_txrx_queues(struct wm_softc *sc)
   6043 {
   6044 	int i, error, tx_done, rx_done;
   6045 
   6046 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6047 	    KM_SLEEP);
   6048 	if (sc->sc_queue == NULL) {
    6049 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6050 		error = ENOMEM;
   6051 		goto fail_0;
   6052 	}
   6053 
   6054 	/*
   6055 	 * For transmission
   6056 	 */
   6057 	error = 0;
   6058 	tx_done = 0;
   6059 	for (i = 0; i < sc->sc_nqueues; i++) {
   6060 #ifdef WM_EVENT_COUNTERS
   6061 		int j;
   6062 		const char *xname;
   6063 #endif
   6064 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6065 		txq->txq_sc = sc;
   6066 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6067 
   6068 		error = wm_alloc_tx_descs(sc, txq);
   6069 		if (error)
   6070 			break;
   6071 		error = wm_alloc_tx_buffer(sc, txq);
   6072 		if (error) {
   6073 			wm_free_tx_descs(sc, txq);
   6074 			break;
   6075 		}
   6076 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6077 		if (txq->txq_interq == NULL) {
   6078 			wm_free_tx_descs(sc, txq);
   6079 			wm_free_tx_buffer(sc, txq);
   6080 			error = ENOMEM;
   6081 			break;
   6082 		}
   6083 
   6084 #ifdef WM_EVENT_COUNTERS
   6085 		xname = device_xname(sc->sc_dev);
   6086 
   6087 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6088 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6089 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6090 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6091 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6092 
   6093 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6094 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6095 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6096 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6097 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6098 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6099 
   6100 		for (j = 0; j < WM_NTXSEGS; j++) {
    6101 			snprintf(txq->txq_txseg_evcnt_names[j],
    6102 			    sizeof(txq->txq_txseg_evcnt_names[j]),
    6103 			    "txq%02dtxseg%d", i, j);
    6104 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   6105 		}
   6106 
   6107 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6108 
   6109 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6110 #endif /* WM_EVENT_COUNTERS */
   6111 
   6112 		tx_done++;
   6113 	}
   6114 	if (error)
   6115 		goto fail_1;
   6116 
   6117 	/*
    6118 	 * For receive
   6119 	 */
   6120 	error = 0;
   6121 	rx_done = 0;
   6122 	for (i = 0; i < sc->sc_nqueues; i++) {
   6123 #ifdef WM_EVENT_COUNTERS
   6124 		const char *xname;
   6125 #endif
   6126 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6127 		rxq->rxq_sc = sc;
   6128 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6129 
   6130 		error = wm_alloc_rx_descs(sc, rxq);
   6131 		if (error)
   6132 			break;
   6133 
   6134 		error = wm_alloc_rx_buffer(sc, rxq);
   6135 		if (error) {
   6136 			wm_free_rx_descs(sc, rxq);
   6137 			break;
   6138 		}
   6139 
   6140 #ifdef WM_EVENT_COUNTERS
   6141 		xname = device_xname(sc->sc_dev);
   6142 
   6143 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6144 
   6145 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6146 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6147 #endif /* WM_EVENT_COUNTERS */
   6148 
   6149 		rx_done++;
   6150 	}
   6151 	if (error)
   6152 		goto fail_2;
   6153 
   6154 	return 0;
   6155 
   6156  fail_2:
   6157 	for (i = 0; i < rx_done; i++) {
   6158 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6159 		wm_free_rx_buffer(sc, rxq);
   6160 		wm_free_rx_descs(sc, rxq);
   6161 		if (rxq->rxq_lock)
   6162 			mutex_obj_free(rxq->rxq_lock);
   6163 	}
   6164  fail_1:
   6165 	for (i = 0; i < tx_done; i++) {
   6166 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6167 		pcq_destroy(txq->txq_interq);
   6168 		wm_free_tx_buffer(sc, txq);
   6169 		wm_free_tx_descs(sc, txq);
   6170 		if (txq->txq_lock)
   6171 			mutex_obj_free(txq->txq_lock);
   6172 	}
   6173 
   6174 	kmem_free(sc->sc_queue,
   6175 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6176  fail_0:
   6177 	return error;
   6178 }
   6179 
   6180 /*
    6181  * wm_free_txrx_queues:
    6182  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6183  */
   6184 static void
   6185 wm_free_txrx_queues(struct wm_softc *sc)
   6186 {
   6187 	int i;
   6188 
   6189 	for (i = 0; i < sc->sc_nqueues; i++) {
   6190 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6191 
   6192 #ifdef WM_EVENT_COUNTERS
   6193 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6194 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6195 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6196 #endif /* WM_EVENT_COUNTERS */
   6197 
   6198 		wm_free_rx_buffer(sc, rxq);
   6199 		wm_free_rx_descs(sc, rxq);
   6200 		if (rxq->rxq_lock)
   6201 			mutex_obj_free(rxq->rxq_lock);
   6202 	}
   6203 
   6204 	for (i = 0; i < sc->sc_nqueues; i++) {
   6205 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6206 		struct mbuf *m;
   6207 #ifdef WM_EVENT_COUNTERS
   6208 		int j;
   6209 
   6210 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6211 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6212 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6213 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6214 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6215 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6216 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6217 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6218 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6219 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6220 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6221 
   6222 		for (j = 0; j < WM_NTXSEGS; j++)
   6223 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6224 
   6225 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6226 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6227 #endif /* WM_EVENT_COUNTERS */
   6228 
   6229 		/* drain txq_interq */
   6230 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6231 			m_freem(m);
   6232 		pcq_destroy(txq->txq_interq);
   6233 
   6234 		wm_free_tx_buffer(sc, txq);
   6235 		wm_free_tx_descs(sc, txq);
   6236 		if (txq->txq_lock)
   6237 			mutex_obj_free(txq->txq_lock);
   6238 	}
   6239 
   6240 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6241 }
   6242 
   6243 static void
   6244 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6245 {
   6246 
   6247 	KASSERT(mutex_owned(txq->txq_lock));
   6248 
   6249 	/* Initialize the transmit descriptor ring. */
   6250 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6251 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6252 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6253 	txq->txq_free = WM_NTXDESC(txq);
   6254 	txq->txq_next = 0;
   6255 }
   6256 
   6257 static void
   6258 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6259     struct wm_txqueue *txq)
   6260 {
   6261 
   6262 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6263 		device_xname(sc->sc_dev), __func__));
   6264 	KASSERT(mutex_owned(txq->txq_lock));
   6265 
   6266 	if (sc->sc_type < WM_T_82543) {
   6267 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6268 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6269 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6270 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6271 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6272 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6273 	} else {
   6274 		int qid = wmq->wmq_id;
   6275 
   6276 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6277 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6278 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6279 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6280 
   6281 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6282 			/*
   6283 			 * Don't write TDT before TCTL.EN is set.
    6284 			 * See the hardware documentation for details.
   6285 			 */
   6286 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6287 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6288 			    | TXDCTL_WTHRESH(0));
   6289 		else {
   6290 			/* XXX should update with AIM? */
   6291 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6292 			if (sc->sc_type >= WM_T_82540) {
    6293 				/* Should be the same value as TIDV */
   6294 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6295 			}
   6296 
   6297 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6298 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6299 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6300 		}
   6301 	}
   6302 }
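
/*
 * Unit note (a sketch based on the public Intel documentation, not on
 * anything in this file): the ITR value kept in wmq->wmq_itr counts in
 * 256 ns increments, while TIDV/TADV (and RDTR/RADV on the Rx side)
 * count in 1.024 us increments, so the "wmq_itr / 4" writes above
 * rescale between the two.  For example, wmq_itr = 468 becomes 117,
 * i.e. roughly a 120 us interrupt delay.
 */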
   6303 
   6304 static void
   6305 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6306 {
   6307 	int i;
   6308 
   6309 	KASSERT(mutex_owned(txq->txq_lock));
   6310 
   6311 	/* Initialize the transmit job descriptors. */
   6312 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6313 		txq->txq_soft[i].txs_mbuf = NULL;
   6314 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6315 	txq->txq_snext = 0;
   6316 	txq->txq_sdirty = 0;
   6317 }
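
/*
 * Bookkeeping sketch: txq_snext is the next free job slot, txq_sdirty
 * the oldest in-flight job, and txq_sfree the number of free slots.
 * After the reset above, snext == sdirty == 0 and
 * sfree == WM_TXQUEUELEN(txq); wm_send_common_locked() consumes slots
 * and wm_txeof() recycles them.
 */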
   6318 
   6319 static void
   6320 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6321     struct wm_txqueue *txq)
   6322 {
   6323 
   6324 	KASSERT(mutex_owned(txq->txq_lock));
   6325 
   6326 	/*
   6327 	 * Set up some register offsets that are different between
   6328 	 * the i82542 and the i82543 and later chips.
   6329 	 */
   6330 	if (sc->sc_type < WM_T_82543)
   6331 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6332 	else
   6333 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6334 
   6335 	wm_init_tx_descs(sc, txq);
   6336 	wm_init_tx_regs(sc, wmq, txq);
   6337 	wm_init_tx_buffer(sc, txq);
   6338 }
   6339 
   6340 static void
   6341 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6342     struct wm_rxqueue *rxq)
   6343 {
   6344 
   6345 	KASSERT(mutex_owned(rxq->rxq_lock));
   6346 
   6347 	/*
   6348 	 * Initialize the receive descriptor and receive job
   6349 	 * descriptor rings.
   6350 	 */
   6351 	if (sc->sc_type < WM_T_82543) {
   6352 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6353 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6354 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6355 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6356 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6357 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6358 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6359 
   6360 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6361 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6362 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6363 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6364 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6365 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6366 	} else {
   6367 		int qid = wmq->wmq_id;
   6368 
   6369 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6370 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6371 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6372 
   6373 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6374 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6375 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6376 
   6377 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6378 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6379 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6380 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6381 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6382 			    | RXDCTL_WTHRESH(1));
   6383 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6384 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6385 		} else {
   6386 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6387 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6388 			/* XXX should update with AIM? */
   6389 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6390 			/* MUST be the same value as RDTR */
   6391 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6392 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6393 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6394 		}
   6395 	}
   6396 }
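
/*
 * Note on the SRRCTL write above (illustrative; the actual shift value
 * is defined in the register header): the packet buffer size field is
 * programmed in (1 << SRRCTL_BSIZEPKT_SHIFT)-byte units, hence the
 * panic check that MCLBYTES is an exact multiple.  With the usual
 * MCLBYTES of 2048 and a 1 KB unit, MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT
 * programs a value of 2.
 */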
   6397 
   6398 static int
   6399 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6400 {
   6401 	struct wm_rxsoft *rxs;
   6402 	int error, i;
   6403 
   6404 	KASSERT(mutex_owned(rxq->rxq_lock));
   6405 
   6406 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6407 		rxs = &rxq->rxq_soft[i];
   6408 		if (rxs->rxs_mbuf == NULL) {
   6409 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6410 				log(LOG_ERR, "%s: unable to allocate or map "
   6411 				    "rx buffer %d, error = %d\n",
   6412 				    device_xname(sc->sc_dev), i, error);
   6413 				/*
   6414 				 * XXX Should attempt to run with fewer receive
   6415 				 * XXX buffers instead of just failing.
   6416 				 */
   6417 				wm_rxdrain(rxq);
   6418 				return ENOMEM;
   6419 			}
   6420 		} else {
   6421 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6422 				wm_init_rxdesc(rxq, i);
   6423 			/*
    6424 			 * For 82575 and newer devices, the RX descriptors
    6425 			 * must be initialized after RCTL.EN is set in
    6426 			 * wm_set_filter().
   6427 			 */
   6428 		}
   6429 	}
   6430 	rxq->rxq_ptr = 0;
   6431 	rxq->rxq_discard = 0;
   6432 	WM_RXCHAIN_RESET(rxq);
   6433 
   6434 	return 0;
   6435 }
   6436 
   6437 static int
   6438 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6439     struct wm_rxqueue *rxq)
   6440 {
   6441 
   6442 	KASSERT(mutex_owned(rxq->rxq_lock));
   6443 
   6444 	/*
   6445 	 * Set up some register offsets that are different between
   6446 	 * the i82542 and the i82543 and later chips.
   6447 	 */
   6448 	if (sc->sc_type < WM_T_82543)
   6449 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6450 	else
   6451 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6452 
   6453 	wm_init_rx_regs(sc, wmq, rxq);
   6454 	return wm_init_rx_buffer(sc, rxq);
   6455 }
   6456 
   6457 /*
    6458  * wm_init_txrx_queues:
   6459  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6460  */
   6461 static int
   6462 wm_init_txrx_queues(struct wm_softc *sc)
   6463 {
   6464 	int i, error = 0;
   6465 
   6466 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6467 		device_xname(sc->sc_dev), __func__));
   6468 
   6469 	for (i = 0; i < sc->sc_nqueues; i++) {
   6470 		struct wm_queue *wmq = &sc->sc_queue[i];
   6471 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6472 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6473 
   6474 		/*
   6475 		 * TODO
    6476 		 * Currently, use a constant value instead of AIM.
    6477 		 * Furthermore, the interrupt interval for multiqueue,
    6478 		 * which uses polling mode, is set lower than the default
    6479 		 * value.  More tuning and AIM support are required.
   6480 		 */
   6481 		if (wm_is_using_multiqueue(sc))
   6482 			wmq->wmq_itr = 50;
   6483 		else
   6484 			wmq->wmq_itr = sc->sc_itr_init;
   6485 		wmq->wmq_set_itr = true;
   6486 
   6487 		mutex_enter(txq->txq_lock);
   6488 		wm_init_tx_queue(sc, wmq, txq);
   6489 		mutex_exit(txq->txq_lock);
   6490 
   6491 		mutex_enter(rxq->rxq_lock);
   6492 		error = wm_init_rx_queue(sc, wmq, rxq);
   6493 		mutex_exit(rxq->rxq_lock);
   6494 		if (error)
   6495 			break;
   6496 	}
   6497 
   6498 	return error;
   6499 }
   6500 
   6501 /*
   6502  * wm_tx_offload:
   6503  *
   6504  *	Set up TCP/IP checksumming parameters for the
   6505  *	specified packet.
   6506  */
   6507 static int
   6508 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6509     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6510 {
   6511 	struct mbuf *m0 = txs->txs_mbuf;
   6512 	struct livengood_tcpip_ctxdesc *t;
   6513 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6514 	uint32_t ipcse;
   6515 	struct ether_header *eh;
   6516 	int offset, iphl;
   6517 	uint8_t fields;
   6518 
   6519 	/*
   6520 	 * XXX It would be nice if the mbuf pkthdr had offset
   6521 	 * fields for the protocol headers.
   6522 	 */
   6523 
   6524 	eh = mtod(m0, struct ether_header *);
   6525 	switch (htons(eh->ether_type)) {
   6526 	case ETHERTYPE_IP:
   6527 	case ETHERTYPE_IPV6:
   6528 		offset = ETHER_HDR_LEN;
   6529 		break;
   6530 
   6531 	case ETHERTYPE_VLAN:
   6532 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6533 		break;
   6534 
   6535 	default:
   6536 		/*
   6537 		 * Don't support this protocol or encapsulation.
   6538 		 */
   6539 		*fieldsp = 0;
   6540 		*cmdp = 0;
   6541 		return 0;
   6542 	}
   6543 
   6544 	if ((m0->m_pkthdr.csum_flags &
   6545 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6546 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6547 	} else {
   6548 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6549 	}
   6550 	ipcse = offset + iphl - 1;
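
	/*
	 * Worked example (illustrative): for an untagged IPv4 packet with
	 * a plain 20-byte header, offset = ETHER_HDR_LEN = 14 and
	 * iphl = 20, so ipcse = 14 + 20 - 1 = 33, the inclusive offset of
	 * the last byte covered by the IP header checksum.
	 */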
   6551 
   6552 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6553 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6554 	seg = 0;
   6555 	fields = 0;
   6556 
   6557 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6558 		int hlen = offset + iphl;
   6559 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6560 
   6561 		if (__predict_false(m0->m_len <
   6562 				    (hlen + sizeof(struct tcphdr)))) {
   6563 			/*
   6564 			 * TCP/IP headers are not in the first mbuf; we need
   6565 			 * to do this the slow and painful way.  Let's just
   6566 			 * hope this doesn't happen very often.
   6567 			 */
   6568 			struct tcphdr th;
   6569 
   6570 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6571 
   6572 			m_copydata(m0, hlen, sizeof(th), &th);
   6573 			if (v4) {
   6574 				struct ip ip;
   6575 
   6576 				m_copydata(m0, offset, sizeof(ip), &ip);
   6577 				ip.ip_len = 0;
   6578 				m_copyback(m0,
   6579 				    offset + offsetof(struct ip, ip_len),
   6580 				    sizeof(ip.ip_len), &ip.ip_len);
   6581 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6582 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6583 			} else {
   6584 				struct ip6_hdr ip6;
   6585 
   6586 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6587 				ip6.ip6_plen = 0;
   6588 				m_copyback(m0,
   6589 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6590 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6591 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6592 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6593 			}
   6594 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6595 			    sizeof(th.th_sum), &th.th_sum);
   6596 
   6597 			hlen += th.th_off << 2;
   6598 		} else {
   6599 			/*
   6600 			 * TCP/IP headers are in the first mbuf; we can do
   6601 			 * this the easy way.
   6602 			 */
   6603 			struct tcphdr *th;
   6604 
   6605 			if (v4) {
   6606 				struct ip *ip =
   6607 				    (void *)(mtod(m0, char *) + offset);
   6608 				th = (void *)(mtod(m0, char *) + hlen);
   6609 
   6610 				ip->ip_len = 0;
   6611 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6612 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6613 			} else {
   6614 				struct ip6_hdr *ip6 =
   6615 				    (void *)(mtod(m0, char *) + offset);
   6616 				th = (void *)(mtod(m0, char *) + hlen);
   6617 
   6618 				ip6->ip6_plen = 0;
   6619 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6620 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6621 			}
   6622 			hlen += th->th_off << 2;
   6623 		}
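
		/*
		 * In both paths above th_off counts 32-bit words, so
		 * "th_off << 2" converts it to bytes: an option-less TCP
		 * header has th_off = 5, i.e. 20 bytes.  hlen now holds the
		 * full L2+L3+L4 header length used by the TSO setup below.
		 */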
   6624 
   6625 		if (v4) {
   6626 			WM_Q_EVCNT_INCR(txq, txtso);
   6627 			cmdlen |= WTX_TCPIP_CMD_IP;
   6628 		} else {
   6629 			WM_Q_EVCNT_INCR(txq, txtso6);
   6630 			ipcse = 0;
   6631 		}
   6632 		cmd |= WTX_TCPIP_CMD_TSE;
   6633 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6634 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6635 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6636 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6637 	}
   6638 
   6639 	/*
   6640 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6641 	 * offload feature, if we load the context descriptor, we
   6642 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6643 	 */
   6644 
   6645 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6646 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6647 	    WTX_TCPIP_IPCSE(ipcse);
   6648 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6649 		WM_Q_EVCNT_INCR(txq, txipsum);
   6650 		fields |= WTX_IXSM;
   6651 	}
   6652 
   6653 	offset += iphl;
   6654 
   6655 	if (m0->m_pkthdr.csum_flags &
   6656 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6657 		WM_Q_EVCNT_INCR(txq, txtusum);
   6658 		fields |= WTX_TXSM;
   6659 		tucs = WTX_TCPIP_TUCSS(offset) |
   6660 		    WTX_TCPIP_TUCSO(offset +
   6661 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6662 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6663 	} else if ((m0->m_pkthdr.csum_flags &
   6664 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6665 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6666 		fields |= WTX_TXSM;
   6667 		tucs = WTX_TCPIP_TUCSS(offset) |
   6668 		    WTX_TCPIP_TUCSO(offset +
   6669 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6670 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6671 	} else {
   6672 		/* Just initialize it to a valid TCP context. */
   6673 		tucs = WTX_TCPIP_TUCSS(offset) |
   6674 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6675 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6676 	}
   6677 
   6678 	/*
    6679 	 * We don't have to write a context descriptor for every packet,
    6680 	 * except on the 82574: there a context descriptor must be written
    6681 	 * for every packet when two descriptor queues are used.
    6682 	 * Writing a context descriptor for every packet adds overhead,
    6683 	 * but it does not cause problems.
   6684 	 */
   6685 	/* Fill in the context descriptor. */
   6686 	t = (struct livengood_tcpip_ctxdesc *)
   6687 	    &txq->txq_descs[txq->txq_next];
   6688 	t->tcpip_ipcs = htole32(ipcs);
   6689 	t->tcpip_tucs = htole32(tucs);
   6690 	t->tcpip_cmdlen = htole32(cmdlen);
   6691 	t->tcpip_seg = htole32(seg);
   6692 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6693 
   6694 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6695 	txs->txs_ndesc++;
   6696 
   6697 	*cmdp = cmd;
   6698 	*fieldsp = fields;
   6699 
   6700 	return 0;
   6701 }
   6702 
   6703 static inline int
   6704 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6705 {
   6706 	struct wm_softc *sc = ifp->if_softc;
   6707 	u_int cpuid = cpu_index(curcpu());
   6708 
   6709 	/*
    6710 	 * Currently, a simple distribution strategy.
    6711 	 * TODO:
    6712 	 * distribute by flowid (RSS hash value).
    6713 	 */
    6714 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6715 }
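
/*
 * Illustrative example (hypothetical values): with ncpu = 4,
 * sc_affinity_offset = 1 and sc_nqueues = 2, a send from CPU index 2
 * selects queue (2 + 4 - 1) % 2 = 1.  Adding ncpu before the modulo
 * keeps the sum non-negative when cpuid < sc_affinity_offset.
 */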
   6716 
   6717 /*
   6718  * wm_start:		[ifnet interface function]
   6719  *
   6720  *	Start packet transmission on the interface.
   6721  */
   6722 static void
   6723 wm_start(struct ifnet *ifp)
   6724 {
   6725 	struct wm_softc *sc = ifp->if_softc;
   6726 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6727 
   6728 #ifdef WM_MPSAFE
   6729 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6730 #endif
   6731 	/*
   6732 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6733 	 */
   6734 
   6735 	mutex_enter(txq->txq_lock);
   6736 	if (!txq->txq_stopping)
   6737 		wm_start_locked(ifp);
   6738 	mutex_exit(txq->txq_lock);
   6739 }
   6740 
   6741 static void
   6742 wm_start_locked(struct ifnet *ifp)
   6743 {
   6744 	struct wm_softc *sc = ifp->if_softc;
   6745 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6746 
   6747 	wm_send_common_locked(ifp, txq, false);
   6748 }
   6749 
   6750 static int
   6751 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6752 {
   6753 	int qid;
   6754 	struct wm_softc *sc = ifp->if_softc;
   6755 	struct wm_txqueue *txq;
   6756 
   6757 	qid = wm_select_txqueue(ifp, m);
   6758 	txq = &sc->sc_queue[qid].wmq_txq;
   6759 
   6760 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6761 		m_freem(m);
   6762 		WM_Q_EVCNT_INCR(txq, txdrop);
   6763 		return ENOBUFS;
   6764 	}
   6765 
   6766 	/*
   6767 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6768 	 */
   6769 	ifp->if_obytes += m->m_pkthdr.len;
   6770 	if (m->m_flags & M_MCAST)
   6771 		ifp->if_omcasts++;
   6772 
   6773 	if (mutex_tryenter(txq->txq_lock)) {
   6774 		if (!txq->txq_stopping)
   6775 			wm_transmit_locked(ifp, txq);
   6776 		mutex_exit(txq->txq_lock);
   6777 	}
   6778 
   6779 	return 0;
   6780 }
   6781 
   6782 static void
   6783 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6784 {
   6785 
   6786 	wm_send_common_locked(ifp, txq, true);
   6787 }
   6788 
   6789 static void
   6790 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6791     bool is_transmit)
   6792 {
   6793 	struct wm_softc *sc = ifp->if_softc;
   6794 	struct mbuf *m0;
   6795 	struct m_tag *mtag;
   6796 	struct wm_txsoft *txs;
   6797 	bus_dmamap_t dmamap;
   6798 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6799 	bus_addr_t curaddr;
   6800 	bus_size_t seglen, curlen;
   6801 	uint32_t cksumcmd;
   6802 	uint8_t cksumfields;
   6803 
   6804 	KASSERT(mutex_owned(txq->txq_lock));
   6805 
   6806 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6807 		return;
   6808 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6809 		return;
   6810 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6811 		return;
   6812 
   6813 	/* Remember the previous number of free descriptors. */
   6814 	ofree = txq->txq_free;
   6815 
   6816 	/*
   6817 	 * Loop through the send queue, setting up transmit descriptors
   6818 	 * until we drain the queue, or use up all available transmit
   6819 	 * descriptors.
   6820 	 */
   6821 	for (;;) {
   6822 		m0 = NULL;
   6823 
   6824 		/* Get a work queue entry. */
   6825 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6826 			wm_txeof(sc, txq);
   6827 			if (txq->txq_sfree == 0) {
   6828 				DPRINTF(WM_DEBUG_TX,
   6829 				    ("%s: TX: no free job descriptors\n",
   6830 					device_xname(sc->sc_dev)));
   6831 				WM_Q_EVCNT_INCR(txq, txsstall);
   6832 				break;
   6833 			}
   6834 		}
   6835 
   6836 		/* Grab a packet off the queue. */
   6837 		if (is_transmit)
   6838 			m0 = pcq_get(txq->txq_interq);
   6839 		else
   6840 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6841 		if (m0 == NULL)
   6842 			break;
   6843 
   6844 		DPRINTF(WM_DEBUG_TX,
   6845 		    ("%s: TX: have packet to transmit: %p\n",
   6846 		    device_xname(sc->sc_dev), m0));
   6847 
   6848 		txs = &txq->txq_soft[txq->txq_snext];
   6849 		dmamap = txs->txs_dmamap;
   6850 
   6851 		use_tso = (m0->m_pkthdr.csum_flags &
   6852 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6853 
   6854 		/*
   6855 		 * So says the Linux driver:
   6856 		 * The controller does a simple calculation to make sure
   6857 		 * there is enough room in the FIFO before initiating the
   6858 		 * DMA for each buffer.  The calc is:
   6859 		 *	4 = ceil(buffer len / MSS)
   6860 		 * To make sure we don't overrun the FIFO, adjust the max
   6861 		 * buffer len if the MSS drops.
   6862 		 */
   6863 		dmamap->dm_maxsegsz =
   6864 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6865 		    ? m0->m_pkthdr.segsz << 2
   6866 		    : WTX_MAX_LEN;
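
		/*
		 * Illustrative arithmetic: with a typical MSS of 1460 the
		 * clamp above limits a single DMA segment to
		 * 1460 << 2 = 5840 bytes, so ceil(buffer len / MSS) never
		 * exceeds 4, matching the FIFO check described in the
		 * comment.
		 */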
   6867 
   6868 		/*
   6869 		 * Load the DMA map.  If this fails, the packet either
   6870 		 * didn't fit in the allotted number of segments, or we
   6871 		 * were short on resources.  For the too-many-segments
   6872 		 * case, we simply report an error and drop the packet,
   6873 		 * since we can't sanely copy a jumbo packet to a single
   6874 		 * buffer.
   6875 		 */
   6876 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6877 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6878 		if (error) {
   6879 			if (error == EFBIG) {
   6880 				WM_Q_EVCNT_INCR(txq, txdrop);
   6881 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6882 				    "DMA segments, dropping...\n",
   6883 				    device_xname(sc->sc_dev));
   6884 				wm_dump_mbuf_chain(sc, m0);
   6885 				m_freem(m0);
   6886 				continue;
   6887 			}
    6888 			/* Short on resources, just stop for now. */
   6889 			DPRINTF(WM_DEBUG_TX,
   6890 			    ("%s: TX: dmamap load failed: %d\n",
   6891 			    device_xname(sc->sc_dev), error));
   6892 			break;
   6893 		}
   6894 
   6895 		segs_needed = dmamap->dm_nsegs;
   6896 		if (use_tso) {
   6897 			/* For sentinel descriptor; see below. */
   6898 			segs_needed++;
   6899 		}
   6900 
   6901 		/*
   6902 		 * Ensure we have enough descriptors free to describe
   6903 		 * the packet.  Note, we always reserve one descriptor
   6904 		 * at the end of the ring due to the semantics of the
   6905 		 * TDT register, plus one more in the event we need
   6906 		 * to load offload context.
   6907 		 */
   6908 		if (segs_needed > txq->txq_free - 2) {
   6909 			/*
   6910 			 * Not enough free descriptors to transmit this
   6911 			 * packet.  We haven't committed anything yet,
   6912 			 * so just unload the DMA map, put the packet
    6913 			 * back on the queue, and punt.  Notify the upper
   6914 			 * layer that there are no more slots left.
   6915 			 */
   6916 			DPRINTF(WM_DEBUG_TX,
   6917 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6918 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6919 			    segs_needed, txq->txq_free - 1));
   6920 			if (!is_transmit)
   6921 				ifp->if_flags |= IFF_OACTIVE;
   6922 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6923 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6924 			WM_Q_EVCNT_INCR(txq, txdstall);
   6925 			break;
   6926 		}
   6927 
   6928 		/*
   6929 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6930 		 * once we know we can transmit the packet, since we
   6931 		 * do some internal FIFO space accounting here.
   6932 		 */
   6933 		if (sc->sc_type == WM_T_82547 &&
   6934 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6935 			DPRINTF(WM_DEBUG_TX,
   6936 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6937 			    device_xname(sc->sc_dev)));
   6938 			if (!is_transmit)
   6939 				ifp->if_flags |= IFF_OACTIVE;
   6940 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6941 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6942 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6943 			break;
   6944 		}
   6945 
   6946 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6947 
   6948 		DPRINTF(WM_DEBUG_TX,
   6949 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6950 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6951 
   6952 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6953 
   6954 		/*
   6955 		 * Store a pointer to the packet so that we can free it
   6956 		 * later.
   6957 		 *
   6958 		 * Initially, we consider the number of descriptors the
    6959 		 * packet uses to be the number of DMA segments.  This may be
   6960 		 * incremented by 1 if we do checksum offload (a descriptor
   6961 		 * is used to set the checksum context).
   6962 		 */
   6963 		txs->txs_mbuf = m0;
   6964 		txs->txs_firstdesc = txq->txq_next;
   6965 		txs->txs_ndesc = segs_needed;
   6966 
   6967 		/* Set up offload parameters for this packet. */
   6968 		if (m0->m_pkthdr.csum_flags &
   6969 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6970 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6971 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6972 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6973 					  &cksumfields) != 0) {
   6974 				/* Error message already displayed. */
   6975 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6976 				continue;
   6977 			}
   6978 		} else {
   6979 			cksumcmd = 0;
   6980 			cksumfields = 0;
   6981 		}
   6982 
   6983 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6984 
   6985 		/* Sync the DMA map. */
   6986 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6987 		    BUS_DMASYNC_PREWRITE);
   6988 
   6989 		/* Initialize the transmit descriptor. */
   6990 		for (nexttx = txq->txq_next, seg = 0;
   6991 		     seg < dmamap->dm_nsegs; seg++) {
   6992 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6993 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6994 			     seglen != 0;
   6995 			     curaddr += curlen, seglen -= curlen,
   6996 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6997 				curlen = seglen;
   6998 
   6999 				/*
   7000 				 * So says the Linux driver:
   7001 				 * Work around for premature descriptor
   7002 				 * write-backs in TSO mode.  Append a
   7003 				 * 4-byte sentinel descriptor.
   7004 				 */
   7005 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7006 				    curlen > 8)
   7007 					curlen -= 4;
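
				/*
				 * The 4 bytes trimmed here are emitted on
				 * the next pass of this inner loop as a
				 * tiny final descriptor: that is the
				 * sentinel accounted for by segs_needed++
				 * above.
				 */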
   7008 
   7009 				wm_set_dma_addr(
   7010 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7011 				txq->txq_descs[nexttx].wtx_cmdlen
   7012 				    = htole32(cksumcmd | curlen);
   7013 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7014 				    = 0;
   7015 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7016 				    = cksumfields;
    7017 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7018 				lasttx = nexttx;
   7019 
   7020 				DPRINTF(WM_DEBUG_TX,
   7021 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7022 				     "len %#04zx\n",
   7023 				    device_xname(sc->sc_dev), nexttx,
   7024 				    (uint64_t)curaddr, curlen));
   7025 			}
   7026 		}
   7027 
   7028 		KASSERT(lasttx != -1);
   7029 
   7030 		/*
   7031 		 * Set up the command byte on the last descriptor of
   7032 		 * the packet.  If we're in the interrupt delay window,
   7033 		 * delay the interrupt.
   7034 		 */
   7035 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7036 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7037 
   7038 		/*
   7039 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7040 		 * up the descriptor to encapsulate the packet for us.
   7041 		 *
   7042 		 * This is only valid on the last descriptor of the packet.
   7043 		 */
   7044 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7045 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7046 			    htole32(WTX_CMD_VLE);
   7047 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7048 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7049 		}
   7050 
   7051 		txs->txs_lastdesc = lasttx;
   7052 
   7053 		DPRINTF(WM_DEBUG_TX,
   7054 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7055 		    device_xname(sc->sc_dev),
   7056 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7057 
   7058 		/* Sync the descriptors we're using. */
   7059 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7060 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7061 
   7062 		/* Give the packet to the chip. */
   7063 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7064 
   7065 		DPRINTF(WM_DEBUG_TX,
   7066 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7067 
   7068 		DPRINTF(WM_DEBUG_TX,
   7069 		    ("%s: TX: finished transmitting packet, job %d\n",
   7070 		    device_xname(sc->sc_dev), txq->txq_snext));
   7071 
   7072 		/* Advance the tx pointer. */
   7073 		txq->txq_free -= txs->txs_ndesc;
   7074 		txq->txq_next = nexttx;
   7075 
   7076 		txq->txq_sfree--;
   7077 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7078 
   7079 		/* Pass the packet to any BPF listeners. */
   7080 		bpf_mtap(ifp, m0);
   7081 	}
   7082 
   7083 	if (m0 != NULL) {
   7084 		if (!is_transmit)
   7085 			ifp->if_flags |= IFF_OACTIVE;
   7086 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7087 		WM_Q_EVCNT_INCR(txq, txdrop);
   7088 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7089 			__func__));
   7090 		m_freem(m0);
   7091 	}
   7092 
   7093 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7094 		/* No more slots; notify upper layer. */
   7095 		if (!is_transmit)
   7096 			ifp->if_flags |= IFF_OACTIVE;
   7097 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7098 	}
   7099 
   7100 	if (txq->txq_free != ofree) {
   7101 		/* Set a watchdog timer in case the chip flakes out. */
   7102 		ifp->if_timer = 5;
   7103 	}
   7104 }
   7105 
   7106 /*
   7107  * wm_nq_tx_offload:
   7108  *
   7109  *	Set up TCP/IP checksumming parameters for the
   7110  *	specified packet, for NEWQUEUE devices
   7111  */
   7112 static int
   7113 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7114     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7115 {
   7116 	struct mbuf *m0 = txs->txs_mbuf;
   7117 	struct m_tag *mtag;
   7118 	uint32_t vl_len, mssidx, cmdc;
   7119 	struct ether_header *eh;
   7120 	int offset, iphl;
   7121 
   7122 	/*
   7123 	 * XXX It would be nice if the mbuf pkthdr had offset
   7124 	 * fields for the protocol headers.
   7125 	 */
   7126 	*cmdlenp = 0;
   7127 	*fieldsp = 0;
   7128 
   7129 	eh = mtod(m0, struct ether_header *);
   7130 	switch (htons(eh->ether_type)) {
   7131 	case ETHERTYPE_IP:
   7132 	case ETHERTYPE_IPV6:
   7133 		offset = ETHER_HDR_LEN;
   7134 		break;
   7135 
   7136 	case ETHERTYPE_VLAN:
   7137 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7138 		break;
   7139 
   7140 	default:
   7141 		/* Don't support this protocol or encapsulation. */
   7142 		*do_csum = false;
   7143 		return 0;
   7144 	}
   7145 	*do_csum = true;
   7146 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7147 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7148 
   7149 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7150 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7151 
   7152 	if ((m0->m_pkthdr.csum_flags &
   7153 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7154 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7155 	} else {
   7156 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7157 	}
   7158 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7159 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7160 
   7161 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7162 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7163 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7164 		*cmdlenp |= NQTX_CMD_VLE;
   7165 	}
   7166 
   7167 	mssidx = 0;
   7168 
   7169 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7170 		int hlen = offset + iphl;
   7171 		int tcp_hlen;
   7172 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7173 
   7174 		if (__predict_false(m0->m_len <
   7175 				    (hlen + sizeof(struct tcphdr)))) {
   7176 			/*
   7177 			 * TCP/IP headers are not in the first mbuf; we need
   7178 			 * to do this the slow and painful way.  Let's just
   7179 			 * hope this doesn't happen very often.
   7180 			 */
   7181 			struct tcphdr th;
   7182 
   7183 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7184 
   7185 			m_copydata(m0, hlen, sizeof(th), &th);
   7186 			if (v4) {
   7187 				struct ip ip;
   7188 
   7189 				m_copydata(m0, offset, sizeof(ip), &ip);
   7190 				ip.ip_len = 0;
   7191 				m_copyback(m0,
   7192 				    offset + offsetof(struct ip, ip_len),
   7193 				    sizeof(ip.ip_len), &ip.ip_len);
   7194 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7195 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7196 			} else {
   7197 				struct ip6_hdr ip6;
   7198 
   7199 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7200 				ip6.ip6_plen = 0;
   7201 				m_copyback(m0,
   7202 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7203 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7204 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7205 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7206 			}
   7207 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7208 			    sizeof(th.th_sum), &th.th_sum);
   7209 
   7210 			tcp_hlen = th.th_off << 2;
   7211 		} else {
   7212 			/*
   7213 			 * TCP/IP headers are in the first mbuf; we can do
   7214 			 * this the easy way.
   7215 			 */
   7216 			struct tcphdr *th;
   7217 
   7218 			if (v4) {
   7219 				struct ip *ip =
   7220 				    (void *)(mtod(m0, char *) + offset);
   7221 				th = (void *)(mtod(m0, char *) + hlen);
   7222 
   7223 				ip->ip_len = 0;
   7224 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7225 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7226 			} else {
   7227 				struct ip6_hdr *ip6 =
   7228 				    (void *)(mtod(m0, char *) + offset);
   7229 				th = (void *)(mtod(m0, char *) + hlen);
   7230 
   7231 				ip6->ip6_plen = 0;
   7232 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7233 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7234 			}
   7235 			tcp_hlen = th->th_off << 2;
   7236 		}
   7237 		hlen += tcp_hlen;
   7238 		*cmdlenp |= NQTX_CMD_TSE;
   7239 
   7240 		if (v4) {
   7241 			WM_Q_EVCNT_INCR(txq, txtso);
   7242 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7243 		} else {
   7244 			WM_Q_EVCNT_INCR(txq, txtso6);
   7245 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7246 		}
   7247 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7248 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7249 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7250 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7251 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7252 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7253 	} else {
   7254 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7255 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7256 	}
   7257 
   7258 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7259 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7260 		cmdc |= NQTXC_CMD_IP4;
   7261 	}
   7262 
   7263 	if (m0->m_pkthdr.csum_flags &
   7264 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7265 		WM_Q_EVCNT_INCR(txq, txtusum);
   7266 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7267 			cmdc |= NQTXC_CMD_TCP;
   7268 		} else {
   7269 			cmdc |= NQTXC_CMD_UDP;
   7270 		}
   7271 		cmdc |= NQTXC_CMD_IP4;
   7272 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7273 	}
   7274 	if (m0->m_pkthdr.csum_flags &
   7275 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7276 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7277 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7278 			cmdc |= NQTXC_CMD_TCP;
   7279 		} else {
   7280 			cmdc |= NQTXC_CMD_UDP;
   7281 		}
   7282 		cmdc |= NQTXC_CMD_IP6;
   7283 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7284 	}
   7285 
   7286 	/*
    7287 	 * We don't have to write a context descriptor for every packet on
    7288 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7289 	 * I354, I210 and I211; writing one per Tx queue is enough for
    7290 	 * these controllers.
    7291 	 * Writing a context descriptor for every packet adds overhead,
    7292 	 * but it does not cause problems.
   7293 	 */
   7294 	/* Fill in the context descriptor. */
   7295 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7296 	    htole32(vl_len);
   7297 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7298 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7299 	    htole32(cmdc);
   7300 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7301 	    htole32(mssidx);
   7302 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7303 	DPRINTF(WM_DEBUG_TX,
   7304 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7305 	    txq->txq_next, 0, vl_len));
   7306 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7307 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7308 	txs->txs_ndesc++;
   7309 	return 0;
   7310 }
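
/*
 * Illustrative sketch of the vl_len packing built above, for an untagged
 * IPv4/TCP packet (values are examples, not requirements):
 *
 *	vl_len = (14 << NQTXC_VLLEN_MACLEN_SHIFT)	// ETHER_HDR_LEN
 *	    | (20 << NQTXC_VLLEN_IPLEN_SHIFT);		// iphl
 *
 * with the VLAN field left zero; each KASSERT above checks that a value
 * fits its mask before it is shifted into place.
 */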
   7311 
   7312 /*
   7313  * wm_nq_start:		[ifnet interface function]
   7314  *
   7315  *	Start packet transmission on the interface for NEWQUEUE devices
   7316  */
   7317 static void
   7318 wm_nq_start(struct ifnet *ifp)
   7319 {
   7320 	struct wm_softc *sc = ifp->if_softc;
   7321 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7322 
   7323 #ifdef WM_MPSAFE
   7324 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7325 #endif
   7326 	/*
   7327 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7328 	 */
   7329 
   7330 	mutex_enter(txq->txq_lock);
   7331 	if (!txq->txq_stopping)
   7332 		wm_nq_start_locked(ifp);
   7333 	mutex_exit(txq->txq_lock);
   7334 }
   7335 
   7336 static void
   7337 wm_nq_start_locked(struct ifnet *ifp)
   7338 {
   7339 	struct wm_softc *sc = ifp->if_softc;
   7340 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7341 
   7342 	wm_nq_send_common_locked(ifp, txq, false);
   7343 }
   7344 
   7345 static int
   7346 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7347 {
   7348 	int qid;
   7349 	struct wm_softc *sc = ifp->if_softc;
   7350 	struct wm_txqueue *txq;
   7351 
   7352 	qid = wm_select_txqueue(ifp, m);
   7353 	txq = &sc->sc_queue[qid].wmq_txq;
   7354 
   7355 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7356 		m_freem(m);
   7357 		WM_Q_EVCNT_INCR(txq, txdrop);
   7358 		return ENOBUFS;
   7359 	}
   7360 
   7361 	/*
   7362 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7363 	 */
   7364 	ifp->if_obytes += m->m_pkthdr.len;
   7365 	if (m->m_flags & M_MCAST)
   7366 		ifp->if_omcasts++;
   7367 
   7368 	/*
    7369 	 * There are two situations in which this mutex_tryenter() can
    7370 	 * fail at run time:
    7371 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7372 	 *     (2) contention with the deferred if_start softint
    7373 	 *         (wm_handle_queue())
    7374 	 * In both cases, the last packet enqueued to txq->txq_interq is
    7375 	 * eventually dequeued by wm_deferred_start_locked(), so it does
    7376 	 * not get stuck.
   7377 	 */
   7378 	if (mutex_tryenter(txq->txq_lock)) {
   7379 		if (!txq->txq_stopping)
   7380 			wm_nq_transmit_locked(ifp, txq);
   7381 		mutex_exit(txq->txq_lock);
   7382 	}
   7383 
   7384 	return 0;
   7385 }
   7386 
   7387 static void
   7388 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7389 {
   7390 
   7391 	wm_nq_send_common_locked(ifp, txq, true);
   7392 }
   7393 
   7394 static void
   7395 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7396     bool is_transmit)
   7397 {
   7398 	struct wm_softc *sc = ifp->if_softc;
   7399 	struct mbuf *m0;
   7400 	struct m_tag *mtag;
   7401 	struct wm_txsoft *txs;
   7402 	bus_dmamap_t dmamap;
   7403 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7404 	bool do_csum, sent;
   7405 
   7406 	KASSERT(mutex_owned(txq->txq_lock));
   7407 
   7408 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7409 		return;
   7410 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7411 		return;
   7412 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7413 		return;
   7414 
   7415 	sent = false;
   7416 
   7417 	/*
   7418 	 * Loop through the send queue, setting up transmit descriptors
   7419 	 * until we drain the queue, or use up all available transmit
   7420 	 * descriptors.
   7421 	 */
   7422 	for (;;) {
   7423 		m0 = NULL;
   7424 
   7425 		/* Get a work queue entry. */
   7426 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7427 			wm_txeof(sc, txq);
   7428 			if (txq->txq_sfree == 0) {
   7429 				DPRINTF(WM_DEBUG_TX,
   7430 				    ("%s: TX: no free job descriptors\n",
   7431 					device_xname(sc->sc_dev)));
   7432 				WM_Q_EVCNT_INCR(txq, txsstall);
   7433 				break;
   7434 			}
   7435 		}
   7436 
   7437 		/* Grab a packet off the queue. */
   7438 		if (is_transmit)
   7439 			m0 = pcq_get(txq->txq_interq);
   7440 		else
   7441 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7442 		if (m0 == NULL)
   7443 			break;
   7444 
   7445 		DPRINTF(WM_DEBUG_TX,
   7446 		    ("%s: TX: have packet to transmit: %p\n",
   7447 		    device_xname(sc->sc_dev), m0));
   7448 
   7449 		txs = &txq->txq_soft[txq->txq_snext];
   7450 		dmamap = txs->txs_dmamap;
   7451 
   7452 		/*
   7453 		 * Load the DMA map.  If this fails, the packet either
   7454 		 * didn't fit in the allotted number of segments, or we
   7455 		 * were short on resources.  For the too-many-segments
   7456 		 * case, we simply report an error and drop the packet,
   7457 		 * since we can't sanely copy a jumbo packet to a single
   7458 		 * buffer.
   7459 		 */
   7460 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7461 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7462 		if (error) {
   7463 			if (error == EFBIG) {
   7464 				WM_Q_EVCNT_INCR(txq, txdrop);
   7465 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7466 				    "DMA segments, dropping...\n",
   7467 				    device_xname(sc->sc_dev));
   7468 				wm_dump_mbuf_chain(sc, m0);
   7469 				m_freem(m0);
   7470 				continue;
   7471 			}
   7472 			/* Short on resources, just stop for now. */
   7473 			DPRINTF(WM_DEBUG_TX,
   7474 			    ("%s: TX: dmamap load failed: %d\n",
   7475 			    device_xname(sc->sc_dev), error));
   7476 			break;
   7477 		}
   7478 
   7479 		segs_needed = dmamap->dm_nsegs;
   7480 
   7481 		/*
   7482 		 * Ensure we have enough descriptors free to describe
   7483 		 * the packet.  Note, we always reserve one descriptor
   7484 		 * at the end of the ring due to the semantics of the
   7485 		 * TDT register, plus one more in the event we need
   7486 		 * to load offload context.
   7487 		 */
   7488 		if (segs_needed > txq->txq_free - 2) {
   7489 			/*
   7490 			 * Not enough free descriptors to transmit this
   7491 			 * packet.  We haven't committed anything yet,
   7492 			 * so just unload the DMA map, put the packet
    7493 			 * back on the queue, and punt.  Notify the upper
   7494 			 * layer that there are no more slots left.
   7495 			 */
   7496 			DPRINTF(WM_DEBUG_TX,
   7497 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7498 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7499 			    segs_needed, txq->txq_free - 1));
   7500 			if (!is_transmit)
   7501 				ifp->if_flags |= IFF_OACTIVE;
   7502 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7503 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7504 			WM_Q_EVCNT_INCR(txq, txdstall);
   7505 			break;
   7506 		}
   7507 
   7508 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7509 
   7510 		DPRINTF(WM_DEBUG_TX,
   7511 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7512 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7513 
   7514 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7515 
   7516 		/*
   7517 		 * Store a pointer to the packet so that we can free it
   7518 		 * later.
   7519 		 *
   7520 		 * Initially, we consider the number of descriptors the
    7521 		 * packet uses to be the number of DMA segments.  This may be
   7522 		 * incremented by 1 if we do checksum offload (a descriptor
   7523 		 * is used to set the checksum context).
   7524 		 */
   7525 		txs->txs_mbuf = m0;
   7526 		txs->txs_firstdesc = txq->txq_next;
   7527 		txs->txs_ndesc = segs_needed;
   7528 
   7529 		/* Set up offload parameters for this packet. */
   7530 		uint32_t cmdlen, fields, dcmdlen;
   7531 		if (m0->m_pkthdr.csum_flags &
   7532 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7533 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7534 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7535 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7536 			    &do_csum) != 0) {
   7537 				/* Error message already displayed. */
   7538 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7539 				continue;
   7540 			}
   7541 		} else {
   7542 			do_csum = false;
   7543 			cmdlen = 0;
   7544 			fields = 0;
   7545 		}
   7546 
   7547 		/* Sync the DMA map. */
   7548 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7549 		    BUS_DMASYNC_PREWRITE);
   7550 
   7551 		/* Initialize the first transmit descriptor. */
   7552 		nexttx = txq->txq_next;
   7553 		if (!do_csum) {
   7554 			/* setup a legacy descriptor */
   7555 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7556 			    dmamap->dm_segs[0].ds_addr);
   7557 			txq->txq_descs[nexttx].wtx_cmdlen =
   7558 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7559 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7560 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7561 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7562 			    NULL) {
   7563 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7564 				    htole32(WTX_CMD_VLE);
   7565 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7566 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7567 			} else {
    7568 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7569 			}
   7570 			dcmdlen = 0;
   7571 		} else {
   7572 			/* setup an advanced data descriptor */
   7573 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7574 			    htole64(dmamap->dm_segs[0].ds_addr);
   7575 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7576 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7577 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7578 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7579 			    htole32(fields);
   7580 			DPRINTF(WM_DEBUG_TX,
   7581 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7582 			    device_xname(sc->sc_dev), nexttx,
   7583 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7584 			DPRINTF(WM_DEBUG_TX,
   7585 			    ("\t 0x%08x%08x\n", fields,
   7586 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7587 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7588 		}
   7589 
   7590 		lasttx = nexttx;
   7591 		nexttx = WM_NEXTTX(txq, nexttx);
   7592 		/*
    7593 		 * Fill in the next descriptors.  The legacy and advanced
    7594 		 * formats are the same from here on.
   7595 		 */
   7596 		for (seg = 1; seg < dmamap->dm_nsegs;
   7597 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7598 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7599 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7600 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7601 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7602 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7603 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7604 			lasttx = nexttx;
   7605 
   7606 			DPRINTF(WM_DEBUG_TX,
   7607 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7608 			     "len %#04zx\n",
   7609 			    device_xname(sc->sc_dev), nexttx,
   7610 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7611 			    dmamap->dm_segs[seg].ds_len));
   7612 		}
   7613 
   7614 		KASSERT(lasttx != -1);
   7615 
   7616 		/*
   7617 		 * Set up the command byte on the last descriptor of
   7618 		 * the packet.  If we're in the interrupt delay window,
   7619 		 * delay the interrupt.
   7620 		 */
   7621 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7622 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7623 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7624 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7625 
   7626 		txs->txs_lastdesc = lasttx;
   7627 
   7628 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7629 		    device_xname(sc->sc_dev),
   7630 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7631 
   7632 		/* Sync the descriptors we're using. */
   7633 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7634 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7635 
   7636 		/* Give the packet to the chip. */
   7637 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7638 		sent = true;
   7639 
   7640 		DPRINTF(WM_DEBUG_TX,
   7641 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7642 
   7643 		DPRINTF(WM_DEBUG_TX,
   7644 		    ("%s: TX: finished transmitting packet, job %d\n",
   7645 		    device_xname(sc->sc_dev), txq->txq_snext));
   7646 
   7647 		/* Advance the tx pointer. */
   7648 		txq->txq_free -= txs->txs_ndesc;
   7649 		txq->txq_next = nexttx;
   7650 
   7651 		txq->txq_sfree--;
   7652 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7653 
   7654 		/* Pass the packet to any BPF listeners. */
   7655 		bpf_mtap(ifp, m0);
   7656 	}
   7657 
   7658 	if (m0 != NULL) {
   7659 		if (!is_transmit)
   7660 			ifp->if_flags |= IFF_OACTIVE;
   7661 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7662 		WM_Q_EVCNT_INCR(txq, txdrop);
   7663 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7664 			__func__));
   7665 		m_freem(m0);
   7666 	}
   7667 
   7668 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7669 		/* No more slots; notify upper layer. */
   7670 		if (!is_transmit)
   7671 			ifp->if_flags |= IFF_OACTIVE;
   7672 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7673 	}
   7674 
   7675 	if (sent) {
   7676 		/* Set a watchdog timer in case the chip flakes out. */
   7677 		ifp->if_timer = 5;
   7678 	}
   7679 }
   7680 
   7681 static void
   7682 wm_deferred_start_locked(struct wm_txqueue *txq)
   7683 {
   7684 	struct wm_softc *sc = txq->txq_sc;
   7685 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7686 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7687 	int qid = wmq->wmq_id;
   7688 
   7689 	KASSERT(mutex_owned(txq->txq_lock));
   7690 
   7691 	if (txq->txq_stopping) {
   7692 		mutex_exit(txq->txq_lock);
   7693 		return;
   7694 	}
   7695 
   7696 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7697 		/* XXX needed for ALTQ or single-CPU systems */
   7698 		if (qid == 0)
   7699 			wm_nq_start_locked(ifp);
   7700 		wm_nq_transmit_locked(ifp, txq);
   7701 	} else {
    7702 		/* XXX needed for ALTQ or single-CPU systems */
   7703 		if (qid == 0)
   7704 			wm_start_locked(ifp);
   7705 		wm_transmit_locked(ifp, txq);
   7706 	}
   7707 }
   7708 
   7709 /* Interrupt */
   7710 
   7711 /*
   7712  * wm_txeof:
   7713  *
   7714  *	Helper; handle transmit interrupts.
   7715  */
   7716 static int
   7717 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7718 {
   7719 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7720 	struct wm_txsoft *txs;
   7721 	bool processed = false;
   7722 	int count = 0;
   7723 	int i;
   7724 	uint8_t status;
   7725 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7726 
   7727 	KASSERT(mutex_owned(txq->txq_lock));
   7728 
   7729 	if (txq->txq_stopping)
   7730 		return 0;
   7731 
   7732 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7733 	/* For ALTQ and legacy (non-multiqueue) Ethernet controllers */
   7734 	if (wmq->wmq_id == 0)
   7735 		ifp->if_flags &= ~IFF_OACTIVE;
   7736 
   7737 	/*
   7738 	 * Go through the Tx list and free mbufs for those
   7739 	 * frames which have been transmitted.
   7740 	 */
   7741 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7742 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7743 		txs = &txq->txq_soft[i];
   7744 
   7745 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7746 			device_xname(sc->sc_dev), i));
   7747 
   7748 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7749 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7750 
   7751 		status =
   7752 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7753 		if ((status & WTX_ST_DD) == 0) {
   7754 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7755 			    BUS_DMASYNC_PREREAD);
   7756 			break;
   7757 		}
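
		/*
		 * When the DD bit is not yet set, the descriptor is
		 * re-synced PREREAD before we break out, so the next
		 * wm_txeof() call reads a fresh snapshot of the hardware
		 * write-back area.
		 */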
   7758 
   7759 		processed = true;
   7760 		count++;
   7761 		DPRINTF(WM_DEBUG_TX,
   7762 		    ("%s: TX: job %d done: descs %d..%d\n",
   7763 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7764 		    txs->txs_lastdesc));
   7765 
   7766 		/*
   7767 		 * XXX We should probably be using the statistics
   7768 		 * XXX registers, but I don't know if they exist
   7769 		 * XXX on chips before the i82544.
   7770 		 */
   7771 
   7772 #ifdef WM_EVENT_COUNTERS
   7773 		if (status & WTX_ST_TU)
   7774 			WM_Q_EVCNT_INCR(txq, tu);
   7775 #endif /* WM_EVENT_COUNTERS */
   7776 
   7777 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7778 			ifp->if_oerrors++;
   7779 			if (status & WTX_ST_LC)
   7780 				log(LOG_WARNING, "%s: late collision\n",
   7781 				    device_xname(sc->sc_dev));
   7782 			else if (status & WTX_ST_EC) {
   7783 				ifp->if_collisions += 16;
   7784 				log(LOG_WARNING, "%s: excessive collisions\n",
   7785 				    device_xname(sc->sc_dev));
   7786 			}
   7787 		} else
   7788 			ifp->if_opackets++;
   7789 
   7790 		txq->txq_packets++;
   7791 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7792 
   7793 		txq->txq_free += txs->txs_ndesc;
   7794 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7795 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7796 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7797 		m_freem(txs->txs_mbuf);
   7798 		txs->txs_mbuf = NULL;
   7799 	}
   7800 
   7801 	/* Update the dirty transmit buffer pointer. */
   7802 	txq->txq_sdirty = i;
   7803 	DPRINTF(WM_DEBUG_TX,
   7804 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7805 
   7806 	if (count != 0)
   7807 		rnd_add_uint32(&sc->rnd_source, count);
   7808 
   7809 	/*
   7810 	 * If there are no more pending transmissions, cancel the watchdog
   7811 	 * timer.
   7812 	 */
   7813 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7814 		ifp->if_timer = 0;
   7815 
   7816 	return processed;
   7817 }
   7818 
   7819 static inline uint32_t
   7820 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7821 {
   7822 	struct wm_softc *sc = rxq->rxq_sc;
   7823 
   7824 	if (sc->sc_type == WM_T_82574)
   7825 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7826 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7827 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7828 	else
   7829 		return rxq->rxq_descs[idx].wrx_status;
   7830 }
   7831 
   7832 static inline uint32_t
   7833 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7834 {
   7835 	struct wm_softc *sc = rxq->rxq_sc;
   7836 
   7837 	if (sc->sc_type == WM_T_82574)
   7838 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7839 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7840 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7841 	else
   7842 		return rxq->rxq_descs[idx].wrx_errors;
   7843 }
   7844 
   7845 static inline uint16_t
   7846 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7847 {
   7848 	struct wm_softc *sc = rxq->rxq_sc;
   7849 
   7850 	if (sc->sc_type == WM_T_82574)
   7851 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7852 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7853 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7854 	else
   7855 		return rxq->rxq_descs[idx].wrx_special;
   7856 }
   7857 
   7858 static inline int
   7859 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7860 {
   7861 	struct wm_softc *sc = rxq->rxq_sc;
   7862 
   7863 	if (sc->sc_type == WM_T_82574)
   7864 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7865 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7866 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7867 	else
   7868 		return rxq->rxq_descs[idx].wrx_len;
   7869 }
   7870 
   7871 #ifdef WM_DEBUG
   7872 static inline uint32_t
   7873 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7874 {
   7875 	struct wm_softc *sc = rxq->rxq_sc;
   7876 
   7877 	if (sc->sc_type == WM_T_82574)
   7878 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7879 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7880 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7881 	else
   7882 		return 0;
   7883 }
   7884 
   7885 static inline uint8_t
   7886 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7887 {
   7888 	struct wm_softc *sc = rxq->rxq_sc;
   7889 
   7890 	if (sc->sc_type == WM_T_82574)
   7891 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7892 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7893 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7894 	else
   7895 		return 0;
   7896 }
   7897 #endif /* WM_DEBUG */
   7898 
   7899 static inline bool
   7900 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7901     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7902 {
   7903 
   7904 	if (sc->sc_type == WM_T_82574)
   7905 		return (status & ext_bit) != 0;
   7906 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7907 		return (status & nq_bit) != 0;
   7908 	else
   7909 		return (status & legacy_bit) != 0;
   7910 }
   7911 
   7912 static inline bool
   7913 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7914     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7915 {
   7916 
   7917 	if (sc->sc_type == WM_T_82574)
   7918 		return (error & ext_bit) != 0;
   7919 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7920 		return (error & nq_bit) != 0;
   7921 	else
   7922 		return (error & legacy_bit) != 0;
   7923 }
   7924 
   7925 static inline bool
   7926 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7927 {
   7928 
   7929 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7930 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7931 		return true;
   7932 	else
   7933 		return false;
   7934 }
   7935 
   7936 static inline bool
   7937 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7938 {
   7939 	struct wm_softc *sc = rxq->rxq_sc;
   7940 
    7941 	/* XXX missing error bit for newqueue? */
   7942 	if (wm_rxdesc_is_set_error(sc, errors,
   7943 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7944 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7945 		NQRXC_ERROR_RXE)) {
   7946 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7947 			log(LOG_WARNING, "%s: symbol error\n",
   7948 			    device_xname(sc->sc_dev));
   7949 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7950 			log(LOG_WARNING, "%s: receive sequence error\n",
   7951 			    device_xname(sc->sc_dev));
   7952 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7953 			log(LOG_WARNING, "%s: CRC error\n",
   7954 			    device_xname(sc->sc_dev));
   7955 		return true;
   7956 	}
   7957 
   7958 	return false;
   7959 }
   7960 
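         /*
          * Check the descriptor-done (DD) bit.  If it is not yet set,
          * re-sync the descriptor for the next poll and tell the caller
          * to stop.
          */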
   7961 static inline bool
   7962 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7963 {
   7964 	struct wm_softc *sc = rxq->rxq_sc;
   7965 
   7966 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7967 		NQRXC_STATUS_DD)) {
   7968 		/* We have processed all of the receive descriptors. */
   7969 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7970 		return false;
   7971 	}
   7972 
   7973 	return true;
   7974 }
   7975 
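         /*
          * If the descriptor carries a VLAN tag, attach the tag to the
          * mbuf.  Returns false only if the tag could not be attached,
          * in which case the caller drops the packet.
          */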
   7976 static inline bool
   7977 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7978     struct mbuf *m)
   7979 {
   7980 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7981 
   7982 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7983 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7984 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7985 	}
   7986 
   7987 	return true;
   7988 }
   7989 
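         /*
          * Translate the hardware checksum status into M_CSUM flags on
          * the mbuf, unless the chip told us to ignore the checksum
          * indication (IXSM).
          */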
   7990 static inline void
   7991 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7992     uint32_t errors, struct mbuf *m)
   7993 {
   7994 	struct wm_softc *sc = rxq->rxq_sc;
   7995 
   7996 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7997 		if (wm_rxdesc_is_set_status(sc, status,
   7998 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7999 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8000 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8001 			if (wm_rxdesc_is_set_error(sc, errors,
   8002 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8003 				m->m_pkthdr.csum_flags |=
   8004 					M_CSUM_IPv4_BAD;
   8005 		}
   8006 		if (wm_rxdesc_is_set_status(sc, status,
   8007 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8008 			/*
   8009 			 * Note: we don't know if this was TCP or UDP,
   8010 			 * so we just set both bits, and expect the
   8011 			 * upper layers to deal.
   8012 			 */
   8013 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8014 			m->m_pkthdr.csum_flags |=
   8015 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8016 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8017 			if (wm_rxdesc_is_set_error(sc, errors,
   8018 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8019 				m->m_pkthdr.csum_flags |=
   8020 					M_CSUM_TCP_UDP_BAD;
   8021 		}
   8022 	}
   8023 }
   8024 
   8025 /*
   8026  * wm_rxeof:
   8027  *
   8028  *	Helper; handle receive interrupts.
   8029  */
   8030 static void
   8031 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8032 {
   8033 	struct wm_softc *sc = rxq->rxq_sc;
   8034 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8035 	struct wm_rxsoft *rxs;
   8036 	struct mbuf *m;
   8037 	int i, len;
   8038 	int count = 0;
   8039 	uint32_t status, errors;
   8040 	uint16_t vlantag;
   8041 
   8042 	KASSERT(mutex_owned(rxq->rxq_lock));
   8043 
   8044 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8045 		if (limit-- == 0) {
   8046 			rxq->rxq_ptr = i;
   8047 			break;
   8048 		}
   8049 
   8050 		rxs = &rxq->rxq_soft[i];
   8051 
   8052 		DPRINTF(WM_DEBUG_RX,
   8053 		    ("%s: RX: checking descriptor %d\n",
   8054 		    device_xname(sc->sc_dev), i));
    8055 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8056 
   8057 		status = wm_rxdesc_get_status(rxq, i);
   8058 		errors = wm_rxdesc_get_errors(rxq, i);
   8059 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8060 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8061 #ifdef WM_DEBUG
   8062 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8063 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8064 #endif
   8065 
   8066 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8067 			/*
    8068 			 * Update the receive pointer while still holding
    8069 			 * rxq_lock, so it matches the descriptors processed.
   8070 			 */
   8071 			rxq->rxq_ptr = i;
   8072 			break;
   8073 		}
   8074 
   8075 		count++;
   8076 		if (__predict_false(rxq->rxq_discard)) {
   8077 			DPRINTF(WM_DEBUG_RX,
   8078 			    ("%s: RX: discarding contents of descriptor %d\n",
   8079 			    device_xname(sc->sc_dev), i));
   8080 			wm_init_rxdesc(rxq, i);
   8081 			if (wm_rxdesc_is_eop(rxq, status)) {
   8082 				/* Reset our state. */
   8083 				DPRINTF(WM_DEBUG_RX,
   8084 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8085 				    device_xname(sc->sc_dev)));
   8086 				rxq->rxq_discard = 0;
   8087 			}
   8088 			continue;
   8089 		}
   8090 
   8091 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8092 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8093 
   8094 		m = rxs->rxs_mbuf;
   8095 
   8096 		/*
   8097 		 * Add a new receive buffer to the ring, unless of
   8098 		 * course the length is zero. Treat the latter as a
   8099 		 * failed mapping.
   8100 		 */
   8101 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8102 			/*
   8103 			 * Failed, throw away what we've done so
   8104 			 * far, and discard the rest of the packet.
   8105 			 */
   8106 			ifp->if_ierrors++;
   8107 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8108 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8109 			wm_init_rxdesc(rxq, i);
   8110 			if (!wm_rxdesc_is_eop(rxq, status))
   8111 				rxq->rxq_discard = 1;
   8112 			if (rxq->rxq_head != NULL)
   8113 				m_freem(rxq->rxq_head);
   8114 			WM_RXCHAIN_RESET(rxq);
   8115 			DPRINTF(WM_DEBUG_RX,
   8116 			    ("%s: RX: Rx buffer allocation failed, "
   8117 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8118 			    rxq->rxq_discard ? " (discard)" : ""));
   8119 			continue;
   8120 		}
   8121 
   8122 		m->m_len = len;
   8123 		rxq->rxq_len += len;
   8124 		DPRINTF(WM_DEBUG_RX,
   8125 		    ("%s: RX: buffer at %p len %d\n",
   8126 		    device_xname(sc->sc_dev), m->m_data, len));
   8127 
   8128 		/* If this is not the end of the packet, keep looking. */
   8129 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8130 			WM_RXCHAIN_LINK(rxq, m);
   8131 			DPRINTF(WM_DEBUG_RX,
   8132 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8133 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8134 			continue;
   8135 		}
   8136 
   8137 		/*
    8138 		 * Okay, we have the entire packet now.  The chip is
    8139 		 * configured to include the FCS except on the I350/I354
    8140 		 * and I21[01] (not all chips can be configured to strip
    8141 		 * it), so we need to trim it.  We may also need to adjust
    8142 		 * the length of the previous mbuf in the chain if the
    8143 		 * current mbuf is too short.
    8144 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8145 		 * register is always set on the I350, so we don't trim there.
   8146 		 */
   8147 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8148 		    && (sc->sc_type != WM_T_I210)
   8149 		    && (sc->sc_type != WM_T_I211)) {
   8150 			if (m->m_len < ETHER_CRC_LEN) {
   8151 				rxq->rxq_tail->m_len
   8152 				    -= (ETHER_CRC_LEN - m->m_len);
   8153 				m->m_len = 0;
   8154 			} else
   8155 				m->m_len -= ETHER_CRC_LEN;
   8156 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8157 		} else
   8158 			len = rxq->rxq_len;
   8159 
   8160 		WM_RXCHAIN_LINK(rxq, m);
   8161 
   8162 		*rxq->rxq_tailp = NULL;
   8163 		m = rxq->rxq_head;
   8164 
   8165 		WM_RXCHAIN_RESET(rxq);
   8166 
   8167 		DPRINTF(WM_DEBUG_RX,
   8168 		    ("%s: RX: have entire packet, len -> %d\n",
   8169 		    device_xname(sc->sc_dev), len));
   8170 
   8171 		/* If an error occurred, update stats and drop the packet. */
   8172 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8173 			m_freem(m);
   8174 			continue;
   8175 		}
   8176 
   8177 		/* No errors.  Receive the packet. */
   8178 		m_set_rcvif(m, ifp);
   8179 		m->m_pkthdr.len = len;
   8180 		/*
    8181 		 * TODO:
    8182 		 * save the RSS hash and RSS type in this mbuf.
   8183 		 */
   8184 		DPRINTF(WM_DEBUG_RX,
   8185 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8186 			device_xname(sc->sc_dev), rsstype, rsshash));
   8187 
   8188 		/*
   8189 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8190 		 * for us.  Associate the tag with the packet.
   8191 		 */
   8192 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8193 			continue;
   8194 
   8195 		/* Set up checksum info for this packet. */
   8196 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8197 		/*
    8198 		 * Update the receive pointer while still holding rxq_lock,
    8199 		 * so it matches the descriptors processed.
   8200 		 */
   8201 		rxq->rxq_ptr = i;
   8202 		rxq->rxq_packets++;
   8203 		rxq->rxq_bytes += len;
   8204 		mutex_exit(rxq->rxq_lock);
   8205 
   8206 		/* Pass it on. */
   8207 		if_percpuq_enqueue(sc->sc_ipq, m);
   8208 
   8209 		mutex_enter(rxq->rxq_lock);
   8210 
   8211 		if (rxq->rxq_stopping)
   8212 			break;
   8213 	}
   8214 
   8215 	if (count != 0)
   8216 		rnd_add_uint32(&sc->rnd_source, count);
   8217 
   8218 	DPRINTF(WM_DEBUG_RX,
   8219 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8220 }
   8221 
   8222 /*
   8223  * wm_linkintr_gmii:
   8224  *
   8225  *	Helper; handle link interrupts for GMII.
   8226  */
   8227 static void
   8228 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8229 {
   8230 
   8231 	KASSERT(WM_CORE_LOCKED(sc));
   8232 
   8233 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8234 		__func__));
   8235 
   8236 	if (icr & ICR_LSC) {
   8237 		uint32_t reg;
   8238 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8239 
   8240 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8241 			wm_gig_downshift_workaround_ich8lan(sc);
   8242 
   8243 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8244 			device_xname(sc->sc_dev)));
   8245 		mii_pollstat(&sc->sc_mii);
   8246 		if (sc->sc_type == WM_T_82543) {
   8247 			int miistatus, active;
   8248 
   8249 			/*
   8250 			 * With 82543, we need to force speed and
   8251 			 * duplex on the MAC equal to what the PHY
   8252 			 * speed and duplex configuration is.
   8253 			 */
   8254 			miistatus = sc->sc_mii.mii_media_status;
   8255 
   8256 			if (miistatus & IFM_ACTIVE) {
   8257 				active = sc->sc_mii.mii_media_active;
   8258 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8259 				switch (IFM_SUBTYPE(active)) {
   8260 				case IFM_10_T:
   8261 					sc->sc_ctrl |= CTRL_SPEED_10;
   8262 					break;
   8263 				case IFM_100_TX:
   8264 					sc->sc_ctrl |= CTRL_SPEED_100;
   8265 					break;
   8266 				case IFM_1000_T:
   8267 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8268 					break;
   8269 				default:
   8270 					/*
    8271 					 * Fiber?
    8272 					 * Should not enter here.
   8273 					 */
   8274 					printf("unknown media (%x)\n", active);
   8275 					break;
   8276 				}
   8277 				if (active & IFM_FDX)
   8278 					sc->sc_ctrl |= CTRL_FD;
   8279 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8280 			}
   8281 		} else if ((sc->sc_type == WM_T_ICH8)
   8282 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8283 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8284 		} else if (sc->sc_type == WM_T_PCH) {
   8285 			wm_k1_gig_workaround_hv(sc,
   8286 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8287 		}
   8288 
   8289 		if ((sc->sc_phytype == WMPHY_82578)
   8290 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8291 			== IFM_1000_T)) {
   8292 
   8293 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8294 				delay(200*1000); /* XXX too big */
   8295 
   8296 				/* Link stall fix for link up */
   8297 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8298 				    HV_MUX_DATA_CTRL,
   8299 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8300 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8301 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8302 				    HV_MUX_DATA_CTRL,
   8303 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8304 			}
   8305 		}
   8306 		/*
   8307 		 * I217 Packet Loss issue:
   8308 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8309 		 * on power up.
   8310 		 * Set the Beacon Duration for I217 to 8 usec
   8311 		 */
   8312 		if ((sc->sc_type == WM_T_PCH_LPT)
   8313 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8314 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8315 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8316 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8317 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8318 		}
   8319 
   8320 		/* XXX Work-around I218 hang issue */
   8321 		/* e1000_k1_workaround_lpt_lp() */
   8322 
   8323 		if ((sc->sc_type == WM_T_PCH_LPT)
   8324 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8325 			/*
   8326 			 * Set platform power management values for Latency
   8327 			 * Tolerance Reporting (LTR)
   8328 			 */
   8329 			wm_platform_pm_pch_lpt(sc,
   8330 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8331 				    != 0));
   8332 		}
   8333 
   8334 		/* FEXTNVM6 K1-off workaround */
   8335 		if (sc->sc_type == WM_T_PCH_SPT) {
   8336 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8337 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8338 			    & FEXTNVM6_K1_OFF_ENABLE)
   8339 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8340 			else
   8341 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8342 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8343 		}
   8344 	} else if (icr & ICR_RXSEQ) {
   8345 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8346 			device_xname(sc->sc_dev)));
   8347 	}
   8348 }
   8349 
   8350 /*
   8351  * wm_linkintr_tbi:
   8352  *
   8353  *	Helper; handle link interrupts for TBI mode.
   8354  */
   8355 static void
   8356 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8357 {
   8358 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8359 	uint32_t status;
   8360 
   8361 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8362 		__func__));
   8363 
   8364 	status = CSR_READ(sc, WMREG_STATUS);
   8365 	if (icr & ICR_LSC) {
   8366 		if (status & STATUS_LU) {
   8367 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8368 			    device_xname(sc->sc_dev),
   8369 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8370 			/*
   8371 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8372 			 * so we should update sc->sc_ctrl
   8373 			 */
   8374 
   8375 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8376 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8377 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8378 			if (status & STATUS_FD)
   8379 				sc->sc_tctl |=
   8380 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8381 			else
   8382 				sc->sc_tctl |=
   8383 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8384 			if (sc->sc_ctrl & CTRL_TFCE)
   8385 				sc->sc_fcrtl |= FCRTL_XONE;
   8386 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8387 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8388 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8389 				      sc->sc_fcrtl);
   8390 			sc->sc_tbi_linkup = 1;
   8391 			if_link_state_change(ifp, LINK_STATE_UP);
   8392 		} else {
   8393 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8394 			    device_xname(sc->sc_dev)));
   8395 			sc->sc_tbi_linkup = 0;
   8396 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8397 		}
   8398 		/* Update LED */
   8399 		wm_tbi_serdes_set_linkled(sc);
   8400 	} else if (icr & ICR_RXSEQ) {
   8401 		DPRINTF(WM_DEBUG_LINK,
   8402 		    ("%s: LINK: Receive sequence error\n",
   8403 		    device_xname(sc->sc_dev)));
   8404 	}
   8405 }
   8406 
   8407 /*
   8408  * wm_linkintr_serdes:
   8409  *
    8410  *	Helper; handle link interrupts for SERDES mode.
   8411  */
   8412 static void
   8413 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8414 {
   8415 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8416 	struct mii_data *mii = &sc->sc_mii;
   8417 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8418 	uint32_t pcs_adv, pcs_lpab, reg;
   8419 
   8420 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8421 		__func__));
   8422 
   8423 	if (icr & ICR_LSC) {
   8424 		/* Check PCS */
   8425 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8426 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8427 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8428 				device_xname(sc->sc_dev)));
   8429 			mii->mii_media_status |= IFM_ACTIVE;
   8430 			sc->sc_tbi_linkup = 1;
   8431 			if_link_state_change(ifp, LINK_STATE_UP);
   8432 		} else {
   8433 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8434 				device_xname(sc->sc_dev)));
    8435 			mii->mii_media_active |= IFM_NONE;
   8436 			sc->sc_tbi_linkup = 0;
   8437 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8438 			wm_tbi_serdes_set_linkled(sc);
   8439 			return;
   8440 		}
   8441 		mii->mii_media_active |= IFM_1000_SX;
   8442 		if ((reg & PCS_LSTS_FDX) != 0)
   8443 			mii->mii_media_active |= IFM_FDX;
   8444 		else
   8445 			mii->mii_media_active |= IFM_HDX;
   8446 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
    8447 			/* Resolve flow control from the AN result */
   8448 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8449 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8450 				DPRINTF(WM_DEBUG_LINK,
   8451 				    ("XXX LINKOK but not ACOMP\n"));
   8452 				return;
   8453 			}
   8454 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8455 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8456 			DPRINTF(WM_DEBUG_LINK,
   8457 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8458 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8459 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8460 				mii->mii_media_active |= IFM_FLOW
   8461 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8462 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8463 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8464 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8465 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8466 				mii->mii_media_active |= IFM_FLOW
   8467 				    | IFM_ETH_TXPAUSE;
   8468 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8469 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8470 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8471 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8472 				mii->mii_media_active |= IFM_FLOW
   8473 				    | IFM_ETH_RXPAUSE;
   8474 		}
   8475 		/* Update LED */
   8476 		wm_tbi_serdes_set_linkled(sc);
   8477 	} else {
   8478 		DPRINTF(WM_DEBUG_LINK,
   8479 		    ("%s: LINK: Receive sequence error\n",
   8480 		    device_xname(sc->sc_dev)));
   8481 	}
   8482 }
   8483 
   8484 /*
   8485  * wm_linkintr:
   8486  *
   8487  *	Helper; handle link interrupts.
   8488  */
   8489 static void
   8490 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8491 {
   8492 
   8493 	KASSERT(WM_CORE_LOCKED(sc));
   8494 
   8495 	if (sc->sc_flags & WM_F_HAS_MII)
   8496 		wm_linkintr_gmii(sc, icr);
   8497 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8498 	    && (sc->sc_type >= WM_T_82575))
   8499 		wm_linkintr_serdes(sc, icr);
   8500 	else
   8501 		wm_linkintr_tbi(sc, icr);
   8502 }
   8503 
   8504 /*
   8505  * wm_intr_legacy:
   8506  *
   8507  *	Interrupt service routine for INTx and MSI.
   8508  */
   8509 static int
   8510 wm_intr_legacy(void *arg)
   8511 {
   8512 	struct wm_softc *sc = arg;
   8513 	struct wm_queue *wmq = &sc->sc_queue[0];
   8514 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8515 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8516 	uint32_t icr, rndval = 0;
   8517 	int handled = 0;
   8518 
   8519 	while (1 /* CONSTCOND */) {
   8520 		icr = CSR_READ(sc, WMREG_ICR);
   8521 		if ((icr & sc->sc_icr) == 0)
   8522 			break;
   8523 		if (handled == 0) {
   8524 			DPRINTF(WM_DEBUG_TX,
   8525 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8526 		}
   8527 		if (rndval == 0)
   8528 			rndval = icr;
   8529 
   8530 		mutex_enter(rxq->rxq_lock);
   8531 
   8532 		if (rxq->rxq_stopping) {
   8533 			mutex_exit(rxq->rxq_lock);
   8534 			break;
   8535 		}
   8536 
   8537 		handled = 1;
   8538 
   8539 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8540 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8541 			DPRINTF(WM_DEBUG_RX,
   8542 			    ("%s: RX: got Rx intr 0x%08x\n",
   8543 			    device_xname(sc->sc_dev),
   8544 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8545 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8546 		}
   8547 #endif
   8548 		wm_rxeof(rxq, UINT_MAX);
   8549 
   8550 		mutex_exit(rxq->rxq_lock);
   8551 		mutex_enter(txq->txq_lock);
   8552 
   8553 		if (txq->txq_stopping) {
   8554 			mutex_exit(txq->txq_lock);
   8555 			break;
   8556 		}
   8557 
   8558 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8559 		if (icr & ICR_TXDW) {
   8560 			DPRINTF(WM_DEBUG_TX,
   8561 			    ("%s: TX: got TXDW interrupt\n",
   8562 			    device_xname(sc->sc_dev)));
   8563 			WM_Q_EVCNT_INCR(txq, txdw);
   8564 		}
   8565 #endif
   8566 		wm_txeof(sc, txq);
   8567 
   8568 		mutex_exit(txq->txq_lock);
   8569 		WM_CORE_LOCK(sc);
   8570 
   8571 		if (sc->sc_core_stopping) {
   8572 			WM_CORE_UNLOCK(sc);
   8573 			break;
   8574 		}
   8575 
   8576 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8577 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8578 			wm_linkintr(sc, icr);
   8579 		}
   8580 
   8581 		WM_CORE_UNLOCK(sc);
   8582 
   8583 		if (icr & ICR_RXO) {
   8584 #if defined(WM_DEBUG)
   8585 			log(LOG_WARNING, "%s: Receive overrun\n",
   8586 			    device_xname(sc->sc_dev));
   8587 #endif /* defined(WM_DEBUG) */
   8588 		}
   8589 	}
   8590 
   8591 	rnd_add_uint32(&sc->rnd_source, rndval);
   8592 
   8593 	if (handled) {
   8594 		/* Try to get more packets going. */
   8595 		softint_schedule(wmq->wmq_si);
   8596 	}
   8597 
   8598 	return handled;
   8599 }
   8600 
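         /*
          * wm_txrxintr_disable:
          *
          *	Mask the Tx/Rx interrupts of the given queue.  The 82574 uses
          *	per-queue bits in IMC, the 82575 uses per-queue EITR bits in
          *	EIMC, and the other MSI-X capable chips use the MSI-X vector
          *	bit in EIMC.
          */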
   8601 static inline void
   8602 wm_txrxintr_disable(struct wm_queue *wmq)
   8603 {
   8604 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8605 
   8606 	if (sc->sc_type == WM_T_82574)
   8607 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8608 	else if (sc->sc_type == WM_T_82575)
   8609 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8610 	else
   8611 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8612 }
   8613 
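         /*
          * wm_txrxintr_enable:
          *
          *	Recompute the interrupt throttling parameters and unmask the
          *	Tx/Rx interrupts of the given queue, mirroring
          *	wm_txrxintr_disable().
          */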
   8614 static inline void
   8615 wm_txrxintr_enable(struct wm_queue *wmq)
   8616 {
   8617 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8618 
   8619 	wm_itrs_calculate(sc, wmq);
   8620 
   8621 	if (sc->sc_type == WM_T_82574)
   8622 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8623 	else if (sc->sc_type == WM_T_82575)
   8624 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8625 	else
   8626 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8627 }
   8628 
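         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for Tx/Rx events for MSI-X.  The
          *	queue interrupt stays masked until wm_handle_queue() has
          *	finished the deferred work and re-enabled it.
          */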
   8629 static int
   8630 wm_txrxintr_msix(void *arg)
   8631 {
   8632 	struct wm_queue *wmq = arg;
   8633 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8634 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8635 	struct wm_softc *sc = txq->txq_sc;
   8636 	u_int limit = sc->sc_rx_intr_process_limit;
   8637 
   8638 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8639 
   8640 	DPRINTF(WM_DEBUG_TX,
   8641 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8642 
   8643 	wm_txrxintr_disable(wmq);
   8644 
   8645 	mutex_enter(txq->txq_lock);
   8646 
   8647 	if (txq->txq_stopping) {
   8648 		mutex_exit(txq->txq_lock);
   8649 		return 0;
   8650 	}
   8651 
   8652 	WM_Q_EVCNT_INCR(txq, txdw);
   8653 	wm_txeof(sc, txq);
   8654 	/* wm_deferred start() is done in wm_handle_queue(). */
   8655 	mutex_exit(txq->txq_lock);
   8656 
   8657 	DPRINTF(WM_DEBUG_RX,
   8658 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8659 	mutex_enter(rxq->rxq_lock);
   8660 
   8661 	if (rxq->rxq_stopping) {
   8662 		mutex_exit(rxq->rxq_lock);
   8663 		return 0;
   8664 	}
   8665 
   8666 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8667 	wm_rxeof(rxq, limit);
   8668 	mutex_exit(rxq->rxq_lock);
   8669 
   8670 	wm_itrs_writereg(sc, wmq);
   8671 
   8672 	softint_schedule(wmq->wmq_si);
   8673 
   8674 	return 1;
   8675 }
   8676 
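         /*
          * wm_handle_queue:
          *
          *	Softint handler for a Tx/Rx queue pair; finishes the deferred
          *	Tx/Rx work and re-enables the queue interrupt.
          */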
   8677 static void
   8678 wm_handle_queue(void *arg)
   8679 {
   8680 	struct wm_queue *wmq = arg;
   8681 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8682 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8683 	struct wm_softc *sc = txq->txq_sc;
   8684 	u_int limit = sc->sc_rx_process_limit;
   8685 
   8686 	mutex_enter(txq->txq_lock);
   8687 	if (txq->txq_stopping) {
   8688 		mutex_exit(txq->txq_lock);
   8689 		return;
   8690 	}
   8691 	wm_txeof(sc, txq);
   8692 	wm_deferred_start_locked(txq);
   8693 	mutex_exit(txq->txq_lock);
   8694 
   8695 	mutex_enter(rxq->rxq_lock);
   8696 	if (rxq->rxq_stopping) {
   8697 		mutex_exit(rxq->rxq_lock);
   8698 		return;
   8699 	}
   8700 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8701 	wm_rxeof(rxq, limit);
   8702 	mutex_exit(rxq->rxq_lock);
   8703 
   8704 	wm_txrxintr_enable(wmq);
   8705 }
   8706 
   8707 /*
   8708  * wm_linkintr_msix:
   8709  *
   8710  *	Interrupt service routine for link status change for MSI-X.
   8711  */
   8712 static int
   8713 wm_linkintr_msix(void *arg)
   8714 {
   8715 	struct wm_softc *sc = arg;
   8716 	uint32_t reg;
   8717 
   8718 	DPRINTF(WM_DEBUG_LINK,
   8719 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8720 
   8721 	reg = CSR_READ(sc, WMREG_ICR);
   8722 	WM_CORE_LOCK(sc);
   8723 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8724 		goto out;
   8725 
   8726 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8727 	wm_linkintr(sc, ICR_LSC);
   8728 
   8729 out:
   8730 	WM_CORE_UNLOCK(sc);
   8731 
   8732 	if (sc->sc_type == WM_T_82574)
   8733 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8734 	else if (sc->sc_type == WM_T_82575)
   8735 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8736 	else
   8737 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8738 
   8739 	return 1;
   8740 }
   8741 
   8742 /*
   8743  * Media related.
   8744  * GMII, SGMII, TBI (and SERDES)
   8745  */
   8746 
   8747 /* Common */
   8748 
   8749 /*
   8750  * wm_tbi_serdes_set_linkled:
   8751  *
   8752  *	Update the link LED on TBI and SERDES devices.
   8753  */
   8754 static void
   8755 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8756 {
   8757 
   8758 	if (sc->sc_tbi_linkup)
   8759 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8760 	else
   8761 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8762 
   8763 	/* 82540 or newer devices are active low */
   8764 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8765 
   8766 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8767 }
   8768 
   8769 /* GMII related */
   8770 
   8771 /*
   8772  * wm_gmii_reset:
   8773  *
   8774  *	Reset the PHY.
   8775  */
   8776 static void
   8777 wm_gmii_reset(struct wm_softc *sc)
   8778 {
   8779 	uint32_t reg;
   8780 	int rv;
   8781 
   8782 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8783 		device_xname(sc->sc_dev), __func__));
   8784 
   8785 	rv = sc->phy.acquire(sc);
   8786 	if (rv != 0) {
   8787 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8788 		    __func__);
   8789 		return;
   8790 	}
   8791 
   8792 	switch (sc->sc_type) {
   8793 	case WM_T_82542_2_0:
   8794 	case WM_T_82542_2_1:
   8795 		/* null */
   8796 		break;
   8797 	case WM_T_82543:
   8798 		/*
   8799 		 * With 82543, we need to force speed and duplex on the MAC
   8800 		 * equal to what the PHY speed and duplex configuration is.
   8801 		 * In addition, we need to perform a hardware reset on the PHY
   8802 		 * to take it out of reset.
   8803 		 */
   8804 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8805 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8806 
   8807 		/* The PHY reset pin is active-low. */
   8808 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8809 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8810 		    CTRL_EXT_SWDPIN(4));
   8811 		reg |= CTRL_EXT_SWDPIO(4);
   8812 
   8813 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8814 		CSR_WRITE_FLUSH(sc);
   8815 		delay(10*1000);
   8816 
   8817 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8818 		CSR_WRITE_FLUSH(sc);
   8819 		delay(150);
   8820 #if 0
   8821 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8822 #endif
   8823 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8824 		break;
   8825 	case WM_T_82544:	/* reset 10000us */
   8826 	case WM_T_82540:
   8827 	case WM_T_82545:
   8828 	case WM_T_82545_3:
   8829 	case WM_T_82546:
   8830 	case WM_T_82546_3:
   8831 	case WM_T_82541:
   8832 	case WM_T_82541_2:
   8833 	case WM_T_82547:
   8834 	case WM_T_82547_2:
   8835 	case WM_T_82571:	/* reset 100us */
   8836 	case WM_T_82572:
   8837 	case WM_T_82573:
   8838 	case WM_T_82574:
   8839 	case WM_T_82575:
   8840 	case WM_T_82576:
   8841 	case WM_T_82580:
   8842 	case WM_T_I350:
   8843 	case WM_T_I354:
   8844 	case WM_T_I210:
   8845 	case WM_T_I211:
   8846 	case WM_T_82583:
   8847 	case WM_T_80003:
   8848 		/* generic reset */
   8849 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8850 		CSR_WRITE_FLUSH(sc);
   8851 		delay(20000);
   8852 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8853 		CSR_WRITE_FLUSH(sc);
   8854 		delay(20000);
   8855 
   8856 		if ((sc->sc_type == WM_T_82541)
   8857 		    || (sc->sc_type == WM_T_82541_2)
   8858 		    || (sc->sc_type == WM_T_82547)
   8859 		    || (sc->sc_type == WM_T_82547_2)) {
    8860 			/* Workarounds for IGP are done in igp_reset() */
   8861 			/* XXX add code to set LED after phy reset */
   8862 		}
   8863 		break;
   8864 	case WM_T_ICH8:
   8865 	case WM_T_ICH9:
   8866 	case WM_T_ICH10:
   8867 	case WM_T_PCH:
   8868 	case WM_T_PCH2:
   8869 	case WM_T_PCH_LPT:
   8870 	case WM_T_PCH_SPT:
   8871 		/* generic reset */
   8872 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8873 		CSR_WRITE_FLUSH(sc);
   8874 		delay(100);
   8875 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8876 		CSR_WRITE_FLUSH(sc);
   8877 		delay(150);
   8878 		break;
   8879 	default:
   8880 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8881 		    __func__);
   8882 		break;
   8883 	}
   8884 
   8885 	sc->phy.release(sc);
   8886 
   8887 	/* get_cfg_done */
   8888 	wm_get_cfg_done(sc);
   8889 
   8890 	/* extra setup */
   8891 	switch (sc->sc_type) {
   8892 	case WM_T_82542_2_0:
   8893 	case WM_T_82542_2_1:
   8894 	case WM_T_82543:
   8895 	case WM_T_82544:
   8896 	case WM_T_82540:
   8897 	case WM_T_82545:
   8898 	case WM_T_82545_3:
   8899 	case WM_T_82546:
   8900 	case WM_T_82546_3:
   8901 	case WM_T_82541_2:
   8902 	case WM_T_82547_2:
   8903 	case WM_T_82571:
   8904 	case WM_T_82572:
   8905 	case WM_T_82573:
   8906 	case WM_T_82575:
   8907 	case WM_T_82576:
   8908 	case WM_T_82580:
   8909 	case WM_T_I350:
   8910 	case WM_T_I354:
   8911 	case WM_T_I210:
   8912 	case WM_T_I211:
   8913 	case WM_T_80003:
   8914 		/* null */
   8915 		break;
   8916 	case WM_T_82574:
   8917 	case WM_T_82583:
   8918 		wm_lplu_d0_disable(sc);
   8919 		break;
   8920 	case WM_T_82541:
   8921 	case WM_T_82547:
    8922 		/* XXX Configure the activity LED after PHY reset */
   8923 		break;
   8924 	case WM_T_ICH8:
   8925 	case WM_T_ICH9:
   8926 	case WM_T_ICH10:
   8927 	case WM_T_PCH:
   8928 	case WM_T_PCH2:
   8929 	case WM_T_PCH_LPT:
   8930 	case WM_T_PCH_SPT:
    8931 		/* Allow time for h/w to get to a quiescent state after reset */
   8932 		delay(10*1000);
   8933 
   8934 		if (sc->sc_type == WM_T_PCH)
   8935 			wm_hv_phy_workaround_ich8lan(sc);
   8936 
   8937 		if (sc->sc_type == WM_T_PCH2)
   8938 			wm_lv_phy_workaround_ich8lan(sc);
   8939 
   8940 		/* Clear the host wakeup bit after lcd reset */
   8941 		if (sc->sc_type >= WM_T_PCH) {
   8942 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8943 			    BM_PORT_GEN_CFG);
   8944 			reg &= ~BM_WUC_HOST_WU_BIT;
   8945 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8946 			    BM_PORT_GEN_CFG, reg);
   8947 		}
   8948 
   8949 		/*
    8950 		 * XXX Configure the LCD with the extended configuration
    8951 		 * region in NVM
   8952 		 */
   8953 
   8954 		/* Disable D0 LPLU. */
   8955 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8956 			wm_lplu_d0_disable_pch(sc);
   8957 		else
   8958 			wm_lplu_d0_disable(sc);	/* ICH* */
   8959 		break;
   8960 	default:
   8961 		panic("%s: unknown type\n", __func__);
   8962 		break;
   8963 	}
   8964 }
   8965 
   8966 /*
   8967  * Setup sc_phytype and mii_{read|write}reg.
   8968  *
    8969  *  To identify the PHY type, the correct read/write functions must be
    8970  * selected, and to select them the PCI ID or MAC type must be known
    8971  * without accessing any PHY registers.
    8972  *
    8973  *  On the first call of this function, the PHY ID is not known yet,
    8974  * so the PCI ID or MAC type is checked. The list of PCI IDs may not
    8975  * be perfect, so the result might be incorrect.
    8976  *
    8977  *  On the second call, the PHY OUI and model identify the PHY type.
    8978  * This might not be perfect because the comparison table may be
    8979  * incomplete, but the result should be better than the first call's.
    8980  *
    8981  *  If the newly detected result differs from the previous assumption,
    8982  * a diagnostic message is printed.
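          *
          *  For example, wm_gmii_mediainit() below calls this function
          * twice: first as wm_gmii_setup_phytype(sc, 0, 0) before any PHY
          * has been probed, and again with child->mii_mpd_oui and
          * child->mii_mpd_model once a PHY has been found.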
   8983  */
   8984 static void
   8985 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8986     uint16_t phy_model)
   8987 {
   8988 	device_t dev = sc->sc_dev;
   8989 	struct mii_data *mii = &sc->sc_mii;
   8990 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8991 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8992 	mii_readreg_t new_readreg;
   8993 	mii_writereg_t new_writereg;
   8994 
   8995 	if (mii->mii_readreg == NULL) {
   8996 		/*
   8997 		 *  This is the first call of this function. For ICH and PCH
   8998 		 * variants, it's difficult to determine the PHY access method
   8999 		 * by sc_type, so use the PCI product ID for some devices.
   9000 		 */
   9001 
   9002 		switch (sc->sc_pcidevid) {
   9003 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9004 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9005 			/* 82577 */
   9006 			new_phytype = WMPHY_82577;
   9007 			break;
   9008 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9009 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9010 			/* 82578 */
   9011 			new_phytype = WMPHY_82578;
   9012 			break;
   9013 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9014 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9015 			/* 82579 */
   9016 			new_phytype = WMPHY_82579;
   9017 			break;
   9018 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9019 		case PCI_PRODUCT_INTEL_82801I_BM:
   9020 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9021 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9022 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9023 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9024 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9025 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9026 			/* ICH8, 9, 10 with 82567 */
   9027 			new_phytype = WMPHY_BM;
   9028 			break;
   9029 		default:
   9030 			break;
   9031 		}
   9032 	} else {
   9033 		/* It's not the first call. Use PHY OUI and model */
   9034 		switch (phy_oui) {
   9035 		case MII_OUI_ATHEROS: /* XXX ??? */
   9036 			switch (phy_model) {
   9037 			case 0x0004: /* XXX */
   9038 				new_phytype = WMPHY_82578;
   9039 				break;
   9040 			default:
   9041 				break;
   9042 			}
   9043 			break;
   9044 		case MII_OUI_xxMARVELL:
   9045 			switch (phy_model) {
   9046 			case MII_MODEL_xxMARVELL_I210:
   9047 				new_phytype = WMPHY_I210;
   9048 				break;
   9049 			case MII_MODEL_xxMARVELL_E1011:
   9050 			case MII_MODEL_xxMARVELL_E1000_3:
   9051 			case MII_MODEL_xxMARVELL_E1000_5:
   9052 			case MII_MODEL_xxMARVELL_E1112:
   9053 				new_phytype = WMPHY_M88;
   9054 				break;
   9055 			case MII_MODEL_xxMARVELL_E1149:
   9056 				new_phytype = WMPHY_BM;
   9057 				break;
   9058 			case MII_MODEL_xxMARVELL_E1111:
   9059 			case MII_MODEL_xxMARVELL_I347:
   9060 			case MII_MODEL_xxMARVELL_E1512:
   9061 			case MII_MODEL_xxMARVELL_E1340M:
   9062 			case MII_MODEL_xxMARVELL_E1543:
   9063 				new_phytype = WMPHY_M88;
   9064 				break;
   9065 			case MII_MODEL_xxMARVELL_I82563:
   9066 				new_phytype = WMPHY_GG82563;
   9067 				break;
   9068 			default:
   9069 				break;
   9070 			}
   9071 			break;
   9072 		case MII_OUI_INTEL:
   9073 			switch (phy_model) {
   9074 			case MII_MODEL_INTEL_I82577:
   9075 				new_phytype = WMPHY_82577;
   9076 				break;
   9077 			case MII_MODEL_INTEL_I82579:
   9078 				new_phytype = WMPHY_82579;
   9079 				break;
   9080 			case MII_MODEL_INTEL_I217:
   9081 				new_phytype = WMPHY_I217;
   9082 				break;
   9083 			case MII_MODEL_INTEL_I82580:
   9084 			case MII_MODEL_INTEL_I350:
   9085 				new_phytype = WMPHY_82580;
   9086 				break;
   9087 			default:
   9088 				break;
   9089 			}
   9090 			break;
   9091 		case MII_OUI_yyINTEL:
   9092 			switch (phy_model) {
   9093 			case MII_MODEL_yyINTEL_I82562G:
   9094 			case MII_MODEL_yyINTEL_I82562EM:
   9095 			case MII_MODEL_yyINTEL_I82562ET:
   9096 				new_phytype = WMPHY_IFE;
   9097 				break;
   9098 			case MII_MODEL_yyINTEL_IGP01E1000:
   9099 				new_phytype = WMPHY_IGP;
   9100 				break;
   9101 			case MII_MODEL_yyINTEL_I82566:
   9102 				new_phytype = WMPHY_IGP_3;
   9103 				break;
   9104 			default:
   9105 				break;
   9106 			}
   9107 			break;
   9108 		default:
   9109 			break;
   9110 		}
   9111 		if (new_phytype == WMPHY_UNKNOWN)
   9112 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9113 			    __func__);
   9114 
   9115 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9116 		    && (sc->sc_phytype != new_phytype)) {
    9117 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9118 			    "was incorrect. PHY type from PHY ID = %u\n",
   9119 			    sc->sc_phytype, new_phytype);
   9120 		}
   9121 	}
   9122 
   9123 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9124 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9125 		/* SGMII */
   9126 		new_readreg = wm_sgmii_readreg;
   9127 		new_writereg = wm_sgmii_writereg;
    9128 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9129 		/* BM2 (phyaddr == 1) */
   9130 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9131 		    && (new_phytype != WMPHY_BM)
   9132 		    && (new_phytype != WMPHY_UNKNOWN))
   9133 			doubt_phytype = new_phytype;
   9134 		new_phytype = WMPHY_BM;
   9135 		new_readreg = wm_gmii_bm_readreg;
   9136 		new_writereg = wm_gmii_bm_writereg;
   9137 	} else if (sc->sc_type >= WM_T_PCH) {
   9138 		/* All PCH* use _hv_ */
   9139 		new_readreg = wm_gmii_hv_readreg;
   9140 		new_writereg = wm_gmii_hv_writereg;
   9141 	} else if (sc->sc_type >= WM_T_ICH8) {
   9142 		/* non-82567 ICH8, 9 and 10 */
   9143 		new_readreg = wm_gmii_i82544_readreg;
   9144 		new_writereg = wm_gmii_i82544_writereg;
   9145 	} else if (sc->sc_type >= WM_T_80003) {
   9146 		/* 80003 */
   9147 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9148 		    && (new_phytype != WMPHY_GG82563)
   9149 		    && (new_phytype != WMPHY_UNKNOWN))
   9150 			doubt_phytype = new_phytype;
   9151 		new_phytype = WMPHY_GG82563;
   9152 		new_readreg = wm_gmii_i80003_readreg;
   9153 		new_writereg = wm_gmii_i80003_writereg;
   9154 	} else if (sc->sc_type >= WM_T_I210) {
   9155 		/* I210 and I211 */
   9156 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9157 		    && (new_phytype != WMPHY_I210)
   9158 		    && (new_phytype != WMPHY_UNKNOWN))
   9159 			doubt_phytype = new_phytype;
   9160 		new_phytype = WMPHY_I210;
   9161 		new_readreg = wm_gmii_gs40g_readreg;
   9162 		new_writereg = wm_gmii_gs40g_writereg;
   9163 	} else if (sc->sc_type >= WM_T_82580) {
   9164 		/* 82580, I350 and I354 */
   9165 		new_readreg = wm_gmii_82580_readreg;
   9166 		new_writereg = wm_gmii_82580_writereg;
   9167 	} else if (sc->sc_type >= WM_T_82544) {
    9168 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9169 		new_readreg = wm_gmii_i82544_readreg;
   9170 		new_writereg = wm_gmii_i82544_writereg;
   9171 	} else {
   9172 		new_readreg = wm_gmii_i82543_readreg;
   9173 		new_writereg = wm_gmii_i82543_writereg;
   9174 	}
   9175 
   9176 	if (new_phytype == WMPHY_BM) {
   9177 		/* All BM use _bm_ */
   9178 		new_readreg = wm_gmii_bm_readreg;
   9179 		new_writereg = wm_gmii_bm_writereg;
   9180 	}
   9181 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9182 		/* All PCH* use _hv_ */
   9183 		new_readreg = wm_gmii_hv_readreg;
   9184 		new_writereg = wm_gmii_hv_writereg;
   9185 	}
   9186 
   9187 	/* Diag output */
   9188 	if (doubt_phytype != WMPHY_UNKNOWN)
   9189 		aprint_error_dev(dev, "Assumed new PHY type was "
   9190 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9191 		    new_phytype);
   9192 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9193 	    && (sc->sc_phytype != new_phytype))
    9194 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9195 		    "was incorrect. New PHY type = %u\n",
   9196 		    sc->sc_phytype, new_phytype);
   9197 
   9198 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9199 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9200 
   9201 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9202 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9203 		    "function was incorrect.\n");
   9204 
   9205 	/* Update now */
   9206 	sc->sc_phytype = new_phytype;
   9207 	mii->mii_readreg = new_readreg;
   9208 	mii->mii_writereg = new_writereg;
   9209 }
   9210 
   9211 /*
   9212  * wm_get_phy_id_82575:
   9213  *
   9214  * Return PHY ID. Return -1 if it failed.
   9215  */
   9216 static int
   9217 wm_get_phy_id_82575(struct wm_softc *sc)
   9218 {
   9219 	uint32_t reg;
   9220 	int phyid = -1;
   9221 
   9222 	/* XXX */
   9223 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9224 		return -1;
   9225 
   9226 	if (wm_sgmii_uses_mdio(sc)) {
   9227 		switch (sc->sc_type) {
   9228 		case WM_T_82575:
   9229 		case WM_T_82576:
   9230 			reg = CSR_READ(sc, WMREG_MDIC);
   9231 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9232 			break;
   9233 		case WM_T_82580:
   9234 		case WM_T_I350:
   9235 		case WM_T_I354:
   9236 		case WM_T_I210:
   9237 		case WM_T_I211:
   9238 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9239 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9240 			break;
   9241 		default:
   9242 			return -1;
   9243 		}
   9244 	}
   9245 
   9246 	return phyid;
   9247 }
   9248 
   9249 
   9250 /*
   9251  * wm_gmii_mediainit:
   9252  *
   9253  *	Initialize media for use on 1000BASE-T devices.
   9254  */
   9255 static void
   9256 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9257 {
   9258 	device_t dev = sc->sc_dev;
   9259 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9260 	struct mii_data *mii = &sc->sc_mii;
   9261 	uint32_t reg;
   9262 
   9263 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9264 		device_xname(sc->sc_dev), __func__));
   9265 
   9266 	/* We have GMII. */
   9267 	sc->sc_flags |= WM_F_HAS_MII;
   9268 
   9269 	if (sc->sc_type == WM_T_80003)
    9270 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9271 	else
   9272 		sc->sc_tipg = TIPG_1000T_DFLT;
   9273 
   9274 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9275 	if ((sc->sc_type == WM_T_82580)
   9276 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9277 	    || (sc->sc_type == WM_T_I211)) {
   9278 		reg = CSR_READ(sc, WMREG_PHPM);
   9279 		reg &= ~PHPM_GO_LINK_D;
   9280 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9281 	}
   9282 
   9283 	/*
   9284 	 * Let the chip set speed/duplex on its own based on
   9285 	 * signals from the PHY.
   9286 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9287 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9288 	 */
   9289 	sc->sc_ctrl |= CTRL_SLU;
   9290 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9291 
   9292 	/* Initialize our media structures and probe the GMII. */
   9293 	mii->mii_ifp = ifp;
   9294 
   9295 	/*
    9296 	 * The first call of wm_gmii_setup_phytype. The result might be
   9297 	 * incorrect.
   9298 	 */
   9299 	wm_gmii_setup_phytype(sc, 0, 0);
   9300 
   9301 	mii->mii_statchg = wm_gmii_statchg;
   9302 
   9303 	/* get PHY control from SMBus to PCIe */
   9304 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9305 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9306 		wm_smbustopci(sc);
   9307 
   9308 	wm_gmii_reset(sc);
   9309 
   9310 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9311 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9312 	    wm_gmii_mediastatus);
   9313 
   9314 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9315 	    || (sc->sc_type == WM_T_82580)
   9316 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9317 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9318 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9319 			/* Attach only one port */
   9320 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9321 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9322 		} else {
   9323 			int i, id;
   9324 			uint32_t ctrl_ext;
   9325 
   9326 			id = wm_get_phy_id_82575(sc);
   9327 			if (id != -1) {
   9328 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9329 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9330 			}
   9331 			if ((id == -1)
   9332 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9333 				/* Power on sgmii phy if it is disabled */
   9334 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9335 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9336 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9337 				CSR_WRITE_FLUSH(sc);
   9338 				delay(300*1000); /* XXX too long */
   9339 
    9340 				/* Try PHY addresses from 1 to 7 */
   9341 				for (i = 1; i < 8; i++)
   9342 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9343 					    0xffffffff, i, MII_OFFSET_ANY,
   9344 					    MIIF_DOPAUSE);
   9345 
   9346 				/* restore previous sfp cage power state */
   9347 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9348 			}
   9349 		}
   9350 	} else {
   9351 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9352 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9353 	}
   9354 
   9355 	/*
   9356 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9357 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9358 	 */
   9359 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9360 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9361 		wm_set_mdio_slow_mode_hv(sc);
   9362 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9363 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9364 	}
   9365 
   9366 	/*
   9367 	 * (For ICH8 variants)
   9368 	 * If PHY detection failed, use BM's r/w function and retry.
   9369 	 */
   9370 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9371 		/* if failed, retry with *_bm_* */
   9372 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9373 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9374 		    sc->sc_phytype);
   9375 		sc->sc_phytype = WMPHY_BM;
   9376 		mii->mii_readreg = wm_gmii_bm_readreg;
   9377 		mii->mii_writereg = wm_gmii_bm_writereg;
   9378 
   9379 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9380 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9381 	}
   9382 
   9383 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9384 		/* No PHY was found */
   9385 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9386 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9387 		sc->sc_phytype = WMPHY_NONE;
   9388 	} else {
   9389 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9390 
   9391 		/*
    9392 		 * PHY found! Check the PHY type again by the second call of
    9393 		 * wm_gmii_setup_phytype.
   9394 		 */
   9395 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9396 		    child->mii_mpd_model);
   9397 
   9398 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9399 	}
   9400 }
   9401 
   9402 /*
   9403  * wm_gmii_mediachange:	[ifmedia interface function]
   9404  *
   9405  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9406  */
   9407 static int
   9408 wm_gmii_mediachange(struct ifnet *ifp)
   9409 {
   9410 	struct wm_softc *sc = ifp->if_softc;
   9411 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9412 	int rc;
   9413 
   9414 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9415 		device_xname(sc->sc_dev), __func__));
   9416 	if ((ifp->if_flags & IFF_UP) == 0)
   9417 		return 0;
   9418 
   9419 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9420 	sc->sc_ctrl |= CTRL_SLU;
   9421 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9422 	    || (sc->sc_type > WM_T_82543)) {
   9423 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9424 	} else {
   9425 		sc->sc_ctrl &= ~CTRL_ASDE;
   9426 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9427 		if (ife->ifm_media & IFM_FDX)
   9428 			sc->sc_ctrl |= CTRL_FD;
   9429 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9430 		case IFM_10_T:
   9431 			sc->sc_ctrl |= CTRL_SPEED_10;
   9432 			break;
   9433 		case IFM_100_TX:
   9434 			sc->sc_ctrl |= CTRL_SPEED_100;
   9435 			break;
   9436 		case IFM_1000_T:
   9437 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9438 			break;
   9439 		default:
   9440 			panic("wm_gmii_mediachange: bad media 0x%x",
   9441 			    ife->ifm_media);
   9442 		}
   9443 	}
   9444 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9445 	if (sc->sc_type <= WM_T_82543)
   9446 		wm_gmii_reset(sc);
   9447 
   9448 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9449 		return 0;
   9450 	return rc;
   9451 }
   9452 
   9453 /*
   9454  * wm_gmii_mediastatus:	[ifmedia interface function]
   9455  *
   9456  *	Get the current interface media status on a 1000BASE-T device.
   9457  */
   9458 static void
   9459 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9460 {
   9461 	struct wm_softc *sc = ifp->if_softc;
   9462 
   9463 	ether_mediastatus(ifp, ifmr);
   9464 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9465 	    | sc->sc_flowflags;
   9466 }
   9467 
   9468 #define	MDI_IO		CTRL_SWDPIN(2)
   9469 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9470 #define	MDI_CLK		CTRL_SWDPIN(3)
   9471 
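         /*
          * wm_i82543_mii_sendbits:
          *
          *	Clock out the low `nbits' bits of `data' on the bit-banged
          *	MDIO interface, MSB first: MDI_IO carries the data bit and
          *	MDI_CLK is pulsed once per bit.
          */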
   9472 static void
   9473 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9474 {
   9475 	uint32_t i, v;
   9476 
   9477 	v = CSR_READ(sc, WMREG_CTRL);
   9478 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9479 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9480 
   9481 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9482 		if (data & i)
   9483 			v |= MDI_IO;
   9484 		else
   9485 			v &= ~MDI_IO;
   9486 		CSR_WRITE(sc, WMREG_CTRL, v);
   9487 		CSR_WRITE_FLUSH(sc);
   9488 		delay(10);
   9489 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9490 		CSR_WRITE_FLUSH(sc);
   9491 		delay(10);
   9492 		CSR_WRITE(sc, WMREG_CTRL, v);
   9493 		CSR_WRITE_FLUSH(sc);
   9494 		delay(10);
   9495 	}
   9496 }
   9497 
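         /*
          * wm_i82543_mii_recvbits:
          *
          *	Clock in a 16-bit value from the bit-banged MDIO interface,
          *	MSB first, one bit per MDI_CLK pulse.
          */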
   9498 static uint32_t
   9499 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9500 {
   9501 	uint32_t v, i, data = 0;
   9502 
   9503 	v = CSR_READ(sc, WMREG_CTRL);
   9504 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9505 	v |= CTRL_SWDPIO(3);
   9506 
   9507 	CSR_WRITE(sc, WMREG_CTRL, v);
   9508 	CSR_WRITE_FLUSH(sc);
   9509 	delay(10);
   9510 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9511 	CSR_WRITE_FLUSH(sc);
   9512 	delay(10);
   9513 	CSR_WRITE(sc, WMREG_CTRL, v);
   9514 	CSR_WRITE_FLUSH(sc);
   9515 	delay(10);
   9516 
   9517 	for (i = 0; i < 16; i++) {
   9518 		data <<= 1;
   9519 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9520 		CSR_WRITE_FLUSH(sc);
   9521 		delay(10);
   9522 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9523 			data |= 1;
   9524 		CSR_WRITE(sc, WMREG_CTRL, v);
   9525 		CSR_WRITE_FLUSH(sc);
   9526 		delay(10);
   9527 	}
   9528 
   9529 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9530 	CSR_WRITE_FLUSH(sc);
   9531 	delay(10);
   9532 	CSR_WRITE(sc, WMREG_CTRL, v);
   9533 	CSR_WRITE_FLUSH(sc);
   9534 	delay(10);
   9535 
   9536 	return data;
   9537 }
   9538 
   9539 #undef MDI_IO
   9540 #undef MDI_DIR
   9541 #undef MDI_CLK
   9542 
   9543 /*
   9544  * wm_gmii_i82543_readreg:	[mii interface function]
   9545  *
   9546  *	Read a PHY register on the GMII (i82543 version).
   9547  */
   9548 static int
   9549 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9550 {
   9551 	struct wm_softc *sc = device_private(self);
   9552 	int rv;
   9553 
   9554 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9555 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9556 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9557 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9558 
   9559 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9560 	    device_xname(sc->sc_dev), phy, reg, rv));
   9561 
   9562 	return rv;
   9563 }
   9564 
   9565 /*
   9566  * wm_gmii_i82543_writereg:	[mii interface function]
   9567  *
   9568  *	Write a PHY register on the GMII (i82543 version).
   9569  */
   9570 static void
   9571 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9572 {
   9573 	struct wm_softc *sc = device_private(self);
   9574 
   9575 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9576 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9577 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9578 	    (MII_COMMAND_START << 30), 32);
   9579 }
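
/*
 * For reference, the two routines above shift frames out MSB first,
 * per IEEE 802.3 clause 22, after a 32-bit preamble of all ones:
 *
 *	read:	ST(01) OP(10) PHYAD(5) REGAD(5)		= 14 bits out,
 *		then a turnaround and 16 data bits clocked back in;
 *	write:	ST(01) OP(01) PHYAD(5) REGAD(5) TA(10) DATA(16)
 *						= 32 bits out.
 *
 * This is what the MII_COMMAND_* shifts in the callers encode.
 */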
   9580 
   9581 /*
   9582  * wm_gmii_mdic_readreg:	[mii interface function]
   9583  *
   9584  *	Read a PHY register on the GMII.
   9585  */
   9586 static int
   9587 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9588 {
   9589 	struct wm_softc *sc = device_private(self);
   9590 	uint32_t mdic = 0;
   9591 	int i, rv;
   9592 
   9593 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9594 	    MDIC_REGADD(reg));
   9595 
   9596 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9597 		mdic = CSR_READ(sc, WMREG_MDIC);
   9598 		if (mdic & MDIC_READY)
   9599 			break;
   9600 		delay(50);
   9601 	}
   9602 
   9603 	if ((mdic & MDIC_READY) == 0) {
   9604 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9605 		    device_xname(sc->sc_dev), phy, reg);
   9606 		rv = 0;
   9607 	} else if (mdic & MDIC_E) {
   9608 #if 0 /* This is normal if no PHY is present. */
   9609 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9610 		    device_xname(sc->sc_dev), phy, reg);
   9611 #endif
   9612 		rv = 0;
   9613 	} else {
   9614 		rv = MDIC_DATA(mdic);
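		/*
		 * All-ones data usually means the management bus floated,
		 * i.e. no PHY answered at this address, so treat it like
		 * the error cases above.
		 */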
   9615 		if (rv == 0xffff)
   9616 			rv = 0;
   9617 	}
   9618 
   9619 	return rv;
   9620 }
   9621 
   9622 /*
   9623  * wm_gmii_mdic_writereg:	[mii interface function]
   9624  *
   9625  *	Write a PHY register on the GMII.
   9626  */
   9627 static void
   9628 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9629 {
   9630 	struct wm_softc *sc = device_private(self);
   9631 	uint32_t mdic = 0;
   9632 	int i;
   9633 
   9634 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9635 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9636 
   9637 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9638 		mdic = CSR_READ(sc, WMREG_MDIC);
   9639 		if (mdic & MDIC_READY)
   9640 			break;
   9641 		delay(50);
   9642 	}
   9643 
   9644 	if ((mdic & MDIC_READY) == 0)
   9645 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9646 		    device_xname(sc->sc_dev), phy, reg);
   9647 	else if (mdic & MDIC_E)
   9648 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9649 		    device_xname(sc->sc_dev), phy, reg);
   9650 }
   9651 
   9652 /*
   9653  * wm_gmii_i82544_readreg:	[mii interface function]
   9654  *
   9655  *	Read a PHY register on the GMII.
   9656  */
   9657 static int
   9658 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9659 {
   9660 	struct wm_softc *sc = device_private(self);
   9661 	int rv;
   9662 
   9663 	if (sc->phy.acquire(sc)) {
   9664 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9665 		    __func__);
   9666 		return 0;
   9667 	}
   9668 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9669 	sc->phy.release(sc);
   9670 
   9671 	return rv;
   9672 }
   9673 
   9674 /*
   9675  * wm_gmii_i82544_writereg:	[mii interface function]
   9676  *
   9677  *	Write a PHY register on the GMII.
   9678  */
   9679 static void
   9680 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9681 {
   9682 	struct wm_softc *sc = device_private(self);
   9683 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9688 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9689 	sc->phy.release(sc);
   9690 }
   9691 
   9692 /*
   9693  * wm_gmii_i80003_readreg:	[mii interface function]
   9694  *
 *	Read a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9698  */
   9699 static int
   9700 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9701 {
   9702 	struct wm_softc *sc = device_private(self);
   9703 	int rv;
   9704 
   9705 	if (phy != 1) /* only one PHY on kumeran bus */
   9706 		return 0;
   9707 
   9708 	if (sc->phy.acquire(sc)) {
   9709 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9710 		    __func__);
   9711 		return 0;
   9712 	}
   9713 
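	/*
	 * The register number encodes the GG82563 page in its upper bits;
	 * registers at or above GG82563_MIN_ALT_REG must be paged through
	 * the alternate page select register instead of the normal one.
	 */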
   9714 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9715 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9716 		    reg >> GG82563_PAGE_SHIFT);
   9717 	} else {
   9718 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9719 		    reg >> GG82563_PAGE_SHIFT);
   9720 	}
	/* Wait another 200us to work around a ready bit bug in the MDIC register */
   9722 	delay(200);
   9723 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9724 	delay(200);
   9725 	sc->phy.release(sc);
   9726 
   9727 	return rv;
   9728 }
   9729 
   9730 /*
   9731  * wm_gmii_i80003_writereg:	[mii interface function]
   9732  *
 *	Write a PHY register on the Kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9736  */
   9737 static void
   9738 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9739 {
   9740 	struct wm_softc *sc = device_private(self);
   9741 
   9742 	if (phy != 1) /* only one PHY on kumeran bus */
   9743 		return;
   9744 
   9745 	if (sc->phy.acquire(sc)) {
   9746 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9747 		    __func__);
   9748 		return;
   9749 	}
   9750 
   9751 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9752 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9753 		    reg >> GG82563_PAGE_SHIFT);
   9754 	} else {
   9755 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9756 		    reg >> GG82563_PAGE_SHIFT);
   9757 	}
	/* Wait another 200us to work around a ready bit bug in the MDIC register */
   9759 	delay(200);
   9760 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9761 	delay(200);
   9762 
   9763 	sc->phy.release(sc);
   9764 }
   9765 
   9766 /*
   9767  * wm_gmii_bm_readreg:	[mii interface function]
   9768  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9772  */
   9773 static int
   9774 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9775 {
   9776 	struct wm_softc *sc = device_private(self);
   9777 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9778 	uint16_t val;
   9779 	int rv;
   9780 
   9781 	if (sc->phy.acquire(sc)) {
   9782 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9783 		    __func__);
   9784 		return 0;
   9785 	}
   9786 
   9787 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9788 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9789 		    || (reg == 31)) ? 1 : phy;
   9790 	/* Page 800 works differently than the rest so it has its own func */
   9791 	if (page == BM_WUC_PAGE) {
   9792 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9793 		rv = val;
   9794 		goto release;
   9795 	}
   9796 
   9797 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9798 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9799 		    && (sc->sc_type != WM_T_82583))
   9800 			wm_gmii_mdic_writereg(self, phy,
   9801 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9802 		else
   9803 			wm_gmii_mdic_writereg(self, phy,
   9804 			    BME1000_PHY_PAGE_SELECT, page);
   9805 	}
   9806 
   9807 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9808 
   9809 release:
   9810 	sc->phy.release(sc);
   9811 	return rv;
   9812 }
   9813 
   9814 /*
   9815  * wm_gmii_bm_writereg:	[mii interface function]
   9816  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9820  */
   9821 static void
   9822 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9823 {
   9824 	struct wm_softc *sc = device_private(self);
   9825 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9826 
   9827 	if (sc->phy.acquire(sc)) {
   9828 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9829 		    __func__);
   9830 		return;
   9831 	}
   9832 
   9833 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9834 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9835 		    || (reg == 31)) ? 1 : phy;
   9836 	/* Page 800 works differently than the rest so it has its own func */
   9837 	if (page == BM_WUC_PAGE) {
   9838 		uint16_t tmp;
   9839 
   9840 		tmp = val;
   9841 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9842 		goto release;
   9843 	}
   9844 
   9845 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9846 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9847 		    && (sc->sc_type != WM_T_82583))
   9848 			wm_gmii_mdic_writereg(self, phy,
   9849 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9850 		else
   9851 			wm_gmii_mdic_writereg(self, phy,
   9852 			    BME1000_PHY_PAGE_SELECT, page);
   9853 	}
   9854 
   9855 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9856 
   9857 release:
   9858 	sc->phy.release(sc);
   9859 }
   9860 
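/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Those
 *	registers are not reachable through the normal page select;
 *	access goes indirectly through the address/data opcode
 *	registers on page 800, after enabling wakeup register access
 *	via the WUCE bit on page 769.  The three numbered steps below
 *	mirror the corresponding e1000 driver functions.
 */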
   9861 static void
   9862 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9863 {
   9864 	struct wm_softc *sc = device_private(self);
   9865 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9866 	uint16_t wuce, reg;
   9867 
   9868 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9869 		device_xname(sc->sc_dev), __func__));
   9870 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9871 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   9873 	}
   9874 
   9875 	/*
   9876 	 * 1) Enable PHY wakeup register first.
   9877 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9878 	 */
   9879 
   9880 	/* Set page 769 */
   9881 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9882 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9883 
   9884 	/* Read WUCE and save it */
   9885 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9886 
   9887 	reg = wuce | BM_WUC_ENABLE_BIT;
   9888 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9889 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9890 
   9891 	/* Select page 800 */
   9892 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9893 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9894 
   9895 	/*
   9896 	 * 2) Access PHY wakeup register.
   9897 	 * See e1000_access_phy_wakeup_reg_bm.
   9898 	 */
   9899 
   9900 	/* Write page 800 */
   9901 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9902 
   9903 	if (rd)
   9904 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9905 	else
   9906 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9907 
   9908 	/*
   9909 	 * 3) Disable PHY wakeup register.
   9910 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9911 	 */
   9912 	/* Set page 769 */
   9913 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9914 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9915 
   9916 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9917 }
   9918 
   9919 /*
   9920  * wm_gmii_hv_readreg:	[mii interface function]
   9921  *
 *	Read a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9925  */
   9926 static int
   9927 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9928 {
   9929 	struct wm_softc *sc = device_private(self);
   9930 	int rv;
   9931 
   9932 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9933 		device_xname(sc->sc_dev), __func__));
   9934 	if (sc->phy.acquire(sc)) {
   9935 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9936 		    __func__);
   9937 		return 0;
   9938 	}
   9939 
   9940 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9941 	sc->phy.release(sc);
   9942 	return rv;
   9943 }
   9944 
   9945 static int
   9946 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9947 {
   9948 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9949 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9950 	uint16_t val;
   9951 	int rv;
   9952 
   9953 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9954 
   9955 	/* Page 800 works differently than the rest so it has its own func */
   9956 	if (page == BM_WUC_PAGE) {
   9957 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9958 		return val;
   9959 	}
   9960 
	/*
	 * Pages above 0 but below 768 also work differently than the
	 * rest; they are not handled here yet.
	 */
   9965 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9966 		printf("gmii_hv_readreg!!!\n");
   9967 		return 0;
   9968 	}
   9969 
   9970 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9971 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9972 		    page << BME1000_PAGE_SHIFT);
   9973 	}
   9974 
   9975 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9976 	return rv;
   9977 }
   9978 
   9979 /*
   9980  * wm_gmii_hv_writereg:	[mii interface function]
   9981  *
 *	Write a PHY register on the HV (PCH and newer) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9985  */
   9986 static void
   9987 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9988 {
   9989 	struct wm_softc *sc = device_private(self);
   9990 
   9991 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9992 		device_xname(sc->sc_dev), __func__));
   9993 
   9994 	if (sc->phy.acquire(sc)) {
   9995 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9996 		    __func__);
   9997 		return;
   9998 	}
   9999 
   10000 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   10001 	sc->phy.release(sc);
   10002 }
   10003 
   10004 static void
   10005 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   10006 {
   10007 	struct wm_softc *sc = device_private(self);
   10008 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10009 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10010 
   10011 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10012 
   10013 	/* Page 800 works differently than the rest so it has its own func */
   10014 	if (page == BM_WUC_PAGE) {
   10015 		uint16_t tmp;
   10016 
   10017 		tmp = val;
   10018 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   10019 		return;
   10020 	}
   10021 
	/*
	 * Pages above 0 but below 768 also work differently than the
	 * rest; they are not handled here yet.
	 */
   10026 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10027 		printf("gmii_hv_writereg!!!\n");
   10028 		return;
   10029 	}
   10030 
	/*
	 * XXX Work around MDIO accesses being disabled after entering
	 * IEEE Power Down (whenever bit 11 of the PHY control register
	 * is set)
	 */
	if (sc->sc_phytype == WMPHY_82578) {
		struct mii_softc *child;

		child = LIST_FIRST(&sc->sc_mii.mii_phys);
		if ((child != NULL) && (child->mii_mpd_rev >= 1)
		    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
		    && ((val & (1 << 11)) != 0)) {
			printf("XXX need workaround\n");
		}
	}

	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
		    page << BME1000_PAGE_SHIFT);
	}
   10053 
   10054 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10055 }
   10056 
   10057 /*
   10058  * wm_gmii_82580_readreg:	[mii interface function]
   10059  *
   10060  *	Read a PHY register on the 82580 and I350.
   10061  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10063  */
   10064 static int
   10065 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10066 {
   10067 	struct wm_softc *sc = device_private(self);
   10068 	int rv;
   10069 
   10070 	if (sc->phy.acquire(sc) != 0) {
   10071 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10072 		    __func__);
   10073 		return 0;
   10074 	}
   10075 
   10076 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10077 
   10078 	sc->phy.release(sc);
   10079 	return rv;
   10080 }
   10081 
   10082 /*
   10083  * wm_gmii_82580_writereg:	[mii interface function]
   10084  *
   10085  *	Write a PHY register on the 82580 and I350.
   10086  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10088  */
   10089 static void
   10090 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10091 {
   10092 	struct wm_softc *sc = device_private(self);
   10093 
   10094 	if (sc->phy.acquire(sc) != 0) {
   10095 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10096 		    __func__);
   10097 		return;
   10098 	}
   10099 
   10100 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10101 
   10102 	sc->phy.release(sc);
   10103 }
   10104 
   10105 /*
   10106  * wm_gmii_gs40g_readreg:	[mii interface function]
   10107  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10111  */
   10112 static int
   10113 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10114 {
   10115 	struct wm_softc *sc = device_private(self);
   10116 	int page, offset;
   10117 	int rv;
   10118 
   10119 	/* Acquire semaphore */
   10120 	if (sc->phy.acquire(sc)) {
   10121 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10122 		    __func__);
   10123 		return 0;
   10124 	}
   10125 
   10126 	/* Page select */
   10127 	page = reg >> GS40G_PAGE_SHIFT;
   10128 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10129 
   10130 	/* Read reg */
   10131 	offset = reg & GS40G_OFFSET_MASK;
   10132 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10133 
   10134 	sc->phy.release(sc);
   10135 	return rv;
   10136 }
   10137 
   10138 /*
   10139  * wm_gmii_gs40g_writereg:	[mii interface function]
   10140  *
   10141  *	Write a PHY register on the I210 and I211.
   10142  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10144  */
   10145 static void
   10146 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10147 {
   10148 	struct wm_softc *sc = device_private(self);
   10149 	int page, offset;
   10150 
   10151 	/* Acquire semaphore */
   10152 	if (sc->phy.acquire(sc)) {
   10153 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10154 		    __func__);
   10155 		return;
   10156 	}
   10157 
   10158 	/* Page select */
   10159 	page = reg >> GS40G_PAGE_SHIFT;
   10160 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10161 
   10162 	/* Write reg */
   10163 	offset = reg & GS40G_OFFSET_MASK;
   10164 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10165 
   10166 	/* Release semaphore */
   10167 	sc->phy.release(sc);
   10168 }
   10169 
   10170 /*
   10171  * wm_gmii_statchg:	[mii interface function]
   10172  *
   10173  *	Callback from MII layer when media changes.
   10174  */
   10175 static void
   10176 wm_gmii_statchg(struct ifnet *ifp)
   10177 {
   10178 	struct wm_softc *sc = ifp->if_softc;
   10179 	struct mii_data *mii = &sc->sc_mii;
   10180 
   10181 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10182 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10183 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10184 
   10185 	/*
   10186 	 * Get flow control negotiation result.
   10187 	 */
   10188 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10189 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10190 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10191 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10192 	}
   10193 
   10194 	if (sc->sc_flowflags & IFM_FLOW) {
   10195 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10196 			sc->sc_ctrl |= CTRL_TFCE;
   10197 			sc->sc_fcrtl |= FCRTL_XONE;
   10198 		}
   10199 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10200 			sc->sc_ctrl |= CTRL_RFCE;
   10201 	}
   10202 
   10203 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10204 		DPRINTF(WM_DEBUG_LINK,
   10205 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10206 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10207 	} else {
   10208 		DPRINTF(WM_DEBUG_LINK,
   10209 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10210 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10211 	}
   10212 
   10213 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10214 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10215 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10216 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10217 	if (sc->sc_type == WM_T_80003) {
   10218 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10219 		case IFM_1000_T:
   10220 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10221 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10222 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10223 			break;
   10224 		default:
   10225 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10226 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10227 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10228 			break;
   10229 		}
   10230 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10231 	}
   10232 }
   10233 
   10234 /* kumeran related (80003, ICH* and PCH*) */
   10235 
   10236 /*
   10237  * wm_kmrn_readreg:
   10238  *
   10239  *	Read a kumeran register
   10240  */
   10241 static int
   10242 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10243 {
   10244 	int rv;
   10245 
   10246 	if (sc->sc_type == WM_T_80003)
   10247 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10248 	else
   10249 		rv = sc->phy.acquire(sc);
   10250 	if (rv != 0) {
   10251 		aprint_error_dev(sc->sc_dev,
   10252 		    "%s: failed to get semaphore\n", __func__);
   10253 		return 0;
   10254 	}
   10255 
   10256 	rv = wm_kmrn_readreg_locked(sc, reg);
   10257 
   10258 	if (sc->sc_type == WM_T_80003)
   10259 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10260 	else
   10261 		sc->phy.release(sc);
   10262 
   10263 	return rv;
   10264 }
   10265 
   10266 static int
   10267 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10268 {
   10269 	int rv;
   10270 
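	/*
	 * Setting the REN (read enable) bit together with the offset
	 * latches a read of the Kumeran register; the data is valid in
	 * the low bits shortly afterwards (hence the 2us delay).
	 */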
   10271 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10272 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10273 	    KUMCTRLSTA_REN);
   10274 	CSR_WRITE_FLUSH(sc);
   10275 	delay(2);
   10276 
   10277 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10278 
   10279 	return rv;
   10280 }
   10281 
   10282 /*
   10283  * wm_kmrn_writereg:
   10284  *
   10285  *	Write a kumeran register
   10286  */
   10287 static void
   10288 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10289 {
   10290 	int rv;
   10291 
   10292 	if (sc->sc_type == WM_T_80003)
   10293 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10294 	else
   10295 		rv = sc->phy.acquire(sc);
   10296 	if (rv != 0) {
   10297 		aprint_error_dev(sc->sc_dev,
   10298 		    "%s: failed to get semaphore\n", __func__);
   10299 		return;
   10300 	}
   10301 
   10302 	wm_kmrn_writereg_locked(sc, reg, val);
   10303 
   10304 	if (sc->sc_type == WM_T_80003)
   10305 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10306 	else
   10307 		sc->phy.release(sc);
   10308 }
   10309 
   10310 static void
   10311 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10312 {
   10313 
   10314 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10315 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10316 	    (val & KUMCTRLSTA_MASK));
   10317 }
   10318 
   10319 /* SGMII related */
   10320 
   10321 /*
   10322  * wm_sgmii_uses_mdio
   10323  *
   10324  * Check whether the transaction is to the internal PHY or the external
   10325  * MDIO interface. Return true if it's MDIO.
   10326  */
   10327 static bool
   10328 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10329 {
   10330 	uint32_t reg;
   10331 	bool ismdio = false;
   10332 
   10333 	switch (sc->sc_type) {
   10334 	case WM_T_82575:
   10335 	case WM_T_82576:
   10336 		reg = CSR_READ(sc, WMREG_MDIC);
   10337 		ismdio = ((reg & MDIC_DEST) != 0);
   10338 		break;
   10339 	case WM_T_82580:
   10340 	case WM_T_I350:
   10341 	case WM_T_I354:
   10342 	case WM_T_I210:
   10343 	case WM_T_I211:
   10344 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10345 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10346 		break;
   10347 	default:
   10348 		break;
   10349 	}
   10350 
   10351 	return ismdio;
   10352 }
   10353 
   10354 /*
   10355  * wm_sgmii_readreg:	[mii interface function]
   10356  *
 *	Read a PHY register on the SGMII.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10360  */
   10361 static int
   10362 wm_sgmii_readreg(device_t self, int phy, int reg)
   10363 {
   10364 	struct wm_softc *sc = device_private(self);
   10365 	uint32_t i2ccmd;
   10366 	int i, rv;
   10367 
   10368 	if (sc->phy.acquire(sc)) {
   10369 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10370 		    __func__);
   10371 		return 0;
   10372 	}
   10373 
   10374 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10375 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10376 	    | I2CCMD_OPCODE_READ;
   10377 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10378 
   10379 	/* Poll the ready bit */
   10380 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10381 		delay(50);
   10382 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10383 		if (i2ccmd & I2CCMD_READY)
   10384 			break;
   10385 	}
   10386 	if ((i2ccmd & I2CCMD_READY) == 0)
   10387 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10388 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10389 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10390 
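	/* Swap the data bytes back; the I2C interface returns them swapped */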
   10391 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10392 
   10393 	sc->phy.release(sc);
   10394 	return rv;
   10395 }
   10396 
   10397 /*
   10398  * wm_sgmii_writereg:	[mii interface function]
   10399  *
   10400  *	Write a PHY register on the SGMII.
   10401  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10403  */
   10404 static void
   10405 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10406 {
   10407 	struct wm_softc *sc = device_private(self);
   10408 	uint32_t i2ccmd;
   10409 	int i;
   10410 	int val_swapped;
   10411 
   10412 	if (sc->phy.acquire(sc) != 0) {
   10413 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10414 		    __func__);
   10415 		return;
   10416 	}
   10417 	/* Swap the data bytes for the I2C interface */
   10418 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10419 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10420 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10421 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10422 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10423 
   10424 	/* Poll the ready bit */
   10425 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10426 		delay(50);
   10427 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10428 		if (i2ccmd & I2CCMD_READY)
   10429 			break;
   10430 	}
   10431 	if ((i2ccmd & I2CCMD_READY) == 0)
   10432 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10433 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10434 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10435 
   10436 	sc->phy.release(sc);
   10437 }
   10438 
   10439 /* TBI related */
   10440 
   10441 /*
   10442  * wm_tbi_mediainit:
   10443  *
   10444  *	Initialize media for use on 1000BASE-X devices.
   10445  */
   10446 static void
   10447 wm_tbi_mediainit(struct wm_softc *sc)
   10448 {
   10449 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10450 	const char *sep = "";
   10451 
   10452 	if (sc->sc_type < WM_T_82543)
   10453 		sc->sc_tipg = TIPG_WM_DFLT;
   10454 	else
   10455 		sc->sc_tipg = TIPG_LG_DFLT;
   10456 
   10457 	sc->sc_tbi_serdes_anegticks = 5;
   10458 
   10459 	/* Initialize our media structures */
   10460 	sc->sc_mii.mii_ifp = ifp;
   10461 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10462 
   10463 	if ((sc->sc_type >= WM_T_82575)
   10464 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10465 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10466 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10467 	else
   10468 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10469 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10470 
   10471 	/*
   10472 	 * SWD Pins:
   10473 	 *
   10474 	 *	0 = Link LED (output)
   10475 	 *	1 = Loss Of Signal (input)
   10476 	 */
   10477 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10478 
   10479 	/* XXX Perhaps this is only for TBI */
   10480 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10481 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10482 
   10483 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10484 		sc->sc_ctrl &= ~CTRL_LRST;
   10485 
   10486 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10487 
   10488 #define	ADD(ss, mm, dd)							\
   10489 do {									\
   10490 	aprint_normal("%s%s", sep, ss);					\
   10491 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10492 	sep = ", ";							\
   10493 } while (/*CONSTCOND*/0)
   10494 
   10495 	aprint_normal_dev(sc->sc_dev, "");
   10496 
   10497 	if (sc->sc_type == WM_T_I354) {
   10498 		uint32_t status;
   10499 
   10500 		status = CSR_READ(sc, WMREG_STATUS);
   10501 		if (((status & STATUS_2P5_SKU) != 0)
   10502 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10503 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10504 		} else
   10505 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10506 	} else if (sc->sc_type == WM_T_82545) {
   10507 		/* Only 82545 is LX (XXX except SFP) */
   10508 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10509 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10510 	} else {
   10511 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10512 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10513 	}
   10514 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10515 	aprint_normal("\n");
   10516 
   10517 #undef ADD
   10518 
   10519 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10520 }
   10521 
   10522 /*
   10523  * wm_tbi_mediachange:	[ifmedia interface function]
   10524  *
   10525  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10526  */
   10527 static int
   10528 wm_tbi_mediachange(struct ifnet *ifp)
   10529 {
   10530 	struct wm_softc *sc = ifp->if_softc;
   10531 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10532 	uint32_t status;
   10533 	int i;
   10534 
   10535 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10536 		/* XXX need some work for >= 82571 and < 82575 */
   10537 		if (sc->sc_type < WM_T_82575)
   10538 			return 0;
   10539 	}
   10540 
   10541 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10542 	    || (sc->sc_type >= WM_T_82575))
   10543 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10544 
   10545 	sc->sc_ctrl &= ~CTRL_LRST;
   10546 	sc->sc_txcw = TXCW_ANE;
   10547 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10548 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10549 	else if (ife->ifm_media & IFM_FDX)
   10550 		sc->sc_txcw |= TXCW_FD;
   10551 	else
   10552 		sc->sc_txcw |= TXCW_HD;
   10553 
   10554 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10555 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10556 
   10557 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10558 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10559 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10560 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10561 	CSR_WRITE_FLUSH(sc);
   10562 	delay(1000);
   10563 
   10564 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10565 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10566 
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal; on older chips the sense
	 * is inverted (0 means signal detected).
	 */
   10571 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10572 		/* Have signal; wait for the link to come up. */
   10573 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10574 			delay(10000);
   10575 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10576 				break;
   10577 		}
   10578 
   10579 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10580 			    device_xname(sc->sc_dev),i));
   10581 
   10582 		status = CSR_READ(sc, WMREG_STATUS);
   10583 		DPRINTF(WM_DEBUG_LINK,
   10584 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10585 			device_xname(sc->sc_dev),status, STATUS_LU));
   10586 		if (status & STATUS_LU) {
   10587 			/* Link is up. */
   10588 			DPRINTF(WM_DEBUG_LINK,
   10589 			    ("%s: LINK: set media -> link up %s\n",
   10590 			    device_xname(sc->sc_dev),
   10591 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10592 
   10593 			/*
   10594 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10595 			 * so we should update sc->sc_ctrl
   10596 			 */
   10597 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10598 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10599 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10600 			if (status & STATUS_FD)
   10601 				sc->sc_tctl |=
   10602 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10603 			else
   10604 				sc->sc_tctl |=
   10605 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10606 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10607 				sc->sc_fcrtl |= FCRTL_XONE;
   10608 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10609 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10610 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10611 				      sc->sc_fcrtl);
   10612 			sc->sc_tbi_linkup = 1;
   10613 		} else {
   10614 			if (i == WM_LINKUP_TIMEOUT)
   10615 				wm_check_for_link(sc);
   10616 			/* Link is down. */
   10617 			DPRINTF(WM_DEBUG_LINK,
   10618 			    ("%s: LINK: set media -> link down\n",
   10619 			    device_xname(sc->sc_dev)));
   10620 			sc->sc_tbi_linkup = 0;
   10621 		}
   10622 	} else {
   10623 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10624 		    device_xname(sc->sc_dev)));
   10625 		sc->sc_tbi_linkup = 0;
   10626 	}
   10627 
   10628 	wm_tbi_serdes_set_linkled(sc);
   10629 
   10630 	return 0;
   10631 }
   10632 
   10633 /*
   10634  * wm_tbi_mediastatus:	[ifmedia interface function]
   10635  *
   10636  *	Get the current interface media status on a 1000BASE-X device.
   10637  */
   10638 static void
   10639 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10640 {
   10641 	struct wm_softc *sc = ifp->if_softc;
   10642 	uint32_t ctrl, status;
   10643 
   10644 	ifmr->ifm_status = IFM_AVALID;
   10645 	ifmr->ifm_active = IFM_ETHER;
   10646 
   10647 	status = CSR_READ(sc, WMREG_STATUS);
   10648 	if ((status & STATUS_LU) == 0) {
   10649 		ifmr->ifm_active |= IFM_NONE;
   10650 		return;
   10651 	}
   10652 
   10653 	ifmr->ifm_status |= IFM_ACTIVE;
   10654 	/* Only 82545 is LX */
   10655 	if (sc->sc_type == WM_T_82545)
   10656 		ifmr->ifm_active |= IFM_1000_LX;
   10657 	else
   10658 		ifmr->ifm_active |= IFM_1000_SX;
   10659 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10660 		ifmr->ifm_active |= IFM_FDX;
   10661 	else
   10662 		ifmr->ifm_active |= IFM_HDX;
   10663 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10664 	if (ctrl & CTRL_RFCE)
   10665 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10666 	if (ctrl & CTRL_TFCE)
   10667 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10668 }
   10669 
   10670 /* XXX TBI only */
   10671 static int
   10672 wm_check_for_link(struct wm_softc *sc)
   10673 {
   10674 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10675 	uint32_t rxcw;
   10676 	uint32_t ctrl;
   10677 	uint32_t status;
   10678 	uint32_t sig;
   10679 
   10680 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10681 		/* XXX need some work for >= 82571 */
   10682 		if (sc->sc_type >= WM_T_82571) {
   10683 			sc->sc_tbi_linkup = 1;
   10684 			return 0;
   10685 		}
   10686 	}
   10687 
   10688 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10689 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10690 	status = CSR_READ(sc, WMREG_STATUS);
   10691 
   10692 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10693 
   10694 	DPRINTF(WM_DEBUG_LINK,
   10695 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10696 		device_xname(sc->sc_dev), __func__,
   10697 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10698 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10699 
   10700 	/*
   10701 	 * SWDPIN   LU RXCW
   10702 	 *      0    0    0
   10703 	 *      0    0    1	(should not happen)
   10704 	 *      0    1    0	(should not happen)
   10705 	 *      0    1    1	(should not happen)
   10706 	 *      1    0    0	Disable autonego and force linkup
   10707 	 *      1    0    1	got /C/ but not linkup yet
   10708 	 *      1    1    0	(linkup)
   10709 	 *      1    1    1	If IFM_AUTO, back to autonego
   10710 	 *
   10711 	 */
   10712 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10713 	    && ((status & STATUS_LU) == 0)
   10714 	    && ((rxcw & RXCW_C) == 0)) {
   10715 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10716 			__func__));
   10717 		sc->sc_tbi_linkup = 0;
   10718 		/* Disable auto-negotiation in the TXCW register */
   10719 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10720 
   10721 		/*
   10722 		 * Force link-up and also force full-duplex.
   10723 		 *
		 * NOTE: The hardware updates TFCE and RFCE in CTRL
		 * automatically, so base sc->sc_ctrl on the value just read.
   10726 		 */
   10727 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10728 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10729 	} else if (((status & STATUS_LU) != 0)
   10730 	    && ((rxcw & RXCW_C) != 0)
   10731 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10732 		sc->sc_tbi_linkup = 1;
   10733 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10734 			__func__));
   10735 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10736 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10737 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10738 	    && ((rxcw & RXCW_C) != 0)) {
   10739 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10740 	} else {
   10741 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10742 			status));
   10743 	}
   10744 
   10745 	return 0;
   10746 }
   10747 
   10748 /*
   10749  * wm_tbi_tick:
   10750  *
   10751  *	Check the link on TBI devices.
   10752  *	This function acts as mii_tick().
   10753  */
   10754 static void
   10755 wm_tbi_tick(struct wm_softc *sc)
   10756 {
   10757 	struct mii_data *mii = &sc->sc_mii;
   10758 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10759 	uint32_t status;
   10760 
   10761 	KASSERT(WM_CORE_LOCKED(sc));
   10762 
   10763 	status = CSR_READ(sc, WMREG_STATUS);
   10764 
   10765 	/* XXX is this needed? */
   10766 	(void)CSR_READ(sc, WMREG_RXCW);
   10767 	(void)CSR_READ(sc, WMREG_CTRL);
   10768 
   10769 	/* set link status */
   10770 	if ((status & STATUS_LU) == 0) {
   10771 		DPRINTF(WM_DEBUG_LINK,
   10772 		    ("%s: LINK: checklink -> down\n",
   10773 			device_xname(sc->sc_dev)));
   10774 		sc->sc_tbi_linkup = 0;
   10775 	} else if (sc->sc_tbi_linkup == 0) {
   10776 		DPRINTF(WM_DEBUG_LINK,
   10777 		    ("%s: LINK: checklink -> up %s\n",
   10778 			device_xname(sc->sc_dev),
   10779 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10780 		sc->sc_tbi_linkup = 1;
   10781 		sc->sc_tbi_serdes_ticks = 0;
   10782 	}
   10783 
   10784 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10785 		goto setled;
   10786 
   10787 	if ((status & STATUS_LU) == 0) {
   10788 		sc->sc_tbi_linkup = 0;
   10789 		/* If the timer expired, retry autonegotiation */
   10790 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10791 		    && (++sc->sc_tbi_serdes_ticks
   10792 			>= sc->sc_tbi_serdes_anegticks)) {
   10793 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10794 			sc->sc_tbi_serdes_ticks = 0;
   10795 			/*
   10796 			 * Reset the link, and let autonegotiation do
   10797 			 * its thing
   10798 			 */
   10799 			sc->sc_ctrl |= CTRL_LRST;
   10800 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10801 			CSR_WRITE_FLUSH(sc);
   10802 			delay(1000);
   10803 			sc->sc_ctrl &= ~CTRL_LRST;
   10804 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10805 			CSR_WRITE_FLUSH(sc);
   10806 			delay(1000);
   10807 			CSR_WRITE(sc, WMREG_TXCW,
   10808 			    sc->sc_txcw & ~TXCW_ANE);
   10809 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10810 		}
   10811 	}
   10812 
   10813 setled:
   10814 	wm_tbi_serdes_set_linkled(sc);
   10815 }
   10816 
   10817 /* SERDES related */
   10818 static void
   10819 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10820 {
   10821 	uint32_t reg;
   10822 
   10823 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10824 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10825 		return;
   10826 
   10827 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10828 	reg |= PCS_CFG_PCS_EN;
   10829 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10830 
   10831 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10832 	reg &= ~CTRL_EXT_SWDPIN(3);
   10833 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10834 	CSR_WRITE_FLUSH(sc);
   10835 }
   10836 
   10837 static int
   10838 wm_serdes_mediachange(struct ifnet *ifp)
   10839 {
   10840 	struct wm_softc *sc = ifp->if_softc;
   10841 	bool pcs_autoneg = true; /* XXX */
   10842 	uint32_t ctrl_ext, pcs_lctl, reg;
   10843 
   10844 	/* XXX Currently, this function is not called on 8257[12] */
   10845 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10846 	    || (sc->sc_type >= WM_T_82575))
   10847 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10848 
   10849 	wm_serdes_power_up_link_82575(sc);
   10850 
   10851 	sc->sc_ctrl |= CTRL_SLU;
   10852 
   10853 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10854 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10855 
   10856 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10857 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10858 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10859 	case CTRL_EXT_LINK_MODE_SGMII:
   10860 		pcs_autoneg = true;
   10861 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10862 		break;
   10863 	case CTRL_EXT_LINK_MODE_1000KX:
   10864 		pcs_autoneg = false;
   10865 		/* FALLTHROUGH */
   10866 	default:
   10867 		if ((sc->sc_type == WM_T_82575)
   10868 		    || (sc->sc_type == WM_T_82576)) {
   10869 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10870 				pcs_autoneg = false;
   10871 		}
   10872 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10873 		    | CTRL_FRCFDX;
   10874 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10875 	}
   10876 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10877 
   10878 	if (pcs_autoneg) {
   10879 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10880 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10881 
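		/*
		 * XXX Always advertise both symmetric and asymmetric
		 * pause; the clear-then-set below should eventually be
		 * driven by the configured flow control instead.
		 */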
   10882 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10883 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10884 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10885 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10886 	} else
   10887 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10888 
   10889 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10890 
   10892 	return 0;
   10893 }
   10894 
   10895 static void
   10896 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10897 {
   10898 	struct wm_softc *sc = ifp->if_softc;
   10899 	struct mii_data *mii = &sc->sc_mii;
   10900 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10901 	uint32_t pcs_adv, pcs_lpab, reg;
   10902 
   10903 	ifmr->ifm_status = IFM_AVALID;
   10904 	ifmr->ifm_active = IFM_ETHER;
   10905 
   10906 	/* Check PCS */
   10907 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10908 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10909 		ifmr->ifm_active |= IFM_NONE;
   10910 		sc->sc_tbi_linkup = 0;
   10911 		goto setled;
   10912 	}
   10913 
   10914 	sc->sc_tbi_linkup = 1;
   10915 	ifmr->ifm_status |= IFM_ACTIVE;
   10916 	if (sc->sc_type == WM_T_I354) {
   10917 		uint32_t status;
   10918 
   10919 		status = CSR_READ(sc, WMREG_STATUS);
   10920 		if (((status & STATUS_2P5_SKU) != 0)
   10921 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10922 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10923 		} else
   10924 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10925 	} else {
   10926 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10927 		case PCS_LSTS_SPEED_10:
   10928 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10929 			break;
   10930 		case PCS_LSTS_SPEED_100:
   10931 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10932 			break;
   10933 		case PCS_LSTS_SPEED_1000:
   10934 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10935 			break;
   10936 		default:
   10937 			device_printf(sc->sc_dev, "Unknown speed\n");
   10938 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10939 			break;
   10940 		}
   10941 	}
   10942 	if ((reg & PCS_LSTS_FDX) != 0)
   10943 		ifmr->ifm_active |= IFM_FDX;
   10944 	else
   10945 		ifmr->ifm_active |= IFM_HDX;
   10946 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10947 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10948 		/* Check flow */
   10949 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10950 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10951 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10952 			goto setled;
   10953 		}
   10954 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10955 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10956 		DPRINTF(WM_DEBUG_LINK,
   10957 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
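		/*
		 * Resolve pause per IEEE 802.3 annex 28B: symmetric on
		 * both ends gives TX and RX pause; otherwise the
		 * asymmetric bits select a single direction, as encoded
		 * in the three cases below.
		 */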
   10958 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10959 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10960 			mii->mii_media_active |= IFM_FLOW
   10961 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10962 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10963 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10964 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10965 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10966 			mii->mii_media_active |= IFM_FLOW
   10967 			    | IFM_ETH_TXPAUSE;
   10968 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10969 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10970 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10971 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10972 			mii->mii_media_active |= IFM_FLOW
   10973 			    | IFM_ETH_RXPAUSE;
   10974 		}
   10975 	}
   10976 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10977 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10978 setled:
   10979 	wm_tbi_serdes_set_linkled(sc);
   10980 }
   10981 
   10982 /*
   10983  * wm_serdes_tick:
   10984  *
   10985  *	Check the link on serdes devices.
   10986  */
   10987 static void
   10988 wm_serdes_tick(struct wm_softc *sc)
   10989 {
   10990 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10991 	struct mii_data *mii = &sc->sc_mii;
   10992 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10993 	uint32_t reg;
   10994 
   10995 	KASSERT(WM_CORE_LOCKED(sc));
   10996 
   10997 	mii->mii_media_status = IFM_AVALID;
   10998 	mii->mii_media_active = IFM_ETHER;
   10999 
   11000 	/* Check PCS */
   11001 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11002 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11003 		mii->mii_media_status |= IFM_ACTIVE;
   11004 		sc->sc_tbi_linkup = 1;
   11005 		sc->sc_tbi_serdes_ticks = 0;
   11006 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11007 		if ((reg & PCS_LSTS_FDX) != 0)
   11008 			mii->mii_media_active |= IFM_FDX;
   11009 		else
   11010 			mii->mii_media_active |= IFM_HDX;
   11011 	} else {
   11012 		mii->mii_media_status |= IFM_NONE;
   11013 		sc->sc_tbi_linkup = 0;
   11014 		/* If the timer expired, retry autonegotiation */
   11015 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11016 		    && (++sc->sc_tbi_serdes_ticks
   11017 			>= sc->sc_tbi_serdes_anegticks)) {
   11018 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11019 			sc->sc_tbi_serdes_ticks = 0;
   11020 			/* XXX */
   11021 			wm_serdes_mediachange(ifp);
   11022 		}
   11023 	}
   11024 
   11025 	wm_tbi_serdes_set_linkled(sc);
   11026 }
   11027 
   11028 /* SFP related */
   11029 
   11030 static int
   11031 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11032 {
   11033 	uint32_t i2ccmd;
   11034 	int i;
   11035 
   11036 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11037 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11038 
   11039 	/* Poll the ready bit */
   11040 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11041 		delay(50);
   11042 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11043 		if (i2ccmd & I2CCMD_READY)
   11044 			break;
   11045 	}
   11046 	if ((i2ccmd & I2CCMD_READY) == 0)
   11047 		return -1;
   11048 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11049 		return -1;
   11050 
   11051 	*data = i2ccmd & 0x00ff;
   11052 
   11053 	return 0;
   11054 }
   11055 
   11056 static uint32_t
   11057 wm_sfp_get_media_type(struct wm_softc *sc)
   11058 {
   11059 	uint32_t ctrl_ext;
   11060 	uint8_t val = 0;
   11061 	int timeout = 3;
   11062 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11063 	int rv = -1;
   11064 
   11065 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11066 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11067 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11068 	CSR_WRITE_FLUSH(sc);
   11069 
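	/*
	 * The SFF identifier byte at SFF_SFP_ID_OFF distinguishes
	 * soldered-down modules from pluggable SFPs; retry a few times,
	 * since a freshly powered module may not answer immediately.
	 */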
   11070 	/* Read SFP module data */
   11071 	while (timeout) {
   11072 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11073 		if (rv == 0)
   11074 			break;
   11075 		delay(100*1000); /* XXX too big */
   11076 		timeout--;
   11077 	}
   11078 	if (rv != 0)
   11079 		goto out;
   11080 	switch (val) {
   11081 	case SFF_SFP_ID_SFF:
   11082 		aprint_normal_dev(sc->sc_dev,
   11083 		    "Module/Connector soldered to board\n");
   11084 		break;
   11085 	case SFF_SFP_ID_SFP:
   11086 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11087 		break;
   11088 	case SFF_SFP_ID_UNKNOWN:
   11089 		goto out;
   11090 	default:
   11091 		break;
   11092 	}
   11093 
   11094 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11095 	if (rv != 0) {
   11096 		goto out;
   11097 	}
   11098 
   11099 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11100 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11105 		sc->sc_flags |= WM_F_SGMII;
   11106 		mediatype = WM_MEDIATYPE_SERDES;
   11107 	}
   11108 
   11109 out:
   11110 	/* Restore I2C interface setting */
   11111 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11112 
   11113 	return mediatype;
   11114 }
   11115 
   11116 /*
   11117  * NVM related.
   11118  * Microwire, SPI (w/wo EERD) and Flash.
   11119  */
   11120 
   11121 /* Both spi and uwire */
   11122 
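/*
 * Both flavours are bit-banged through EECD: the host drives DI and
 * the SK clock, and samples DO.  Each clock phase is held for 2us, so
 * the serial clock runs at a few hundred kHz at most -- comfortably
 * slow for both Microwire and SPI parts.
 */
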
   11123 /*
   11124  * wm_eeprom_sendbits:
   11125  *
   11126  *	Send a series of bits to the EEPROM.
   11127  */
   11128 static void
   11129 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11130 {
   11131 	uint32_t reg;
   11132 	int x;
   11133 
   11134 	reg = CSR_READ(sc, WMREG_EECD);
   11135 
   11136 	for (x = nbits; x > 0; x--) {
   11137 		if (bits & (1U << (x - 1)))
   11138 			reg |= EECD_DI;
   11139 		else
   11140 			reg &= ~EECD_DI;
   11141 		CSR_WRITE(sc, WMREG_EECD, reg);
   11142 		CSR_WRITE_FLUSH(sc);
   11143 		delay(2);
   11144 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11145 		CSR_WRITE_FLUSH(sc);
   11146 		delay(2);
   11147 		CSR_WRITE(sc, WMREG_EECD, reg);
   11148 		CSR_WRITE_FLUSH(sc);
   11149 		delay(2);
   11150 	}
   11151 }
   11152 
   11153 /*
   11154  * wm_eeprom_recvbits:
   11155  *
   11156  *	Receive a series of bits from the EEPROM.
   11157  */
   11158 static void
   11159 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11160 {
   11161 	uint32_t reg, val;
   11162 	int x;
   11163 
   11164 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11165 
   11166 	val = 0;
   11167 	for (x = nbits; x > 0; x--) {
   11168 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11169 		CSR_WRITE_FLUSH(sc);
   11170 		delay(2);
   11171 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11172 			val |= (1U << (x - 1));
   11173 		CSR_WRITE(sc, WMREG_EECD, reg);
   11174 		CSR_WRITE_FLUSH(sc);
   11175 		delay(2);
   11176 	}
   11177 	*valp = val;
   11178 }
   11179 
   11180 /* Microwire */
   11181 
   11182 /*
   11183  * wm_nvm_read_uwire:
   11184  *
   11185  *	Read a word from the EEPROM using the MicroWire protocol.
   11186  */
   11187 static int
   11188 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11189 {
   11190 	uint32_t reg, val;
   11191 	int i;
   11192 
   11193 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11194 		device_xname(sc->sc_dev), __func__));
   11195 
   11196 	for (i = 0; i < wordcnt; i++) {
   11197 		/* Clear SK and DI. */
   11198 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11199 		CSR_WRITE(sc, WMREG_EECD, reg);
   11200 
   11201 		/*
   11202 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11203 		 * and Xen.
   11204 		 *
   11205 		 * We use this workaround only for 82540 because qemu's
		 * e1000 acts as an 82540.
   11207 		 */
   11208 		if (sc->sc_type == WM_T_82540) {
   11209 			reg |= EECD_SK;
   11210 			CSR_WRITE(sc, WMREG_EECD, reg);
   11211 			reg &= ~EECD_SK;
   11212 			CSR_WRITE(sc, WMREG_EECD, reg);
   11213 			CSR_WRITE_FLUSH(sc);
   11214 			delay(2);
   11215 		}
   11216 		/* XXX: end of workaround */
   11217 
   11218 		/* Set CHIP SELECT. */
   11219 		reg |= EECD_CS;
   11220 		CSR_WRITE(sc, WMREG_EECD, reg);
   11221 		CSR_WRITE_FLUSH(sc);
   11222 		delay(2);
   11223 
   11224 		/* Shift in the READ command. */
   11225 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11226 
   11227 		/* Shift in address. */
   11228 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11229 
   11230 		/* Shift out the data. */
   11231 		wm_eeprom_recvbits(sc, &val, 16);
   11232 		data[i] = val & 0xffff;
   11233 
   11234 		/* Clear CHIP SELECT. */
   11235 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11236 		CSR_WRITE(sc, WMREG_EECD, reg);
   11237 		CSR_WRITE_FLUSH(sc);
   11238 		delay(2);
   11239 	}
   11240 
   11241 	return 0;
   11242 }
   11243 
   11244 /* SPI */
   11245 
   11246 /*
   11247  * Set SPI and FLASH related information from the EECD register.
   11248  * For 82541 and 82547, the word size is taken from EEPROM.
   11249  */
   11250 static int
   11251 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11252 {
   11253 	int size;
   11254 	uint32_t reg;
   11255 	uint16_t data;
   11256 
   11257 	reg = CSR_READ(sc, WMREG_EECD);
   11258 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11259 
   11260 	/* Read the size of NVM from EECD by default */
   11261 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11262 	switch (sc->sc_type) {
   11263 	case WM_T_82541:
   11264 	case WM_T_82541_2:
   11265 	case WM_T_82547:
   11266 	case WM_T_82547_2:
   11267 		/* Set dummy value to access EEPROM */
   11268 		sc->sc_nvm_wordsize = 64;
   11269 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11270 		reg = data;
   11271 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11272 		if (size == 0)
   11273 			size = 6; /* 64 word size */
   11274 		else
   11275 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11276 		break;
   11277 	case WM_T_80003:
   11278 	case WM_T_82571:
   11279 	case WM_T_82572:
   11280 	case WM_T_82573: /* SPI case */
   11281 	case WM_T_82574: /* SPI case */
   11282 	case WM_T_82583: /* SPI case */
   11283 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11284 		if (size > 14)
   11285 			size = 14;
   11286 		break;
   11287 	case WM_T_82575:
   11288 	case WM_T_82576:
   11289 	case WM_T_82580:
   11290 	case WM_T_I350:
   11291 	case WM_T_I354:
   11292 	case WM_T_I210:
   11293 	case WM_T_I211:
   11294 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11295 		if (size > 15)
   11296 			size = 15;
   11297 		break;
   11298 	default:
   11299 		aprint_error_dev(sc->sc_dev,
   11300 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11301 		return -1;
   11302 		break;
   11303 	}
   11304 
   11305 	sc->sc_nvm_wordsize = 1 << size;
   11306 
   11307 	return 0;
   11308 }
   11309 
   11310 /*
   11311  * wm_nvm_ready_spi:
   11312  *
   11313  *	Wait for a SPI EEPROM to be ready for commands.
   11314  */
   11315 static int
   11316 wm_nvm_ready_spi(struct wm_softc *sc)
   11317 {
   11318 	uint32_t val;
   11319 	int usec;
   11320 
   11321 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11322 		device_xname(sc->sc_dev), __func__));
   11323 
   11324 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11325 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11326 		wm_eeprom_recvbits(sc, &val, 8);
   11327 		if ((val & SPI_SR_RDY) == 0)
   11328 			break;
   11329 	}
   11330 	if (usec >= SPI_MAX_RETRIES) {
   11331 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11332 		return 1;
   11333 	}
   11334 	return 0;
   11335 }
   11336 
   11337 /*
   11338  * wm_nvm_read_spi:
   11339  *
   11340  *	Read a work from the EEPROM using the SPI protocol.
   11341  */
   11342 static int
   11343 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11344 {
   11345 	uint32_t reg, val;
   11346 	int i;
   11347 	uint8_t opc;
   11348 
   11349 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11350 		device_xname(sc->sc_dev), __func__));
   11351 
   11352 	/* Clear SK and CS. */
   11353 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11354 	CSR_WRITE(sc, WMREG_EECD, reg);
   11355 	CSR_WRITE_FLUSH(sc);
   11356 	delay(2);
   11357 
   11358 	if (wm_nvm_ready_spi(sc))
   11359 		return 1;
   11360 
   11361 	/* Toggle CS to flush commands. */
   11362 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11363 	CSR_WRITE_FLUSH(sc);
   11364 	delay(2);
   11365 	CSR_WRITE(sc, WMREG_EECD, reg);
   11366 	CSR_WRITE_FLUSH(sc);
   11367 	delay(2);
   11368 
   11369 	opc = SPI_OPC_READ;
   11370 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11371 		opc |= SPI_OPC_A8;
   11372 
   11373 	wm_eeprom_sendbits(sc, opc, 8);
   11374 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11375 
   11376 	for (i = 0; i < wordcnt; i++) {
   11377 		wm_eeprom_recvbits(sc, &val, 16);
   11378 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11379 	}
   11380 
   11381 	/* Raise CS and clear SK. */
   11382 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11383 	CSR_WRITE(sc, WMREG_EECD, reg);
   11384 	CSR_WRITE_FLUSH(sc);
   11385 	delay(2);
   11386 
   11387 	return 0;
   11388 }
   11389 
   11390 /* Using with EERD */
   11391 
   11392 static int
   11393 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11394 {
   11395 	uint32_t attempts = 100000;
   11396 	uint32_t i, reg = 0;
   11397 	int32_t done = -1;
   11398 
   11399 	for (i = 0; i < attempts; i++) {
   11400 		reg = CSR_READ(sc, rw);
   11401 
   11402 		if (reg & EERD_DONE) {
   11403 			done = 0;
   11404 			break;
   11405 		}
   11406 		delay(5);
   11407 	}
   11408 
   11409 	return done;
   11410 }
   11411 
   11412 static int
   11413 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11414     uint16_t *data)
   11415 {
   11416 	int i, eerd = 0;
   11417 	int error = 0;
   11418 
   11419 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11420 		device_xname(sc->sc_dev), __func__));
   11421 
   11422 	for (i = 0; i < wordcnt; i++) {
   11423 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11424 
   11425 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11426 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11427 		if (error != 0)
   11428 			break;
   11429 
   11430 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11431 	}
   11432 
   11433 	return error;
   11434 }
   11435 
   11436 /* Flash */
   11437 
   11438 static int
   11439 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11440 {
   11441 	uint32_t eecd;
   11442 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11443 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11444 	uint8_t sig_byte = 0;
   11445 
   11446 	switch (sc->sc_type) {
   11447 	case WM_T_PCH_SPT:
   11448 		/*
   11449 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11450 		 * sector valid bits from the NVM.
   11451 		 */
   11452 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11453 		if ((*bank == 0) || (*bank == 1)) {
   11454 			aprint_error_dev(sc->sc_dev,
   11455 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11456 				*bank);
   11457 			return -1;
   11458 		} else {
   11459 			*bank = *bank - 2;
   11460 			return 0;
   11461 		}
   11462 	case WM_T_ICH8:
   11463 	case WM_T_ICH9:
   11464 		eecd = CSR_READ(sc, WMREG_EECD);
   11465 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11466 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11467 			return 0;
   11468 		}
   11469 		/* FALLTHROUGH */
   11470 	default:
   11471 		/* Default to 0 */
   11472 		*bank = 0;
   11473 
   11474 		/* Check bank 0 */
   11475 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11476 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11477 			*bank = 0;
   11478 			return 0;
   11479 		}
   11480 
   11481 		/* Check bank 1 */
   11482 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11483 		    &sig_byte);
   11484 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11485 			*bank = 1;
   11486 			return 0;
   11487 		}
   11488 	}
   11489 
   11490 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11491 		device_xname(sc->sc_dev)));
   11492 	return -1;
   11493 }
   11494 
   11495 /******************************************************************************
   11496  * This function does initial flash setup so that a new read/write/erase cycle
   11497  * can be started.
   11498  *
   11499  * sc - The pointer to the hw structure
   11500  ****************************************************************************/
   11501 static int32_t
   11502 wm_ich8_cycle_init(struct wm_softc *sc)
   11503 {
   11504 	uint16_t hsfsts;
   11505 	int32_t error = 1;
   11506 	int32_t i     = 0;
   11507 
   11508 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11509 
   11510 	/* May be check the Flash Des Valid bit in Hw status */
   11511 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11512 		return error;
   11513 	}
   11514 
   11515 	/* Clear FCERR in Hw status by writing 1 */
   11516 	/* Clear DAEL in Hw status by writing a 1 */
   11517 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11518 
   11519 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11520 
   11521 	/*
   11522 	 * Either we should have a hardware SPI cycle in progress bit to check
   11523 	 * against, in order to start a new cycle or FDONE bit should be
   11524 	 * changed in the hardware so that it is 1 after harware reset, which
   11525 	 * can then be used as an indication whether a cycle is in progress or
   11526 	 * has been completed .. we should also have some software semaphore
   11527 	 * mechanism to guard FDONE or the cycle in progress bit so that two
   11528 	 * threads access to those bits can be sequentiallized or a way so that
   11529 	 * 2 threads dont start the cycle at the same time
   11530 	 */
   11531 
   11532 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11533 		/*
   11534 		 * There is no cycle running at present, so we can start a
   11535 		 * cycle
   11536 		 */
   11537 
   11538 		/* Begin by setting Flash Cycle Done. */
   11539 		hsfsts |= HSFSTS_DONE;
   11540 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11541 		error = 0;
   11542 	} else {
   11543 		/*
   11544 		 * otherwise poll for sometime so the current cycle has a
   11545 		 * chance to end before giving up.
   11546 		 */
   11547 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11548 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11549 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11550 				error = 0;
   11551 				break;
   11552 			}
   11553 			delay(1);
   11554 		}
   11555 		if (error == 0) {
   11556 			/*
   11557 			 * Successful in waiting for previous cycle to timeout,
   11558 			 * now set the Flash Cycle Done.
   11559 			 */
   11560 			hsfsts |= HSFSTS_DONE;
   11561 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11562 		}
   11563 	}
   11564 	return error;
   11565 }
   11566 
   11567 /******************************************************************************
   11568  * This function starts a flash cycle and waits for its completion
   11569  *
   11570  * sc - The pointer to the hw structure
   11571  ****************************************************************************/
   11572 static int32_t
   11573 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11574 {
   11575 	uint16_t hsflctl;
   11576 	uint16_t hsfsts;
   11577 	int32_t error = 1;
   11578 	uint32_t i = 0;
   11579 
   11580 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11581 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11582 	hsflctl |= HSFCTL_GO;
   11583 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11584 
   11585 	/* Wait till FDONE bit is set to 1 */
   11586 	do {
   11587 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11588 		if (hsfsts & HSFSTS_DONE)
   11589 			break;
   11590 		delay(1);
   11591 		i++;
   11592 	} while (i < timeout);
   11593 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   11594 		error = 0;
   11595 
   11596 	return error;
   11597 }
   11598 
   11599 /******************************************************************************
   11600  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11601  *
   11602  * sc - The pointer to the hw structure
   11603  * index - The index of the byte or word to read.
   11604  * size - Size of data to read, 1=byte 2=word, 4=dword
   11605  * data - Pointer to the word to store the value read.
   11606  *****************************************************************************/
   11607 static int32_t
   11608 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11609     uint32_t size, uint32_t *data)
   11610 {
   11611 	uint16_t hsfsts;
   11612 	uint16_t hsflctl;
   11613 	uint32_t flash_linear_address;
   11614 	uint32_t flash_data = 0;
   11615 	int32_t error = 1;
   11616 	int32_t count = 0;
   11617 
   11618 	if (size < 1  || size > 4 || data == 0x0 ||
   11619 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11620 		return error;
   11621 
   11622 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11623 	    sc->sc_ich8_flash_base;
   11624 
   11625 	do {
   11626 		delay(1);
   11627 		/* Steps */
   11628 		error = wm_ich8_cycle_init(sc);
   11629 		if (error)
   11630 			break;
   11631 
   11632 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11633 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   11634 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11635 		    & HSFCTL_BCOUNT_MASK;
   11636 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11637 		if (sc->sc_type == WM_T_PCH_SPT) {
   11638 			/*
   11639 			 * In SPT, This register is in Lan memory space, not
   11640 			 * flash. Therefore, only 32 bit access is supported.
   11641 			 */
   11642 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11643 			    (uint32_t)hsflctl);
   11644 		} else
   11645 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11646 
   11647 		/*
   11648 		 * Write the last 24 bits of index into Flash Linear address
   11649 		 * field in Flash Address
   11650 		 */
   11651 		/* TODO: TBD maybe check the index against the size of flash */
   11652 
   11653 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11654 
   11655 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11656 
   11657 		/*
   11658 		 * Check if FCERR is set to 1, if set to 1, clear it and try
   11659 		 * the whole sequence a few more times, else read in (shift in)
   11660 		 * the Flash Data0, the order is least significant byte first
   11661 		 * msb to lsb
   11662 		 */
   11663 		if (error == 0) {
   11664 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11665 			if (size == 1)
   11666 				*data = (uint8_t)(flash_data & 0x000000FF);
   11667 			else if (size == 2)
   11668 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11669 			else if (size == 4)
   11670 				*data = (uint32_t)flash_data;
   11671 			break;
   11672 		} else {
   11673 			/*
   11674 			 * If we've gotten here, then things are probably
   11675 			 * completely hosed, but if the error condition is
   11676 			 * detected, it won't hurt to give it another try...
   11677 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11678 			 */
   11679 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11680 			if (hsfsts & HSFSTS_ERR) {
   11681 				/* Repeat for some time before giving up. */
   11682 				continue;
   11683 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11684 				break;
   11685 		}
   11686 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11687 
   11688 	return error;
   11689 }
   11690 
   11691 /******************************************************************************
   11692  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11693  *
   11694  * sc - pointer to wm_hw structure
   11695  * index - The index of the byte to read.
   11696  * data - Pointer to a byte to store the value read.
   11697  *****************************************************************************/
   11698 static int32_t
   11699 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11700 {
   11701 	int32_t status;
   11702 	uint32_t word = 0;
   11703 
   11704 	status = wm_read_ich8_data(sc, index, 1, &word);
   11705 	if (status == 0)
   11706 		*data = (uint8_t)word;
   11707 	else
   11708 		*data = 0;
   11709 
   11710 	return status;
   11711 }
   11712 
   11713 /******************************************************************************
   11714  * Reads a word from the NVM using the ICH8 flash access registers.
   11715  *
   11716  * sc - pointer to wm_hw structure
   11717  * index - The starting byte index of the word to read.
   11718  * data - Pointer to a word to store the value read.
   11719  *****************************************************************************/
   11720 static int32_t
   11721 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11722 {
   11723 	int32_t status;
   11724 	uint32_t word = 0;
   11725 
   11726 	status = wm_read_ich8_data(sc, index, 2, &word);
   11727 	if (status == 0)
   11728 		*data = (uint16_t)word;
   11729 	else
   11730 		*data = 0;
   11731 
   11732 	return status;
   11733 }
   11734 
   11735 /******************************************************************************
   11736  * Reads a dword from the NVM using the ICH8 flash access registers.
   11737  *
   11738  * sc - pointer to wm_hw structure
   11739  * index - The starting byte index of the word to read.
   11740  * data - Pointer to a word to store the value read.
   11741  *****************************************************************************/
   11742 static int32_t
   11743 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11744 {
   11745 	int32_t status;
   11746 
   11747 	status = wm_read_ich8_data(sc, index, 4, data);
   11748 	return status;
   11749 }
   11750 
   11751 /******************************************************************************
   11752  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11753  * register.
   11754  *
   11755  * sc - Struct containing variables accessed by shared code
   11756  * offset - offset of word in the EEPROM to read
   11757  * data - word read from the EEPROM
   11758  * words - number of words to read
   11759  *****************************************************************************/
   11760 static int
   11761 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11762 {
   11763 	int32_t  error = 0;
   11764 	uint32_t flash_bank = 0;
   11765 	uint32_t act_offset = 0;
   11766 	uint32_t bank_offset = 0;
   11767 	uint16_t word = 0;
   11768 	uint16_t i = 0;
   11769 
   11770 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11771 		device_xname(sc->sc_dev), __func__));
   11772 
   11773 	/*
   11774 	 * We need to know which is the valid flash bank.  In the event
   11775 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11776 	 * managing flash_bank.  So it cannot be trusted and needs
   11777 	 * to be updated with each read.
   11778 	 */
   11779 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11780 	if (error) {
   11781 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11782 			device_xname(sc->sc_dev)));
   11783 		flash_bank = 0;
   11784 	}
   11785 
   11786 	/*
   11787 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11788 	 * size
   11789 	 */
   11790 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11791 
   11792 	error = wm_get_swfwhw_semaphore(sc);
   11793 	if (error) {
   11794 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11795 		    __func__);
   11796 		return error;
   11797 	}
   11798 
   11799 	for (i = 0; i < words; i++) {
   11800 		/* The NVM part needs a byte offset, hence * 2 */
   11801 		act_offset = bank_offset + ((offset + i) * 2);
   11802 		error = wm_read_ich8_word(sc, act_offset, &word);
   11803 		if (error) {
   11804 			aprint_error_dev(sc->sc_dev,
   11805 			    "%s: failed to read NVM\n", __func__);
   11806 			break;
   11807 		}
   11808 		data[i] = word;
   11809 	}
   11810 
   11811 	wm_put_swfwhw_semaphore(sc);
   11812 	return error;
   11813 }
   11814 
   11815 /******************************************************************************
   11816  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11817  * register.
   11818  *
   11819  * sc - Struct containing variables accessed by shared code
   11820  * offset - offset of word in the EEPROM to read
   11821  * data - word read from the EEPROM
   11822  * words - number of words to read
   11823  *****************************************************************************/
   11824 static int
   11825 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11826 {
   11827 	int32_t  error = 0;
   11828 	uint32_t flash_bank = 0;
   11829 	uint32_t act_offset = 0;
   11830 	uint32_t bank_offset = 0;
   11831 	uint32_t dword = 0;
   11832 	uint16_t i = 0;
   11833 
   11834 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11835 		device_xname(sc->sc_dev), __func__));
   11836 
   11837 	/*
   11838 	 * We need to know which is the valid flash bank.  In the event
   11839 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11840 	 * managing flash_bank.  So it cannot be trusted and needs
   11841 	 * to be updated with each read.
   11842 	 */
   11843 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11844 	if (error) {
   11845 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11846 			device_xname(sc->sc_dev)));
   11847 		flash_bank = 0;
   11848 	}
   11849 
   11850 	/*
   11851 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11852 	 * size
   11853 	 */
   11854 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11855 
   11856 	error = wm_get_swfwhw_semaphore(sc);
   11857 	if (error) {
   11858 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11859 		    __func__);
   11860 		return error;
   11861 	}
   11862 
   11863 	for (i = 0; i < words; i++) {
   11864 		/* The NVM part needs a byte offset, hence * 2 */
   11865 		act_offset = bank_offset + ((offset + i) * 2);
   11866 		/* but we must read dword aligned, so mask ... */
   11867 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11868 		if (error) {
   11869 			aprint_error_dev(sc->sc_dev,
   11870 			    "%s: failed to read NVM\n", __func__);
   11871 			break;
   11872 		}
   11873 		/* ... and pick out low or high word */
   11874 		if ((act_offset & 0x2) == 0)
   11875 			data[i] = (uint16_t)(dword & 0xFFFF);
   11876 		else
   11877 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11878 	}
   11879 
   11880 	wm_put_swfwhw_semaphore(sc);
   11881 	return error;
   11882 }
   11883 
   11884 /* iNVM */
   11885 
   11886 static int
   11887 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11888 {
   11889 	int32_t  rv = 0;
   11890 	uint32_t invm_dword;
   11891 	uint16_t i;
   11892 	uint8_t record_type, word_address;
   11893 
   11894 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11895 		device_xname(sc->sc_dev), __func__));
   11896 
   11897 	for (i = 0; i < INVM_SIZE; i++) {
   11898 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11899 		/* Get record type */
   11900 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11901 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11902 			break;
   11903 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11904 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11905 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11906 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11907 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11908 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11909 			if (word_address == address) {
   11910 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11911 				rv = 0;
   11912 				break;
   11913 			}
   11914 		}
   11915 	}
   11916 
   11917 	return rv;
   11918 }
   11919 
   11920 static int
   11921 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11922 {
   11923 	int rv = 0;
   11924 	int i;
   11925 
   11926 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11927 		device_xname(sc->sc_dev), __func__));
   11928 
   11929 	for (i = 0; i < words; i++) {
   11930 		switch (offset + i) {
   11931 		case NVM_OFF_MACADDR:
   11932 		case NVM_OFF_MACADDR1:
   11933 		case NVM_OFF_MACADDR2:
   11934 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11935 			if (rv != 0) {
   11936 				data[i] = 0xffff;
   11937 				rv = -1;
   11938 			}
   11939 			break;
   11940 		case NVM_OFF_CFG2:
   11941 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11942 			if (rv != 0) {
   11943 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11944 				rv = 0;
   11945 			}
   11946 			break;
   11947 		case NVM_OFF_CFG4:
   11948 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11949 			if (rv != 0) {
   11950 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11951 				rv = 0;
   11952 			}
   11953 			break;
   11954 		case NVM_OFF_LED_1_CFG:
   11955 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11956 			if (rv != 0) {
   11957 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11958 				rv = 0;
   11959 			}
   11960 			break;
   11961 		case NVM_OFF_LED_0_2_CFG:
   11962 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11963 			if (rv != 0) {
   11964 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11965 				rv = 0;
   11966 			}
   11967 			break;
   11968 		case NVM_OFF_ID_LED_SETTINGS:
   11969 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11970 			if (rv != 0) {
   11971 				*data = ID_LED_RESERVED_FFFF;
   11972 				rv = 0;
   11973 			}
   11974 			break;
   11975 		default:
   11976 			DPRINTF(WM_DEBUG_NVM,
   11977 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11978 			*data = NVM_RESERVED_WORD;
   11979 			break;
   11980 		}
   11981 	}
   11982 
   11983 	return rv;
   11984 }
   11985 
   11986 /* Lock, detecting NVM type, validate checksum, version and read */
   11987 
   11988 /*
   11989  * wm_nvm_acquire:
   11990  *
   11991  *	Perform the EEPROM handshake required on some chips.
   11992  */
   11993 static int
   11994 wm_nvm_acquire(struct wm_softc *sc)
   11995 {
   11996 	uint32_t reg;
   11997 	int x;
   11998 	int ret = 0;
   11999 
   12000 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12001 		device_xname(sc->sc_dev), __func__));
   12002 
   12003 	if (sc->sc_type >= WM_T_ICH8) {
   12004 		ret = wm_get_nvm_ich8lan(sc);
   12005 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   12006 		ret = wm_get_swfwhw_semaphore(sc);
   12007 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   12008 		/* This will also do wm_get_swsm_semaphore() if needed */
   12009 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   12010 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12011 		ret = wm_get_swsm_semaphore(sc);
   12012 	}
   12013 
   12014 	if (ret) {
   12015 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   12016 			__func__);
   12017 		return 1;
   12018 	}
   12019 
   12020 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12021 		reg = CSR_READ(sc, WMREG_EECD);
   12022 
   12023 		/* Request EEPROM access. */
   12024 		reg |= EECD_EE_REQ;
   12025 		CSR_WRITE(sc, WMREG_EECD, reg);
   12026 
   12027 		/* ..and wait for it to be granted. */
   12028 		for (x = 0; x < 1000; x++) {
   12029 			reg = CSR_READ(sc, WMREG_EECD);
   12030 			if (reg & EECD_EE_GNT)
   12031 				break;
   12032 			delay(5);
   12033 		}
   12034 		if ((reg & EECD_EE_GNT) == 0) {
   12035 			aprint_error_dev(sc->sc_dev,
   12036 			    "could not acquire EEPROM GNT\n");
   12037 			reg &= ~EECD_EE_REQ;
   12038 			CSR_WRITE(sc, WMREG_EECD, reg);
   12039 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12040 				wm_put_swfwhw_semaphore(sc);
   12041 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12042 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12043 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12044 				wm_put_swsm_semaphore(sc);
   12045 			return 1;
   12046 		}
   12047 	}
   12048 
   12049 	return 0;
   12050 }
   12051 
   12052 /*
   12053  * wm_nvm_release:
   12054  *
   12055  *	Release the EEPROM mutex.
   12056  */
   12057 static void
   12058 wm_nvm_release(struct wm_softc *sc)
   12059 {
   12060 	uint32_t reg;
   12061 
   12062 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12063 		device_xname(sc->sc_dev), __func__));
   12064 
   12065 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12066 		reg = CSR_READ(sc, WMREG_EECD);
   12067 		reg &= ~EECD_EE_REQ;
   12068 		CSR_WRITE(sc, WMREG_EECD, reg);
   12069 	}
   12070 
   12071 	if (sc->sc_type >= WM_T_ICH8) {
   12072 		wm_put_nvm_ich8lan(sc);
   12073 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12074 		wm_put_swfwhw_semaphore(sc);
   12075 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   12076 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12077 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12078 		wm_put_swsm_semaphore(sc);
   12079 }
   12080 
   12081 static int
   12082 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12083 {
   12084 	uint32_t eecd = 0;
   12085 
   12086 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12087 	    || sc->sc_type == WM_T_82583) {
   12088 		eecd = CSR_READ(sc, WMREG_EECD);
   12089 
   12090 		/* Isolate bits 15 & 16 */
   12091 		eecd = ((eecd >> 15) & 0x03);
   12092 
   12093 		/* If both bits are set, device is Flash type */
   12094 		if (eecd == 0x03)
   12095 			return 0;
   12096 	}
   12097 	return 1;
   12098 }
   12099 
   12100 static int
   12101 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12102 {
   12103 	uint32_t eec;
   12104 
   12105 	eec = CSR_READ(sc, WMREG_EEC);
   12106 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12107 		return 1;
   12108 
   12109 	return 0;
   12110 }
   12111 
   12112 /*
   12113  * wm_nvm_validate_checksum
   12114  *
   12115  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12116  */
   12117 static int
   12118 wm_nvm_validate_checksum(struct wm_softc *sc)
   12119 {
   12120 	uint16_t checksum;
   12121 	uint16_t eeprom_data;
   12122 #ifdef WM_DEBUG
   12123 	uint16_t csum_wordaddr, valid_checksum;
   12124 #endif
   12125 	int i;
   12126 
   12127 	checksum = 0;
   12128 
   12129 	/* Don't check for I211 */
   12130 	if (sc->sc_type == WM_T_I211)
   12131 		return 0;
   12132 
   12133 #ifdef WM_DEBUG
   12134 	if (sc->sc_type == WM_T_PCH_LPT) {
   12135 		csum_wordaddr = NVM_OFF_COMPAT;
   12136 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12137 	} else {
   12138 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12139 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12140 	}
   12141 
   12142 	/* Dump EEPROM image for debug */
   12143 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12144 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12145 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12146 		/* XXX PCH_SPT? */
   12147 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12148 		if ((eeprom_data & valid_checksum) == 0) {
   12149 			DPRINTF(WM_DEBUG_NVM,
   12150 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12151 				device_xname(sc->sc_dev), eeprom_data,
   12152 				    valid_checksum));
   12153 		}
   12154 	}
   12155 
   12156 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12157 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12158 		for (i = 0; i < NVM_SIZE; i++) {
   12159 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12160 				printf("XXXX ");
   12161 			else
   12162 				printf("%04hx ", eeprom_data);
   12163 			if (i % 8 == 7)
   12164 				printf("\n");
   12165 		}
   12166 	}
   12167 
   12168 #endif /* WM_DEBUG */
   12169 
   12170 	for (i = 0; i < NVM_SIZE; i++) {
   12171 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12172 			return 1;
   12173 		checksum += eeprom_data;
   12174 	}
   12175 
   12176 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12177 #ifdef WM_DEBUG
   12178 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12179 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12180 #endif
   12181 	}
   12182 
   12183 	return 0;
   12184 }
   12185 
   12186 static void
   12187 wm_nvm_version_invm(struct wm_softc *sc)
   12188 {
   12189 	uint32_t dword;
   12190 
   12191 	/*
   12192 	 * Linux's code to decode version is very strange, so we don't
   12193 	 * obey that algorithm and just use word 61 as the document.
   12194 	 * Perhaps it's not perfect though...
   12195 	 *
   12196 	 * Example:
   12197 	 *
   12198 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12199 	 */
   12200 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12201 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12202 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12203 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12204 }
   12205 
   12206 static void
   12207 wm_nvm_version(struct wm_softc *sc)
   12208 {
   12209 	uint16_t major, minor, build, patch;
   12210 	uint16_t uid0, uid1;
   12211 	uint16_t nvm_data;
   12212 	uint16_t off;
   12213 	bool check_version = false;
   12214 	bool check_optionrom = false;
   12215 	bool have_build = false;
   12216 	bool have_uid = true;
   12217 
   12218 	/*
   12219 	 * Version format:
   12220 	 *
   12221 	 * XYYZ
   12222 	 * X0YZ
   12223 	 * X0YY
   12224 	 *
   12225 	 * Example:
   12226 	 *
   12227 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12228 	 *	82571	0x50a6	5.10.6?
   12229 	 *	82572	0x506a	5.6.10?
   12230 	 *	82572EI	0x5069	5.6.9?
   12231 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12232 	 *		0x2013	2.1.3?
   12233 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
   12234 	 */
   12235 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12236 	switch (sc->sc_type) {
   12237 	case WM_T_82571:
   12238 	case WM_T_82572:
   12239 	case WM_T_82574:
   12240 	case WM_T_82583:
   12241 		check_version = true;
   12242 		check_optionrom = true;
   12243 		have_build = true;
   12244 		break;
   12245 	case WM_T_82575:
   12246 	case WM_T_82576:
   12247 	case WM_T_82580:
   12248 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12249 			check_version = true;
   12250 		break;
   12251 	case WM_T_I211:
   12252 		wm_nvm_version_invm(sc);
   12253 		have_uid = false;
   12254 		goto printver;
   12255 	case WM_T_I210:
   12256 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12257 			wm_nvm_version_invm(sc);
   12258 			have_uid = false;
   12259 			goto printver;
   12260 		}
   12261 		/* FALLTHROUGH */
   12262 	case WM_T_I350:
   12263 	case WM_T_I354:
   12264 		check_version = true;
   12265 		check_optionrom = true;
   12266 		break;
   12267 	default:
   12268 		return;
   12269 	}
   12270 	if (check_version) {
   12271 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12272 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12273 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12274 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12275 			build = nvm_data & NVM_BUILD_MASK;
   12276 			have_build = true;
   12277 		} else
   12278 			minor = nvm_data & 0x00ff;
   12279 
   12280 		/* Decimal */
   12281 		minor = (minor / 16) * 10 + (minor % 16);
   12282 		sc->sc_nvm_ver_major = major;
   12283 		sc->sc_nvm_ver_minor = minor;
   12284 
   12285 printver:
   12286 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12287 		    sc->sc_nvm_ver_minor);
   12288 		if (have_build) {
   12289 			sc->sc_nvm_ver_build = build;
   12290 			aprint_verbose(".%d", build);
   12291 		}
   12292 	}
   12293 	if (check_optionrom) {
   12294 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12295 		/* Option ROM Version */
   12296 		if ((off != 0x0000) && (off != 0xffff)) {
   12297 			off += NVM_COMBO_VER_OFF;
   12298 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12299 			wm_nvm_read(sc, off, 1, &uid0);
   12300 			if ((uid0 != 0) && (uid0 != 0xffff)
   12301 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12302 				/* 16bits */
   12303 				major = uid0 >> 8;
   12304 				build = (uid0 << 8) | (uid1 >> 8);
   12305 				patch = uid1 & 0x00ff;
   12306 				aprint_verbose(", option ROM Version %d.%d.%d",
   12307 				    major, build, patch);
   12308 			}
   12309 		}
   12310 	}
   12311 
   12312 	if (have_uid) {
   12313 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12314 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12315 	}
   12316 }
   12317 
   12318 /*
   12319  * wm_nvm_read:
   12320  *
   12321  *	Read data from the serial EEPROM.
   12322  */
   12323 static int
   12324 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12325 {
   12326 	int rv;
   12327 
   12328 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12329 		device_xname(sc->sc_dev), __func__));
   12330 
   12331 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12332 		return 1;
   12333 
   12334 	if (wm_nvm_acquire(sc))
   12335 		return 1;
   12336 
   12337 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12338 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12339 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12340 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12341 	else if (sc->sc_type == WM_T_PCH_SPT)
   12342 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12343 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12344 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12345 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12346 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12347 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12348 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12349 	else
   12350 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12351 
   12352 	wm_nvm_release(sc);
   12353 	return rv;
   12354 }
   12355 
   12356 /*
   12357  * Hardware semaphores.
   12358  * Very complexed...
   12359  */
   12360 
   12361 static int
   12362 wm_get_null(struct wm_softc *sc)
   12363 {
   12364 
   12365 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12366 		device_xname(sc->sc_dev), __func__));
   12367 	return 0;
   12368 }
   12369 
   12370 static void
   12371 wm_put_null(struct wm_softc *sc)
   12372 {
   12373 
   12374 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12375 		device_xname(sc->sc_dev), __func__));
   12376 	return;
   12377 }
   12378 
   12379 /*
   12380  * Get hardware semaphore.
   12381  * Same as e1000_get_hw_semaphore_generic()
   12382  */
   12383 static int
   12384 wm_get_swsm_semaphore(struct wm_softc *sc)
   12385 {
   12386 	int32_t timeout;
   12387 	uint32_t swsm;
   12388 
   12389 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12390 		device_xname(sc->sc_dev), __func__));
   12391 	KASSERT(sc->sc_nvm_wordsize > 0);
   12392 
   12393 	/* Get the SW semaphore. */
   12394 	timeout = sc->sc_nvm_wordsize + 1;
   12395 	while (timeout) {
   12396 		swsm = CSR_READ(sc, WMREG_SWSM);
   12397 
   12398 		if ((swsm & SWSM_SMBI) == 0)
   12399 			break;
   12400 
   12401 		delay(50);
   12402 		timeout--;
   12403 	}
   12404 
   12405 	if (timeout == 0) {
   12406 		aprint_error_dev(sc->sc_dev,
   12407 		    "could not acquire SWSM SMBI\n");
   12408 		return 1;
   12409 	}
   12410 
   12411 	/* Get the FW semaphore. */
   12412 	timeout = sc->sc_nvm_wordsize + 1;
   12413 	while (timeout) {
   12414 		swsm = CSR_READ(sc, WMREG_SWSM);
   12415 		swsm |= SWSM_SWESMBI;
   12416 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12417 		/* If we managed to set the bit we got the semaphore. */
   12418 		swsm = CSR_READ(sc, WMREG_SWSM);
   12419 		if (swsm & SWSM_SWESMBI)
   12420 			break;
   12421 
   12422 		delay(50);
   12423 		timeout--;
   12424 	}
   12425 
   12426 	if (timeout == 0) {
   12427 		aprint_error_dev(sc->sc_dev,
   12428 		    "could not acquire SWSM SWESMBI\n");
   12429 		/* Release semaphores */
   12430 		wm_put_swsm_semaphore(sc);
   12431 		return 1;
   12432 	}
   12433 	return 0;
   12434 }
   12435 
   12436 /*
   12437  * Put hardware semaphore.
   12438  * Same as e1000_put_hw_semaphore_generic()
   12439  */
   12440 static void
   12441 wm_put_swsm_semaphore(struct wm_softc *sc)
   12442 {
   12443 	uint32_t swsm;
   12444 
   12445 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12446 		device_xname(sc->sc_dev), __func__));
   12447 
   12448 	swsm = CSR_READ(sc, WMREG_SWSM);
   12449 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12450 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12451 }
   12452 
   12453 /*
   12454  * Get SW/FW semaphore.
   12455  * Same as e1000_acquire_swfw_sync_82575().
   12456  */
   12457 static int
   12458 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12459 {
   12460 	uint32_t swfw_sync;
   12461 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12462 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12463 	int timeout = 200;
   12464 
   12465 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12466 		device_xname(sc->sc_dev), __func__));
   12467 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12468 
   12469 	for (timeout = 0; timeout < 200; timeout++) {
   12470 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12471 			if (wm_get_swsm_semaphore(sc)) {
   12472 				aprint_error_dev(sc->sc_dev,
   12473 				    "%s: failed to get semaphore\n",
   12474 				    __func__);
   12475 				return 1;
   12476 			}
   12477 		}
   12478 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12479 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12480 			swfw_sync |= swmask;
   12481 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12482 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12483 				wm_put_swsm_semaphore(sc);
   12484 			return 0;
   12485 		}
   12486 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12487 			wm_put_swsm_semaphore(sc);
   12488 		delay(5000);
   12489 	}
   12490 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12491 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12492 	return 1;
   12493 }
   12494 
   12495 static void
   12496 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12497 {
   12498 	uint32_t swfw_sync;
   12499 
   12500 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12501 		device_xname(sc->sc_dev), __func__));
   12502 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12503 
   12504 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12505 		while (wm_get_swsm_semaphore(sc) != 0)
   12506 			continue;
   12507 	}
   12508 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12509 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12510 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12511 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12512 		wm_put_swsm_semaphore(sc);
   12513 }
   12514 
   12515 static int
   12516 wm_get_phy_82575(struct wm_softc *sc)
   12517 {
   12518 
   12519 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12520 		device_xname(sc->sc_dev), __func__));
   12521 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12522 }
   12523 
   12524 static void
   12525 wm_put_phy_82575(struct wm_softc *sc)
   12526 {
   12527 
   12528 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12529 		device_xname(sc->sc_dev), __func__));
   12530 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12531 }
   12532 
   12533 static int
   12534 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12535 {
   12536 	uint32_t ext_ctrl;
   12537 	int timeout = 200;
   12538 
   12539 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12540 		device_xname(sc->sc_dev), __func__));
   12541 
   12542 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12543 	for (timeout = 0; timeout < 200; timeout++) {
   12544 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12545 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12546 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12547 
   12548 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12549 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12550 			return 0;
   12551 		delay(5000);
   12552 	}
   12553 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12554 	    device_xname(sc->sc_dev), ext_ctrl);
   12555 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12556 	return 1;
   12557 }
   12558 
   12559 static void
   12560 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12561 {
   12562 	uint32_t ext_ctrl;
   12563 
   12564 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12565 		device_xname(sc->sc_dev), __func__));
   12566 
   12567 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12568 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12569 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12570 
   12571 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12572 }
   12573 
   12574 static int
   12575 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12576 {
   12577 	uint32_t ext_ctrl;
   12578 	int timeout;
   12579 
   12580 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12581 		device_xname(sc->sc_dev), __func__));
   12582 	mutex_enter(sc->sc_ich_phymtx);
   12583 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12584 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12585 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12586 			break;
   12587 		delay(1000);
   12588 	}
   12589 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12590 		printf("%s: SW has already locked the resource\n",
   12591 		    device_xname(sc->sc_dev));
   12592 		goto out;
   12593 	}
   12594 
   12595 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12596 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12597 	for (timeout = 0; timeout < 1000; timeout++) {
   12598 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12599 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12600 			break;
   12601 		delay(1000);
   12602 	}
   12603 	if (timeout >= 1000) {
   12604 		printf("%s: failed to acquire semaphore\n",
   12605 		    device_xname(sc->sc_dev));
   12606 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12607 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12608 		goto out;
   12609 	}
   12610 	return 0;
   12611 
   12612 out:
   12613 	mutex_exit(sc->sc_ich_phymtx);
   12614 	return 1;
   12615 }
   12616 
   12617 static void
   12618 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12619 {
   12620 	uint32_t ext_ctrl;
   12621 
   12622 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12623 		device_xname(sc->sc_dev), __func__));
   12624 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12625 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12626 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12627 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12628 	} else {
   12629 		printf("%s: Semaphore unexpectedly released\n",
   12630 		    device_xname(sc->sc_dev));
   12631 	}
   12632 
   12633 	mutex_exit(sc->sc_ich_phymtx);
   12634 }
   12635 
   12636 static int
   12637 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12638 {
   12639 
   12640 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12641 		device_xname(sc->sc_dev), __func__));
   12642 	mutex_enter(sc->sc_ich_nvmmtx);
   12643 
   12644 	return 0;
   12645 }
   12646 
   12647 static void
   12648 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12649 {
   12650 
   12651 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12652 		device_xname(sc->sc_dev), __func__));
   12653 	mutex_exit(sc->sc_ich_nvmmtx);
   12654 }
   12655 
   12656 static int
   12657 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12658 {
   12659 	int i = 0;
   12660 	uint32_t reg;
   12661 
   12662 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12663 		device_xname(sc->sc_dev), __func__));
   12664 
   12665 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12666 	do {
   12667 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12668 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12669 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12670 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12671 			break;
   12672 		delay(2*1000);
   12673 		i++;
   12674 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12675 
   12676 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12677 		wm_put_hw_semaphore_82573(sc);
   12678 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12679 		    device_xname(sc->sc_dev));
   12680 		return -1;
   12681 	}
   12682 
   12683 	return 0;
   12684 }
   12685 
   12686 static void
   12687 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12688 {
   12689 	uint32_t reg;
   12690 
   12691 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12692 		device_xname(sc->sc_dev), __func__));
   12693 
   12694 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12695 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12696 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12697 }
   12698 
   12699 /*
   12700  * Management mode and power management related subroutines.
   12701  * BMC, AMT, suspend/resume and EEE.
   12702  */
   12703 
   12704 #ifdef WM_WOL
   12705 static int
   12706 wm_check_mng_mode(struct wm_softc *sc)
   12707 {
   12708 	int rv;
   12709 
   12710 	switch (sc->sc_type) {
   12711 	case WM_T_ICH8:
   12712 	case WM_T_ICH9:
   12713 	case WM_T_ICH10:
   12714 	case WM_T_PCH:
   12715 	case WM_T_PCH2:
   12716 	case WM_T_PCH_LPT:
   12717 	case WM_T_PCH_SPT:
   12718 		rv = wm_check_mng_mode_ich8lan(sc);
   12719 		break;
   12720 	case WM_T_82574:
   12721 	case WM_T_82583:
   12722 		rv = wm_check_mng_mode_82574(sc);
   12723 		break;
   12724 	case WM_T_82571:
   12725 	case WM_T_82572:
   12726 	case WM_T_82573:
   12727 	case WM_T_80003:
   12728 		rv = wm_check_mng_mode_generic(sc);
   12729 		break;
   12730 	default:
   12731 		/* noting to do */
   12732 		rv = 0;
   12733 		break;
   12734 	}
   12735 
   12736 	return rv;
   12737 }
   12738 
   12739 static int
   12740 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12741 {
   12742 	uint32_t fwsm;
   12743 
   12744 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12745 
   12746 	if (((fwsm & FWSM_FW_VALID) != 0)
   12747 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12748 		return 1;
   12749 
   12750 	return 0;
   12751 }
   12752 
   12753 static int
   12754 wm_check_mng_mode_82574(struct wm_softc *sc)
   12755 {
   12756 	uint16_t data;
   12757 
   12758 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12759 
   12760 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12761 		return 1;
   12762 
   12763 	return 0;
   12764 }
   12765 
   12766 static int
   12767 wm_check_mng_mode_generic(struct wm_softc *sc)
   12768 {
   12769 	uint32_t fwsm;
   12770 
   12771 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12772 
   12773 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12774 		return 1;
   12775 
   12776 	return 0;
   12777 }
   12778 #endif /* WM_WOL */
   12779 
   12780 static int
   12781 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12782 {
   12783 	uint32_t manc, fwsm, factps;
   12784 
   12785 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12786 		return 0;
   12787 
   12788 	manc = CSR_READ(sc, WMREG_MANC);
   12789 
   12790 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12791 		device_xname(sc->sc_dev), manc));
   12792 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12793 		return 0;
   12794 
   12795 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12796 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12797 		factps = CSR_READ(sc, WMREG_FACTPS);
   12798 		if (((factps & FACTPS_MNGCG) == 0)
   12799 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12800 			return 1;
   12801 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12802 		uint16_t data;
   12803 
   12804 		factps = CSR_READ(sc, WMREG_FACTPS);
   12805 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12806 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12807 			device_xname(sc->sc_dev), factps, data));
   12808 		if (((factps & FACTPS_MNGCG) == 0)
   12809 		    && ((data & NVM_CFG2_MNGM_MASK)
   12810 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12811 			return 1;
   12812 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12813 	    && ((manc & MANC_ASF_EN) == 0))
   12814 		return 1;
   12815 
   12816 	return 0;
   12817 }
   12818 
   12819 static bool
   12820 wm_phy_resetisblocked(struct wm_softc *sc)
   12821 {
   12822 	bool blocked = false;
   12823 	uint32_t reg;
   12824 	int i = 0;
   12825 
   12826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12827 		device_xname(sc->sc_dev), __func__));
   12828 
   12829 	switch (sc->sc_type) {
   12830 	case WM_T_ICH8:
   12831 	case WM_T_ICH9:
   12832 	case WM_T_ICH10:
   12833 	case WM_T_PCH:
   12834 	case WM_T_PCH2:
   12835 	case WM_T_PCH_LPT:
   12836 	case WM_T_PCH_SPT:
   12837 		do {
   12838 			reg = CSR_READ(sc, WMREG_FWSM);
   12839 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12840 				blocked = true;
   12841 				delay(10*1000);
   12842 				continue;
   12843 			}
   12844 			blocked = false;
   12845 		} while (blocked && (i++ < 30));
   12846 		return blocked;
   12847 		break;
   12848 	case WM_T_82571:
   12849 	case WM_T_82572:
   12850 	case WM_T_82573:
   12851 	case WM_T_82574:
   12852 	case WM_T_82583:
   12853 	case WM_T_80003:
   12854 		reg = CSR_READ(sc, WMREG_MANC);
   12855 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12856 			return true;
   12857 		else
   12858 			return false;
   12859 		break;
   12860 	default:
   12861 		/* no problem */
   12862 		break;
   12863 	}
   12864 
   12865 	return false;
   12866 }
   12867 
   12868 static void
   12869 wm_get_hw_control(struct wm_softc *sc)
   12870 {
   12871 	uint32_t reg;
   12872 
   12873 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12874 		device_xname(sc->sc_dev), __func__));
   12875 
   12876 	if (sc->sc_type == WM_T_82573) {
   12877 		reg = CSR_READ(sc, WMREG_SWSM);
   12878 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12879 	} else if (sc->sc_type >= WM_T_82571) {
   12880 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12881 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12882 	}
   12883 }
   12884 
   12885 static void
   12886 wm_release_hw_control(struct wm_softc *sc)
   12887 {
   12888 	uint32_t reg;
   12889 
   12890 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12891 		device_xname(sc->sc_dev), __func__));
   12892 
   12893 	if (sc->sc_type == WM_T_82573) {
   12894 		reg = CSR_READ(sc, WMREG_SWSM);
   12895 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12896 	} else if (sc->sc_type >= WM_T_82571) {
   12897 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12898 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12899 	}
   12900 }
   12901 
   12902 static void
   12903 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12904 {
   12905 	uint32_t reg;
   12906 
   12907 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12908 		device_xname(sc->sc_dev), __func__));
   12909 
   12910 	if (sc->sc_type < WM_T_PCH2)
   12911 		return;
   12912 
   12913 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12914 
   12915 	if (gate)
   12916 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12917 	else
   12918 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12919 
   12920 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12921 }
   12922 
   12923 static void
   12924 wm_smbustopci(struct wm_softc *sc)
   12925 {
   12926 	uint32_t fwsm, reg;
   12927 	int rv = 0;
   12928 
   12929 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12930 		device_xname(sc->sc_dev), __func__));
   12931 
   12932 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12933 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12934 
   12935 	/* Disable ULP */
   12936 	wm_ulp_disable(sc);
   12937 
   12938 	/* Acquire PHY semaphore */
   12939 	sc->phy.acquire(sc);
   12940 
   12941 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12942 	switch (sc->sc_type) {
   12943 	case WM_T_PCH_LPT:
   12944 	case WM_T_PCH_SPT:
   12945 		if (wm_phy_is_accessible_pchlan(sc))
   12946 			break;
   12947 
   12948 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12949 		reg |= CTRL_EXT_FORCE_SMBUS;
   12950 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12951 #if 0
   12952 		/* XXX Isn't this required??? */
   12953 		CSR_WRITE_FLUSH(sc);
   12954 #endif
   12955 		delay(50 * 1000);
   12956 		/* FALLTHROUGH */
   12957 	case WM_T_PCH2:
   12958 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12959 			break;
   12960 		/* FALLTHROUGH */
   12961 	case WM_T_PCH:
   12962 		if (sc->sc_type == WM_T_PCH)
   12963 			if ((fwsm & FWSM_FW_VALID) != 0)
   12964 				break;
   12965 
   12966 		if (wm_phy_resetisblocked(sc) == true) {
   12967 			printf("XXX reset is blocked(3)\n");
   12968 			break;
   12969 		}
   12970 
   12971 		wm_toggle_lanphypc_pch_lpt(sc);
   12972 
   12973 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12974 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12975 				break;
   12976 
   12977 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12978 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12979 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12980 
   12981 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12982 				break;
   12983 			rv = -1;
   12984 		}
   12985 		break;
   12986 	default:
   12987 		break;
   12988 	}
   12989 
   12990 	/* Release semaphore */
   12991 	sc->phy.release(sc);
   12992 
   12993 	if (rv == 0) {
   12994 		if (wm_phy_resetisblocked(sc)) {
   12995 			printf("XXX reset is blocked(4)\n");
   12996 			goto out;
   12997 		}
   12998 		wm_reset_phy(sc);
   12999 		if (wm_phy_resetisblocked(sc))
   13000 			printf("XXX reset is blocked(4)\n");
   13001 	}
   13002 
   13003 out:
   13004 	/*
   13005 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13006 	 */
   13007 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13008 		delay(10*1000);
   13009 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13010 	}
   13011 }
   13012 
   13013 static void
   13014 wm_init_manageability(struct wm_softc *sc)
   13015 {
   13016 
   13017 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13018 		device_xname(sc->sc_dev), __func__));
   13019 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13020 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13021 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13022 
   13023 		/* Disable hardware interception of ARP */
   13024 		manc &= ~MANC_ARP_EN;
   13025 
   13026 		/* Enable receiving management packets to the host */
   13027 		if (sc->sc_type >= WM_T_82571) {
   13028 			manc |= MANC_EN_MNG2HOST;
    13029 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13030 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13031 		}
   13032 
   13033 		CSR_WRITE(sc, WMREG_MANC, manc);
   13034 	}
   13035 }
   13036 
   13037 static void
   13038 wm_release_manageability(struct wm_softc *sc)
   13039 {
   13040 
   13041 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13042 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13043 
   13044 		manc |= MANC_ARP_EN;
   13045 		if (sc->sc_type >= WM_T_82571)
   13046 			manc &= ~MANC_EN_MNG2HOST;
   13047 
   13048 		CSR_WRITE(sc, WMREG_MANC, manc);
   13049 	}
   13050 }
   13051 
   13052 static void
   13053 wm_get_wakeup(struct wm_softc *sc)
   13054 {
   13055 
   13056 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13057 	switch (sc->sc_type) {
   13058 	case WM_T_82573:
   13059 	case WM_T_82583:
   13060 		sc->sc_flags |= WM_F_HAS_AMT;
   13061 		/* FALLTHROUGH */
   13062 	case WM_T_80003:
   13063 	case WM_T_82575:
   13064 	case WM_T_82576:
   13065 	case WM_T_82580:
   13066 	case WM_T_I350:
   13067 	case WM_T_I354:
   13068 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13069 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13070 		/* FALLTHROUGH */
   13071 	case WM_T_82541:
   13072 	case WM_T_82541_2:
   13073 	case WM_T_82547:
   13074 	case WM_T_82547_2:
   13075 	case WM_T_82571:
   13076 	case WM_T_82572:
   13077 	case WM_T_82574:
   13078 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13079 		break;
   13080 	case WM_T_ICH8:
   13081 	case WM_T_ICH9:
   13082 	case WM_T_ICH10:
   13083 	case WM_T_PCH:
   13084 	case WM_T_PCH2:
   13085 	case WM_T_PCH_LPT:
   13086 	case WM_T_PCH_SPT:
   13087 		sc->sc_flags |= WM_F_HAS_AMT;
   13088 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13089 		break;
   13090 	default:
   13091 		break;
   13092 	}
   13093 
   13094 	/* 1: HAS_MANAGE */
   13095 	if (wm_enable_mng_pass_thru(sc) != 0)
   13096 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13097 
    13098 	/*
    13099 	 * Note that the WOL flags are set only later, after the EEPROM
    13100 	 * handling has been done.
    13101 	 */
   13102 }
   13103 
    13104 /*
    13105  * Unconfigure Ultra Low Power mode.
    13106  * Only for PCH_LPT and newer, minus the I217/I218(LM2/V2) variants (below).
    13107  */
   13108 static void
   13109 wm_ulp_disable(struct wm_softc *sc)
   13110 {
   13111 	uint32_t reg;
   13112 	int i = 0;
   13113 
   13114 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13115 		device_xname(sc->sc_dev), __func__));
   13116 	/* Exclude old devices */
   13117 	if ((sc->sc_type < WM_T_PCH_LPT)
   13118 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13119 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13120 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13121 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13122 		return;
   13123 
   13124 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    13125 		/* Request that the ME unconfigure ULP mode in the PHY */
   13126 		reg = CSR_READ(sc, WMREG_H2ME);
   13127 		reg &= ~H2ME_ULP;
   13128 		reg |= H2ME_ENFORCE_SETTINGS;
   13129 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13130 
   13131 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13132 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13133 			if (i++ == 30) {
   13134 				printf("%s timed out\n", __func__);
   13135 				return;
   13136 			}
   13137 			delay(10 * 1000);
   13138 		}
   13139 		reg = CSR_READ(sc, WMREG_H2ME);
   13140 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13141 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13142 
   13143 		return;
   13144 	}
   13145 
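	/*
	 * No manageability firmware is present to do it for us, so run
	 * the ULP exit sequence by hand: toggle LANPHYPC, unforce SMBus
	 * mode in both the PHY and the MAC, re-enable K1 and clear the
	 * ULP configuration bits.
	 */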
   13146 	/* Acquire semaphore */
   13147 	sc->phy.acquire(sc);
   13148 
   13149 	/* Toggle LANPHYPC */
   13150 	wm_toggle_lanphypc_pch_lpt(sc);
   13151 
   13152 	/* Unforce SMBus mode in PHY */
   13153 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13154 	if (reg == 0x0000 || reg == 0xffff) {
   13155 		uint32_t reg2;
   13156 
   13157 		printf("%s: Force SMBus first.\n", __func__);
   13158 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13159 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13160 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13161 		delay(50 * 1000);
   13162 
   13163 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13164 	}
   13165 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13166 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13167 
   13168 	/* Unforce SMBus mode in MAC */
   13169 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13170 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13171 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13172 
   13173 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13174 	reg |= HV_PM_CTRL_K1_ENA;
   13175 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13176 
   13177 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13178 	reg &= ~(I218_ULP_CONFIG1_IND
   13179 	    | I218_ULP_CONFIG1_STICKY_ULP
   13180 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13181 	    | I218_ULP_CONFIG1_WOL_HOST
   13182 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13183 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13184 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13185 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13186 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13187 	reg |= I218_ULP_CONFIG1_START;
   13188 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13189 
   13190 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13191 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13192 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13193 
   13194 	/* Release semaphore */
   13195 	sc->phy.release(sc);
   13196 	wm_gmii_reset(sc);
   13197 	delay(50 * 1000);
   13198 }
   13199 
   13200 /* WOL in the newer chipset interfaces (pchlan) */
   13201 static void
   13202 wm_enable_phy_wakeup(struct wm_softc *sc)
   13203 {
   13204 #if 0
   13205 	uint16_t preg;
   13206 
   13207 	/* Copy MAC RARs to PHY RARs */
   13208 
   13209 	/* Copy MAC MTA to PHY MTA */
   13210 
   13211 	/* Configure PHY Rx Control register */
   13212 
   13213 	/* Enable PHY wakeup in MAC register */
   13214 
   13215 	/* Configure and enable PHY wakeup in PHY registers */
   13216 
   13217 	/* Activate PHY wakeup */
   13218 
   13219 	/* XXX */
   13220 #endif
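	/*
	 * XXX Not implemented yet, so PHY-based wakeup is never actually
	 * configured on PCH and newer devices.  An implementation would
	 * follow the outline above (cf. e1000_enable_phy_wakeup() in
	 * other e1000-derived drivers).
	 */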
   13221 }
   13222 
   13223 /* Power down workaround on D3 */
   13224 static void
   13225 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13226 {
   13227 	uint32_t reg;
   13228 	int i;
   13229 
   13230 	for (i = 0; i < 2; i++) {
   13231 		/* Disable link */
   13232 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13233 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13234 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13235 
   13236 		/*
   13237 		 * Call gig speed drop workaround on Gig disable before
   13238 		 * accessing any PHY registers
   13239 		 */
   13240 		if (sc->sc_type == WM_T_ICH8)
   13241 			wm_gig_downshift_workaround_ich8lan(sc);
   13242 
   13243 		/* Write VR power-down enable */
   13244 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13245 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13246 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13247 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13248 
   13249 		/* Read it back and test */
   13250 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13251 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13252 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13253 			break;
   13254 
   13255 		/* Issue PHY reset and repeat at most one more time */
   13256 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13257 	}
   13258 }
   13259 
   13260 static void
   13261 wm_enable_wakeup(struct wm_softc *sc)
   13262 {
   13263 	uint32_t reg, pmreg;
   13264 	pcireg_t pmode;
   13265 
   13266 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13267 		device_xname(sc->sc_dev), __func__));
   13268 
   13269 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13270 		&pmreg, NULL) == 0)
   13271 		return;
   13272 
   13273 	/* Advertise the wakeup capability */
   13274 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13275 	    | CTRL_SWDPIN(3));
   13276 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13277 
   13278 	/* ICH workaround */
   13279 	switch (sc->sc_type) {
   13280 	case WM_T_ICH8:
   13281 	case WM_T_ICH9:
   13282 	case WM_T_ICH10:
   13283 	case WM_T_PCH:
   13284 	case WM_T_PCH2:
   13285 	case WM_T_PCH_LPT:
   13286 	case WM_T_PCH_SPT:
   13287 		/* Disable gig during WOL */
   13288 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13289 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13290 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13291 		if (sc->sc_type == WM_T_PCH)
   13292 			wm_gmii_reset(sc);
   13293 
   13294 		/* Power down workaround */
   13295 		if (sc->sc_phytype == WMPHY_82577) {
   13296 			struct mii_softc *child;
   13297 
   13298 			/* Assume that the PHY is copper */
   13299 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13300 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13301 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13302 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25: magic */
   13303 		}
   13304 		break;
   13305 	default:
   13306 		break;
   13307 	}
   13308 
   13309 	/* Keep the laser running on fiber adapters */
   13310 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13311 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13312 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13313 		reg |= CTRL_EXT_SWDPIN(3);
   13314 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13315 	}
   13316 
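	/* Always wake on magic packet; the multicast case below is disabled. */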
   13317 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13318 #if 0	/* For wake on multicast packets */
   13319 	reg |= WUFC_MC;
   13320 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13321 #endif
   13322 
   13323 	if (sc->sc_type >= WM_T_PCH)
   13324 		wm_enable_phy_wakeup(sc);
   13325 	else {
   13326 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13327 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13328 	}
   13329 
   13330 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13331 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13332 		|| (sc->sc_type == WM_T_PCH2))
   13333 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13334 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13335 
   13336 	/* Request PME */
   13337 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13338 #if 0
   13339 	/* Disable WOL */
   13340 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13341 #else
   13342 	/* For WOL */
   13343 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13344 #endif
   13345 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13346 }
   13347 
    13348 /* LPLU (Low Power Link Up) */
   13349 
   13350 static void
   13351 wm_lplu_d0_disable(struct wm_softc *sc)
   13352 {
   13353 	uint32_t reg;
   13354 
   13355 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13356 		device_xname(sc->sc_dev), __func__));
   13357 
   13358 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13359 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13360 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13361 }
   13362 
   13363 static void
   13364 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13365 {
   13366 	uint32_t reg;
   13367 
   13368 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13369 		device_xname(sc->sc_dev), __func__));
   13370 
   13371 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13372 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13373 	reg |= HV_OEM_BITS_ANEGNOW;
   13374 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13375 }
   13376 
    13377 /* EEE (Energy Efficient Ethernet) */
   13378 
   13379 static void
   13380 wm_set_eee_i350(struct wm_softc *sc)
   13381 {
   13382 	uint32_t ipcnfg, eeer;
   13383 
   13384 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13385 	eeer = CSR_READ(sc, WMREG_EEER);
   13386 
   13387 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13388 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13389 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13390 		    | EEER_LPI_FC);
   13391 	} else {
   13392 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13393 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13394 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13395 		    | EEER_LPI_FC);
   13396 	}
   13397 
   13398 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13399 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13400 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13401 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13402 }
   13403 
    13404 /*
    13405  * Workarounds (mainly PHY related).
    13406  * Basically, PHY workarounds live in the PHY drivers.
    13407  */
   13408 
   13409 /* Work-around for 82566 Kumeran PCS lock loss */
   13410 static void
   13411 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13412 {
   13413 #if 0
   13414 	int miistatus, active, i;
   13415 	int reg;
   13416 
   13417 	miistatus = sc->sc_mii.mii_media_status;
   13418 
   13419 	/* If the link is not up, do nothing */
   13420 	if ((miistatus & IFM_ACTIVE) == 0)
   13421 		return;
   13422 
   13423 	active = sc->sc_mii.mii_media_active;
   13424 
   13425 	/* Nothing to do if the link is other than 1Gbps */
   13426 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13427 		return;
   13428 
   13429 	for (i = 0; i < 10; i++) {
   13430 		/* read twice */
   13431 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13432 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13433 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13434 			goto out;	/* GOOD! */
   13435 
   13436 		/* Reset the PHY */
   13437 		wm_gmii_reset(sc);
   13438 		delay(5*1000);
   13439 	}
   13440 
   13441 	/* Disable GigE link negotiation */
   13442 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13443 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13444 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13445 
   13446 	/*
   13447 	 * Call gig speed drop workaround on Gig disable before accessing
   13448 	 * any PHY registers.
   13449 	 */
   13450 	wm_gig_downshift_workaround_ich8lan(sc);
   13451 
   13452 out:
   13453 	return;
   13454 #endif
   13455 }
   13456 
   13457 /* WOL from S5 stops working */
   13458 static void
   13459 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13460 {
   13461 	uint16_t kmrn_reg;
   13462 
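	/*
	 * The workaround itself is just a pulse of the Kumeran near-end
	 * loopback diagnostic bit: set it, write it out, clear it and
	 * write it out again.
	 */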
   13463 	/* Only for igp3 */
   13464 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13465 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13466 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13467 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13468 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13469 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13470 	}
   13471 }
   13472 
    13473 /*
    13474  * Workarounds for the PCH's PHYs.
    13475  * XXX Should these be moved to a new PHY driver?
    13476  */
   13477 static void
   13478 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13479 {
   13480 
   13481 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13482 		device_xname(sc->sc_dev), __func__));
   13483 	KASSERT(sc->sc_type == WM_T_PCH);
   13484 
   13485 	if (sc->sc_phytype == WMPHY_82577)
   13486 		wm_set_mdio_slow_mode_hv(sc);
   13487 
    13488 	/* XXX Not implemented: (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
    13489 
    13490 	/* XXX Not implemented: (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13491 
   13492 	/* 82578 */
   13493 	if (sc->sc_phytype == WMPHY_82578) {
   13494 		struct mii_softc *child;
   13495 
   13496 		/*
   13497 		 * Return registers to default by doing a soft reset then
   13498 		 * writing 0x3140 to the control register
   13499 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13500 		 */
   13501 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13502 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13503 			PHY_RESET(child);
   13504 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13505 			    0x3140);
   13506 		}
   13507 	}
   13508 
   13509 	/* Select page 0 */
   13510 	sc->phy.acquire(sc);
   13511 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13512 	sc->phy.release(sc);
   13513 
    13514 	/*
    13515 	 * Configure the K1 Si workaround during PHY reset.  Assume there
    13516 	 * is a link so that K1 is disabled while the link runs at 1Gbps.
    13517 	 */
   13518 	wm_k1_gig_workaround_hv(sc, 1);
   13519 }
   13520 
   13521 static void
   13522 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13523 {
   13524 
   13525 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13526 		device_xname(sc->sc_dev), __func__));
   13527 	KASSERT(sc->sc_type == WM_T_PCH2);
   13528 
   13529 	wm_set_mdio_slow_mode_hv(sc);
   13530 }
   13531 
   13532 static int
   13533 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13534 {
   13535 	int k1_enable = sc->sc_nvm_k1_enabled;
   13536 
   13537 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13538 		device_xname(sc->sc_dev), __func__));
   13539 
   13540 	if (sc->phy.acquire(sc) != 0)
   13541 		return -1;
   13542 
   13543 	if (link) {
   13544 		k1_enable = 0;
   13545 
   13546 		/* Link stall fix for link up */
   13547 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13548 	} else {
   13549 		/* Link stall fix for link down */
   13550 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13551 	}
   13552 
   13553 	wm_configure_k1_ich8lan(sc, k1_enable);
   13554 	sc->phy.release(sc);
   13555 
   13556 	return 0;
   13557 }
   13558 
   13559 static void
   13560 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13561 {
   13562 	uint32_t reg;
   13563 
   13564 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13565 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13566 	    reg | HV_KMRN_MDIO_SLOW);
   13567 }
   13568 
   13569 static void
   13570 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13571 {
   13572 	uint32_t ctrl, ctrl_ext, tmp;
   13573 	uint16_t kmrn_reg;
   13574 
   13575 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13576 
   13577 	if (k1_enable)
   13578 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13579 	else
   13580 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13581 
   13582 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13583 
   13584 	delay(20);
   13585 
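	/*
	 * Bounce the MAC through a forced-speed state (speed bits
	 * cleared, FRCSPD and SPD_BYPS set) and back, presumably so the
	 * hardware latches the new K1 setting.
	 */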
   13586 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13587 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13588 
   13589 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13590 	tmp |= CTRL_FRCSPD;
   13591 
   13592 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13593 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13594 	CSR_WRITE_FLUSH(sc);
   13595 	delay(20);
   13596 
   13597 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13598 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13599 	CSR_WRITE_FLUSH(sc);
   13600 	delay(20);
   13601 }
   13602 
    13603 /* Special case - the 82575 needs manual init ... */
   13604 static void
   13605 wm_reset_init_script_82575(struct wm_softc *sc)
   13606 {
    13607 	/*
    13608 	 * Remark: this is untested code - we have no board without EEPROM.
    13609 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13610 	 */
   13611 
   13612 	/* SerDes configuration via SERDESCTRL */
   13613 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13614 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13615 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13616 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13617 
   13618 	/* CCM configuration via CCMCTL register */
   13619 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13620 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13621 
   13622 	/* PCIe lanes configuration */
   13623 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13624 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13625 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13626 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13627 
   13628 	/* PCIe PLL Configuration */
   13629 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13630 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13631 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13632 }
   13633 
   13634 static void
   13635 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13636 {
   13637 	uint32_t reg;
   13638 	uint16_t nvmword;
   13639 	int rv;
   13640 
   13641 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13642 		return;
   13643 
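	/*
	 * Re-derive the MDIO routing bits (external/common MDIO) from the
	 * per-port CFG3 word in the NVM; presumably these are lost across
	 * a reset, hence this helper's name.
	 */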
   13644 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13645 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13646 	if (rv != 0) {
   13647 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13648 		    __func__);
   13649 		return;
   13650 	}
   13651 
   13652 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13653 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13654 		reg |= MDICNFG_DEST;
   13655 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13656 		reg |= MDICNFG_COM_MDIO;
   13657 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13658 }
   13659 
   13660 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13661 
   13662 static bool
   13663 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13664 {
   13665 	int i;
   13666 	uint32_t reg;
   13667 	uint16_t id1, id2;
   13668 
   13669 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13670 		device_xname(sc->sc_dev), __func__));
   13671 	id1 = id2 = 0xffff;
   13672 	for (i = 0; i < 2; i++) {
   13673 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13674 		if (MII_INVALIDID(id1))
   13675 			continue;
   13676 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13677 		if (MII_INVALIDID(id2))
   13678 			continue;
   13679 		break;
   13680 	}
    13681 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    13682 		goto out;
   13684 
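	/*
	 * The PHY did not answer at normal MDIO speed, so drop to MDIO
	 * slow mode and try to read the PHY ID once more.
	 */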
   13685 	if (sc->sc_type < WM_T_PCH_LPT) {
   13686 		sc->phy.release(sc);
   13687 		wm_set_mdio_slow_mode_hv(sc);
   13688 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13689 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13690 		sc->phy.acquire(sc);
   13691 	}
   13692 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13693 		printf("XXX return with false\n");
   13694 		return false;
   13695 	}
   13696 out:
   13697 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13698 		/* Only unforce SMBus if ME is not active */
   13699 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13700 			/* Unforce SMBus mode in PHY */
   13701 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13702 			    CV_SMB_CTRL);
   13703 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13704 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13705 			    CV_SMB_CTRL, reg);
   13706 
   13707 			/* Unforce SMBus mode in MAC */
   13708 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13709 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13710 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13711 		}
   13712 	}
   13713 	return true;
   13714 }
   13715 
   13716 static void
   13717 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13718 {
   13719 	uint32_t reg;
   13720 	int i;
   13721 
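	/*
	 * Toggling the LANPHYPC value pin power-cycles the PHY to bring
	 * it back to a known state; the config counter programmed below
	 * gives the PHY 50msec to reload its configuration afterwards.
	 */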
   13722 	/* Set PHY Config Counter to 50msec */
   13723 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13724 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13725 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13726 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13727 
   13728 	/* Toggle LANPHYPC */
   13729 	reg = CSR_READ(sc, WMREG_CTRL);
   13730 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13731 	reg &= ~CTRL_LANPHYPC_VALUE;
   13732 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13733 	CSR_WRITE_FLUSH(sc);
   13734 	delay(1000);
   13735 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13736 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13737 	CSR_WRITE_FLUSH(sc);
   13738 
   13739 	if (sc->sc_type < WM_T_PCH_LPT)
   13740 		delay(50 * 1000);
   13741 	else {
   13742 		i = 20;
   13743 
   13744 		do {
   13745 			delay(5 * 1000);
   13746 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13747 		    && i--);
   13748 
   13749 		delay(30 * 1000);
   13750 	}
   13751 }
   13752 
   13753 static int
   13754 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13755 {
   13756 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13757 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13758 	uint32_t rxa;
   13759 	uint16_t scale = 0, lat_enc = 0;
   13760 	int64_t lat_ns, value;
   13761 
   13762 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13763 		device_xname(sc->sc_dev), __func__));
   13764 
   13765 	if (link) {
   13766 		pcireg_t preg;
   13767 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13768 
   13769 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13770 
   13771 		/*
   13772 		 * Determine the maximum latency tolerated by the device.
   13773 		 *
   13774 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13775 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13776 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13777 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13778 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13779 		 */
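		/*
		 * Worked example with illustrative numbers (assuming
		 * LTRV_VALUE is the 10-bit value mask): lat_ns = 70000
		 * does not fit in 10 bits, so divide by 2^5, rounding up,
		 * until it does: 70000 -> 2188 (scale 1) -> 69 (scale 2).
		 * The result encodes 69 * 2^10 ns, roughly 70.7 usec.
		 */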
   13780 		lat_ns = ((int64_t)rxa * 1024 -
   13781 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13782 		if (lat_ns < 0)
   13783 			lat_ns = 0;
   13784 		else {
   13785 			uint32_t status;
   13786 			uint16_t speed;
   13787 
   13788 			status = CSR_READ(sc, WMREG_STATUS);
   13789 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13790 			case STATUS_SPEED_10:
   13791 				speed = 10;
   13792 				break;
   13793 			case STATUS_SPEED_100:
   13794 				speed = 100;
   13795 				break;
   13796 			case STATUS_SPEED_1000:
   13797 				speed = 1000;
   13798 				break;
   13799 			default:
   13800 				printf("%s: Unknown speed (status = %08x)\n",
   13801 				    device_xname(sc->sc_dev), status);
   13802 				return -1;
   13803 			}
   13804 			lat_ns /= speed;
   13805 		}
   13806 		value = lat_ns;
   13807 
   13808 		while (value > LTRV_VALUE) {
    13809 			scale++;
   13810 			value = howmany(value, __BIT(5));
   13811 		}
   13812 		if (scale > LTRV_SCALE_MAX) {
   13813 			printf("%s: Invalid LTR latency scale %d\n",
   13814 			    device_xname(sc->sc_dev), scale);
   13815 			return -1;
   13816 		}
   13817 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13818 
   13819 		/* Determine the maximum latency tolerated by the platform */
   13820 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13821 		    WM_PCI_LTR_CAP_LPT);
   13822 		max_snoop = preg & 0xffff;
   13823 		max_nosnoop = preg >> 16;
   13824 
   13825 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13826 
    13827 		if (lat_enc > max_ltr_enc)
    13828 			lat_enc = max_ltr_enc;
   13830 	}
   13831 	/* Snoop and No-Snoop latencies the same */
   13832 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13833 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13834 
   13835 	return 0;
   13836 }
   13837 
   13838 /*
   13839  * I210 Errata 25 and I211 Errata 10
   13840  * Slow System Clock.
   13841  */
   13842 static void
   13843 wm_pll_workaround_i210(struct wm_softc *sc)
   13844 {
   13845 	uint32_t mdicnfg, wuc;
   13846 	uint32_t reg;
   13847 	pcireg_t pcireg;
   13848 	uint32_t pmreg;
   13849 	uint16_t nvmword, tmp_nvmword;
   13850 	int phyval;
   13851 	bool wa_done = false;
   13852 	int i;
   13853 
   13854 	/* Save WUC and MDICNFG registers */
   13855 	wuc = CSR_READ(sc, WMREG_WUC);
   13856 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13857 
   13858 	reg = mdicnfg & ~MDICNFG_DEST;
   13859 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13860 
   13861 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13862 		nvmword = INVM_DEFAULT_AL;
   13863 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13864 
   13865 	/* Get Power Management cap offset */
   13866 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13867 		&pmreg, NULL) == 0)
   13868 		return;
   13869 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13870 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13871 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13872 
   13873 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13874 			break; /* OK */
   13875 		}
   13876 
   13877 		wa_done = true;
   13878 		/* Directly reset the internal PHY */
   13879 		reg = CSR_READ(sc, WMREG_CTRL);
   13880 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13881 
   13882 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13883 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13884 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13885 
   13886 		CSR_WRITE(sc, WMREG_WUC, 0);
   13887 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13888 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13889 
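		/*
		 * Bounce the function through D3hot and back to D0; the
		 * PHY then reloads the patched autoload word on the way
		 * back up, which should let the PLL lock this time.
		 */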
   13890 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13891 		    pmreg + PCI_PMCSR);
   13892 		pcireg |= PCI_PMCSR_STATE_D3;
   13893 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13894 		    pmreg + PCI_PMCSR, pcireg);
   13895 		delay(1000);
   13896 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13897 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13898 		    pmreg + PCI_PMCSR, pcireg);
   13899 
   13900 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13901 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13902 
   13903 		/* Restore WUC register */
   13904 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13905 	}
   13906 
   13907 	/* Restore MDICNFG setting */
   13908 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13909 	if (wa_done)
   13910 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13911 }
   13912