/*	$NetBSD: if_wm.c,v 1.541 2017/10/23 09:27:21 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.541 2017/10/23 09:27:21 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
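
/*
 * Illustrative only (not compiled): DPRINTF takes a debug-class mask
 * and a parenthesized printf(9) argument list, so a typical call site
 * looks like
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: LINK: link up\n", device_xname(sc->sc_dev)));
 *
 * The extra parentheses let a variable argument list pass through the
 * two-argument macro without requiring __VA_ARGS__.
 */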

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses: up to one per
 * queue, plus one for the link status interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
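
/*
 * Illustrative arithmetic (not compiled): the ring macros rely on the
 * descriptor and job counts being powers of two, so advancing an index
 * is a mask rather than a modulo.  With WM_NTXDESC(txq) == 4096:
 *
 *	WM_NEXTTX(txq, 100)  == 101
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0	(wraps to start)
 *
 * This is why txq_num and txq_ndesc below must be powers of two.
 */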

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

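/*
 * Rx processing limits: WM_RX_INTR_PROCESS_LIMIT_DEFAULT bounds how many
 * Rx descriptors are handled per pass in hard interrupt context, and
 * WM_RX_PROCESS_LIMIT_DEFAULT bounds each pass of the softint handler
 * (see sc_rx_intr_process_limit and sc_rx_process_limit in
 * struct wm_softc below).
 */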
#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	 sctxu_ext_rxdescs[WM_NRXDESC];	/* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC];	/* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
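
/*
 * Illustrative expansion (not compiled): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname) registers the
 * counter under the generated name "txq00txdw".  The string literal in
 * sizeof() is not token-pasted; it just reserves enough room for any
 * "<qname><NN><evname>" name built here.
 */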

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
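
/*
 * Illustrative call pattern (a sketch, not compiled): the phy/nvm op
 * vectors let chip-specific locking back one common access sequence,
 * in the style of wm_nvm_read():
 *
 *	if (sc->nvm.acquire(sc) != 0)
 *		return -1;
 *	rv = sc->nvm.read(sc, word, wordcnt, data);
 *	sc->nvm.release(sc);
 *
 * where "word", "wordcnt" and "data" name the offset, count and output
 * buffer of the read.
 */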

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and MSI use sc_ihs[0] only
					 * - MSI-X uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and MSI use sc_intrs[0] only
					 * MSI-X uses sc_intrs[0] to
					 * sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex.  For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
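
/*
 * Illustrative usage (not compiled): the core lock is optional (NULL
 * unless WM_MPSAFE), so callers go through these wrappers and callees
 * assert ownership:
 *
 *	WM_CORE_LOCK(sc);
 *	wm_init_locked(ifp);		(* KASSERT(WM_CORE_LOCKED(sc)) *)
 *	WM_CORE_UNLOCK(sc);
 */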

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
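
/*
 * Illustrative usage (not compiled): the Rx path accumulates the
 * buffers of one frame with WM_RXCHAIN_LINK() and, once the descriptor
 * marked end-of-packet arrives, hands the chain up and starts over:
 *
 *	WM_RXCHAIN_LINK(rxq, m);	(* append buffer to current frame *)
 *	...
 *	m = rxq->rxq_head;		(* completed frame *)
 *	WM_RXCHAIN_RESET(rxq);		(* empty the chain for the next *)
 */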

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
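
/*
 * Illustrative read-modify-write (not compiled): device CSRs are
 * accessed only through these macros, with a posted-write flush where
 * ordering matters, e.g.
 *
 *	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
 *	sc->sc_ctrl |= CTRL_SLU;	(* set link up *)
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *
 * WMREG_CTRL and CTRL_SLU come from if_wmreg.h.
 */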

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
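
/*
 * Illustrative use (a sketch, not compiled): these helpers split the
 * 64-bit bus address of ring entry 0 across the paired base-address
 * registers when a queue is programmed, in the style of
 * wm_init_tx_regs().  The per-queue WMREG_TDBAH()/WMREG_TDBAL()
 * offsets are assumed to come from if_wmreg.h:
 *
 *	int qid = wmq->wmq_id;
 *	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
 *	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
 *
 * On a 32-bit bus_addr_t the _HI half is simply 0.
 */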

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
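
/*
 * Both helpers above use the i8254x indirect I/O window: the target
 * register offset is first written at I/O offset 0 (IOADDR) and the
 * data is then transferred through I/O offset 4 (IODATA).  A minimal
 * sketch of a read (equivalent to CSR_READ() via memory space):
 *
 *	uint32_t ctrl = wm_io_read(sc, WMREG_CTRL);
 */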
   1523 
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
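
/*
 * A minimal usage sketch for the helper above, with hypothetical
 * values, assuming the WMREG_SCTL register that is defined alongside
 * the SCTL_CTL_* constants:
 *
 *	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x1b);
 *
 * This writes the 8-bit value 0x1b to offset 0x00 behind SCTL, then
 * polls up to SCTL_CTL_POLL_TIMEOUT times, 5us apart, for
 * SCTL_CTL_READY.
 */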
   1546 
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
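
/*
 * Illustration of the split above, assuming a 64-bit bus_addr_t and
 * a hypothetical address v = 0x123456789:
 *
 *	wa->wa_low  = htole32(0x23456789);	v & 0xffffffffU
 *	wa->wa_high = htole32(0x00000001);	(uint64_t)v >> 32
 *
 * On platforms with a 32-bit bus_addr_t, wa_high is simply 0.
 */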
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
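
/*
 * A worked example of the wrap handling above, for a hypothetical
 * ring with WM_NTXDESC(txq) == 256: a call with start = 250 and
 * num = 10 first syncs the 6 descriptors 250-255, then resets start
 * to 0 and syncs the remaining 4 descriptors 0-3.
 */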
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
    1599 	 *
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
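
/*
 * A worked example of the align_tweak arithmetic above: with the 2K
 * receive buffer size, sc_align_tweak == 2 leaves 2046 usable bytes,
 * which still holds a standard 1518-byte Ethernet frame while making
 * the post-header payload 4-byte aligned.  Once the configured
 * maximum frame size exceeds 2046 bytes, strict-alignment platforms
 * must fall back to sc_align_tweak == 0, as the comment above notes.
 */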
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
   1644 /* Lookup supported device table */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
    1770 	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
    1813 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1814 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1815 			 * That's not a problem, because newer chips don't
    1816 			 * have this bug.
    1817 			 *
    1818 			 * The i8254x apparently doesn't respond when the
    1819 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1820 			 * been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1857 	counts[PCI_INTR_TYPE_MSI] = 1;
   1858 	counts[PCI_INTR_TYPE_INTX] = 1;
   1859 	/* overridden by disable flags */
   1860 	if (wm_disable_msi != 0) {
   1861 		counts[PCI_INTR_TYPE_MSI] = 0;
   1862 		if (wm_disable_msix != 0) {
   1863 			max_type = PCI_INTR_TYPE_INTX;
   1864 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1865 		}
   1866 	} else if (wm_disable_msix != 0) {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
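	/*
	 * The code below walks a fallback ladder; assuming neither
	 * wm_disable_msi nor wm_disable_msix is set, it is
	 *
	 *	MSI-X (sc_nqueues + 1 vectors) -> MSI (1) -> INTx (1)
	 *
	 * When interrupt setup fails for MSI-X or MSI, the vectors are
	 * released and the allocation is retried via alloc_retry with
	 * the next weaker max_type.
	 */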
   1871 alloc_retry:
   1872 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1873 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1874 		return;
   1875 	}
   1876 
   1877 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1878 		error = wm_setup_msix(sc);
   1879 		if (error) {
   1880 			pci_intr_release(pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSIX]);
   1882 
   1883 			/* Setup for MSI: Disable MSI-X */
   1884 			max_type = PCI_INTR_TYPE_MSI;
   1885 			counts[PCI_INTR_TYPE_MSI] = 1;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
    1889 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_MSI]);
   1895 
   1896 			/* The next try is for INTx: Disable MSI */
   1897 			max_type = PCI_INTR_TYPE_INTX;
   1898 			counts[PCI_INTR_TYPE_INTX] = 1;
   1899 			goto alloc_retry;
   1900 		}
   1901 	} else {
   1902 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1903 		error = wm_setup_legacy(sc);
   1904 		if (error) {
   1905 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1906 			    counts[PCI_INTR_TYPE_INTX]);
   1907 			return;
   1908 		}
   1909 	}
   1910 
   1911 	/*
   1912 	 * Check the function ID (unit number of the chip).
   1913 	 */
   1914 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1915 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1916 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1917 	    || (sc->sc_type == WM_T_82580)
   1918 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1919 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1920 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1921 	else
   1922 		sc->sc_funcid = 0;
   1923 
   1924 	/*
   1925 	 * Determine a few things about the bus we're connected to.
   1926 	 */
   1927 	if (sc->sc_type < WM_T_82543) {
   1928 		/* We don't really know the bus characteristics here. */
   1929 		sc->sc_bus_speed = 33;
   1930 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1931 		/*
   1932 		 * CSA (Communication Streaming Architecture) is about as fast
    1933 		 * as a 32-bit 66MHz PCI bus.
   1934 		 */
   1935 		sc->sc_flags |= WM_F_CSA;
   1936 		sc->sc_bus_speed = 66;
   1937 		aprint_verbose_dev(sc->sc_dev,
   1938 		    "Communication Streaming Architecture\n");
   1939 		if (sc->sc_type == WM_T_82547) {
   1940 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1941 			callout_setfunc(&sc->sc_txfifo_ch,
   1942 					wm_82547_txfifo_stall, sc);
   1943 			aprint_verbose_dev(sc->sc_dev,
   1944 			    "using 82547 Tx FIFO stall work-around\n");
   1945 		}
   1946 	} else if (sc->sc_type >= WM_T_82571) {
   1947 		sc->sc_flags |= WM_F_PCIE;
   1948 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1949 		    && (sc->sc_type != WM_T_ICH10)
   1950 		    && (sc->sc_type != WM_T_PCH)
   1951 		    && (sc->sc_type != WM_T_PCH2)
   1952 		    && (sc->sc_type != WM_T_PCH_LPT)
   1953 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1954 			/* ICH* and PCH* have no PCIe capability registers */
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1957 				NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIe capability\n");
   1960 		}
   1961 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1962 	} else {
   1963 		reg = CSR_READ(sc, WMREG_STATUS);
   1964 		if (reg & STATUS_BUS64)
   1965 			sc->sc_flags |= WM_F_BUS64;
   1966 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1967 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1968 
   1969 			sc->sc_flags |= WM_F_PCIX;
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1972 				aprint_error_dev(sc->sc_dev,
   1973 				    "unable to find PCIX capability\n");
   1974 			else if (sc->sc_type != WM_T_82545_3 &&
   1975 				 sc->sc_type != WM_T_82546_3) {
   1976 				/*
   1977 				 * Work around a problem caused by the BIOS
   1978 				 * setting the max memory read byte count
   1979 				 * incorrectly.
   1980 				 */
   1981 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1982 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1983 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1984 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1985 
   1986 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1987 				    PCIX_CMD_BYTECNT_SHIFT;
   1988 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1989 				    PCIX_STATUS_MAXB_SHIFT;
   1990 				if (bytecnt > maxb) {
   1991 					aprint_verbose_dev(sc->sc_dev,
   1992 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1993 					    512 << bytecnt, 512 << maxb);
   1994 					pcix_cmd = (pcix_cmd &
   1995 					    ~PCIX_CMD_BYTECNT_MASK) |
   1996 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1997 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1998 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1999 					    pcix_cmd);
   2000 				}
   2001 			}
   2002 		}
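		/*
		 * Illustration of the MMRBC clamp above: the 2-bit field
		 * encodes 512 << n bytes (0 = 512 ... 3 = 4096).  If the
		 * BIOS left bytecnt = 3 (4096 bytes) but the device only
		 * allows maxb = 2 (2048 bytes), PCIX_CMD is rewritten
		 * with 2.
		 */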
   2003 		/*
   2004 		 * The quad port adapter is special; it has a PCIX-PCIX
   2005 		 * bridge on the board, and can run the secondary bus at
   2006 		 * a higher speed.
   2007 		 */
   2008 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2009 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2010 								      : 66;
   2011 		} else if (sc->sc_flags & WM_F_PCIX) {
   2012 			switch (reg & STATUS_PCIXSPD_MASK) {
   2013 			case STATUS_PCIXSPD_50_66:
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			case STATUS_PCIXSPD_66_100:
   2017 				sc->sc_bus_speed = 100;
   2018 				break;
   2019 			case STATUS_PCIXSPD_100_133:
   2020 				sc->sc_bus_speed = 133;
   2021 				break;
   2022 			default:
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2025 				    reg & STATUS_PCIXSPD_MASK);
   2026 				sc->sc_bus_speed = 66;
   2027 				break;
   2028 			}
   2029 		} else
   2030 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2031 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2032 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2033 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2034 	}
   2035 
   2036 	/* clear interesting stat counters */
   2037 	CSR_READ(sc, WMREG_COLC);
   2038 	CSR_READ(sc, WMREG_RXERRC);
   2039 
   2040 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2041 	    || (sc->sc_type >= WM_T_ICH8))
   2042 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2043 	if (sc->sc_type >= WM_T_ICH8)
   2044 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2045 
   2046 	/* Set PHY, NVM mutex related stuff */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82542_2_0:
   2049 	case WM_T_82542_2_1:
   2050 	case WM_T_82543:
   2051 	case WM_T_82544:
   2052 		/* Microwire */
   2053 		sc->nvm.read = wm_nvm_read_uwire;
   2054 		sc->sc_nvm_wordsize = 64;
   2055 		sc->sc_nvm_addrbits = 6;
   2056 		break;
   2057 	case WM_T_82540:
   2058 	case WM_T_82545:
   2059 	case WM_T_82545_3:
   2060 	case WM_T_82546:
   2061 	case WM_T_82546_3:
   2062 		/* Microwire */
   2063 		sc->nvm.read = wm_nvm_read_uwire;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_SIZE) {
   2066 			sc->sc_nvm_wordsize = 256;
   2067 			sc->sc_nvm_addrbits = 8;
   2068 		} else {
   2069 			sc->sc_nvm_wordsize = 64;
   2070 			sc->sc_nvm_addrbits = 6;
   2071 		}
   2072 		sc->sc_flags |= WM_F_LOCK_EECD;
   2073 		sc->nvm.acquire = wm_get_eecd;
   2074 		sc->nvm.release = wm_put_eecd;
   2075 		break;
   2076 	case WM_T_82541:
   2077 	case WM_T_82541_2:
   2078 	case WM_T_82547:
   2079 	case WM_T_82547_2:
   2080 		reg = CSR_READ(sc, WMREG_EECD);
   2081 		/*
    2082 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2083 		 * 8254[17], so set the flags and functions before calling it.
   2084 		 */
   2085 		sc->sc_flags |= WM_F_LOCK_EECD;
   2086 		sc->nvm.acquire = wm_get_eecd;
   2087 		sc->nvm.release = wm_put_eecd;
   2088 		if (reg & EECD_EE_TYPE) {
   2089 			/* SPI */
   2090 			sc->nvm.read = wm_nvm_read_spi;
   2091 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 			wm_nvm_set_addrbits_size_eecd(sc);
   2093 		} else {
   2094 			/* Microwire */
   2095 			sc->nvm.read = wm_nvm_read_uwire;
   2096 			if ((reg & EECD_EE_ABITS) != 0) {
   2097 				sc->sc_nvm_wordsize = 256;
   2098 				sc->sc_nvm_addrbits = 8;
   2099 			} else {
   2100 				sc->sc_nvm_wordsize = 64;
   2101 				sc->sc_nvm_addrbits = 6;
   2102 			}
   2103 		}
   2104 		break;
   2105 	case WM_T_82571:
   2106 	case WM_T_82572:
   2107 		/* SPI */
   2108 		sc->nvm.read = wm_nvm_read_eerd;
    2109 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2110 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2111 		wm_nvm_set_addrbits_size_eecd(sc);
   2112 		sc->phy.acquire = wm_get_swsm_semaphore;
   2113 		sc->phy.release = wm_put_swsm_semaphore;
   2114 		sc->nvm.acquire = wm_get_nvm_82571;
   2115 		sc->nvm.release = wm_put_nvm_82571;
   2116 		break;
   2117 	case WM_T_82573:
   2118 	case WM_T_82574:
   2119 	case WM_T_82583:
   2120 		sc->nvm.read = wm_nvm_read_eerd;
    2121 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2122 		if (sc->sc_type == WM_T_82573) {
   2123 			sc->phy.acquire = wm_get_swsm_semaphore;
   2124 			sc->phy.release = wm_put_swsm_semaphore;
   2125 			sc->nvm.acquire = wm_get_nvm_82571;
   2126 			sc->nvm.release = wm_put_nvm_82571;
   2127 		} else {
   2128 			/* Both PHY and NVM use the same semaphore. */
   2129 			sc->phy.acquire = sc->nvm.acquire
   2130 			    = wm_get_swfwhw_semaphore;
   2131 			sc->phy.release = sc->nvm.release
   2132 			    = wm_put_swfwhw_semaphore;
   2133 		}
   2134 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2135 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2136 			sc->sc_nvm_wordsize = 2048;
   2137 		} else {
   2138 			/* SPI */
   2139 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2140 			wm_nvm_set_addrbits_size_eecd(sc);
   2141 		}
   2142 		break;
   2143 	case WM_T_82575:
   2144 	case WM_T_82576:
   2145 	case WM_T_82580:
   2146 	case WM_T_I350:
   2147 	case WM_T_I354:
   2148 	case WM_T_80003:
   2149 		/* SPI */
   2150 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2151 		wm_nvm_set_addrbits_size_eecd(sc);
    2152 		if ((sc->sc_type == WM_T_80003)
   2153 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2154 			sc->nvm.read = wm_nvm_read_eerd;
   2155 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2156 		} else {
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		}
   2160 		sc->phy.acquire = wm_get_phy_82575;
   2161 		sc->phy.release = wm_put_phy_82575;
   2162 		sc->nvm.acquire = wm_get_nvm_80003;
   2163 		sc->nvm.release = wm_put_nvm_80003;
   2164 		break;
   2165 	case WM_T_ICH8:
   2166 	case WM_T_ICH9:
   2167 	case WM_T_ICH10:
   2168 	case WM_T_PCH:
   2169 	case WM_T_PCH2:
   2170 	case WM_T_PCH_LPT:
   2171 		sc->nvm.read = wm_nvm_read_ich8;
   2172 		/* FLASH */
   2173 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2174 		sc->sc_nvm_wordsize = 2048;
    2175 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2176 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2177 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2178 			aprint_error_dev(sc->sc_dev,
   2179 			    "can't map FLASH registers\n");
   2180 			goto out;
   2181 		}
   2182 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2183 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2184 		    ICH_FLASH_SECTOR_SIZE;
   2185 		sc->sc_ich8_flash_bank_size =
   2186 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2187 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2188 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2189 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
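		/*
		 * Worked example of the computation above, assuming the
		 * usual 4 KB ICH_FLASH_SECTOR_SIZE: a GFPREG base of 0 and
		 * limit of 7 spans 8 sectors = 32768 bytes; split across
		 * 2 banks of 16-bit words, each bank holds 8192 words.
		 */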
   2190 		sc->sc_flashreg_offset = 0;
   2191 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2192 		sc->phy.release = wm_put_swflag_ich8lan;
   2193 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2194 		sc->nvm.release = wm_put_nvm_ich8lan;
   2195 		break;
   2196 	case WM_T_PCH_SPT:
   2197 		sc->nvm.read = wm_nvm_read_spt;
   2198 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2199 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2200 		sc->sc_flasht = sc->sc_st;
   2201 		sc->sc_flashh = sc->sc_sh;
   2202 		sc->sc_ich8_flash_base = 0;
   2203 		sc->sc_nvm_wordsize =
   2204 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2205 			* NVM_SIZE_MULTIPLIER;
    2206 		/* It is the size in bytes; we want it in words */
   2207 		sc->sc_nvm_wordsize /= 2;
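		/*
		 * Worked example, assuming the 4 KB NVM_SIZE_MULTIPLIER
		 * from the register definitions: a STRAP field of 0x1f
		 * yields (0x1f + 1) * 4096 = 131072 bytes, i.e. 65536
		 * 16-bit words.
		 */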
   2208 		/* assume 2 banks */
   2209 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2210 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2211 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2212 		sc->phy.release = wm_put_swflag_ich8lan;
   2213 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2214 		sc->nvm.release = wm_put_nvm_ich8lan;
   2215 		break;
   2216 	case WM_T_I210:
   2217 	case WM_T_I211:
    2218 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2219 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2220 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2221 			sc->nvm.read = wm_nvm_read_eerd;
   2222 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2224 			wm_nvm_set_addrbits_size_eecd(sc);
   2225 		} else {
   2226 			sc->nvm.read = wm_nvm_read_invm;
   2227 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2228 			sc->sc_nvm_wordsize = INVM_SIZE;
   2229 		}
   2230 		sc->phy.acquire = wm_get_phy_82575;
   2231 		sc->phy.release = wm_put_phy_82575;
   2232 		sc->nvm.acquire = wm_get_nvm_80003;
   2233 		sc->nvm.release = wm_put_nvm_80003;
   2234 		break;
   2235 	default:
   2236 		break;
   2237 	}
   2238 
   2239 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2240 	switch (sc->sc_type) {
   2241 	case WM_T_82571:
   2242 	case WM_T_82572:
   2243 		reg = CSR_READ(sc, WMREG_SWSM2);
   2244 		if ((reg & SWSM2_LOCK) == 0) {
   2245 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2246 			force_clear_smbi = true;
   2247 		} else
   2248 			force_clear_smbi = false;
   2249 		break;
   2250 	case WM_T_82573:
   2251 	case WM_T_82574:
   2252 	case WM_T_82583:
   2253 		force_clear_smbi = true;
   2254 		break;
   2255 	default:
   2256 		force_clear_smbi = false;
   2257 		break;
   2258 	}
   2259 	if (force_clear_smbi) {
   2260 		reg = CSR_READ(sc, WMREG_SWSM);
   2261 		if ((reg & SWSM_SMBI) != 0)
   2262 			aprint_error_dev(sc->sc_dev,
   2263 			    "Please update the Bootagent\n");
   2264 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2265 	}
   2266 
   2267 	/*
    2268 	 * Defer printing the EEPROM type until after verifying the checksum.
   2269 	 * This allows the EEPROM type to be printed correctly in the case
   2270 	 * that no EEPROM is attached.
   2271 	 */
   2272 	/*
   2273 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2274 	 * this for later, so we can fail future reads from the EEPROM.
   2275 	 */
   2276 	if (wm_nvm_validate_checksum(sc)) {
   2277 		/*
    2278 		 * Validate the checksum again, because some PCIe parts fail
    2279 		 * the first check due to the link being in a sleep state.
   2280 		 */
   2281 		if (wm_nvm_validate_checksum(sc))
   2282 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2283 	}
   2284 
   2285 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2286 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2287 	else {
   2288 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2289 		    sc->sc_nvm_wordsize);
   2290 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2291 			aprint_verbose("iNVM");
   2292 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2293 			aprint_verbose("FLASH(HW)");
   2294 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2295 			aprint_verbose("FLASH");
   2296 		else {
   2297 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2298 				eetype = "SPI";
   2299 			else
   2300 				eetype = "MicroWire";
   2301 			aprint_verbose("(%d address bits) %s EEPROM",
   2302 			    sc->sc_nvm_addrbits, eetype);
   2303 		}
   2304 	}
   2305 	wm_nvm_version(sc);
   2306 	aprint_verbose("\n");
   2307 
   2308 	/*
    2309 	 * XXX This is the first call of wm_gmii_setup_phytype(); the result
    2310 	 * might be incorrect.
   2311 	 */
   2312 	wm_gmii_setup_phytype(sc, 0, 0);
   2313 
   2314 	/* Reset the chip to a known state. */
   2315 	wm_reset(sc);
   2316 
   2317 	/* Check for I21[01] PLL workaround */
   2318 	if (sc->sc_type == WM_T_I210)
   2319 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2320 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2321 		/* NVM image release 3.25 has a workaround */
   2322 		if ((sc->sc_nvm_ver_major < 3)
   2323 		    || ((sc->sc_nvm_ver_major == 3)
   2324 			&& (sc->sc_nvm_ver_minor < 25))) {
   2325 			aprint_verbose_dev(sc->sc_dev,
   2326 			    "ROM image version %d.%d is older than 3.25\n",
   2327 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2328 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2329 		}
   2330 	}
   2331 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2332 		wm_pll_workaround_i210(sc);
   2333 
   2334 	wm_get_wakeup(sc);
   2335 
   2336 	/* Non-AMT based hardware can now take control from firmware */
   2337 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2338 		wm_get_hw_control(sc);
   2339 
   2340 	/*
   2341 	 * Read the Ethernet address from the EEPROM, if not first found
   2342 	 * in device properties.
   2343 	 */
   2344 	ea = prop_dictionary_get(dict, "mac-address");
   2345 	if (ea != NULL) {
   2346 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2347 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2348 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2349 	} else {
   2350 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2351 			aprint_error_dev(sc->sc_dev,
   2352 			    "unable to read Ethernet address\n");
   2353 			goto out;
   2354 		}
   2355 	}
   2356 
   2357 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2358 	    ether_sprintf(enaddr));
   2359 
   2360 	/*
   2361 	 * Read the config info from the EEPROM, and set up various
   2362 	 * bits in the control registers based on their contents.
   2363 	 */
   2364 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2365 	if (pn != NULL) {
   2366 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2367 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2368 	} else {
   2369 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2370 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2371 			goto out;
   2372 		}
   2373 	}
   2374 
   2375 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2376 	if (pn != NULL) {
   2377 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2378 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2379 	} else {
   2380 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2381 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2382 			goto out;
   2383 		}
   2384 	}
   2385 
   2386 	/* check for WM_F_WOL */
   2387 	switch (sc->sc_type) {
   2388 	case WM_T_82542_2_0:
   2389 	case WM_T_82542_2_1:
   2390 	case WM_T_82543:
   2391 		/* dummy? */
   2392 		eeprom_data = 0;
   2393 		apme_mask = NVM_CFG3_APME;
   2394 		break;
   2395 	case WM_T_82544:
   2396 		apme_mask = NVM_CFG2_82544_APM_EN;
   2397 		eeprom_data = cfg2;
   2398 		break;
   2399 	case WM_T_82546:
   2400 	case WM_T_82546_3:
   2401 	case WM_T_82571:
   2402 	case WM_T_82572:
   2403 	case WM_T_82573:
   2404 	case WM_T_82574:
   2405 	case WM_T_82583:
   2406 	case WM_T_80003:
   2407 	default:
   2408 		apme_mask = NVM_CFG3_APME;
   2409 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2410 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2411 		break;
   2412 	case WM_T_82575:
   2413 	case WM_T_82576:
   2414 	case WM_T_82580:
   2415 	case WM_T_I350:
   2416 	case WM_T_I354: /* XXX ok? */
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 		/* XXX The funcid should be checked on some devices */
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		break;
   2428 	}
   2429 
    2430 	/* Set the WM_F_WOL flag based on the EEPROM settings read above */
   2431 	if ((eeprom_data & apme_mask) != 0)
   2432 		sc->sc_flags |= WM_F_WOL;
   2433 
   2434 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2435 		/* Check NVM for autonegotiation */
   2436 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2437 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2438 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2439 		}
   2440 	}
   2441 
   2442 	/*
    2443 	 * XXX need special handling for some multi-port cards
    2444 	 * to disable a particular port.
   2445 	 */
   2446 
   2447 	if (sc->sc_type >= WM_T_82544) {
   2448 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2449 		if (pn != NULL) {
   2450 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2451 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2452 		} else {
   2453 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2454 				aprint_error_dev(sc->sc_dev,
   2455 				    "unable to read SWDPIN\n");
   2456 				goto out;
   2457 			}
   2458 		}
   2459 	}
   2460 
   2461 	if (cfg1 & NVM_CFG1_ILOS)
   2462 		sc->sc_ctrl |= CTRL_ILOS;
   2463 
   2464 	/*
   2465 	 * XXX
    2466 	 * This code isn't correct because pins 2 and 3 are located
    2467 	 * in different positions on newer chips. Check all the datasheets.
    2468 	 *
    2469 	 * Until this problem is resolved, apply this only up to the 82580.
   2470 	 */
   2471 	if (sc->sc_type <= WM_T_82580) {
   2472 		if (sc->sc_type >= WM_T_82544) {
   2473 			sc->sc_ctrl |=
   2474 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2475 			    CTRL_SWDPIO_SHIFT;
   2476 			sc->sc_ctrl |=
   2477 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2478 			    CTRL_SWDPINS_SHIFT;
   2479 		} else {
   2480 			sc->sc_ctrl |=
   2481 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2482 			    CTRL_SWDPIO_SHIFT;
   2483 		}
   2484 	}
   2485 
   2486 	/* XXX For other than 82580? */
   2487 	if (sc->sc_type == WM_T_82580) {
   2488 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2489 		if (nvmword & __BIT(13))
   2490 			sc->sc_ctrl |= CTRL_ILOS;
   2491 	}
   2492 
   2493 #if 0
   2494 	if (sc->sc_type >= WM_T_82544) {
   2495 		if (cfg1 & NVM_CFG1_IPS0)
   2496 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2497 		if (cfg1 & NVM_CFG1_IPS1)
   2498 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2499 		sc->sc_ctrl_ext |=
   2500 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2501 		    CTRL_EXT_SWDPIO_SHIFT;
   2502 		sc->sc_ctrl_ext |=
   2503 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2504 		    CTRL_EXT_SWDPINS_SHIFT;
   2505 	} else {
   2506 		sc->sc_ctrl_ext |=
   2507 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2508 		    CTRL_EXT_SWDPIO_SHIFT;
   2509 	}
   2510 #endif
   2511 
   2512 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2513 #if 0
   2514 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2515 #endif
   2516 
   2517 	if (sc->sc_type == WM_T_PCH) {
   2518 		uint16_t val;
   2519 
   2520 		/* Save the NVM K1 bit setting */
   2521 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2522 
   2523 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2524 			sc->sc_nvm_k1_enabled = 1;
   2525 		else
   2526 			sc->sc_nvm_k1_enabled = 0;
   2527 	}
   2528 
    2529 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2530 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2531 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2532 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2533 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2534 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2535 		/* Copper only */
   2536 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2537 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2538 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2539 	    || (sc->sc_type == WM_T_I211)) {
   2540 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2541 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2542 		switch (link_mode) {
   2543 		case CTRL_EXT_LINK_MODE_1000KX:
   2544 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2546 			break;
   2547 		case CTRL_EXT_LINK_MODE_SGMII:
   2548 			if (wm_sgmii_uses_mdio(sc)) {
   2549 				aprint_verbose_dev(sc->sc_dev,
   2550 				    "SGMII(MDIO)\n");
   2551 				sc->sc_flags |= WM_F_SGMII;
   2552 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2553 				break;
   2554 			}
   2555 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2556 			/*FALLTHROUGH*/
   2557 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2558 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2559 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2560 				if (link_mode
   2561 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2562 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2563 					sc->sc_flags |= WM_F_SGMII;
   2564 				} else {
   2565 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2566 					aprint_verbose_dev(sc->sc_dev,
   2567 					    "SERDES\n");
   2568 				}
   2569 				break;
   2570 			}
   2571 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2572 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2573 
   2574 			/* Change current link mode setting */
   2575 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2576 			switch (sc->sc_mediatype) {
   2577 			case WM_MEDIATYPE_COPPER:
   2578 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2579 				break;
   2580 			case WM_MEDIATYPE_SERDES:
   2581 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2582 				break;
   2583 			default:
   2584 				break;
   2585 			}
   2586 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2587 			break;
   2588 		case CTRL_EXT_LINK_MODE_GMII:
   2589 		default:
   2590 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2591 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2592 			break;
   2593 		}
   2594 
    2595 		reg &= ~CTRL_EXT_I2C_ENA;
    2596 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2597 			reg |= CTRL_EXT_I2C_ENA;
   2600 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2601 	} else if (sc->sc_type < WM_T_82543 ||
   2602 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2603 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2604 			aprint_error_dev(sc->sc_dev,
   2605 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2606 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2607 		}
   2608 	} else {
   2609 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2610 			aprint_error_dev(sc->sc_dev,
   2611 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2612 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2613 		}
   2614 	}
   2615 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2616 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2617 
   2618 	/* Set device properties (macflags) */
   2619 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2620 
   2621 	/* Initialize the media structures accordingly. */
   2622 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2623 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2624 	else
   2625 		wm_tbi_mediainit(sc); /* All others */
   2626 
   2627 	ifp = &sc->sc_ethercom.ec_if;
   2628 	xname = device_xname(sc->sc_dev);
   2629 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2630 	ifp->if_softc = sc;
   2631 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2632 #ifdef WM_MPSAFE
   2633 	ifp->if_extflags = IFEF_START_MPSAFE;
   2634 #endif
   2635 	ifp->if_ioctl = wm_ioctl;
   2636 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2637 		ifp->if_start = wm_nq_start;
   2638 		/*
   2639 		 * When the number of CPUs is one and the controller can use
    2640 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2641 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2642 		 * the other for link status changes.
   2643 		 * In this situation, wm_nq_transmit() is disadvantageous
   2644 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2645 		 */
   2646 		if (wm_is_using_multiqueue(sc))
   2647 			ifp->if_transmit = wm_nq_transmit;
   2648 	} else {
   2649 		ifp->if_start = wm_start;
   2650 		/*
    2651 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2652 		 */
   2653 		if (wm_is_using_multiqueue(sc))
   2654 			ifp->if_transmit = wm_transmit;
   2655 	}
   2656 	ifp->if_watchdog = wm_watchdog;
   2657 	ifp->if_init = wm_init;
   2658 	ifp->if_stop = wm_stop;
   2659 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2660 	IFQ_SET_READY(&ifp->if_snd);
   2661 
   2662 	/* Check for jumbo frame */
   2663 	switch (sc->sc_type) {
   2664 	case WM_T_82573:
   2665 		/* XXX limited to 9234 if ASPM is disabled */
   2666 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2667 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2668 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2669 		break;
   2670 	case WM_T_82571:
   2671 	case WM_T_82572:
   2672 	case WM_T_82574:
   2673 	case WM_T_82575:
   2674 	case WM_T_82576:
   2675 	case WM_T_82580:
   2676 	case WM_T_I350:
    2677 	case WM_T_I354: /* XXX ok? */
   2678 	case WM_T_I210:
   2679 	case WM_T_I211:
   2680 	case WM_T_80003:
   2681 	case WM_T_ICH9:
   2682 	case WM_T_ICH10:
   2683 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2684 	case WM_T_PCH_LPT:
   2685 	case WM_T_PCH_SPT:
   2686 		/* XXX limited to 9234 */
   2687 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2688 		break;
   2689 	case WM_T_PCH:
   2690 		/* XXX limited to 4096 */
   2691 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2692 		break;
   2693 	case WM_T_82542_2_0:
   2694 	case WM_T_82542_2_1:
   2695 	case WM_T_82583:
   2696 	case WM_T_ICH8:
   2697 		/* No support for jumbo frame */
   2698 		break;
   2699 	default:
   2700 		/* ETHER_MAX_LEN_JUMBO */
   2701 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2702 		break;
   2703 	}
   2704 
    2705 	/* If we're an i82543 or greater, we can support VLANs. */
   2706 	if (sc->sc_type >= WM_T_82543)
   2707 		sc->sc_ethercom.ec_capabilities |=
   2708 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2709 
   2710 	/*
    2711 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2712 	 * on the i82543 and later.
   2713 	 */
   2714 	if (sc->sc_type >= WM_T_82543) {
   2715 		ifp->if_capabilities |=
   2716 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2717 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2718 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2719 		    IFCAP_CSUM_TCPv6_Tx |
   2720 		    IFCAP_CSUM_UDPv6_Tx;
   2721 	}
   2722 
   2723 	/*
   2724 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2725 	 *
   2726 	 *	82541GI (8086:1076) ... no
   2727 	 *	82572EI (8086:10b9) ... yes
   2728 	 */
   2729 	if (sc->sc_type >= WM_T_82571) {
   2730 		ifp->if_capabilities |=
   2731 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2732 	}
   2733 
   2734 	/*
    2735 	 * If we're an i82544 or greater (except i82547), we can do
   2736 	 * TCP segmentation offload.
   2737 	 */
   2738 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2739 		ifp->if_capabilities |= IFCAP_TSOv4;
   2740 	}
   2741 
   2742 	if (sc->sc_type >= WM_T_82571) {
   2743 		ifp->if_capabilities |= IFCAP_TSOv6;
   2744 	}
   2745 
   2746 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2747 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2748 
   2749 #ifdef WM_MPSAFE
   2750 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2751 #else
   2752 	sc->sc_core_lock = NULL;
   2753 #endif
   2754 
   2755 	/* Attach the interface. */
   2756 	error = if_initialize(ifp);
   2757 	if (error != 0) {
   2758 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2759 		    error);
   2760 		return; /* Error */
   2761 	}
   2762 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2763 	ether_ifattach(ifp, enaddr);
   2764 	if_register(ifp);
   2765 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2766 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2767 			  RND_FLAG_DEFAULT);
   2768 
   2769 #ifdef WM_EVENT_COUNTERS
   2770 	/* Attach event counters. */
   2771 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2772 	    NULL, xname, "linkintr");
   2773 
   2774 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2775 	    NULL, xname, "tx_xoff");
   2776 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2777 	    NULL, xname, "tx_xon");
   2778 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2779 	    NULL, xname, "rx_xoff");
   2780 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2781 	    NULL, xname, "rx_xon");
   2782 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2783 	    NULL, xname, "rx_macctl");
   2784 #endif /* WM_EVENT_COUNTERS */
   2785 
   2786 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2787 		pmf_class_network_register(self, ifp);
   2788 	else
   2789 		aprint_error_dev(self, "couldn't establish power handler\n");
   2790 
   2791 	sc->sc_flags |= WM_F_ATTACHED;
   2792  out:
   2793 	return;
   2794 }
   2795 
   2796 /* The detach function (ca_detach) */
   2797 static int
   2798 wm_detach(device_t self, int flags __unused)
   2799 {
   2800 	struct wm_softc *sc = device_private(self);
   2801 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2802 	int i;
   2803 
   2804 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2805 		return 0;
   2806 
   2807 	/* Stop the interface. Callouts are stopped in it. */
   2808 	wm_stop(ifp, 1);
   2809 
   2810 	pmf_device_deregister(self);
   2811 
   2812 #ifdef WM_EVENT_COUNTERS
   2813 	evcnt_detach(&sc->sc_ev_linkintr);
   2814 
   2815 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2816 	evcnt_detach(&sc->sc_ev_tx_xon);
   2817 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2818 	evcnt_detach(&sc->sc_ev_rx_xon);
   2819 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2820 #endif /* WM_EVENT_COUNTERS */
   2821 
   2822 	/* Tell the firmware about the release */
   2823 	WM_CORE_LOCK(sc);
   2824 	wm_release_manageability(sc);
   2825 	wm_release_hw_control(sc);
   2826 	wm_enable_wakeup(sc);
   2827 	WM_CORE_UNLOCK(sc);
   2828 
   2829 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2830 
   2831 	/* Delete all remaining media. */
   2832 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2833 
   2834 	ether_ifdetach(ifp);
   2835 	if_detach(ifp);
   2836 	if_percpuq_destroy(sc->sc_ipq);
   2837 
   2838 	/* Unload RX dmamaps and free mbufs */
   2839 	for (i = 0; i < sc->sc_nqueues; i++) {
   2840 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2841 		mutex_enter(rxq->rxq_lock);
   2842 		wm_rxdrain(rxq);
   2843 		mutex_exit(rxq->rxq_lock);
   2844 	}
   2845 	/* Must unlock here */
   2846 
   2847 	/* Disestablish the interrupt handler */
   2848 	for (i = 0; i < sc->sc_nintrs; i++) {
   2849 		if (sc->sc_ihs[i] != NULL) {
   2850 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2851 			sc->sc_ihs[i] = NULL;
   2852 		}
   2853 	}
   2854 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2855 
   2856 	wm_free_txrx_queues(sc);
   2857 
   2858 	/* Unmap the registers */
   2859 	if (sc->sc_ss) {
   2860 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2861 		sc->sc_ss = 0;
   2862 	}
   2863 	if (sc->sc_ios) {
   2864 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2865 		sc->sc_ios = 0;
   2866 	}
   2867 	if (sc->sc_flashs) {
   2868 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2869 		sc->sc_flashs = 0;
   2870 	}
   2871 
   2872 	if (sc->sc_core_lock)
   2873 		mutex_obj_free(sc->sc_core_lock);
   2874 	if (sc->sc_ich_phymtx)
   2875 		mutex_obj_free(sc->sc_ich_phymtx);
   2876 	if (sc->sc_ich_nvmmtx)
   2877 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2878 
   2879 	return 0;
   2880 }
   2881 
   2882 static bool
   2883 wm_suspend(device_t self, const pmf_qual_t *qual)
   2884 {
   2885 	struct wm_softc *sc = device_private(self);
   2886 
   2887 	wm_release_manageability(sc);
   2888 	wm_release_hw_control(sc);
   2889 	wm_enable_wakeup(sc);
   2890 
   2891 	return true;
   2892 }
   2893 
   2894 static bool
   2895 wm_resume(device_t self, const pmf_qual_t *qual)
   2896 {
   2897 	struct wm_softc *sc = device_private(self);
   2898 
   2899 	wm_init_manageability(sc);
   2900 
   2901 	return true;
   2902 }
   2903 
   2904 /*
   2905  * wm_watchdog:		[ifnet interface function]
   2906  *
   2907  *	Watchdog timer handler.
   2908  */
   2909 static void
   2910 wm_watchdog(struct ifnet *ifp)
   2911 {
   2912 	int qid;
   2913 	struct wm_softc *sc = ifp->if_softc;
   2914 
   2915 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2916 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2917 
   2918 		wm_watchdog_txq(ifp, txq);
   2919 	}
   2920 
   2921 	/* Reset the interface. */
   2922 	(void) wm_init(ifp);
   2923 
   2924 	/*
    2925 	 * There is still some upper-layer processing that calls
    2926 	 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   2927 	 */
   2928 	/* Try to get more packets going. */
   2929 	ifp->if_start(ifp);
   2930 }
   2931 
   2932 static void
   2933 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2934 {
   2935 	struct wm_softc *sc = ifp->if_softc;
   2936 
   2937 	/*
   2938 	 * Since we're using delayed interrupts, sweep up
   2939 	 * before we report an error.
   2940 	 */
   2941 	mutex_enter(txq->txq_lock);
   2942 	wm_txeof(sc, txq);
   2943 	mutex_exit(txq->txq_lock);
   2944 
   2945 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2946 #ifdef WM_DEBUG
   2947 		int i, j;
   2948 		struct wm_txsoft *txs;
   2949 #endif
   2950 		log(LOG_ERR,
   2951 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2952 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2953 		    txq->txq_next);
   2954 		ifp->if_oerrors++;
   2955 #ifdef WM_DEBUG
    2956 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2957 		    i = WM_NEXTTXS(txq, i)) {
    2958 			txs = &txq->txq_soft[i];
    2959 			printf("txs %d tx %d -> %d\n",
    2960 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2961 			for (j = txs->txs_firstdesc; ;
    2962 			    j = WM_NEXTTX(txq, j)) {
    2963 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2964 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2965 				printf("\t %#08x%08x\n",
    2966 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2967 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2968 				if (j == txs->txs_lastdesc)
    2969 					break;
    2970 			}
    2971 		}
   2972 #endif
   2973 	}
   2974 }
   2975 
   2976 /*
   2977  * wm_tick:
   2978  *
   2979  *	One second timer, used to check link status, sweep up
   2980  *	completed transmit jobs, etc.
   2981  */
   2982 static void
   2983 wm_tick(void *arg)
   2984 {
   2985 	struct wm_softc *sc = arg;
   2986 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2987 #ifndef WM_MPSAFE
   2988 	int s = splnet();
   2989 #endif
   2990 
   2991 	WM_CORE_LOCK(sc);
   2992 
   2993 	if (sc->sc_core_stopping)
   2994 		goto out;
   2995 
   2996 	if (sc->sc_type >= WM_T_82542_2_1) {
   2997 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2998 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2999 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3000 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3001 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3002 	}
   3003 
   3004 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3005 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3006 	    + CSR_READ(sc, WMREG_CRCERRS)
   3007 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3008 	    + CSR_READ(sc, WMREG_SYMERRC)
   3009 	    + CSR_READ(sc, WMREG_RXERRC)
   3010 	    + CSR_READ(sc, WMREG_SEC)
   3011 	    + CSR_READ(sc, WMREG_CEXTERR)
   3012 	    + CSR_READ(sc, WMREG_RLEC);
   3013 	/*
    3014 	 * WMREG_RNBC is incremented when no buffers are available in host
    3015 	 * memory. It does not count dropped packets, because the Ethernet
    3016 	 * controller can still receive packets in that case as long as
    3017 	 * there is space in the PHY's FIFO.
    3018 	 *
    3019 	 * To track WMREG_RNBC, use a dedicated EVCNT rather than
    3020 	 * if_iqdrops.
   3021 	 */
   3022 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3023 
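         	/* Update link state via the MII PHY, SerDes or TBI code. */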
   3024 	if (sc->sc_flags & WM_F_HAS_MII)
   3025 		mii_tick(&sc->sc_mii);
   3026 	else if ((sc->sc_type >= WM_T_82575)
   3027 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3028 		wm_serdes_tick(sc);
   3029 	else
   3030 		wm_tbi_tick(sc);
   3031 
   3032 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3033 out:
   3034 	WM_CORE_UNLOCK(sc);
   3035 #ifndef WM_MPSAFE
   3036 	splx(s);
   3037 #endif
   3038 }
   3039 
   3040 static int
   3041 wm_ifflags_cb(struct ethercom *ec)
   3042 {
   3043 	struct ifnet *ifp = &ec->ec_if;
   3044 	struct wm_softc *sc = ifp->if_softc;
   3045 	int rc = 0;
   3046 
   3047 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3048 		device_xname(sc->sc_dev), __func__));
   3049 
   3050 	WM_CORE_LOCK(sc);
   3051 
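         	/* The XOR yields the set of flags that changed since last time. */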
   3052 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3053 	sc->sc_if_flags = ifp->if_flags;
   3054 
   3055 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3056 		rc = ENETRESET;
   3057 		goto out;
   3058 	}
   3059 
   3060 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3061 		wm_set_filter(sc);
   3062 
   3063 	wm_set_vlan(sc);
   3064 
   3065 out:
   3066 	WM_CORE_UNLOCK(sc);
   3067 
   3068 	return rc;
   3069 }
   3070 
   3071 /*
   3072  * wm_ioctl:		[ifnet interface function]
   3073  *
   3074  *	Handle control requests from the operator.
   3075  */
   3076 static int
   3077 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3078 {
   3079 	struct wm_softc *sc = ifp->if_softc;
   3080 	struct ifreq *ifr = (struct ifreq *) data;
   3081 	struct ifaddr *ifa = (struct ifaddr *)data;
   3082 	struct sockaddr_dl *sdl;
   3083 	int s, error;
   3084 
   3085 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3086 		device_xname(sc->sc_dev), __func__));
   3087 
   3088 #ifndef WM_MPSAFE
   3089 	s = splnet();
   3090 #endif
   3091 	switch (cmd) {
   3092 	case SIOCSIFMEDIA:
   3093 	case SIOCGIFMEDIA:
   3094 		WM_CORE_LOCK(sc);
   3095 		/* Flow control requires full-duplex mode. */
   3096 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3097 		    (ifr->ifr_media & IFM_FDX) == 0)
   3098 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3099 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3100 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3101 				/* We can do both TXPAUSE and RXPAUSE. */
   3102 				ifr->ifr_media |=
   3103 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3104 			}
   3105 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3106 		}
   3107 		WM_CORE_UNLOCK(sc);
   3108 #ifdef WM_MPSAFE
   3109 		s = splnet();
   3110 #endif
   3111 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3112 #ifdef WM_MPSAFE
   3113 		splx(s);
   3114 #endif
   3115 		break;
   3116 	case SIOCINITIFADDR:
   3117 		WM_CORE_LOCK(sc);
   3118 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3119 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3120 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3121 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3122 			/* unicast address is first multicast entry */
   3123 			wm_set_filter(sc);
   3124 			error = 0;
   3125 			WM_CORE_UNLOCK(sc);
   3126 			break;
   3127 		}
   3128 		WM_CORE_UNLOCK(sc);
   3129 		/*FALLTHROUGH*/
   3130 	default:
   3131 #ifdef WM_MPSAFE
   3132 		s = splnet();
   3133 #endif
   3134 		/* It may call wm_start, so unlock here */
   3135 		error = ether_ioctl(ifp, cmd, data);
   3136 #ifdef WM_MPSAFE
   3137 		splx(s);
   3138 #endif
   3139 		if (error != ENETRESET)
   3140 			break;
   3141 
   3142 		error = 0;
   3143 
   3144 		if (cmd == SIOCSIFCAP) {
   3145 			error = (*ifp->if_init)(ifp);
   3146 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3147 			;
   3148 		else if (ifp->if_flags & IFF_RUNNING) {
   3149 			/*
   3150 			 * Multicast list has changed; set the hardware filter
   3151 			 * accordingly.
   3152 			 */
   3153 			WM_CORE_LOCK(sc);
   3154 			wm_set_filter(sc);
   3155 			WM_CORE_UNLOCK(sc);
   3156 		}
   3157 		break;
   3158 	}
   3159 
   3160 #ifndef WM_MPSAFE
   3161 	splx(s);
   3162 #endif
   3163 	return error;
   3164 }
   3165 
   3166 /* MAC address related */
   3167 
   3168 /*
    3169  * Get the offset of the MAC address and return it.
    3170  * If an error occurs, use offset 0.
   3171  */
   3172 static uint16_t
   3173 wm_check_alt_mac_addr(struct wm_softc *sc)
   3174 {
   3175 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3176 	uint16_t offset = NVM_OFF_MACADDR;
   3177 
   3178 	/* Try to read alternative MAC address pointer */
   3179 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3180 		return 0;
   3181 
    3182 	/* Check whether the pointer is valid. */
   3183 	if ((offset == 0x0000) || (offset == 0xffff))
   3184 		return 0;
   3185 
   3186 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3187 	/*
    3188 	 * Check whether the alternative MAC address is valid.
    3189 	 * Some cards have a non-0xffff pointer but don't actually use
    3190 	 * an alternative MAC address.
    3191 	 *
    3192 	 * A valid unicast address must have the multicast bit (the LSB
    3193 	 * of the first byte) clear, so check that bit.
   3193 	 */
   3194 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3195 		if (((myea[0] & 0xff) & 0x01) == 0)
   3196 			return offset; /* Found */
   3197 
   3198 	/* Not found */
   3199 	return 0;
   3200 }
   3201 
   3202 static int
   3203 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3204 {
   3205 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3206 	uint16_t offset = NVM_OFF_MACADDR;
   3207 	int do_invert = 0;
   3208 
   3209 	switch (sc->sc_type) {
   3210 	case WM_T_82580:
   3211 	case WM_T_I350:
   3212 	case WM_T_I354:
   3213 		/* EEPROM Top Level Partitioning */
   3214 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3215 		break;
   3216 	case WM_T_82571:
   3217 	case WM_T_82575:
   3218 	case WM_T_82576:
   3219 	case WM_T_80003:
   3220 	case WM_T_I210:
   3221 	case WM_T_I211:
   3222 		offset = wm_check_alt_mac_addr(sc);
   3223 		if (offset == 0)
   3224 			if ((sc->sc_funcid & 0x01) == 1)
   3225 				do_invert = 1;
   3226 		break;
   3227 	default:
   3228 		if ((sc->sc_funcid & 0x01) == 1)
   3229 			do_invert = 1;
   3230 		break;
   3231 	}
   3232 
   3233 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3234 		goto bad;
   3235 
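         	/* The NVM stores the MAC address as three little-endian words. */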
   3236 	enaddr[0] = myea[0] & 0xff;
   3237 	enaddr[1] = myea[0] >> 8;
   3238 	enaddr[2] = myea[1] & 0xff;
   3239 	enaddr[3] = myea[1] >> 8;
   3240 	enaddr[4] = myea[2] & 0xff;
   3241 	enaddr[5] = myea[2] >> 8;
   3242 
   3243 	/*
   3244 	 * Toggle the LSB of the MAC address on the second port
   3245 	 * of some dual port cards.
   3246 	 */
   3247 	if (do_invert != 0)
   3248 		enaddr[5] ^= 1;
   3249 
   3250 	return 0;
   3251 
   3252  bad:
   3253 	return -1;
   3254 }
   3255 
   3256 /*
   3257  * wm_set_ral:
   3258  *
    3259  *	Set an entry in the receive address list.
   3260  */
   3261 static void
   3262 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3263 {
   3264 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3265 	uint32_t wlock_mac;
   3266 	int rv;
   3267 
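         	/*
         	 * RAL holds the low 32 bits of the MAC address and RAH the
         	 * high 16 bits, together with the Address Valid bit.
         	 */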
   3268 	if (enaddr != NULL) {
   3269 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3270 		    (enaddr[3] << 24);
   3271 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3272 		ral_hi |= RAL_AV;
   3273 	} else {
   3274 		ral_lo = 0;
   3275 		ral_hi = 0;
   3276 	}
   3277 
   3278 	switch (sc->sc_type) {
   3279 	case WM_T_82542_2_0:
   3280 	case WM_T_82542_2_1:
   3281 	case WM_T_82543:
   3282 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3283 		CSR_WRITE_FLUSH(sc);
   3284 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3285 		CSR_WRITE_FLUSH(sc);
   3286 		break;
   3287 	case WM_T_PCH2:
   3288 	case WM_T_PCH_LPT:
   3289 	case WM_T_PCH_SPT:
   3290 		if (idx == 0) {
   3291 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3292 			CSR_WRITE_FLUSH(sc);
   3293 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3294 			CSR_WRITE_FLUSH(sc);
   3295 			return;
   3296 		}
   3297 		if (sc->sc_type != WM_T_PCH2) {
   3298 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3299 			    FWSM_WLOCK_MAC);
   3300 			addrl = WMREG_SHRAL(idx - 1);
   3301 			addrh = WMREG_SHRAH(idx - 1);
   3302 		} else {
   3303 			wlock_mac = 0;
   3304 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3305 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3306 		}
   3307 
   3308 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3309 			rv = wm_get_swflag_ich8lan(sc);
   3310 			if (rv != 0)
   3311 				return;
   3312 			CSR_WRITE(sc, addrl, ral_lo);
   3313 			CSR_WRITE_FLUSH(sc);
   3314 			CSR_WRITE(sc, addrh, ral_hi);
   3315 			CSR_WRITE_FLUSH(sc);
   3316 			wm_put_swflag_ich8lan(sc);
   3317 		}
   3318 
   3319 		break;
   3320 	default:
   3321 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3322 		CSR_WRITE_FLUSH(sc);
   3323 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3324 		CSR_WRITE_FLUSH(sc);
   3325 		break;
   3326 	}
   3327 }
   3328 
   3329 /*
   3330  * wm_mchash:
   3331  *
    3332  *	Compute the hash of the multicast address for the 4096-bit
    3333  *	multicast filter (1024-bit on ICH/PCH variants).
   3334  */
   3335 static uint32_t
   3336 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3337 {
   3338 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3339 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3340 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3341 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3342 	uint32_t hash;
   3343 
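         	/*
         	 * The hash is built from bits of the last two bytes of the
         	 * MAC address; sc_mchash_type selects the bit window.  The
         	 * ICH/PCH variants use a 10-bit hash (1024-bit table), all
         	 * others a 12-bit hash (4096-bit table).
         	 */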
   3344 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3345 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3346 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3347 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3348 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3349 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3350 		return (hash & 0x3ff);
   3351 	}
   3352 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3353 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3354 
   3355 	return (hash & 0xfff);
   3356 }
   3357 
   3358 /*
   3359  * wm_set_filter:
   3360  *
   3361  *	Set up the receive filter.
   3362  */
   3363 static void
   3364 wm_set_filter(struct wm_softc *sc)
   3365 {
   3366 	struct ethercom *ec = &sc->sc_ethercom;
   3367 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3368 	struct ether_multi *enm;
   3369 	struct ether_multistep step;
   3370 	bus_addr_t mta_reg;
   3371 	uint32_t hash, reg, bit;
   3372 	int i, size, ralmax;
   3373 
   3374 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3375 		device_xname(sc->sc_dev), __func__));
   3376 
   3377 	if (sc->sc_type >= WM_T_82544)
   3378 		mta_reg = WMREG_CORDOVA_MTA;
   3379 	else
   3380 		mta_reg = WMREG_MTA;
   3381 
   3382 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3383 
   3384 	if (ifp->if_flags & IFF_BROADCAST)
   3385 		sc->sc_rctl |= RCTL_BAM;
   3386 	if (ifp->if_flags & IFF_PROMISC) {
   3387 		sc->sc_rctl |= RCTL_UPE;
   3388 		goto allmulti;
   3389 	}
   3390 
   3391 	/*
   3392 	 * Set the station address in the first RAL slot, and
   3393 	 * clear the remaining slots.
   3394 	 */
   3395 	if (sc->sc_type == WM_T_ICH8)
    3396 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3397 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3398 	    || (sc->sc_type == WM_T_PCH))
   3399 		size = WM_RAL_TABSIZE_ICH8;
   3400 	else if (sc->sc_type == WM_T_PCH2)
   3401 		size = WM_RAL_TABSIZE_PCH2;
    3402 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3403 		size = WM_RAL_TABSIZE_PCH_LPT;
   3404 	else if (sc->sc_type == WM_T_82575)
   3405 		size = WM_RAL_TABSIZE_82575;
   3406 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3407 		size = WM_RAL_TABSIZE_82576;
   3408 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3409 		size = WM_RAL_TABSIZE_I350;
   3410 	else
   3411 		size = WM_RAL_TABSIZE;
   3412 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3413 
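         	/*
         	 * On PCH_LPT and PCH_SPT, the FWSM WLOCK_MAC field limits
         	 * how many receive address registers software may write.
         	 */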
   3414 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3415 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3416 		switch (i) {
   3417 		case 0:
   3418 			/* We can use all entries */
   3419 			ralmax = size;
   3420 			break;
   3421 		case 1:
   3422 			/* Only RAR[0] */
   3423 			ralmax = 1;
   3424 			break;
   3425 		default:
   3426 			/* available SHRA + RAR[0] */
   3427 			ralmax = i + 1;
   3428 		}
   3429 	} else
   3430 		ralmax = size;
   3431 	for (i = 1; i < size; i++) {
   3432 		if (i < ralmax)
   3433 			wm_set_ral(sc, NULL, i);
   3434 	}
   3435 
   3436 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3437 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3438 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3439 	    || (sc->sc_type == WM_T_PCH_SPT))
   3440 		size = WM_ICH8_MC_TABSIZE;
   3441 	else
   3442 		size = WM_MC_TABSIZE;
   3443 	/* Clear out the multicast table. */
   3444 	for (i = 0; i < size; i++) {
   3445 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3446 		CSR_WRITE_FLUSH(sc);
   3447 	}
   3448 
   3449 	ETHER_LOCK(ec);
   3450 	ETHER_FIRST_MULTI(step, ec, enm);
   3451 	while (enm != NULL) {
   3452 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3453 			ETHER_UNLOCK(ec);
   3454 			/*
   3455 			 * We must listen to a range of multicast addresses.
   3456 			 * For now, just accept all multicasts, rather than
   3457 			 * trying to set only those filter bits needed to match
   3458 			 * the range.  (At this time, the only use of address
   3459 			 * ranges is for IP multicast routing, for which the
   3460 			 * range is big enough to require all bits set.)
   3461 			 */
   3462 			goto allmulti;
   3463 		}
   3464 
   3465 		hash = wm_mchash(sc, enm->enm_addrlo);
   3466 
   3467 		reg = (hash >> 5);
   3468 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3469 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3470 		    || (sc->sc_type == WM_T_PCH2)
   3471 		    || (sc->sc_type == WM_T_PCH_LPT)
   3472 		    || (sc->sc_type == WM_T_PCH_SPT))
   3473 			reg &= 0x1f;
   3474 		else
   3475 			reg &= 0x7f;
   3476 		bit = hash & 0x1f;
   3477 
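         		/*
         		 * The upper hash bits select one 32-bit MTA register;
         		 * the low five bits select the bit within it.
         		 */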
   3478 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3479 		hash |= 1U << bit;
   3480 
   3481 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3482 			/*
   3483 			 * 82544 Errata 9: Certain register cannot be written
   3484 			 * with particular alignments in PCI-X bus operation
   3485 			 * (FCAH, MTA and VFTA).
   3486 			 */
   3487 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3488 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3489 			CSR_WRITE_FLUSH(sc);
   3490 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3491 			CSR_WRITE_FLUSH(sc);
   3492 		} else {
   3493 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3494 			CSR_WRITE_FLUSH(sc);
   3495 		}
   3496 
   3497 		ETHER_NEXT_MULTI(step, enm);
   3498 	}
   3499 	ETHER_UNLOCK(ec);
   3500 
   3501 	ifp->if_flags &= ~IFF_ALLMULTI;
   3502 	goto setit;
   3503 
   3504  allmulti:
   3505 	ifp->if_flags |= IFF_ALLMULTI;
   3506 	sc->sc_rctl |= RCTL_MPE;
   3507 
   3508  setit:
   3509 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3510 }
   3511 
   3512 /* Reset and init related */
   3513 
   3514 static void
   3515 wm_set_vlan(struct wm_softc *sc)
   3516 {
   3517 
   3518 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3519 		device_xname(sc->sc_dev), __func__));
   3520 
   3521 	/* Deal with VLAN enables. */
   3522 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3523 		sc->sc_ctrl |= CTRL_VME;
   3524 	else
   3525 		sc->sc_ctrl &= ~CTRL_VME;
   3526 
   3527 	/* Write the control registers. */
   3528 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3529 }
   3530 
   3531 static void
   3532 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3533 {
   3534 	uint32_t gcr;
   3535 	pcireg_t ctrl2;
   3536 
   3537 	gcr = CSR_READ(sc, WMREG_GCR);
   3538 
   3539 	/* Only take action if timeout value is defaulted to 0 */
   3540 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3541 		goto out;
   3542 
   3543 	if ((gcr & GCR_CAP_VER2) == 0) {
   3544 		gcr |= GCR_CMPL_TMOUT_10MS;
   3545 		goto out;
   3546 	}
   3547 
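         	/*
         	 * On capability version 2 devices, set a 16ms completion
         	 * timeout via the PCIe Device Control 2 register instead.
         	 */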
   3548 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3549 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3550 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3551 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3552 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3553 
   3554 out:
   3555 	/* Disable completion timeout resend */
   3556 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3557 
   3558 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3559 }
   3560 
   3561 void
   3562 wm_get_auto_rd_done(struct wm_softc *sc)
   3563 {
   3564 	int i;
   3565 
    3566 	/* Wait for EEPROM to reload */
   3567 	switch (sc->sc_type) {
   3568 	case WM_T_82571:
   3569 	case WM_T_82572:
   3570 	case WM_T_82573:
   3571 	case WM_T_82574:
   3572 	case WM_T_82583:
   3573 	case WM_T_82575:
   3574 	case WM_T_82576:
   3575 	case WM_T_82580:
   3576 	case WM_T_I350:
   3577 	case WM_T_I354:
   3578 	case WM_T_I210:
   3579 	case WM_T_I211:
   3580 	case WM_T_80003:
   3581 	case WM_T_ICH8:
   3582 	case WM_T_ICH9:
   3583 		for (i = 0; i < 10; i++) {
   3584 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3585 				break;
   3586 			delay(1000);
   3587 		}
   3588 		if (i == 10) {
   3589 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3590 			    "complete\n", device_xname(sc->sc_dev));
   3591 		}
   3592 		break;
   3593 	default:
   3594 		break;
   3595 	}
   3596 }
   3597 
   3598 void
   3599 wm_lan_init_done(struct wm_softc *sc)
   3600 {
   3601 	uint32_t reg = 0;
   3602 	int i;
   3603 
   3604 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3605 		device_xname(sc->sc_dev), __func__));
   3606 
   3607 	/* Wait for eeprom to reload */
   3608 	switch (sc->sc_type) {
   3609 	case WM_T_ICH10:
   3610 	case WM_T_PCH:
   3611 	case WM_T_PCH2:
   3612 	case WM_T_PCH_LPT:
   3613 	case WM_T_PCH_SPT:
   3614 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3615 			reg = CSR_READ(sc, WMREG_STATUS);
   3616 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3617 				break;
   3618 			delay(100);
   3619 		}
   3620 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3621 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3622 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3623 		}
   3624 		break;
   3625 	default:
   3626 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3627 		    __func__);
   3628 		break;
   3629 	}
   3630 
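         	/* Acknowledge completion by clearing the LAN init done bit. */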
   3631 	reg &= ~STATUS_LAN_INIT_DONE;
   3632 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3633 }
   3634 
   3635 void
   3636 wm_get_cfg_done(struct wm_softc *sc)
   3637 {
   3638 	int mask;
   3639 	uint32_t reg;
   3640 	int i;
   3641 
   3642 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3643 		device_xname(sc->sc_dev), __func__));
   3644 
   3645 	/* Wait for eeprom to reload */
   3646 	switch (sc->sc_type) {
   3647 	case WM_T_82542_2_0:
   3648 	case WM_T_82542_2_1:
   3649 		/* null */
   3650 		break;
   3651 	case WM_T_82543:
   3652 	case WM_T_82544:
   3653 	case WM_T_82540:
   3654 	case WM_T_82545:
   3655 	case WM_T_82545_3:
   3656 	case WM_T_82546:
   3657 	case WM_T_82546_3:
   3658 	case WM_T_82541:
   3659 	case WM_T_82541_2:
   3660 	case WM_T_82547:
   3661 	case WM_T_82547_2:
   3662 	case WM_T_82573:
   3663 	case WM_T_82574:
   3664 	case WM_T_82583:
   3665 		/* generic */
   3666 		delay(10*1000);
   3667 		break;
   3668 	case WM_T_80003:
   3669 	case WM_T_82571:
   3670 	case WM_T_82572:
   3671 	case WM_T_82575:
   3672 	case WM_T_82576:
   3673 	case WM_T_82580:
   3674 	case WM_T_I350:
   3675 	case WM_T_I354:
   3676 	case WM_T_I210:
   3677 	case WM_T_I211:
   3678 		if (sc->sc_type == WM_T_82571) {
   3679 			/* Only 82571 shares port 0 */
   3680 			mask = EEMNGCTL_CFGDONE_0;
   3681 		} else
   3682 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3683 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3684 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3685 				break;
   3686 			delay(1000);
   3687 		}
   3688 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3689 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3690 				device_xname(sc->sc_dev), __func__));
   3691 		}
   3692 		break;
   3693 	case WM_T_ICH8:
   3694 	case WM_T_ICH9:
   3695 	case WM_T_ICH10:
   3696 	case WM_T_PCH:
   3697 	case WM_T_PCH2:
   3698 	case WM_T_PCH_LPT:
   3699 	case WM_T_PCH_SPT:
   3700 		delay(10*1000);
   3701 		if (sc->sc_type >= WM_T_ICH10)
   3702 			wm_lan_init_done(sc);
   3703 		else
   3704 			wm_get_auto_rd_done(sc);
   3705 
   3706 		reg = CSR_READ(sc, WMREG_STATUS);
   3707 		if ((reg & STATUS_PHYRA) != 0)
   3708 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3709 		break;
   3710 	default:
   3711 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3712 		    __func__);
   3713 		break;
   3714 	}
   3715 }
   3716 
   3717 void
   3718 wm_phy_post_reset(struct wm_softc *sc)
   3719 {
   3720 	uint32_t reg;
   3721 
   3722 	/* This function is only for ICH8 and newer. */
   3723 	if (sc->sc_type < WM_T_ICH8)
   3724 		return;
   3725 
   3726 	if (wm_phy_resetisblocked(sc)) {
   3727 		/* XXX */
   3728 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3729 		return;
   3730 	}
   3731 
   3732 	/* Allow time for h/w to get to quiescent state after reset */
   3733 	delay(10*1000);
   3734 
   3735 	/* Perform any necessary post-reset workarounds */
   3736 	if (sc->sc_type == WM_T_PCH)
   3737 		wm_hv_phy_workaround_ich8lan(sc);
   3738 	if (sc->sc_type == WM_T_PCH2)
   3739 		wm_lv_phy_workaround_ich8lan(sc);
   3740 
   3741 	/* Clear the host wakeup bit after lcd reset */
   3742 	if (sc->sc_type >= WM_T_PCH) {
   3743 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3744 		    BM_PORT_GEN_CFG);
   3745 		reg &= ~BM_WUC_HOST_WU_BIT;
   3746 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3747 		    BM_PORT_GEN_CFG, reg);
   3748 	}
   3749 
   3750 	/* Configure the LCD with the extended configuration region in NVM */
   3751 	wm_init_lcd_from_nvm(sc);
   3752 
   3753 	/* Configure the LCD with the OEM bits in NVM */
   3754 }
   3755 
   3756 /* Only for PCH and newer */
   3757 static void
   3758 wm_write_smbus_addr(struct wm_softc *sc)
   3759 {
   3760 	uint32_t strap, freq;
   3761 	uint32_t phy_data;
   3762 
   3763 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3764 		device_xname(sc->sc_dev), __func__));
   3765 
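         	/* The STRAP register holds the SMBus address and frequency straps. */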
   3766 	strap = CSR_READ(sc, WMREG_STRAP);
   3767 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3768 
   3769 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3770 
   3771 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3772 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3773 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3774 
   3775 	if (sc->sc_phytype == WMPHY_I217) {
   3776 		/* Restore SMBus frequency */
    3777 		if (freq--) {
   3778 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3779 			    | HV_SMB_ADDR_FREQ_HIGH);
   3780 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3781 			    HV_SMB_ADDR_FREQ_LOW);
   3782 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3783 			    HV_SMB_ADDR_FREQ_HIGH);
   3784 		} else {
   3785 			DPRINTF(WM_DEBUG_INIT,
   3786 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3787 				device_xname(sc->sc_dev), __func__));
   3788 		}
   3789 	}
   3790 
   3791 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3792 }
   3793 
   3794 void
   3795 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3796 {
   3797 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3798 	uint16_t phy_page = 0;
   3799 
   3800 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3801 		device_xname(sc->sc_dev), __func__));
   3802 
   3803 	switch (sc->sc_type) {
   3804 	case WM_T_ICH8:
   3805 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3806 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3807 			return;
   3808 
   3809 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3810 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3811 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3812 			break;
   3813 		}
   3814 		/* FALLTHROUGH */
   3815 	case WM_T_PCH:
   3816 	case WM_T_PCH2:
   3817 	case WM_T_PCH_LPT:
   3818 	case WM_T_PCH_SPT:
   3819 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3820 		break;
   3821 	default:
   3822 		return;
   3823 	}
   3824 
   3825 	sc->phy.acquire(sc);
   3826 
   3827 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3828 	if ((reg & sw_cfg_mask) == 0)
   3829 		goto release;
   3830 
   3831 	/*
   3832 	 * Make sure HW does not configure LCD from PHY extended configuration
   3833 	 * before SW configuration
   3834 	 */
   3835 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3836 	if ((sc->sc_type < WM_T_PCH2)
   3837 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3838 		goto release;
   3839 
   3840 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3841 		device_xname(sc->sc_dev), __func__));
   3842 	/* word_addr is in DWORD */
   3843 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3844 
   3845 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3846 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3847 
   3848 	if (((sc->sc_type == WM_T_PCH)
   3849 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3850 	    || (sc->sc_type > WM_T_PCH)) {
   3851 		/*
   3852 		 * HW configures the SMBus address and LEDs when the OEM and
   3853 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3854 		 * are cleared, SW will configure them instead.
   3855 		 */
   3856 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3857 			device_xname(sc->sc_dev), __func__));
   3858 		wm_write_smbus_addr(sc);
   3859 
   3860 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3861 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3862 	}
   3863 
   3864 	/* Configure LCD from extended configuration region. */
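         	/* Each entry in the region is a (data, address) word pair. */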
   3865 	for (i = 0; i < cnf_size; i++) {
   3866 		uint16_t reg_data, reg_addr;
   3867 
   3868 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3869 			goto release;
   3870 
    3871 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3872 			goto release;
   3873 
   3874 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3875 			phy_page = reg_data;
   3876 
   3877 		reg_addr &= IGPHY_MAXREGADDR;
   3878 		reg_addr |= phy_page;
   3879 
   3880 		sc->phy.release(sc); /* XXX */
   3881 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3882 		sc->phy.acquire(sc); /* XXX */
   3883 	}
   3884 
   3885 release:
   3886 	sc->phy.release(sc);
   3887 	return;
   3888 }
   3889 
   3890 
   3891 /* Init hardware bits */
   3892 void
   3893 wm_initialize_hardware_bits(struct wm_softc *sc)
   3894 {
   3895 	uint32_t tarc0, tarc1, reg;
   3896 
   3897 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3898 		device_xname(sc->sc_dev), __func__));
   3899 
   3900 	/* For 82571 variant, 80003 and ICHs */
   3901 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3902 	    || (sc->sc_type >= WM_T_80003)) {
   3903 
   3904 		/* Transmit Descriptor Control 0 */
   3905 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3906 		reg |= TXDCTL_COUNT_DESC;
   3907 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3908 
   3909 		/* Transmit Descriptor Control 1 */
   3910 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3911 		reg |= TXDCTL_COUNT_DESC;
   3912 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3913 
   3914 		/* TARC0 */
   3915 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3916 		switch (sc->sc_type) {
   3917 		case WM_T_82571:
   3918 		case WM_T_82572:
   3919 		case WM_T_82573:
   3920 		case WM_T_82574:
   3921 		case WM_T_82583:
   3922 		case WM_T_80003:
   3923 			/* Clear bits 30..27 */
   3924 			tarc0 &= ~__BITS(30, 27);
   3925 			break;
   3926 		default:
   3927 			break;
   3928 		}
   3929 
   3930 		switch (sc->sc_type) {
   3931 		case WM_T_82571:
   3932 		case WM_T_82572:
   3933 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3934 
   3935 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3936 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3937 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3938 			/* 8257[12] Errata No.7 */
    3939 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3940 
   3941 			/* TARC1 bit 28 */
   3942 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3943 				tarc1 &= ~__BIT(28);
   3944 			else
   3945 				tarc1 |= __BIT(28);
   3946 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3947 
   3948 			/*
   3949 			 * 8257[12] Errata No.13
    3950 			 * Disable Dynamic Clock Gating.
   3951 			 */
   3952 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3953 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3954 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3955 			break;
   3956 		case WM_T_82573:
   3957 		case WM_T_82574:
   3958 		case WM_T_82583:
   3959 			if ((sc->sc_type == WM_T_82574)
   3960 			    || (sc->sc_type == WM_T_82583))
   3961 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3962 
   3963 			/* Extended Device Control */
   3964 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3965 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3966 			reg |= __BIT(22);	/* Set bit 22 */
   3967 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3968 
   3969 			/* Device Control */
   3970 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3971 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3972 
   3973 			/* PCIe Control Register */
   3974 			/*
   3975 			 * 82573 Errata (unknown).
   3976 			 *
   3977 			 * 82574 Errata 25 and 82583 Errata 12
   3978 			 * "Dropped Rx Packets":
    3979 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3980 			 */
   3981 			reg = CSR_READ(sc, WMREG_GCR);
   3982 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3983 			CSR_WRITE(sc, WMREG_GCR, reg);
   3984 
   3985 			if ((sc->sc_type == WM_T_82574)
   3986 			    || (sc->sc_type == WM_T_82583)) {
   3987 				/*
   3988 				 * Document says this bit must be set for
   3989 				 * proper operation.
   3990 				 */
   3991 				reg = CSR_READ(sc, WMREG_GCR);
   3992 				reg |= __BIT(22);
   3993 				CSR_WRITE(sc, WMREG_GCR, reg);
   3994 
   3995 				/*
    3996 				 * Apply a workaround for a hardware erratum
    3997 				 * documented in the errata docs. It fixes an
    3998 				 * issue where error-prone or unreliable PCIe
    3999 				 * completions may occur, particularly with
    4000 				 * ASPM enabled. Without the fix, the issue
    4001 				 * can cause Tx timeouts.
   4002 				 */
   4003 				reg = CSR_READ(sc, WMREG_GCR2);
   4004 				reg |= __BIT(0);
   4005 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4006 			}
   4007 			break;
   4008 		case WM_T_80003:
   4009 			/* TARC0 */
   4010 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4011 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4012 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4013 
   4014 			/* TARC1 bit 28 */
   4015 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4016 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4017 				tarc1 &= ~__BIT(28);
   4018 			else
   4019 				tarc1 |= __BIT(28);
   4020 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4021 			break;
   4022 		case WM_T_ICH8:
   4023 		case WM_T_ICH9:
   4024 		case WM_T_ICH10:
   4025 		case WM_T_PCH:
   4026 		case WM_T_PCH2:
   4027 		case WM_T_PCH_LPT:
   4028 		case WM_T_PCH_SPT:
   4029 			/* TARC0 */
   4030 			if (sc->sc_type == WM_T_ICH8) {
   4031 				/* Set TARC0 bits 29 and 28 */
   4032 				tarc0 |= __BITS(29, 28);
   4033 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4034 				tarc0 |= __BIT(29);
   4035 				/*
    4036 				 * Drop bit 28 (from Linux). See the
    4037 				 * I218/I219 spec update, "5. Buffer Overrun
    4038 				 * While the I219 is Processing DMA
    4039 				 * Transactions".
   4040 				 */
   4041 				tarc0 &= ~__BIT(28);
   4042 			}
   4043 			/* Set TARC0 bits 23,24,26,27 */
   4044 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4045 
   4046 			/* CTRL_EXT */
   4047 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4048 			reg |= __BIT(22);	/* Set bit 22 */
   4049 			/*
   4050 			 * Enable PHY low-power state when MAC is at D3
   4051 			 * w/o WoL
   4052 			 */
   4053 			if (sc->sc_type >= WM_T_PCH)
   4054 				reg |= CTRL_EXT_PHYPDEN;
   4055 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4056 
   4057 			/* TARC1 */
   4058 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4059 			/* bit 28 */
   4060 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4061 				tarc1 &= ~__BIT(28);
   4062 			else
   4063 				tarc1 |= __BIT(28);
   4064 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4065 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4066 
   4067 			/* Device Status */
   4068 			if (sc->sc_type == WM_T_ICH8) {
   4069 				reg = CSR_READ(sc, WMREG_STATUS);
   4070 				reg &= ~__BIT(31);
   4071 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4072 
   4073 			}
   4074 
   4075 			/* IOSFPC */
   4076 			if (sc->sc_type == WM_T_PCH_SPT) {
   4077 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4078 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4079 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4080 			}
   4081 			/*
    4082 			 * To work around a descriptor data corruption issue
    4083 			 * seen with NFS v2 UDP traffic, just disable the NFS
    4084 			 * filtering capability.
   4085 			 */
   4086 			reg = CSR_READ(sc, WMREG_RFCTL);
   4087 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4088 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4089 			break;
   4090 		default:
   4091 			break;
   4092 		}
   4093 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4094 
   4095 		switch (sc->sc_type) {
   4096 		/*
   4097 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4098 		 * Avoid RSS Hash Value bug.
   4099 		 */
   4100 		case WM_T_82571:
   4101 		case WM_T_82572:
   4102 		case WM_T_82573:
   4103 		case WM_T_80003:
   4104 		case WM_T_ICH8:
   4105 			reg = CSR_READ(sc, WMREG_RFCTL);
    4106 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4107 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4108 			break;
   4109 		case WM_T_82574:
    4110 			/* Use extended Rx descriptors. */
   4111 			reg = CSR_READ(sc, WMREG_RFCTL);
   4112 			reg |= WMREG_RFCTL_EXSTEN;
   4113 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4114 			break;
   4115 		default:
   4116 			break;
   4117 		}
   4118 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4119 		/*
   4120 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4121 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4122 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4123 		 * Correctly by the Device"
   4124 		 *
   4125 		 * I354(C2000) Errata AVR53:
   4126 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4127 		 * Hang"
   4128 		 */
   4129 		reg = CSR_READ(sc, WMREG_RFCTL);
   4130 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4131 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4132 	}
   4133 }
   4134 
   4135 static uint32_t
   4136 wm_rxpbs_adjust_82580(uint32_t val)
   4137 {
   4138 	uint32_t rv = 0;
   4139 
   4140 	if (val < __arraycount(wm_82580_rxpbs_table))
   4141 		rv = wm_82580_rxpbs_table[val];
   4142 
   4143 	return rv;
   4144 }
   4145 
   4146 /*
   4147  * wm_reset_phy:
   4148  *
   4149  *	generic PHY reset function.
   4150  *	Same as e1000_phy_hw_reset_generic()
   4151  */
   4152 static void
   4153 wm_reset_phy(struct wm_softc *sc)
   4154 {
   4155 	uint32_t reg;
   4156 
   4157 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4158 		device_xname(sc->sc_dev), __func__));
   4159 	if (wm_phy_resetisblocked(sc))
   4160 		return;
   4161 
   4162 	sc->phy.acquire(sc);
   4163 
   4164 	reg = CSR_READ(sc, WMREG_CTRL);
   4165 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4166 	CSR_WRITE_FLUSH(sc);
   4167 
   4168 	delay(sc->phy.reset_delay_us);
   4169 
   4170 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4171 	CSR_WRITE_FLUSH(sc);
   4172 
   4173 	delay(150);
   4174 
   4175 	sc->phy.release(sc);
   4176 
   4177 	wm_get_cfg_done(sc);
   4178 	wm_phy_post_reset(sc);
   4179 }
   4180 
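         /*
          * wm_flush_desc_rings:
          *
          *	Flush the TX and RX descriptor rings before a reset when the
          *	hardware reports a pending flush request.  Apparently needed
          *	on I219 (PCH_SPT) hardware.
          */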
   4181 static void
   4182 wm_flush_desc_rings(struct wm_softc *sc)
   4183 {
   4184 	pcireg_t preg;
   4185 	uint32_t reg;
   4186 	struct wm_txqueue *txq;
   4187 	wiseman_txdesc_t *txd;
   4188 	int nexttx;
   4189 	uint32_t rctl;
   4190 
   4191 	/* First, disable MULR fix in FEXTNVM11 */
   4192 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4193 	reg |= FEXTNVM11_DIS_MULRFIX;
   4194 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4195 
   4196 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4197 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4198 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4199 		return;
   4200 
   4201 	/* TX */
   4202 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4203 	    device_xname(sc->sc_dev), preg, reg);
   4204 	reg = CSR_READ(sc, WMREG_TCTL);
   4205 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4206 
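         	/*
         	 * Queue a single dummy 512-byte descriptor and advance the
         	 * tail pointer so the hardware flushes any descriptors that
         	 * are still pending in the TX ring.
         	 */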
   4207 	txq = &sc->sc_queue[0].wmq_txq;
   4208 	nexttx = txq->txq_next;
   4209 	txd = &txq->txq_descs[nexttx];
   4210 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4211 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4212 	txd->wtx_fields.wtxu_status = 0;
   4213 	txd->wtx_fields.wtxu_options = 0;
   4214 	txd->wtx_fields.wtxu_vlan = 0;
   4215 
   4216 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4217 	    BUS_SPACE_BARRIER_WRITE);
   4218 
   4219 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4220 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4221 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4222 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4223 	delay(250);
   4224 
   4225 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4226 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4227 		return;
   4228 
   4229 	/* RX */
   4230 	printf("%s: Need RX flush (reg = %08x)\n",
   4231 	    device_xname(sc->sc_dev), preg);
   4232 	rctl = CSR_READ(sc, WMREG_RCTL);
   4233 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4234 	CSR_WRITE_FLUSH(sc);
   4235 	delay(150);
   4236 
   4237 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4238 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4239 	reg &= 0xffffc000;
   4240 	/*
    4241 	 * Update the thresholds: set the prefetch threshold to 31 and
    4242 	 * the host threshold to 1, and make sure the granularity is
    4243 	 * "descriptors", not "cache lines".
   4244 	 */
   4245 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4246 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4247 
   4248 	/*
    4249 	 * Momentarily enable the RX ring for the changes to take
    4250 	 * effect.
   4251 	 */
   4252 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4253 	CSR_WRITE_FLUSH(sc);
   4254 	delay(150);
   4255 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4256 }
   4257 
   4258 /*
   4259  * wm_reset:
   4260  *
   4261  *	Reset the i82542 chip.
   4262  */
   4263 static void
   4264 wm_reset(struct wm_softc *sc)
   4265 {
   4266 	int phy_reset = 0;
   4267 	int i, error = 0;
   4268 	uint32_t reg;
   4269 	uint16_t kmreg;
   4270 	int rv;
   4271 
   4272 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4273 		device_xname(sc->sc_dev), __func__));
   4274 	KASSERT(sc->sc_type != 0);
   4275 
   4276 	/*
   4277 	 * Allocate on-chip memory according to the MTU size.
   4278 	 * The Packet Buffer Allocation register must be written
   4279 	 * before the chip is reset.
   4280 	 */
   4281 	switch (sc->sc_type) {
   4282 	case WM_T_82547:
   4283 	case WM_T_82547_2:
   4284 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4285 		    PBA_22K : PBA_30K;
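         		/*
         		 * The TX FIFO gets whatever part of the 40KB packet
         		 * buffer is not assigned to RX.
         		 */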
   4286 		for (i = 0; i < sc->sc_nqueues; i++) {
   4287 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4288 			txq->txq_fifo_head = 0;
   4289 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4290 			txq->txq_fifo_size =
   4291 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4292 			txq->txq_fifo_stall = 0;
   4293 		}
   4294 		break;
   4295 	case WM_T_82571:
   4296 	case WM_T_82572:
    4297 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4298 	case WM_T_80003:
   4299 		sc->sc_pba = PBA_32K;
   4300 		break;
   4301 	case WM_T_82573:
   4302 		sc->sc_pba = PBA_12K;
   4303 		break;
   4304 	case WM_T_82574:
   4305 	case WM_T_82583:
   4306 		sc->sc_pba = PBA_20K;
   4307 		break;
   4308 	case WM_T_82576:
   4309 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4310 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4311 		break;
   4312 	case WM_T_82580:
   4313 	case WM_T_I350:
   4314 	case WM_T_I354:
   4315 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4316 		break;
   4317 	case WM_T_I210:
   4318 	case WM_T_I211:
   4319 		sc->sc_pba = PBA_34K;
   4320 		break;
   4321 	case WM_T_ICH8:
   4322 		/* Workaround for a bit corruption issue in FIFO memory */
   4323 		sc->sc_pba = PBA_8K;
   4324 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4325 		break;
   4326 	case WM_T_ICH9:
   4327 	case WM_T_ICH10:
   4328 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4329 		    PBA_14K : PBA_10K;
   4330 		break;
   4331 	case WM_T_PCH:
   4332 	case WM_T_PCH2:
   4333 	case WM_T_PCH_LPT:
   4334 	case WM_T_PCH_SPT:
   4335 		sc->sc_pba = PBA_26K;
   4336 		break;
   4337 	default:
   4338 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4339 		    PBA_40K : PBA_48K;
   4340 		break;
   4341 	}
   4342 	/*
    4343 	 * Only old or non-multiqueue devices have the PBA register.
   4344 	 * XXX Need special handling for 82575.
   4345 	 */
   4346 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4347 	    || (sc->sc_type == WM_T_82575))
   4348 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4349 
   4350 	/* Prevent the PCI-E bus from sticking */
   4351 	if (sc->sc_flags & WM_F_PCIE) {
   4352 		int timeout = 800;
   4353 
   4354 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4355 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4356 
   4357 		while (timeout--) {
   4358 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4359 			    == 0)
   4360 				break;
   4361 			delay(100);
   4362 		}
   4363 		if (timeout == 0)
   4364 			device_printf(sc->sc_dev,
   4365 			    "failed to disable busmastering\n");
   4366 	}
   4367 
   4368 	/* Set the completion timeout for interface */
   4369 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4370 	    || (sc->sc_type == WM_T_82580)
   4371 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4372 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4373 		wm_set_pcie_completion_timeout(sc);
   4374 
   4375 	/* Clear interrupt */
   4376 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4377 	if (wm_is_using_msix(sc)) {
   4378 		if (sc->sc_type != WM_T_82574) {
   4379 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4380 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4381 		} else {
   4382 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4383 		}
   4384 	}
   4385 
   4386 	/* Stop the transmit and receive processes. */
   4387 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4388 	sc->sc_rctl &= ~RCTL_EN;
   4389 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4390 	CSR_WRITE_FLUSH(sc);
   4391 
   4392 	/* XXX set_tbi_sbp_82543() */
   4393 
   4394 	delay(10*1000);
   4395 
   4396 	/* Must acquire the MDIO ownership before MAC reset */
   4397 	switch (sc->sc_type) {
   4398 	case WM_T_82573:
   4399 	case WM_T_82574:
   4400 	case WM_T_82583:
   4401 		error = wm_get_hw_semaphore_82573(sc);
   4402 		break;
   4403 	default:
   4404 		break;
   4405 	}
   4406 
   4407 	/*
   4408 	 * 82541 Errata 29? & 82547 Errata 28?
   4409 	 * See also the description about PHY_RST bit in CTRL register
   4410 	 * in 8254x_GBe_SDM.pdf.
   4411 	 */
   4412 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4413 		CSR_WRITE(sc, WMREG_CTRL,
   4414 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4415 		CSR_WRITE_FLUSH(sc);
   4416 		delay(5000);
   4417 	}
   4418 
   4419 	switch (sc->sc_type) {
   4420 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4421 	case WM_T_82541:
   4422 	case WM_T_82541_2:
   4423 	case WM_T_82547:
   4424 	case WM_T_82547_2:
   4425 		/*
   4426 		 * On some chipsets, a reset through a memory-mapped write
   4427 		 * cycle can cause the chip to reset before completing the
   4428 		 * write cycle.  This causes major headache that can be
   4429 		 * avoided by issuing the reset via indirect register writes
   4430 		 * through I/O space.
   4431 		 *
   4432 		 * So, if we successfully mapped the I/O BAR at attach time,
   4433 		 * use that.  Otherwise, try our luck with a memory-mapped
   4434 		 * reset.
   4435 		 */
   4436 		if (sc->sc_flags & WM_F_IOH_VALID)
   4437 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4438 		else
   4439 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4440 		break;
   4441 	case WM_T_82545_3:
   4442 	case WM_T_82546_3:
   4443 		/* Use the shadow control register on these chips. */
   4444 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4445 		break;
   4446 	case WM_T_80003:
   4447 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4448 		sc->phy.acquire(sc);
   4449 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4450 		sc->phy.release(sc);
   4451 		break;
   4452 	case WM_T_ICH8:
   4453 	case WM_T_ICH9:
   4454 	case WM_T_ICH10:
   4455 	case WM_T_PCH:
   4456 	case WM_T_PCH2:
   4457 	case WM_T_PCH_LPT:
   4458 	case WM_T_PCH_SPT:
   4459 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4460 		if (wm_phy_resetisblocked(sc) == false) {
   4461 			/*
   4462 			 * Gate automatic PHY configuration by hardware on
   4463 			 * non-managed 82579
   4464 			 */
   4465 			if ((sc->sc_type == WM_T_PCH2)
   4466 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4467 				== 0))
   4468 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4469 
   4470 			reg |= CTRL_PHY_RESET;
   4471 			phy_reset = 1;
   4472 		} else
   4473 			printf("XXX reset is blocked!!!\n");
   4474 		sc->phy.acquire(sc);
   4475 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4476 		/* Don't insert a completion barrier during reset */
   4477 		delay(20*1000);
   4478 		mutex_exit(sc->sc_ich_phymtx);
   4479 		break;
   4480 	case WM_T_82580:
   4481 	case WM_T_I350:
   4482 	case WM_T_I354:
   4483 	case WM_T_I210:
   4484 	case WM_T_I211:
   4485 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4486 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4487 			CSR_WRITE_FLUSH(sc);
   4488 		delay(5000);
   4489 		break;
   4490 	case WM_T_82542_2_0:
   4491 	case WM_T_82542_2_1:
   4492 	case WM_T_82543:
   4493 	case WM_T_82540:
   4494 	case WM_T_82545:
   4495 	case WM_T_82546:
   4496 	case WM_T_82571:
   4497 	case WM_T_82572:
   4498 	case WM_T_82573:
   4499 	case WM_T_82574:
   4500 	case WM_T_82575:
   4501 	case WM_T_82576:
   4502 	case WM_T_82583:
   4503 	default:
   4504 		/* Everything else can safely use the documented method. */
   4505 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4506 		break;
   4507 	}
   4508 
   4509 	/* Must release the MDIO ownership after MAC reset */
   4510 	switch (sc->sc_type) {
   4511 	case WM_T_82573:
   4512 	case WM_T_82574:
   4513 	case WM_T_82583:
   4514 		if (error == 0)
   4515 			wm_put_hw_semaphore_82573(sc);
   4516 		break;
   4517 	default:
   4518 		break;
   4519 	}
   4520 
   4521 	if (phy_reset != 0)
   4522 		wm_get_cfg_done(sc);
   4523 
   4524 	/* reload EEPROM */
   4525 	switch (sc->sc_type) {
   4526 	case WM_T_82542_2_0:
   4527 	case WM_T_82542_2_1:
   4528 	case WM_T_82543:
   4529 	case WM_T_82544:
   4530 		delay(10);
   4531 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4532 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4533 		CSR_WRITE_FLUSH(sc);
   4534 		delay(2000);
   4535 		break;
   4536 	case WM_T_82540:
   4537 	case WM_T_82545:
   4538 	case WM_T_82545_3:
   4539 	case WM_T_82546:
   4540 	case WM_T_82546_3:
   4541 		delay(5*1000);
   4542 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4543 		break;
   4544 	case WM_T_82541:
   4545 	case WM_T_82541_2:
   4546 	case WM_T_82547:
   4547 	case WM_T_82547_2:
   4548 		delay(20000);
   4549 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4550 		break;
   4551 	case WM_T_82571:
   4552 	case WM_T_82572:
   4553 	case WM_T_82573:
   4554 	case WM_T_82574:
   4555 	case WM_T_82583:
   4556 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4557 			delay(10);
   4558 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4559 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4560 			CSR_WRITE_FLUSH(sc);
   4561 		}
   4562 		/* check EECD_EE_AUTORD */
   4563 		wm_get_auto_rd_done(sc);
   4564 		/*
   4565 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4566 		 * is set.
   4567 		 */
   4568 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4569 		    || (sc->sc_type == WM_T_82583))
   4570 			delay(25*1000);
   4571 		break;
   4572 	case WM_T_82575:
   4573 	case WM_T_82576:
   4574 	case WM_T_82580:
   4575 	case WM_T_I350:
   4576 	case WM_T_I354:
   4577 	case WM_T_I210:
   4578 	case WM_T_I211:
   4579 	case WM_T_80003:
   4580 		/* check EECD_EE_AUTORD */
   4581 		wm_get_auto_rd_done(sc);
   4582 		break;
   4583 	case WM_T_ICH8:
   4584 	case WM_T_ICH9:
   4585 	case WM_T_ICH10:
   4586 	case WM_T_PCH:
   4587 	case WM_T_PCH2:
   4588 	case WM_T_PCH_LPT:
   4589 	case WM_T_PCH_SPT:
   4590 		break;
   4591 	default:
   4592 		panic("%s: unknown type\n", __func__);
   4593 	}
   4594 
   4595 	/* Check whether EEPROM is present or not */
   4596 	switch (sc->sc_type) {
   4597 	case WM_T_82575:
   4598 	case WM_T_82576:
   4599 	case WM_T_82580:
   4600 	case WM_T_I350:
   4601 	case WM_T_I354:
   4602 	case WM_T_ICH8:
   4603 	case WM_T_ICH9:
   4604 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4605 			/* Not found */
   4606 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4607 			if (sc->sc_type == WM_T_82575)
   4608 				wm_reset_init_script_82575(sc);
   4609 		}
   4610 		break;
   4611 	default:
   4612 		break;
   4613 	}
   4614 
   4615 	if (phy_reset != 0)
   4616 		wm_phy_post_reset(sc);
   4617 
   4618 	if ((sc->sc_type == WM_T_82580)
   4619 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4620 		/* clear global device reset status bit */
   4621 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4622 	}
   4623 
   4624 	/* Clear any pending interrupt events. */
   4625 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4626 	reg = CSR_READ(sc, WMREG_ICR);
   4627 	if (wm_is_using_msix(sc)) {
   4628 		if (sc->sc_type != WM_T_82574) {
   4629 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4630 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4631 		} else
   4632 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4633 	}
   4634 
   4635 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4636 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4637 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4638 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4639 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4640 		reg |= KABGTXD_BGSQLBIAS;
   4641 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4642 	}
   4643 
   4644 	/* reload sc_ctrl */
   4645 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4646 
   4647 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4648 		wm_set_eee_i350(sc);
   4649 
   4650 	/*
   4651 	 * For PCH, this write will make sure that any noise will be detected
   4652 	 * as a CRC error and be dropped rather than show up as a bad packet
    4653 	 * to the DMA engine.
   4654 	 */
   4655 	if (sc->sc_type == WM_T_PCH)
   4656 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4657 
   4658 	if (sc->sc_type >= WM_T_82544)
   4659 		CSR_WRITE(sc, WMREG_WUC, 0);
   4660 
   4661 	wm_reset_mdicnfg_82580(sc);
   4662 
   4663 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4664 		wm_pll_workaround_i210(sc);
   4665 
   4666 	if (sc->sc_type == WM_T_80003) {
   4667 		/* default to TRUE to enable the MDIC W/A */
   4668 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4669 
   4670 		rv = wm_kmrn_readreg(sc,
   4671 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4672 		if (rv == 0) {
   4673 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4674 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4675 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4676 			else
   4677 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4678 		}
   4679 	}
   4680 }
   4681 
   4682 /*
   4683  * wm_add_rxbuf:
   4684  *
    4685 	 *	Add a receive buffer to the indicated descriptor.
   4686  */
   4687 static int
   4688 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4689 {
   4690 	struct wm_softc *sc = rxq->rxq_sc;
   4691 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4692 	struct mbuf *m;
   4693 	int error;
   4694 
   4695 	KASSERT(mutex_owned(rxq->rxq_lock));
   4696 
   4697 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4698 	if (m == NULL)
   4699 		return ENOBUFS;
   4700 
   4701 	MCLGET(m, M_DONTWAIT);
   4702 	if ((m->m_flags & M_EXT) == 0) {
   4703 		m_freem(m);
   4704 		return ENOBUFS;
   4705 	}
   4706 
   4707 	if (rxs->rxs_mbuf != NULL)
   4708 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4709 
   4710 	rxs->rxs_mbuf = m;
   4711 
   4712 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4713 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4714 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4715 	if (error) {
   4716 		/* XXX XXX XXX */
   4717 		aprint_error_dev(sc->sc_dev,
   4718 		    "unable to load rx DMA map %d, error = %d\n",
   4719 		    idx, error);
   4720 		panic("wm_add_rxbuf");
   4721 	}
   4722 
   4723 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4724 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4725 
   4726 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4727 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4728 			wm_init_rxdesc(rxq, idx);
   4729 	} else
   4730 		wm_init_rxdesc(rxq, idx);
   4731 
   4732 	return 0;
   4733 }
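/*
 * Usage sketch (illustrative, not part of the driver): callers such as
 * the Rx interrupt path refill a descriptor with the queue lock held,
 * recycling the old mbuf when allocation fails, e.g.:
 *
 *	KASSERT(mutex_owned(rxq->rxq_lock));
 *	if (wm_add_rxbuf(rxq, idx) == ENOBUFS) {
 *		// Out of mbufs; reuse the previous buffer for this slot.
 *		wm_init_rxdesc(rxq, idx);
 *	}
 */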
   4734 
   4735 /*
   4736  * wm_rxdrain:
   4737  *
   4738  *	Drain the receive queue.
   4739  */
   4740 static void
   4741 wm_rxdrain(struct wm_rxqueue *rxq)
   4742 {
   4743 	struct wm_softc *sc = rxq->rxq_sc;
   4744 	struct wm_rxsoft *rxs;
   4745 	int i;
   4746 
   4747 	KASSERT(mutex_owned(rxq->rxq_lock));
   4748 
   4749 	for (i = 0; i < WM_NRXDESC; i++) {
   4750 		rxs = &rxq->rxq_soft[i];
   4751 		if (rxs->rxs_mbuf != NULL) {
   4752 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4753 			m_freem(rxs->rxs_mbuf);
   4754 			rxs->rxs_mbuf = NULL;
   4755 		}
   4756 	}
   4757 }
   4758 
   4759 
   4760 /*
   4761  * XXX copy from FreeBSD's sys/net/rss_config.c
   4762  */
   4763 /*
   4764  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4765  * effectiveness may be limited by algorithm choice and available entropy
   4766  * during the boot.
   4767  *
   4768  * XXXRW: And that we don't randomize it yet!
   4769  *
   4770  * This is the default Microsoft RSS specification key which is also
   4771  * the Chelsio T5 firmware default key.
   4772  */
   4773 #define RSS_KEYSIZE 40
   4774 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4775 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4776 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4777 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4778 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4779 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4780 };
   4781 
   4782 /*
    4783  * Caller must pass an array of size sizeof(wm_rss_key).
   4784  *
   4785  * XXX
    4786  * As if_ixgbe may also use this function, it should not be an
    4787  * if_wm-specific function.
   4788  */
   4789 static void
   4790 wm_rss_getkey(uint8_t *key)
   4791 {
   4792 
   4793 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4794 }
   4795 
   4796 /*
    4797  * Set up the registers for RSS.
    4798  *
    4799  * XXX VMDq is not supported yet.
   4800  */
   4801 static void
   4802 wm_init_rss(struct wm_softc *sc)
   4803 {
   4804 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4805 	int i;
   4806 
   4807 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4808 
   4809 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4810 		int qid, reta_ent;
   4811 
   4812 		qid  = i % sc->sc_nqueues;
    4813 		switch (sc->sc_type) {
   4814 		case WM_T_82574:
   4815 			reta_ent = __SHIFTIN(qid,
   4816 			    RETA_ENT_QINDEX_MASK_82574);
   4817 			break;
   4818 		case WM_T_82575:
   4819 			reta_ent = __SHIFTIN(qid,
   4820 			    RETA_ENT_QINDEX1_MASK_82575);
   4821 			break;
   4822 		default:
   4823 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4824 			break;
   4825 		}
   4826 
   4827 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4828 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4829 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4830 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4831 	}
   4832 
   4833 	wm_rss_getkey((uint8_t *)rss_key);
   4834 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4835 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4836 
   4837 	if (sc->sc_type == WM_T_82574)
   4838 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4839 	else
   4840 		mrqc = MRQC_ENABLE_RSS_MQ;
   4841 
   4842 	/*
    4843 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4844 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4845 	 */
   4846 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4847 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4848 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4849 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4850 
   4851 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4852 }
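/*
 * Illustrative sketch (not used by the driver): with the redirection
 * table programmed above, the hardware selects a receive queue by using
 * the low-order bits of the 32-bit RSS hash as an index into the
 * RETA_NUM_ENTRIES-entry table.  A software mirror of that lookup,
 * assuming a hypothetical reta[] shadow array, would be:
 *
 *	static inline int
 *	wm_rss_hash_to_qid(const uint8_t reta[RETA_NUM_ENTRIES],
 *	    uint32_t hash)
 *	{
 *
 *		return reta[hash % RETA_NUM_ENTRIES];
 *	}
 */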
   4853 
   4854 /*
    4855  * Adjust the TX and RX queue numbers which the system actually uses.
    4856  * (A worked example follows this function.)
    4857  * The numbers are affected by the parameters below:
    4858  *     - The number of hardware queues
   4859  *     - The number of MSI-X vectors (= "nvectors" argument)
   4860  *     - ncpu
   4861  */
   4862 static void
   4863 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4864 {
   4865 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4866 
   4867 	if (nvectors < 2) {
   4868 		sc->sc_nqueues = 1;
   4869 		return;
   4870 	}
   4871 
    4872 	switch (sc->sc_type) {
   4873 	case WM_T_82572:
   4874 		hw_ntxqueues = 2;
   4875 		hw_nrxqueues = 2;
   4876 		break;
   4877 	case WM_T_82574:
   4878 		hw_ntxqueues = 2;
   4879 		hw_nrxqueues = 2;
   4880 		break;
   4881 	case WM_T_82575:
   4882 		hw_ntxqueues = 4;
   4883 		hw_nrxqueues = 4;
   4884 		break;
   4885 	case WM_T_82576:
   4886 		hw_ntxqueues = 16;
   4887 		hw_nrxqueues = 16;
   4888 		break;
   4889 	case WM_T_82580:
   4890 	case WM_T_I350:
   4891 	case WM_T_I354:
   4892 		hw_ntxqueues = 8;
   4893 		hw_nrxqueues = 8;
   4894 		break;
   4895 	case WM_T_I210:
   4896 		hw_ntxqueues = 4;
   4897 		hw_nrxqueues = 4;
   4898 		break;
   4899 	case WM_T_I211:
   4900 		hw_ntxqueues = 2;
   4901 		hw_nrxqueues = 2;
   4902 		break;
   4903 		/*
    4904 		 * As the Ethernet controllers below do not support MSI-X,
    4905 		 * this driver does not use multiqueue on them:
   4906 		 *     - WM_T_80003
   4907 		 *     - WM_T_ICH8
   4908 		 *     - WM_T_ICH9
   4909 		 *     - WM_T_ICH10
   4910 		 *     - WM_T_PCH
   4911 		 *     - WM_T_PCH2
   4912 		 *     - WM_T_PCH_LPT
   4913 		 */
   4914 	default:
   4915 		hw_ntxqueues = 1;
   4916 		hw_nrxqueues = 1;
   4917 		break;
   4918 	}
   4919 
   4920 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4921 
   4922 	/*
    4923 	 * Since using more queues than MSI-X vectors cannot improve
    4924 	 * scaling, limit the number of queues actually used.
   4925 	 */
   4926 	if (nvectors < hw_nqueues + 1) {
   4927 		sc->sc_nqueues = nvectors - 1;
   4928 	} else {
   4929 		sc->sc_nqueues = hw_nqueues;
   4930 	}
   4931 
   4932 	/*
    4933 	 * Likewise, since using more queues than CPUs cannot improve
    4934 	 * scaling, limit the number of queues actually used.
   4935 	 */
   4936 	if (ncpu < sc->sc_nqueues)
   4937 		sc->sc_nqueues = ncpu;
   4938 }
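/*
 * Worked example (illustrative): on an 82576 (16 hardware queues) with
 * nvectors = 5 and ncpu = 8, hw_nqueues = min(16, 16) = 16; since
 * 5 < 16 + 1, sc_nqueues = nvectors - 1 = 4, and 4 <= ncpu, so the
 * driver uses four Tx/Rx queue pairs plus one link vector.
 */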
   4939 
   4940 static inline bool
   4941 wm_is_using_msix(struct wm_softc *sc)
   4942 {
   4943 
   4944 	return (sc->sc_nintrs > 1);
   4945 }
   4946 
   4947 static inline bool
   4948 wm_is_using_multiqueue(struct wm_softc *sc)
   4949 {
   4950 
   4951 	return (sc->sc_nqueues > 1);
   4952 }
   4953 
   4954 static int
   4955 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4956 {
   4957 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4958 	wmq->wmq_id = qidx;
   4959 	wmq->wmq_intr_idx = intr_idx;
   4960 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4961 #ifdef WM_MPSAFE
   4962 	    | SOFTINT_MPSAFE
   4963 #endif
   4964 	    , wm_handle_queue, wmq);
   4965 	if (wmq->wmq_si != NULL)
   4966 		return 0;
   4967 
   4968 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4969 	    wmq->wmq_id);
   4970 
   4971 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4972 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4973 	return ENOMEM;
   4974 }
   4975 
   4976 /*
   4977  * Both single interrupt MSI and INTx can use this function.
   4978  */
   4979 static int
   4980 wm_setup_legacy(struct wm_softc *sc)
   4981 {
   4982 	pci_chipset_tag_t pc = sc->sc_pc;
   4983 	const char *intrstr = NULL;
   4984 	char intrbuf[PCI_INTRSTR_LEN];
   4985 	int error;
   4986 
   4987 	error = wm_alloc_txrx_queues(sc);
   4988 	if (error) {
   4989 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4990 		    error);
   4991 		return ENOMEM;
   4992 	}
   4993 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4994 	    sizeof(intrbuf));
   4995 #ifdef WM_MPSAFE
   4996 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4997 #endif
   4998 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4999 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5000 	if (sc->sc_ihs[0] == NULL) {
   5001 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5002 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5003 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5004 		return ENOMEM;
   5005 	}
   5006 
   5007 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5008 	sc->sc_nintrs = 1;
   5009 
   5010 	return wm_softint_establish(sc, 0, 0);
   5011 }
   5012 
   5013 static int
   5014 wm_setup_msix(struct wm_softc *sc)
   5015 {
   5016 	void *vih;
   5017 	kcpuset_t *affinity;
   5018 	int qidx, error, intr_idx, txrx_established;
   5019 	pci_chipset_tag_t pc = sc->sc_pc;
   5020 	const char *intrstr = NULL;
   5021 	char intrbuf[PCI_INTRSTR_LEN];
   5022 	char intr_xname[INTRDEVNAMEBUF];
   5023 
   5024 	if (sc->sc_nqueues < ncpu) {
   5025 		/*
   5026 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
   5027 		 * interrupts start from CPU#1.
   5028 		 */
   5029 		sc->sc_affinity_offset = 1;
   5030 	} else {
   5031 		/*
    5032 		 * In this case, this device uses all CPUs, so for readability
    5033 		 * we match each affinitized cpu_index to its MSI-X vector number.
   5034 		 */
   5035 		sc->sc_affinity_offset = 0;
   5036 	}
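	/*
	 * For example (illustrative): with 4 queues on an 8-CPU system,
	 * sc_affinity_offset is 1, so the Tx/Rx vectors are bound to
	 * CPU#1..CPU#4; with ncpu <= nqueues the offset is 0 and the
	 * vectors wrap over all CPUs via the "% ncpu" below.
	 */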
   5037 
   5038 	error = wm_alloc_txrx_queues(sc);
   5039 	if (error) {
   5040 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5041 		    error);
   5042 		return ENOMEM;
   5043 	}
   5044 
   5045 	kcpuset_create(&affinity, false);
   5046 	intr_idx = 0;
   5047 
   5048 	/*
   5049 	 * TX and RX
   5050 	 */
   5051 	txrx_established = 0;
   5052 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5053 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5054 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5055 
   5056 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5057 		    sizeof(intrbuf));
   5058 #ifdef WM_MPSAFE
   5059 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5060 		    PCI_INTR_MPSAFE, true);
   5061 #endif
   5062 		memset(intr_xname, 0, sizeof(intr_xname));
   5063 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5064 		    device_xname(sc->sc_dev), qidx);
   5065 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5066 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5067 		if (vih == NULL) {
   5068 			aprint_error_dev(sc->sc_dev,
   5069 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5070 			    intrstr ? " at " : "",
   5071 			    intrstr ? intrstr : "");
   5072 
   5073 			goto fail;
   5074 		}
   5075 		kcpuset_zero(affinity);
   5076 		/* Round-robin affinity */
   5077 		kcpuset_set(affinity, affinity_to);
   5078 		error = interrupt_distribute(vih, affinity, NULL);
   5079 		if (error == 0) {
   5080 			aprint_normal_dev(sc->sc_dev,
   5081 			    "for TX and RX interrupting at %s affinity to %u\n",
   5082 			    intrstr, affinity_to);
   5083 		} else {
   5084 			aprint_normal_dev(sc->sc_dev,
   5085 			    "for TX and RX interrupting at %s\n", intrstr);
   5086 		}
   5087 		sc->sc_ihs[intr_idx] = vih;
   5088 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5089 			goto fail;
   5090 		txrx_established++;
   5091 		intr_idx++;
   5092 	}
   5093 
   5094 	/*
   5095 	 * LINK
   5096 	 */
   5097 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5098 	    sizeof(intrbuf));
   5099 #ifdef WM_MPSAFE
   5100 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5101 #endif
   5102 	memset(intr_xname, 0, sizeof(intr_xname));
   5103 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5104 	    device_xname(sc->sc_dev));
   5105 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5106 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5107 	if (vih == NULL) {
   5108 		aprint_error_dev(sc->sc_dev,
   5109 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5110 		    intrstr ? " at " : "",
   5111 		    intrstr ? intrstr : "");
   5112 
   5113 		goto fail;
   5114 	}
    5115 	/* Keep the default affinity for the LINK interrupt. */
   5116 	aprint_normal_dev(sc->sc_dev,
   5117 	    "for LINK interrupting at %s\n", intrstr);
   5118 	sc->sc_ihs[intr_idx] = vih;
   5119 	sc->sc_link_intr_idx = intr_idx;
   5120 
   5121 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5122 	kcpuset_destroy(affinity);
   5123 	return 0;
   5124 
   5125  fail:
   5126 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5127 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5128 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5129 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5130 	}
   5131 
   5132 	kcpuset_destroy(affinity);
   5133 	return ENOMEM;
   5134 }
   5135 
   5136 static void
   5137 wm_unset_stopping_flags(struct wm_softc *sc)
   5138 {
   5139 	int i;
   5140 
   5141 	KASSERT(WM_CORE_LOCKED(sc));
   5142 
   5143 	/*
   5144 	 * must unset stopping flags in ascending order.
   5145 	 */
    5146 	for (i = 0; i < sc->sc_nqueues; i++) {
   5147 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5148 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5149 
   5150 		mutex_enter(txq->txq_lock);
   5151 		txq->txq_stopping = false;
   5152 		mutex_exit(txq->txq_lock);
   5153 
   5154 		mutex_enter(rxq->rxq_lock);
   5155 		rxq->rxq_stopping = false;
   5156 		mutex_exit(rxq->rxq_lock);
   5157 	}
   5158 
   5159 	sc->sc_core_stopping = false;
   5160 }
   5161 
   5162 static void
   5163 wm_set_stopping_flags(struct wm_softc *sc)
   5164 {
   5165 	int i;
   5166 
   5167 	KASSERT(WM_CORE_LOCKED(sc));
   5168 
   5169 	sc->sc_core_stopping = true;
   5170 
   5171 	/*
   5172 	 * must set stopping flags in ascending order.
   5173 	 */
    5174 	for (i = 0; i < sc->sc_nqueues; i++) {
   5175 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5176 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5177 
   5178 		mutex_enter(rxq->rxq_lock);
   5179 		rxq->rxq_stopping = true;
   5180 		mutex_exit(rxq->rxq_lock);
   5181 
   5182 		mutex_enter(txq->txq_lock);
   5183 		txq->txq_stopping = true;
   5184 		mutex_exit(txq->txq_lock);
   5185 	}
   5186 }
   5187 
   5188 /*
    5189  * Write the interrupt interval value to the ITR or EITR register.
   5190  */
   5191 static void
   5192 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5193 {
   5194 
   5195 	if (!wmq->wmq_set_itr)
   5196 		return;
   5197 
   5198 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5199 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5200 
   5201 		/*
    5202 		 * The 82575 doesn't have the CNT_INGR field.
    5203 		 * So, overwrite the counter field by software.
   5204 		 */
   5205 		if (sc->sc_type == WM_T_82575)
   5206 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5207 		else
   5208 			eitr |= EITR_CNT_INGR;
   5209 
   5210 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5211 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5212 		/*
    5213 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5214 		 * the multiqueue function with MSI-X.
   5215 		 */
   5216 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5217 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5218 	} else {
   5219 		KASSERT(wmq->wmq_id == 0);
   5220 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5221 	}
   5222 
   5223 	wmq->wmq_set_itr = false;
   5224 }
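/*
 * Illustrative sketch (not part of the driver): the legacy ITR ticks in
 * units of 256 nanoseconds, so a desired interrupt rate converts to a
 * register value as below; e.g. N = 2604 ints/sec gives ~1500, the
 * sc_itr_init default chosen in wm_init_locked().
 *
 *	static inline uint32_t
 *	wm_itr_from_rate(uint32_t ints_per_sec)
 *	{
 *
 *		return 1000000000U / (ints_per_sec * 256U);
 *	}
 */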
   5225 
   5226 /*
   5227  * TODO
    5228  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5229  * however, it does not fit wm(4) well, so AIM is disabled until we
    5230  * find an appropriate ITR calculation for this driver.
   5231  */
   5232 /*
    5233  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5234  * write. This function itself does not write the ITR/EITR register.
   5235  */
   5236 static void
   5237 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5238 {
   5239 #ifdef NOTYET
   5240 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5241 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5242 	uint32_t avg_size = 0;
   5243 	uint32_t new_itr;
   5244 
   5245 	if (rxq->rxq_packets)
   5246 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5247 	if (txq->txq_packets)
   5248 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5249 
   5250 	if (avg_size == 0) {
   5251 		new_itr = 450; /* restore default value */
   5252 		goto out;
   5253 	}
   5254 
   5255 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5256 	avg_size += 24;
   5257 
   5258 	/* Don't starve jumbo frames */
   5259 	avg_size = min(avg_size, 3000);
   5260 
   5261 	/* Give a little boost to mid-size frames */
   5262 	if ((avg_size > 300) && (avg_size < 1200))
   5263 		new_itr = avg_size / 3;
   5264 	else
   5265 		new_itr = avg_size / 2;
   5266 
   5267 out:
   5268 	/*
    5269 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5270 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5271 	 */
   5272 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5273 		new_itr *= 4;
   5274 
   5275 	if (new_itr != wmq->wmq_itr) {
   5276 		wmq->wmq_itr = new_itr;
   5277 		wmq->wmq_set_itr = true;
   5278 	} else
   5279 		wmq->wmq_set_itr = false;
   5280 
   5281 	rxq->rxq_packets = 0;
   5282 	rxq->rxq_bytes = 0;
   5283 	txq->txq_packets = 0;
   5284 	txq->txq_bytes = 0;
   5285 #endif
   5286 }
   5287 
   5288 /*
   5289  * wm_init:		[ifnet interface function]
   5290  *
   5291  *	Initialize the interface.
   5292  */
   5293 static int
   5294 wm_init(struct ifnet *ifp)
   5295 {
   5296 	struct wm_softc *sc = ifp->if_softc;
   5297 	int ret;
   5298 
   5299 	WM_CORE_LOCK(sc);
   5300 	ret = wm_init_locked(ifp);
   5301 	WM_CORE_UNLOCK(sc);
   5302 
   5303 	return ret;
   5304 }
   5305 
   5306 static int
   5307 wm_init_locked(struct ifnet *ifp)
   5308 {
   5309 	struct wm_softc *sc = ifp->if_softc;
   5310 	int i, j, trynum, error = 0;
   5311 	uint32_t reg;
   5312 
   5313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5314 		device_xname(sc->sc_dev), __func__));
   5315 	KASSERT(WM_CORE_LOCKED(sc));
   5316 
   5317 	/*
    5318 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5319 	 * There is a small but measurable benefit to avoiding the adjustment
   5320 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5321 	 * on such platforms.  One possibility is that the DMA itself is
   5322 	 * slightly more efficient if the front of the entire packet (instead
   5323 	 * of the front of the headers) is aligned.
   5324 	 *
   5325 	 * Note we must always set align_tweak to 0 if we are using
   5326 	 * jumbo frames.
   5327 	 */
   5328 #ifdef __NO_STRICT_ALIGNMENT
   5329 	sc->sc_align_tweak = 0;
   5330 #else
   5331 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5332 		sc->sc_align_tweak = 0;
   5333 	else
   5334 		sc->sc_align_tweak = 2;
   5335 #endif /* __NO_STRICT_ALIGNMENT */
   5336 
   5337 	/* Cancel any pending I/O. */
   5338 	wm_stop_locked(ifp, 0);
   5339 
   5340 	/* update statistics before reset */
   5341 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5342 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5343 
   5344 	/* PCH_SPT hardware workaround */
   5345 	if (sc->sc_type == WM_T_PCH_SPT)
   5346 		wm_flush_desc_rings(sc);
   5347 
   5348 	/* Reset the chip to a known state. */
   5349 	wm_reset(sc);
   5350 
   5351 	/*
    5352 	 * AMT-based hardware can now take control from firmware.
   5353 	 * Do this after reset.
   5354 	 */
   5355 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5356 		wm_get_hw_control(sc);
   5357 
   5358 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5359 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5360 		wm_legacy_irq_quirk_spt(sc);
   5361 
   5362 	/* Init hardware bits */
   5363 	wm_initialize_hardware_bits(sc);
   5364 
   5365 	/* Reset the PHY. */
   5366 	if (sc->sc_flags & WM_F_HAS_MII)
   5367 		wm_gmii_reset(sc);
   5368 
   5369 	/* Calculate (E)ITR value */
   5370 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5371 		/*
    5372 		 * For NEWQUEUE's EITR (except for the 82575).
    5373 		 * The 82575's EITR should be set to the same throttling value
    5374 		 * as the other old controllers' ITR because the interrupt/sec
    5375 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5376 		 *
    5377 		 * The 82574's EITR should be set to the same value as the ITR.
    5378 		 *
    5379 		 * For N interrupts/sec, set this value to:
    5380 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5381 		 */
   5382 		sc->sc_itr_init = 450;
   5383 	} else if (sc->sc_type >= WM_T_82543) {
   5384 		/*
   5385 		 * Set up the interrupt throttling register (units of 256ns)
   5386 		 * Note that a footnote in Intel's documentation says this
   5387 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5388 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5389 		 * that that is also true for the 1024ns units of the other
   5390 		 * interrupt-related timer registers -- so, really, we ought
   5391 		 * to divide this value by 4 when the link speed is low.
   5392 		 *
   5393 		 * XXX implement this division at link speed change!
   5394 		 */
   5395 
   5396 		/*
   5397 		 * For N interrupts/sec, set this value to:
   5398 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5399 		 * absolute and packet timer values to this value
   5400 		 * divided by 4 to get "simple timer" behavior.
   5401 		 */
   5402 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5403 	}
   5404 
   5405 	error = wm_init_txrx_queues(sc);
   5406 	if (error)
   5407 		goto out;
   5408 
   5409 	/*
   5410 	 * Clear out the VLAN table -- we don't use it (yet).
   5411 	 */
   5412 	CSR_WRITE(sc, WMREG_VET, 0);
   5413 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5414 		trynum = 10; /* Due to hw errata */
   5415 	else
   5416 		trynum = 1;
   5417 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5418 		for (j = 0; j < trynum; j++)
   5419 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5420 
   5421 	/*
   5422 	 * Set up flow-control parameters.
   5423 	 *
   5424 	 * XXX Values could probably stand some tuning.
   5425 	 */
   5426 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5427 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5428 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5429 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5430 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5431 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5432 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5433 	}
   5434 
   5435 	sc->sc_fcrtl = FCRTL_DFLT;
   5436 	if (sc->sc_type < WM_T_82543) {
   5437 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5438 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5439 	} else {
   5440 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5441 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5442 	}
   5443 
   5444 	if (sc->sc_type == WM_T_80003)
   5445 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5446 	else
   5447 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5448 
   5449 	/* Writes the control register. */
   5450 	wm_set_vlan(sc);
   5451 
   5452 	if (sc->sc_flags & WM_F_HAS_MII) {
   5453 		uint16_t kmreg;
   5454 
   5455 		switch (sc->sc_type) {
   5456 		case WM_T_80003:
   5457 		case WM_T_ICH8:
   5458 		case WM_T_ICH9:
   5459 		case WM_T_ICH10:
   5460 		case WM_T_PCH:
   5461 		case WM_T_PCH2:
   5462 		case WM_T_PCH_LPT:
   5463 		case WM_T_PCH_SPT:
   5464 			/*
   5465 			 * Set the mac to wait the maximum time between each
   5466 			 * iteration and increase the max iterations when
   5467 			 * polling the phy; this fixes erroneous timeouts at
   5468 			 * 10Mbps.
   5469 			 */
   5470 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5471 			    0xFFFF);
   5472 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5473 			    &kmreg);
   5474 			kmreg |= 0x3F;
   5475 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5476 			    kmreg);
   5477 			break;
   5478 		default:
   5479 			break;
   5480 		}
   5481 
   5482 		if (sc->sc_type == WM_T_80003) {
   5483 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5484 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5485 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5486 
   5487 			/* Bypass RX and TX FIFO's */
   5488 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5489 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5490 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5491 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5492 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5493 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5494 		}
   5495 	}
   5496 #if 0
   5497 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5498 #endif
   5499 
   5500 	/* Set up checksum offload parameters. */
   5501 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5502 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5503 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5504 		reg |= RXCSUM_IPOFL;
   5505 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5506 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5507 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5508 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5509 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5510 
    5511 	/* Set up the registers for MSI-X. */
   5512 	if (wm_is_using_msix(sc)) {
   5513 		uint32_t ivar;
   5514 		struct wm_queue *wmq;
   5515 		int qid, qintr_idx;
   5516 
   5517 		if (sc->sc_type == WM_T_82575) {
   5518 			/* Interrupt control */
   5519 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5520 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5521 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5522 
   5523 			/* TX and RX */
   5524 			for (i = 0; i < sc->sc_nqueues; i++) {
   5525 				wmq = &sc->sc_queue[i];
   5526 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5527 				    EITR_TX_QUEUE(wmq->wmq_id)
   5528 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5529 			}
   5530 			/* Link status */
   5531 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5532 			    EITR_OTHER);
   5533 		} else if (sc->sc_type == WM_T_82574) {
   5534 			/* Interrupt control */
   5535 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5536 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5537 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5538 
   5539 			/*
    5540 			 * Work around an issue with spurious interrupts in
    5541 			 * MSI-X mode. At wm_initialize_hardware_bits(),
    5542 			 * sc_nintrs has not been initialized yet, so
    5543 			 * re-initialize WMREG_RFCTL here.
   5544 			 */
   5545 			reg = CSR_READ(sc, WMREG_RFCTL);
   5546 			reg |= WMREG_RFCTL_ACKDIS;
   5547 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5548 
   5549 			ivar = 0;
   5550 			/* TX and RX */
   5551 			for (i = 0; i < sc->sc_nqueues; i++) {
   5552 				wmq = &sc->sc_queue[i];
   5553 				qid = wmq->wmq_id;
   5554 				qintr_idx = wmq->wmq_intr_idx;
   5555 
   5556 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5557 				    IVAR_TX_MASK_Q_82574(qid));
   5558 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5559 				    IVAR_RX_MASK_Q_82574(qid));
   5560 			}
   5561 			/* Link status */
   5562 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5563 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5564 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5565 		} else {
   5566 			/* Interrupt control */
   5567 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5568 			    | GPIE_EIAME | GPIE_PBA);
   5569 
   5570 			switch (sc->sc_type) {
   5571 			case WM_T_82580:
   5572 			case WM_T_I350:
   5573 			case WM_T_I354:
   5574 			case WM_T_I210:
   5575 			case WM_T_I211:
   5576 				/* TX and RX */
   5577 				for (i = 0; i < sc->sc_nqueues; i++) {
   5578 					wmq = &sc->sc_queue[i];
   5579 					qid = wmq->wmq_id;
   5580 					qintr_idx = wmq->wmq_intr_idx;
   5581 
   5582 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5583 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5584 					ivar |= __SHIFTIN((qintr_idx
   5585 						| IVAR_VALID),
   5586 					    IVAR_TX_MASK_Q(qid));
   5587 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5588 					ivar |= __SHIFTIN((qintr_idx
   5589 						| IVAR_VALID),
   5590 					    IVAR_RX_MASK_Q(qid));
   5591 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5592 				}
   5593 				break;
   5594 			case WM_T_82576:
   5595 				/* TX and RX */
   5596 				for (i = 0; i < sc->sc_nqueues; i++) {
   5597 					wmq = &sc->sc_queue[i];
   5598 					qid = wmq->wmq_id;
   5599 					qintr_idx = wmq->wmq_intr_idx;
   5600 
   5601 					ivar = CSR_READ(sc,
   5602 					    WMREG_IVAR_Q_82576(qid));
   5603 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5604 					ivar |= __SHIFTIN((qintr_idx
   5605 						| IVAR_VALID),
   5606 					    IVAR_TX_MASK_Q_82576(qid));
   5607 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5608 					ivar |= __SHIFTIN((qintr_idx
   5609 						| IVAR_VALID),
   5610 					    IVAR_RX_MASK_Q_82576(qid));
   5611 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5612 					    ivar);
   5613 				}
   5614 				break;
   5615 			default:
   5616 				break;
   5617 			}
   5618 
   5619 			/* Link status */
   5620 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5621 			    IVAR_MISC_OTHER);
   5622 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5623 		}
   5624 
   5625 		if (wm_is_using_multiqueue(sc)) {
   5626 			wm_init_rss(sc);
   5627 
   5628 			/*
    5629 			 * NOTE: Receive Full-Packet Checksum Offload
    5630 			 * is mutually exclusive with Multiqueue. However,
    5631 			 * this is not the same as TCP/IP checksums, which
    5632 			 * still work.
   5633 			*/
   5634 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5635 			reg |= RXCSUM_PCSD;
   5636 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5637 		}
   5638 	}
   5639 
   5640 	/* Set up the interrupt registers. */
   5641 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5642 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5643 	    ICR_RXO | ICR_RXT0;
   5644 	if (wm_is_using_msix(sc)) {
   5645 		uint32_t mask;
   5646 		struct wm_queue *wmq;
   5647 
   5648 		switch (sc->sc_type) {
   5649 		case WM_T_82574:
   5650 			mask = 0;
   5651 			for (i = 0; i < sc->sc_nqueues; i++) {
   5652 				wmq = &sc->sc_queue[i];
   5653 				mask |= ICR_TXQ(wmq->wmq_id);
   5654 				mask |= ICR_RXQ(wmq->wmq_id);
   5655 			}
   5656 			mask |= ICR_OTHER;
   5657 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5658 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5659 			break;
   5660 		default:
   5661 			if (sc->sc_type == WM_T_82575) {
   5662 				mask = 0;
   5663 				for (i = 0; i < sc->sc_nqueues; i++) {
   5664 					wmq = &sc->sc_queue[i];
   5665 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5666 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5667 				}
   5668 				mask |= EITR_OTHER;
   5669 			} else {
   5670 				mask = 0;
   5671 				for (i = 0; i < sc->sc_nqueues; i++) {
   5672 					wmq = &sc->sc_queue[i];
   5673 					mask |= 1 << wmq->wmq_intr_idx;
   5674 				}
   5675 				mask |= 1 << sc->sc_link_intr_idx;
   5676 			}
   5677 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5678 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5679 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5680 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5681 			break;
   5682 		}
   5683 	} else
   5684 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5685 
   5686 	/* Set up the inter-packet gap. */
   5687 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5688 
   5689 	if (sc->sc_type >= WM_T_82543) {
   5690 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5691 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5692 			wm_itrs_writereg(sc, wmq);
   5693 		}
   5694 		/*
    5695 		 * Link interrupts occur much less frequently than TX
    5696 		 * and RX interrupts. So, we don't
   5697 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5698 		 * FreeBSD's if_igb.
   5699 		 */
   5700 	}
   5701 
   5702 	/* Set the VLAN ethernetype. */
   5703 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5704 
   5705 	/*
   5706 	 * Set up the transmit control register; we start out with
    5707 	 * a collision distance suitable for FDX, but update it when
   5708 	 * we resolve the media type.
   5709 	 */
   5710 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5711 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5712 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5713 	if (sc->sc_type >= WM_T_82571)
   5714 		sc->sc_tctl |= TCTL_MULR;
   5715 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5716 
   5717 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5718 		/* Write TDT after TCTL.EN is set. See the document. */
   5719 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5720 	}
   5721 
   5722 	if (sc->sc_type == WM_T_80003) {
   5723 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5724 		reg &= ~TCTL_EXT_GCEX_MASK;
   5725 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5726 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5727 	}
   5728 
   5729 	/* Set the media. */
   5730 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5731 		goto out;
   5732 
   5733 	/* Configure for OS presence */
   5734 	wm_init_manageability(sc);
   5735 
   5736 	/*
   5737 	 * Set up the receive control register; we actually program
   5738 	 * the register when we set the receive filter.  Use multicast
   5739 	 * address offset type 0.
   5740 	 *
   5741 	 * Only the i82544 has the ability to strip the incoming
   5742 	 * CRC, so we don't enable that feature.
   5743 	 */
   5744 	sc->sc_mchash_type = 0;
   5745 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5746 	    | RCTL_MO(sc->sc_mchash_type);
   5747 
   5748 	/*
    5749 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5750 	 */
   5751 	if (sc->sc_type == WM_T_82574)
   5752 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5753 
   5754 	/*
   5755 	 * The I350 has a bug where it always strips the CRC whether
    5756 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5757 	 */
   5758 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5759 	    || (sc->sc_type == WM_T_I210))
   5760 		sc->sc_rctl |= RCTL_SECRC;
   5761 
   5762 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5763 	    && (ifp->if_mtu > ETHERMTU)) {
   5764 		sc->sc_rctl |= RCTL_LPE;
   5765 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5766 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5767 	}
   5768 
   5769 	if (MCLBYTES == 2048) {
   5770 		sc->sc_rctl |= RCTL_2k;
   5771 	} else {
   5772 		if (sc->sc_type >= WM_T_82543) {
   5773 			switch (MCLBYTES) {
   5774 			case 4096:
   5775 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5776 				break;
   5777 			case 8192:
   5778 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5779 				break;
   5780 			case 16384:
   5781 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5782 				break;
   5783 			default:
   5784 				panic("wm_init: MCLBYTES %d unsupported",
   5785 				    MCLBYTES);
   5786 				break;
   5787 			}
   5788 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5789 	}
   5790 
   5791 	/* Enable ECC */
   5792 	switch (sc->sc_type) {
   5793 	case WM_T_82571:
   5794 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5795 		reg |= PBA_ECC_CORR_EN;
   5796 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5797 		break;
   5798 	case WM_T_PCH_LPT:
   5799 	case WM_T_PCH_SPT:
   5800 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5801 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5802 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5803 
   5804 		sc->sc_ctrl |= CTRL_MEHE;
   5805 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5806 		break;
   5807 	default:
   5808 		break;
   5809 	}
   5810 
   5811 	/* On 575 and later set RDT only if RX enabled */
   5812 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5813 		int qidx;
   5814 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5815 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5816 			for (i = 0; i < WM_NRXDESC; i++) {
   5817 				mutex_enter(rxq->rxq_lock);
   5818 				wm_init_rxdesc(rxq, i);
   5819 				mutex_exit(rxq->rxq_lock);
   5820 
   5821 			}
   5822 		}
   5823 	}
   5824 
   5825 	/* Set the receive filter. */
   5826 	wm_set_filter(sc);
   5827 
   5828 	wm_unset_stopping_flags(sc);
   5829 
   5830 	/* Start the one second link check clock. */
   5831 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5832 
   5833 	/* ...all done! */
   5834 	ifp->if_flags |= IFF_RUNNING;
   5835 	ifp->if_flags &= ~IFF_OACTIVE;
   5836 
   5837  out:
   5838 	sc->sc_if_flags = ifp->if_flags;
   5839 	if (error)
   5840 		log(LOG_ERR, "%s: interface not running\n",
   5841 		    device_xname(sc->sc_dev));
   5842 	return error;
   5843 }
   5844 
   5845 /*
   5846  * wm_stop:		[ifnet interface function]
   5847  *
   5848  *	Stop transmission on the interface.
   5849  */
   5850 static void
   5851 wm_stop(struct ifnet *ifp, int disable)
   5852 {
   5853 	struct wm_softc *sc = ifp->if_softc;
   5854 
   5855 	WM_CORE_LOCK(sc);
   5856 	wm_stop_locked(ifp, disable);
   5857 	WM_CORE_UNLOCK(sc);
   5858 }
   5859 
   5860 static void
   5861 wm_stop_locked(struct ifnet *ifp, int disable)
   5862 {
   5863 	struct wm_softc *sc = ifp->if_softc;
   5864 	struct wm_txsoft *txs;
   5865 	int i, qidx;
   5866 
   5867 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5868 		device_xname(sc->sc_dev), __func__));
   5869 	KASSERT(WM_CORE_LOCKED(sc));
   5870 
   5871 	wm_set_stopping_flags(sc);
   5872 
   5873 	/* Stop the one second clock. */
   5874 	callout_stop(&sc->sc_tick_ch);
   5875 
   5876 	/* Stop the 82547 Tx FIFO stall check timer. */
   5877 	if (sc->sc_type == WM_T_82547)
   5878 		callout_stop(&sc->sc_txfifo_ch);
   5879 
   5880 	if (sc->sc_flags & WM_F_HAS_MII) {
   5881 		/* Down the MII. */
   5882 		mii_down(&sc->sc_mii);
   5883 	} else {
   5884 #if 0
   5885 		/* Should we clear PHY's status properly? */
   5886 		wm_reset(sc);
   5887 #endif
   5888 	}
   5889 
   5890 	/* Stop the transmit and receive processes. */
   5891 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5892 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5893 	sc->sc_rctl &= ~RCTL_EN;
   5894 
   5895 	/*
   5896 	 * Clear the interrupt mask to ensure the device cannot assert its
   5897 	 * interrupt line.
   5898 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5899 	 * service any currently pending or shared interrupt.
   5900 	 */
   5901 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5902 	sc->sc_icr = 0;
   5903 	if (wm_is_using_msix(sc)) {
   5904 		if (sc->sc_type != WM_T_82574) {
   5905 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5906 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5907 		} else
   5908 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5909 	}
   5910 
   5911 	/* Release any queued transmit buffers. */
   5912 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5913 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5914 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5915 		mutex_enter(txq->txq_lock);
   5916 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5917 			txs = &txq->txq_soft[i];
   5918 			if (txs->txs_mbuf != NULL) {
   5919 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5920 				m_freem(txs->txs_mbuf);
   5921 				txs->txs_mbuf = NULL;
   5922 			}
   5923 		}
   5924 		mutex_exit(txq->txq_lock);
   5925 	}
   5926 
   5927 	/* Mark the interface as down and cancel the watchdog timer. */
   5928 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5929 	ifp->if_timer = 0;
   5930 
   5931 	if (disable) {
   5932 		for (i = 0; i < sc->sc_nqueues; i++) {
   5933 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5934 			mutex_enter(rxq->rxq_lock);
   5935 			wm_rxdrain(rxq);
   5936 			mutex_exit(rxq->rxq_lock);
   5937 		}
   5938 	}
   5939 
   5940 #if 0 /* notyet */
   5941 	if (sc->sc_type >= WM_T_82544)
   5942 		CSR_WRITE(sc, WMREG_WUC, 0);
   5943 #endif
   5944 }
   5945 
   5946 static void
   5947 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5948 {
   5949 	struct mbuf *m;
   5950 	int i;
   5951 
   5952 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5953 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5954 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5955 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5956 		    m->m_data, m->m_len, m->m_flags);
   5957 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5958 	    i, i == 1 ? "" : "s");
   5959 }
   5960 
   5961 /*
   5962  * wm_82547_txfifo_stall:
   5963  *
   5964  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5965  *	reset the FIFO pointers, and restart packet transmission.
   5966  */
   5967 static void
   5968 wm_82547_txfifo_stall(void *arg)
   5969 {
   5970 	struct wm_softc *sc = arg;
   5971 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5972 
   5973 	mutex_enter(txq->txq_lock);
   5974 
   5975 	if (txq->txq_stopping)
   5976 		goto out;
   5977 
   5978 	if (txq->txq_fifo_stall) {
   5979 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5980 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5981 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5982 			/*
   5983 			 * Packets have drained.  Stop transmitter, reset
   5984 			 * FIFO pointers, restart transmitter, and kick
   5985 			 * the packet queue.
   5986 			 */
   5987 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5988 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5989 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5990 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5991 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5992 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5993 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5994 			CSR_WRITE_FLUSH(sc);
   5995 
   5996 			txq->txq_fifo_head = 0;
   5997 			txq->txq_fifo_stall = 0;
   5998 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5999 		} else {
   6000 			/*
   6001 			 * Still waiting for packets to drain; try again in
   6002 			 * another tick.
   6003 			 */
   6004 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6005 		}
   6006 	}
   6007 
   6008 out:
   6009 	mutex_exit(txq->txq_lock);
   6010 }
   6011 
   6012 /*
   6013  * wm_82547_txfifo_bugchk:
   6014  *
   6015  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6016  *	prevent enqueueing a packet that would wrap around the end
    6017 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6018  *
   6019  *	We do this by checking the amount of space before the end
   6020  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6021  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6022  *	the internal FIFO pointers to the beginning, and restart
   6023  *	transmission on the interface.
   6024  */
   6025 #define	WM_FIFO_HDR		0x10
   6026 #define	WM_82547_PAD_LEN	0x3e0
   6027 static int
   6028 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6029 {
   6030 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6031 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6032 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6033 
   6034 	/* Just return if already stalled. */
   6035 	if (txq->txq_fifo_stall)
   6036 		return 1;
   6037 
   6038 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6039 		/* Stall only occurs in half-duplex mode. */
   6040 		goto send_packet;
   6041 	}
   6042 
   6043 	if (len >= WM_82547_PAD_LEN + space) {
   6044 		txq->txq_fifo_stall = 1;
   6045 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6046 		return 1;
   6047 	}
   6048 
   6049  send_packet:
   6050 	txq->txq_fifo_head += len;
   6051 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6052 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6053 
   6054 	return 0;
   6055 }
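/*
 * Worked example (illustrative, assuming a hypothetical 0x2000-byte
 * FIFO): if txq_fifo_head = 0x1e00, then space = 0x200.  A 1514-byte
 * frame occupies len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600
 * bytes of FIFO, and since 0x600 >= WM_82547_PAD_LEN + 0x200 (= 0x5e0),
 * the packet would wrap, so the queue is stalled until the FIFO drains.
 */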
   6056 
   6057 static int
   6058 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6059 {
   6060 	int error;
   6061 
   6062 	/*
   6063 	 * Allocate the control data structures, and create and load the
   6064 	 * DMA map for it.
   6065 	 *
   6066 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6067 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6068 	 * both sets within the same 4G segment.
   6069 	 */
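	/*
	 * The (bus_size_t)0x100000000ULL boundary argument passed to
	 * bus_dmamem_alloc() below keeps the allocation from crossing
	 * a 4GB boundary, which enforces the note above.
	 */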
   6070 	if (sc->sc_type < WM_T_82544)
   6071 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6072 	else
   6073 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6074 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6075 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6076 	else
   6077 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6078 
   6079 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6080 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6081 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6082 		aprint_error_dev(sc->sc_dev,
   6083 		    "unable to allocate TX control data, error = %d\n",
   6084 		    error);
   6085 		goto fail_0;
   6086 	}
   6087 
   6088 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6089 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6090 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6091 		aprint_error_dev(sc->sc_dev,
   6092 		    "unable to map TX control data, error = %d\n", error);
   6093 		goto fail_1;
   6094 	}
   6095 
   6096 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6097 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6098 		aprint_error_dev(sc->sc_dev,
   6099 		    "unable to create TX control data DMA map, error = %d\n",
   6100 		    error);
   6101 		goto fail_2;
   6102 	}
   6103 
   6104 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6105 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6106 		aprint_error_dev(sc->sc_dev,
   6107 		    "unable to load TX control data DMA map, error = %d\n",
   6108 		    error);
   6109 		goto fail_3;
   6110 	}
   6111 
   6112 	return 0;
   6113 
   6114  fail_3:
   6115 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6116  fail_2:
   6117 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6118 	    WM_TXDESCS_SIZE(txq));
   6119  fail_1:
   6120 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6121  fail_0:
   6122 	return error;
   6123 }
   6124 
   6125 static void
   6126 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6127 {
   6128 
   6129 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6130 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6131 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6132 	    WM_TXDESCS_SIZE(txq));
   6133 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6134 }
   6135 
   6136 static int
   6137 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6138 {
   6139 	int error;
   6140 	size_t rxq_descs_size;
   6141 
   6142 	/*
   6143 	 * Allocate the control data structures, and create and load the
   6144 	 * DMA map for it.
   6145 	 *
   6146 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6147 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6148 	 * both sets within the same 4G segment.
   6149 	 */
   6150 	rxq->rxq_ndesc = WM_NRXDESC;
   6151 	if (sc->sc_type == WM_T_82574)
   6152 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6153 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6154 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6155 	else
   6156 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6157 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6158 
   6159 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6160 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6161 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6162 		aprint_error_dev(sc->sc_dev,
   6163 		    "unable to allocate RX control data, error = %d\n",
   6164 		    error);
   6165 		goto fail_0;
   6166 	}
   6167 
   6168 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6169 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6170 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6171 		aprint_error_dev(sc->sc_dev,
   6172 		    "unable to map RX control data, error = %d\n", error);
   6173 		goto fail_1;
   6174 	}
   6175 
   6176 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6177 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6178 		aprint_error_dev(sc->sc_dev,
   6179 		    "unable to create RX control data DMA map, error = %d\n",
   6180 		    error);
   6181 		goto fail_2;
   6182 	}
   6183 
   6184 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6185 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6186 		aprint_error_dev(sc->sc_dev,
   6187 		    "unable to load RX control data DMA map, error = %d\n",
   6188 		    error);
   6189 		goto fail_3;
   6190 	}
   6191 
   6192 	return 0;
   6193 
   6194  fail_3:
   6195 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6196  fail_2:
   6197 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6198 	    rxq_descs_size);
   6199  fail_1:
   6200 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6201  fail_0:
   6202 	return error;
   6203 }
   6204 
   6205 static void
   6206 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6207 {
   6208 
   6209 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6210 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6211 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6212 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6213 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6214 }
   6215 
   6216 
   6217 static int
   6218 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6219 {
   6220 	int i, error;
   6221 
   6222 	/* Create the transmit buffer DMA maps. */
   6223 	WM_TXQUEUELEN(txq) =
   6224 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6225 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6226 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6227 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6228 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6229 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6230 			aprint_error_dev(sc->sc_dev,
   6231 			    "unable to create Tx DMA map %d, error = %d\n",
   6232 			    i, error);
   6233 			goto fail;
   6234 		}
   6235 	}
   6236 
   6237 	return 0;
   6238 
   6239  fail:
   6240 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6241 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6242 			bus_dmamap_destroy(sc->sc_dmat,
   6243 			    txq->txq_soft[i].txs_dmamap);
   6244 	}
   6245 	return error;
   6246 }
   6247 
   6248 static void
   6249 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6250 {
   6251 	int i;
   6252 
   6253 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6254 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6255 			bus_dmamap_destroy(sc->sc_dmat,
   6256 			    txq->txq_soft[i].txs_dmamap);
   6257 	}
   6258 }
   6259 
   6260 static int
   6261 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6262 {
   6263 	int i, error;
   6264 
   6265 	/* Create the receive buffer DMA maps. */
   6266 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6267 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6268 			    MCLBYTES, 0, 0,
   6269 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6270 			aprint_error_dev(sc->sc_dev,
   6271 			    "unable to create Rx DMA map %d error = %d\n",
   6272 			    i, error);
   6273 			goto fail;
   6274 		}
   6275 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6276 	}
   6277 
   6278 	return 0;
   6279 
   6280  fail:
   6281 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6282 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6283 			bus_dmamap_destroy(sc->sc_dmat,
   6284 			    rxq->rxq_soft[i].rxs_dmamap);
   6285 	}
   6286 	return error;
   6287 }
   6288 
   6289 static void
   6290 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6291 {
   6292 	int i;
   6293 
   6294 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6295 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6296 			bus_dmamap_destroy(sc->sc_dmat,
   6297 			    rxq->rxq_soft[i].rxs_dmamap);
   6298 	}
   6299 }
   6300 
   6301 /*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6304  */
   6305 static int
   6306 wm_alloc_txrx_queues(struct wm_softc *sc)
   6307 {
   6308 	int i, error, tx_done, rx_done;
   6309 
   6310 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6311 	    KM_SLEEP);
   6312 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6314 		error = ENOMEM;
   6315 		goto fail_0;
   6316 	}
   6317 
   6318 	/*
   6319 	 * For transmission
   6320 	 */
   6321 	error = 0;
   6322 	tx_done = 0;
   6323 	for (i = 0; i < sc->sc_nqueues; i++) {
   6324 #ifdef WM_EVENT_COUNTERS
   6325 		int j;
   6326 		const char *xname;
   6327 #endif
   6328 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6329 		txq->txq_sc = sc;
   6330 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6331 
   6332 		error = wm_alloc_tx_descs(sc, txq);
   6333 		if (error)
   6334 			break;
   6335 		error = wm_alloc_tx_buffer(sc, txq);
   6336 		if (error) {
   6337 			wm_free_tx_descs(sc, txq);
   6338 			break;
   6339 		}
   6340 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6341 		if (txq->txq_interq == NULL) {
   6342 			wm_free_tx_descs(sc, txq);
   6343 			wm_free_tx_buffer(sc, txq);
   6344 			error = ENOMEM;
   6345 			break;
   6346 		}
   6347 
   6348 #ifdef WM_EVENT_COUNTERS
   6349 		xname = device_xname(sc->sc_dev);
   6350 
   6351 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6352 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6353 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6354 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6355 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6356 
   6357 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6358 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6359 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6360 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6361 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6362 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6363 
   6364 		for (j = 0; j < WM_NTXSEGS; j++) {
   6365 			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   6369 		}
   6370 
   6371 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6372 
   6373 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6374 #endif /* WM_EVENT_COUNTERS */
   6375 
   6376 		tx_done++;
   6377 	}
   6378 	if (error)
   6379 		goto fail_1;
   6380 
   6381 	/*
	 * For receive
   6383 	 */
   6384 	error = 0;
   6385 	rx_done = 0;
   6386 	for (i = 0; i < sc->sc_nqueues; i++) {
   6387 #ifdef WM_EVENT_COUNTERS
   6388 		const char *xname;
   6389 #endif
   6390 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6391 		rxq->rxq_sc = sc;
   6392 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6393 
   6394 		error = wm_alloc_rx_descs(sc, rxq);
   6395 		if (error)
   6396 			break;
   6397 
   6398 		error = wm_alloc_rx_buffer(sc, rxq);
   6399 		if (error) {
   6400 			wm_free_rx_descs(sc, rxq);
   6401 			break;
   6402 		}
   6403 
   6404 #ifdef WM_EVENT_COUNTERS
   6405 		xname = device_xname(sc->sc_dev);
   6406 
   6407 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6408 
   6409 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6410 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6411 #endif /* WM_EVENT_COUNTERS */
   6412 
   6413 		rx_done++;
   6414 	}
   6415 	if (error)
   6416 		goto fail_2;
   6417 
   6418 	return 0;
   6419 
   6420  fail_2:
   6421 	for (i = 0; i < rx_done; i++) {
   6422 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6423 		wm_free_rx_buffer(sc, rxq);
   6424 		wm_free_rx_descs(sc, rxq);
   6425 		if (rxq->rxq_lock)
   6426 			mutex_obj_free(rxq->rxq_lock);
   6427 	}
   6428  fail_1:
   6429 	for (i = 0; i < tx_done; i++) {
   6430 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6431 		pcq_destroy(txq->txq_interq);
   6432 		wm_free_tx_buffer(sc, txq);
   6433 		wm_free_tx_descs(sc, txq);
   6434 		if (txq->txq_lock)
   6435 			mutex_obj_free(txq->txq_lock);
   6436 	}
   6437 
   6438 	kmem_free(sc->sc_queue,
   6439 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6440  fail_0:
   6441 	return error;
   6442 }
   6443 
   6444 /*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6447  */
   6448 static void
   6449 wm_free_txrx_queues(struct wm_softc *sc)
   6450 {
   6451 	int i;
   6452 
   6453 	for (i = 0; i < sc->sc_nqueues; i++) {
   6454 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6455 
   6456 #ifdef WM_EVENT_COUNTERS
   6457 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6458 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6459 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6460 #endif /* WM_EVENT_COUNTERS */
   6461 
   6462 		wm_free_rx_buffer(sc, rxq);
   6463 		wm_free_rx_descs(sc, rxq);
   6464 		if (rxq->rxq_lock)
   6465 			mutex_obj_free(rxq->rxq_lock);
   6466 	}
   6467 
   6468 	for (i = 0; i < sc->sc_nqueues; i++) {
   6469 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6470 		struct mbuf *m;
   6471 #ifdef WM_EVENT_COUNTERS
   6472 		int j;
   6473 
   6474 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6475 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6476 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6477 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6478 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6479 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6480 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6481 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6482 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6483 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6484 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6485 
   6486 		for (j = 0; j < WM_NTXSEGS; j++)
   6487 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6488 
   6489 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6490 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6491 #endif /* WM_EVENT_COUNTERS */
   6492 
   6493 		/* drain txq_interq */
   6494 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6495 			m_freem(m);
   6496 		pcq_destroy(txq->txq_interq);
   6497 
   6498 		wm_free_tx_buffer(sc, txq);
   6499 		wm_free_tx_descs(sc, txq);
   6500 		if (txq->txq_lock)
   6501 			mutex_obj_free(txq->txq_lock);
   6502 	}
   6503 
   6504 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6505 }
   6506 
   6507 static void
   6508 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6509 {
   6510 
   6511 	KASSERT(mutex_owned(txq->txq_lock));
   6512 
   6513 	/* Initialize the transmit descriptor ring. */
   6514 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6515 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6516 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6517 	txq->txq_free = WM_NTXDESC(txq);
   6518 	txq->txq_next = 0;
   6519 }
   6520 
   6521 static void
   6522 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6523     struct wm_txqueue *txq)
   6524 {
   6525 
   6526 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6527 		device_xname(sc->sc_dev), __func__));
   6528 	KASSERT(mutex_owned(txq->txq_lock));
   6529 
   6530 	if (sc->sc_type < WM_T_82543) {
   6531 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6532 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6533 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6534 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6535 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6536 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6537 	} else {
   6538 		int qid = wmq->wmq_id;
   6539 
   6540 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6541 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6542 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6543 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6544 
   6545 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6546 			/*
   6547 			 * Don't write TDT before TCTL.EN is set.
			 * See the datasheet.
   6549 			 */
   6550 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6551 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6552 			    | TXDCTL_WTHRESH(0));
   6553 		else {
   6554 			/* XXX should update with AIM? */
   6555 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6556 			if (sc->sc_type >= WM_T_82540) {
				/* Should be the same as TIDV */
   6558 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6559 			}
   6560 
   6561 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6562 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6563 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6564 		}
   6565 	}
   6566 }
   6567 
   6568 static void
   6569 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6570 {
   6571 	int i;
   6572 
   6573 	KASSERT(mutex_owned(txq->txq_lock));
   6574 
   6575 	/* Initialize the transmit job descriptors. */
   6576 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6577 		txq->txq_soft[i].txs_mbuf = NULL;
   6578 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6579 	txq->txq_snext = 0;
   6580 	txq->txq_sdirty = 0;
   6581 }
   6582 
   6583 static void
   6584 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6585     struct wm_txqueue *txq)
   6586 {
   6587 
   6588 	KASSERT(mutex_owned(txq->txq_lock));
   6589 
   6590 	/*
   6591 	 * Set up some register offsets that are different between
   6592 	 * the i82542 and the i82543 and later chips.
   6593 	 */
   6594 	if (sc->sc_type < WM_T_82543)
   6595 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6596 	else
   6597 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6598 
   6599 	wm_init_tx_descs(sc, txq);
   6600 	wm_init_tx_regs(sc, wmq, txq);
   6601 	wm_init_tx_buffer(sc, txq);
   6602 }
   6603 
   6604 static void
   6605 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6606     struct wm_rxqueue *rxq)
   6607 {
   6608 
   6609 	KASSERT(mutex_owned(rxq->rxq_lock));
   6610 
   6611 	/*
   6612 	 * Initialize the receive descriptor and receive job
   6613 	 * descriptor rings.
   6614 	 */
   6615 	if (sc->sc_type < WM_T_82543) {
   6616 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6617 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6618 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6619 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6620 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6621 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6622 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6623 
   6624 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6625 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6626 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6627 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6628 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6629 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6630 	} else {
   6631 		int qid = wmq->wmq_id;
   6632 
   6633 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6634 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
		CSR_WRITE(sc, WMREG_RDLEN(qid),
		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6636 
   6637 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6638 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);

			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
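			/*
			 * A worked example, assuming the usual 1 KB
			 * BSIZEPKT granularity (SRRCTL_BSIZEPKT_SHIFT of
			 * 10) and MCLBYTES of 2048: the value written
			 * above is 2048 >> 10 = 2, i.e. a 2 KB packet
			 * buffer per descriptor.
			 */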
   6644 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6645 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6646 			    | RXDCTL_WTHRESH(1));
   6647 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6648 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6649 		} else {
   6650 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6651 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6652 			/* XXX should update with AIM? */
   6653 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same as RDTR */
   6655 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6656 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6657 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6658 		}
   6659 	}
   6660 }
   6661 
   6662 static int
   6663 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6664 {
   6665 	struct wm_rxsoft *rxs;
   6666 	int error, i;
   6667 
   6668 	KASSERT(mutex_owned(rxq->rxq_lock));
   6669 
   6670 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6671 		rxs = &rxq->rxq_soft[i];
   6672 		if (rxs->rxs_mbuf == NULL) {
   6673 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6674 				log(LOG_ERR, "%s: unable to allocate or map "
   6675 				    "rx buffer %d, error = %d\n",
   6676 				    device_xname(sc->sc_dev), i, error);
   6677 				/*
   6678 				 * XXX Should attempt to run with fewer receive
   6679 				 * XXX buffers instead of just failing.
   6680 				 */
   6681 				wm_rxdrain(rxq);
   6682 				return ENOMEM;
   6683 			}
   6684 		} else {
   6685 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6686 				wm_init_rxdesc(rxq, i);
   6687 			/*
			 * On 82575 and newer devices, the Rx descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
   6691 			 */
   6692 		}
   6693 	}
   6694 	rxq->rxq_ptr = 0;
   6695 	rxq->rxq_discard = 0;
   6696 	WM_RXCHAIN_RESET(rxq);
   6697 
   6698 	return 0;
   6699 }
   6700 
   6701 static int
   6702 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6703     struct wm_rxqueue *rxq)
   6704 {
   6705 
   6706 	KASSERT(mutex_owned(rxq->rxq_lock));
   6707 
   6708 	/*
   6709 	 * Set up some register offsets that are different between
   6710 	 * the i82542 and the i82543 and later chips.
   6711 	 */
   6712 	if (sc->sc_type < WM_T_82543)
   6713 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6714 	else
   6715 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6716 
   6717 	wm_init_rx_regs(sc, wmq, rxq);
   6718 	return wm_init_rx_buffer(sc, rxq);
   6719 }
   6720 
   6721 /*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6724  */
   6725 static int
   6726 wm_init_txrx_queues(struct wm_softc *sc)
   6727 {
   6728 	int i, error = 0;
   6729 
   6730 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6731 		device_xname(sc->sc_dev), __func__));
   6732 
   6733 	for (i = 0; i < sc->sc_nqueues; i++) {
   6734 		struct wm_queue *wmq = &sc->sc_queue[i];
   6735 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6736 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6737 
   6738 		/*
   6739 		 * TODO
		 * Currently, we use a constant ITR value instead of AIM
		 * (adaptive interrupt moderation).  Furthermore, the
		 * interrupt interval for multiqueue (which uses polling
		 * mode) is shorter than the default value.  More tuning
		 * and AIM support are required.
   6744 		 */
   6745 		if (wm_is_using_multiqueue(sc))
   6746 			wmq->wmq_itr = 50;
   6747 		else
   6748 			wmq->wmq_itr = sc->sc_itr_init;
   6749 		wmq->wmq_set_itr = true;
   6750 
   6751 		mutex_enter(txq->txq_lock);
   6752 		wm_init_tx_queue(sc, wmq, txq);
   6753 		mutex_exit(txq->txq_lock);
   6754 
   6755 		mutex_enter(rxq->rxq_lock);
   6756 		error = wm_init_rx_queue(sc, wmq, rxq);
   6757 		mutex_exit(rxq->rxq_lock);
   6758 		if (error)
   6759 			break;
   6760 	}
   6761 
   6762 	return error;
   6763 }
   6764 
   6765 /*
   6766  * wm_tx_offload:
   6767  *
   6768  *	Set up TCP/IP checksumming parameters for the
   6769  *	specified packet.
   6770  */
   6771 static int
   6772 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6773     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6774 {
   6775 	struct mbuf *m0 = txs->txs_mbuf;
   6776 	struct livengood_tcpip_ctxdesc *t;
   6777 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6778 	uint32_t ipcse;
   6779 	struct ether_header *eh;
   6780 	int offset, iphl;
   6781 	uint8_t fields;
   6782 
   6783 	/*
   6784 	 * XXX It would be nice if the mbuf pkthdr had offset
   6785 	 * fields for the protocol headers.
   6786 	 */
   6787 
   6788 	eh = mtod(m0, struct ether_header *);
   6789 	switch (htons(eh->ether_type)) {
   6790 	case ETHERTYPE_IP:
   6791 	case ETHERTYPE_IPV6:
   6792 		offset = ETHER_HDR_LEN;
   6793 		break;
   6794 
   6795 	case ETHERTYPE_VLAN:
   6796 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6797 		break;
   6798 
   6799 	default:
   6800 		/*
   6801 		 * Don't support this protocol or encapsulation.
   6802 		 */
   6803 		*fieldsp = 0;
   6804 		*cmdp = 0;
   6805 		return 0;
   6806 	}
   6807 
   6808 	if ((m0->m_pkthdr.csum_flags &
   6809 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6810 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6811 	} else {
   6812 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6813 	}
   6814 	ipcse = offset + iphl - 1;
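	/*
	 * For example, for an untagged IPv4 packet with a 20-byte IP
	 * header: offset is ETHER_HDR_LEN (14), iphl is 20, and ipcse is
	 * 14 + 20 - 1 = 33, the offset of the last byte of the IP header.
	 */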
   6815 
   6816 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6817 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6818 	seg = 0;
   6819 	fields = 0;
   6820 
   6821 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6822 		int hlen = offset + iphl;
   6823 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6824 
   6825 		if (__predict_false(m0->m_len <
   6826 				    (hlen + sizeof(struct tcphdr)))) {
   6827 			/*
   6828 			 * TCP/IP headers are not in the first mbuf; we need
   6829 			 * to do this the slow and painful way.  Let's just
   6830 			 * hope this doesn't happen very often.
   6831 			 */
   6832 			struct tcphdr th;
   6833 
   6834 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6835 
   6836 			m_copydata(m0, hlen, sizeof(th), &th);
   6837 			if (v4) {
   6838 				struct ip ip;
   6839 
   6840 				m_copydata(m0, offset, sizeof(ip), &ip);
   6841 				ip.ip_len = 0;
   6842 				m_copyback(m0,
   6843 				    offset + offsetof(struct ip, ip_len),
   6844 				    sizeof(ip.ip_len), &ip.ip_len);
   6845 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6846 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6847 			} else {
   6848 				struct ip6_hdr ip6;
   6849 
   6850 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6851 				ip6.ip6_plen = 0;
   6852 				m_copyback(m0,
   6853 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6854 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6855 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6856 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6857 			}
   6858 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6859 			    sizeof(th.th_sum), &th.th_sum);
   6860 
   6861 			hlen += th.th_off << 2;
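			/*
			 * th_off counts 32-bit words, so for a TCP header
			 * with no options this adds 5 << 2 = 20 bytes to
			 * hlen.
			 */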
   6862 		} else {
   6863 			/*
   6864 			 * TCP/IP headers are in the first mbuf; we can do
   6865 			 * this the easy way.
   6866 			 */
   6867 			struct tcphdr *th;
   6868 
   6869 			if (v4) {
   6870 				struct ip *ip =
   6871 				    (void *)(mtod(m0, char *) + offset);
   6872 				th = (void *)(mtod(m0, char *) + hlen);
   6873 
   6874 				ip->ip_len = 0;
   6875 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6876 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6877 			} else {
   6878 				struct ip6_hdr *ip6 =
   6879 				    (void *)(mtod(m0, char *) + offset);
   6880 				th = (void *)(mtod(m0, char *) + hlen);
   6881 
   6882 				ip6->ip6_plen = 0;
   6883 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6884 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6885 			}
   6886 			hlen += th->th_off << 2;
   6887 		}
   6888 
   6889 		if (v4) {
   6890 			WM_Q_EVCNT_INCR(txq, txtso);
   6891 			cmdlen |= WTX_TCPIP_CMD_IP;
   6892 		} else {
   6893 			WM_Q_EVCNT_INCR(txq, txtso6);
   6894 			ipcse = 0;
   6895 		}
   6896 		cmd |= WTX_TCPIP_CMD_TSE;
   6897 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6898 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6899 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6900 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
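		/*
		 * For example, for an untagged TCP/IPv4 packet with no IP
		 * or TCP options, hlen is 14 + 20 + 20 = 54 bytes of
		 * header; the chip replicates that header for each
		 * MSS-sized (m0->m_pkthdr.segsz) chunk of the remaining
		 * payload.
		 */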
   6901 	}
   6902 
   6903 	/*
   6904 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6905 	 * offload feature, if we load the context descriptor, we
   6906 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6907 	 */
   6908 
   6909 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6910 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6911 	    WTX_TCPIP_IPCSE(ipcse);
   6912 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6913 		WM_Q_EVCNT_INCR(txq, txipsum);
   6914 		fields |= WTX_IXSM;
   6915 	}
   6916 
   6917 	offset += iphl;
   6918 
   6919 	if (m0->m_pkthdr.csum_flags &
   6920 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6921 		WM_Q_EVCNT_INCR(txq, txtusum);
   6922 		fields |= WTX_TXSM;
   6923 		tucs = WTX_TCPIP_TUCSS(offset) |
   6924 		    WTX_TCPIP_TUCSO(offset +
   6925 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6926 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6927 	} else if ((m0->m_pkthdr.csum_flags &
   6928 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6929 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6930 		fields |= WTX_TXSM;
   6931 		tucs = WTX_TCPIP_TUCSS(offset) |
   6932 		    WTX_TCPIP_TUCSO(offset +
   6933 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6934 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6935 	} else {
   6936 		/* Just initialize it to a valid TCP context. */
   6937 		tucs = WTX_TCPIP_TUCSS(offset) |
   6938 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6939 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6940 	}
   6941 
   6942 	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574, which requires a context descriptor for
	 * every packet when two descriptor queues are used.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
   6948 	 */
   6949 	/* Fill in the context descriptor. */
   6950 	t = (struct livengood_tcpip_ctxdesc *)
   6951 	    &txq->txq_descs[txq->txq_next];
   6952 	t->tcpip_ipcs = htole32(ipcs);
   6953 	t->tcpip_tucs = htole32(tucs);
   6954 	t->tcpip_cmdlen = htole32(cmdlen);
   6955 	t->tcpip_seg = htole32(seg);
   6956 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6957 
   6958 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6959 	txs->txs_ndesc++;
   6960 
   6961 	*cmdp = cmd;
   6962 	*fieldsp = fields;
   6963 
   6964 	return 0;
   6965 }
   6966 
   6967 static inline int
   6968 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6969 {
   6970 	struct wm_softc *sc = ifp->if_softc;
   6971 	u_int cpuid = cpu_index(curcpu());
   6972 
   6973 	/*
	 * Currently, a simple CPU index based distribution strategy.
	 * TODO:
	 * Distribute by flow ID (i.e. the RSS hash value).
   6977 	 */
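	/*
	 * For example (assuming 4 queues, 8 CPUs and an affinity offset
	 * of 0), CPUs 0-7 map to queues 0, 1, 2, 3, 0, 1, 2, 3, so each
	 * queue is shared by exactly two CPUs.
	 */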
	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6979 }
   6980 
   6981 /*
   6982  * wm_start:		[ifnet interface function]
   6983  *
   6984  *	Start packet transmission on the interface.
   6985  */
   6986 static void
   6987 wm_start(struct ifnet *ifp)
   6988 {
   6989 	struct wm_softc *sc = ifp->if_softc;
   6990 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6991 
   6992 #ifdef WM_MPSAFE
   6993 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6994 #endif
   6995 	/*
   6996 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6997 	 */
   6998 
   6999 	mutex_enter(txq->txq_lock);
   7000 	if (!txq->txq_stopping)
   7001 		wm_start_locked(ifp);
   7002 	mutex_exit(txq->txq_lock);
   7003 }
   7004 
   7005 static void
   7006 wm_start_locked(struct ifnet *ifp)
   7007 {
   7008 	struct wm_softc *sc = ifp->if_softc;
   7009 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7010 
   7011 	wm_send_common_locked(ifp, txq, false);
   7012 }
   7013 
   7014 static int
   7015 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7016 {
   7017 	int qid;
   7018 	struct wm_softc *sc = ifp->if_softc;
   7019 	struct wm_txqueue *txq;
   7020 
   7021 	qid = wm_select_txqueue(ifp, m);
   7022 	txq = &sc->sc_queue[qid].wmq_txq;
   7023 
   7024 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7025 		m_freem(m);
   7026 		WM_Q_EVCNT_INCR(txq, txdrop);
   7027 		return ENOBUFS;
   7028 	}
   7029 
   7030 	/*
   7031 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7032 	 */
   7033 	ifp->if_obytes += m->m_pkthdr.len;
   7034 	if (m->m_flags & M_MCAST)
   7035 		ifp->if_omcasts++;
   7036 
   7037 	if (mutex_tryenter(txq->txq_lock)) {
   7038 		if (!txq->txq_stopping)
   7039 			wm_transmit_locked(ifp, txq);
   7040 		mutex_exit(txq->txq_lock);
   7041 	}
   7042 
   7043 	return 0;
   7044 }
   7045 
   7046 static void
   7047 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7048 {
   7049 
   7050 	wm_send_common_locked(ifp, txq, true);
   7051 }
   7052 
   7053 static void
   7054 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7055     bool is_transmit)
   7056 {
   7057 	struct wm_softc *sc = ifp->if_softc;
   7058 	struct mbuf *m0;
   7059 	struct wm_txsoft *txs;
   7060 	bus_dmamap_t dmamap;
   7061 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7062 	bus_addr_t curaddr;
   7063 	bus_size_t seglen, curlen;
   7064 	uint32_t cksumcmd;
   7065 	uint8_t cksumfields;
   7066 
   7067 	KASSERT(mutex_owned(txq->txq_lock));
   7068 
   7069 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7070 		return;
   7071 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7072 		return;
   7073 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7074 		return;
   7075 
   7076 	/* Remember the previous number of free descriptors. */
   7077 	ofree = txq->txq_free;
   7078 
   7079 	/*
   7080 	 * Loop through the send queue, setting up transmit descriptors
   7081 	 * until we drain the queue, or use up all available transmit
   7082 	 * descriptors.
   7083 	 */
   7084 	for (;;) {
   7085 		m0 = NULL;
   7086 
   7087 		/* Get a work queue entry. */
   7088 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7089 			wm_txeof(sc, txq);
   7090 			if (txq->txq_sfree == 0) {
   7091 				DPRINTF(WM_DEBUG_TX,
   7092 				    ("%s: TX: no free job descriptors\n",
   7093 					device_xname(sc->sc_dev)));
   7094 				WM_Q_EVCNT_INCR(txq, txsstall);
   7095 				break;
   7096 			}
   7097 		}
   7098 
   7099 		/* Grab a packet off the queue. */
   7100 		if (is_transmit)
   7101 			m0 = pcq_get(txq->txq_interq);
   7102 		else
   7103 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7104 		if (m0 == NULL)
   7105 			break;
   7106 
   7107 		DPRINTF(WM_DEBUG_TX,
   7108 		    ("%s: TX: have packet to transmit: %p\n",
   7109 		    device_xname(sc->sc_dev), m0));
   7110 
   7111 		txs = &txq->txq_soft[txq->txq_snext];
   7112 		dmamap = txs->txs_dmamap;
   7113 
   7114 		use_tso = (m0->m_pkthdr.csum_flags &
   7115 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7116 
   7117 		/*
   7118 		 * So says the Linux driver:
   7119 		 * The controller does a simple calculation to make sure
   7120 		 * there is enough room in the FIFO before initiating the
   7121 		 * DMA for each buffer.  The calc is:
   7122 		 *	4 = ceil(buffer len / MSS)
   7123 		 * To make sure we don't overrun the FIFO, adjust the max
   7124 		 * buffer len if the MSS drops.
   7125 		 */
   7126 		dmamap->dm_maxsegsz =
   7127 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7128 		    ? m0->m_pkthdr.segsz << 2
   7129 		    : WTX_MAX_LEN;
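		/*
		 * In other words, for TSO this is
		 * min(4 * MSS, WTX_MAX_LEN); e.g. a typical MSS of 1460
		 * yields a candidate limit of 1460 << 2 = 5840 bytes.
		 */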
   7130 
   7131 		/*
   7132 		 * Load the DMA map.  If this fails, the packet either
   7133 		 * didn't fit in the allotted number of segments, or we
   7134 		 * were short on resources.  For the too-many-segments
   7135 		 * case, we simply report an error and drop the packet,
   7136 		 * since we can't sanely copy a jumbo packet to a single
   7137 		 * buffer.
   7138 		 */
   7139 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7140 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7141 		if (error) {
   7142 			if (error == EFBIG) {
   7143 				WM_Q_EVCNT_INCR(txq, txdrop);
   7144 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7145 				    "DMA segments, dropping...\n",
   7146 				    device_xname(sc->sc_dev));
   7147 				wm_dump_mbuf_chain(sc, m0);
   7148 				m_freem(m0);
   7149 				continue;
   7150 			}
   7151 			/*  Short on resources, just stop for now. */
   7152 			DPRINTF(WM_DEBUG_TX,
   7153 			    ("%s: TX: dmamap load failed: %d\n",
   7154 			    device_xname(sc->sc_dev), error));
   7155 			break;
   7156 		}
   7157 
   7158 		segs_needed = dmamap->dm_nsegs;
   7159 		if (use_tso) {
   7160 			/* For sentinel descriptor; see below. */
   7161 			segs_needed++;
   7162 		}
   7163 
   7164 		/*
   7165 		 * Ensure we have enough descriptors free to describe
   7166 		 * the packet.  Note, we always reserve one descriptor
   7167 		 * at the end of the ring due to the semantics of the
   7168 		 * TDT register, plus one more in the event we need
   7169 		 * to load offload context.
   7170 		 */
   7171 		if (segs_needed > txq->txq_free - 2) {
   7172 			/*
   7173 			 * Not enough free descriptors to transmit this
   7174 			 * packet.  We haven't committed anything yet,
   7175 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7177 			 * layer that there are no more slots left.
   7178 			 */
   7179 			DPRINTF(WM_DEBUG_TX,
   7180 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7181 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7182 			    segs_needed, txq->txq_free - 1));
   7183 			if (!is_transmit)
   7184 				ifp->if_flags |= IFF_OACTIVE;
   7185 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7186 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7187 			WM_Q_EVCNT_INCR(txq, txdstall);
   7188 			break;
   7189 		}
   7190 
   7191 		/*
   7192 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7193 		 * once we know we can transmit the packet, since we
   7194 		 * do some internal FIFO space accounting here.
   7195 		 */
   7196 		if (sc->sc_type == WM_T_82547 &&
   7197 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7198 			DPRINTF(WM_DEBUG_TX,
   7199 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7200 			    device_xname(sc->sc_dev)));
   7201 			if (!is_transmit)
   7202 				ifp->if_flags |= IFF_OACTIVE;
   7203 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7204 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7205 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7206 			break;
   7207 		}
   7208 
   7209 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7210 
   7211 		DPRINTF(WM_DEBUG_TX,
   7212 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7213 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7214 
   7215 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7216 
   7217 		/*
   7218 		 * Store a pointer to the packet so that we can free it
   7219 		 * later.
   7220 		 *
   7221 		 * Initially, we consider the number of descriptors the
   7222 		 * packet uses the number of DMA segments.  This may be
   7223 		 * incremented by 1 if we do checksum offload (a descriptor
   7224 		 * is used to set the checksum context).
   7225 		 */
   7226 		txs->txs_mbuf = m0;
   7227 		txs->txs_firstdesc = txq->txq_next;
   7228 		txs->txs_ndesc = segs_needed;
   7229 
   7230 		/* Set up offload parameters for this packet. */
   7231 		if (m0->m_pkthdr.csum_flags &
   7232 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7233 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7234 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7235 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7236 					  &cksumfields) != 0) {
   7237 				/* Error message already displayed. */
   7238 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7239 				continue;
   7240 			}
   7241 		} else {
   7242 			cksumcmd = 0;
   7243 			cksumfields = 0;
   7244 		}
   7245 
   7246 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7247 
   7248 		/* Sync the DMA map. */
   7249 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7250 		    BUS_DMASYNC_PREWRITE);
   7251 
   7252 		/* Initialize the transmit descriptor. */
   7253 		for (nexttx = txq->txq_next, seg = 0;
   7254 		     seg < dmamap->dm_nsegs; seg++) {
   7255 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7256 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7257 			     seglen != 0;
   7258 			     curaddr += curlen, seglen -= curlen,
   7259 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7260 				curlen = seglen;
   7261 
   7262 				/*
   7263 				 * So says the Linux driver:
   7264 				 * Work around for premature descriptor
   7265 				 * write-backs in TSO mode.  Append a
   7266 				 * 4-byte sentinel descriptor.
   7267 				 */
   7268 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7269 				    curlen > 8)
   7270 					curlen -= 4;
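				/*
				 * The 4 bytes trimmed here become a
				 * separate descriptor on the next pass of
				 * this loop (seglen is still nonzero);
				 * that descriptor is the sentinel
				 * accounted for in segs_needed above.
				 */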
   7271 
   7272 				wm_set_dma_addr(
   7273 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7274 				txq->txq_descs[nexttx].wtx_cmdlen
   7275 				    = htole32(cksumcmd | curlen);
   7276 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7277 				    = 0;
   7278 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7279 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7281 				lasttx = nexttx;
   7282 
   7283 				DPRINTF(WM_DEBUG_TX,
   7284 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7285 				     "len %#04zx\n",
   7286 				    device_xname(sc->sc_dev), nexttx,
   7287 				    (uint64_t)curaddr, curlen));
   7288 			}
   7289 		}
   7290 
   7291 		KASSERT(lasttx != -1);
   7292 
   7293 		/*
   7294 		 * Set up the command byte on the last descriptor of
   7295 		 * the packet.  If we're in the interrupt delay window,
   7296 		 * delay the interrupt.
   7297 		 */
   7298 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7299 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7300 
   7301 		/*
   7302 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7303 		 * up the descriptor to encapsulate the packet for us.
   7304 		 *
   7305 		 * This is only valid on the last descriptor of the packet.
   7306 		 */
   7307 		if (vlan_has_tag(m0)) {
   7308 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7309 			    htole32(WTX_CMD_VLE);
   7310 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7311 			    = htole16(vlan_get_tag(m0));
   7312 		}
   7313 
   7314 		txs->txs_lastdesc = lasttx;
   7315 
   7316 		DPRINTF(WM_DEBUG_TX,
   7317 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7318 		    device_xname(sc->sc_dev),
   7319 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7320 
   7321 		/* Sync the descriptors we're using. */
   7322 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7323 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7324 
   7325 		/* Give the packet to the chip. */
   7326 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7327 
   7328 		DPRINTF(WM_DEBUG_TX,
   7329 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7330 
   7331 		DPRINTF(WM_DEBUG_TX,
   7332 		    ("%s: TX: finished transmitting packet, job %d\n",
   7333 		    device_xname(sc->sc_dev), txq->txq_snext));
   7334 
   7335 		/* Advance the tx pointer. */
   7336 		txq->txq_free -= txs->txs_ndesc;
   7337 		txq->txq_next = nexttx;
   7338 
   7339 		txq->txq_sfree--;
   7340 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7341 
   7342 		/* Pass the packet to any BPF listeners. */
   7343 		bpf_mtap(ifp, m0);
   7344 	}
   7345 
   7346 	if (m0 != NULL) {
   7347 		if (!is_transmit)
   7348 			ifp->if_flags |= IFF_OACTIVE;
   7349 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7350 		WM_Q_EVCNT_INCR(txq, txdrop);
   7351 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7352 			__func__));
   7353 		m_freem(m0);
   7354 	}
   7355 
   7356 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7357 		/* No more slots; notify upper layer. */
   7358 		if (!is_transmit)
   7359 			ifp->if_flags |= IFF_OACTIVE;
   7360 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7361 	}
   7362 
   7363 	if (txq->txq_free != ofree) {
   7364 		/* Set a watchdog timer in case the chip flakes out. */
   7365 		ifp->if_timer = 5;
   7366 	}
   7367 }
   7368 
   7369 /*
   7370  * wm_nq_tx_offload:
   7371  *
   7372  *	Set up TCP/IP checksumming parameters for the
   7373  *	specified packet, for NEWQUEUE devices
   7374  */
   7375 static int
   7376 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7377     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7378 {
   7379 	struct mbuf *m0 = txs->txs_mbuf;
   7380 	uint32_t vl_len, mssidx, cmdc;
   7381 	struct ether_header *eh;
   7382 	int offset, iphl;
   7383 
   7384 	/*
   7385 	 * XXX It would be nice if the mbuf pkthdr had offset
   7386 	 * fields for the protocol headers.
   7387 	 */
   7388 	*cmdlenp = 0;
   7389 	*fieldsp = 0;
   7390 
   7391 	eh = mtod(m0, struct ether_header *);
   7392 	switch (htons(eh->ether_type)) {
   7393 	case ETHERTYPE_IP:
   7394 	case ETHERTYPE_IPV6:
   7395 		offset = ETHER_HDR_LEN;
   7396 		break;
   7397 
   7398 	case ETHERTYPE_VLAN:
   7399 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7400 		break;
   7401 
   7402 	default:
   7403 		/* Don't support this protocol or encapsulation. */
   7404 		*do_csum = false;
   7405 		return 0;
   7406 	}
   7407 	*do_csum = true;
   7408 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7409 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7410 
   7411 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7412 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
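	/*
	 * For example, an untagged frame has a MACLEN of ETHER_HDR_LEN
	 * (14 bytes); a VLAN-encapsulated frame adds ETHER_VLAN_ENCAP_LEN
	 * (4 bytes) for a MACLEN of 18.
	 */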
   7413 
   7414 	if ((m0->m_pkthdr.csum_flags &
   7415 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7416 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7417 	} else {
   7418 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7419 	}
   7420 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7421 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7422 
   7423 	if (vlan_has_tag(m0)) {
   7424 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7425 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7426 		*cmdlenp |= NQTX_CMD_VLE;
   7427 	}
   7428 
   7429 	mssidx = 0;
   7430 
   7431 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7432 		int hlen = offset + iphl;
   7433 		int tcp_hlen;
   7434 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7435 
   7436 		if (__predict_false(m0->m_len <
   7437 				    (hlen + sizeof(struct tcphdr)))) {
   7438 			/*
   7439 			 * TCP/IP headers are not in the first mbuf; we need
   7440 			 * to do this the slow and painful way.  Let's just
   7441 			 * hope this doesn't happen very often.
   7442 			 */
   7443 			struct tcphdr th;
   7444 
   7445 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7446 
   7447 			m_copydata(m0, hlen, sizeof(th), &th);
   7448 			if (v4) {
   7449 				struct ip ip;
   7450 
   7451 				m_copydata(m0, offset, sizeof(ip), &ip);
   7452 				ip.ip_len = 0;
   7453 				m_copyback(m0,
   7454 				    offset + offsetof(struct ip, ip_len),
   7455 				    sizeof(ip.ip_len), &ip.ip_len);
   7456 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7457 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7458 			} else {
   7459 				struct ip6_hdr ip6;
   7460 
   7461 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7462 				ip6.ip6_plen = 0;
   7463 				m_copyback(m0,
   7464 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7465 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7466 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7467 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7468 			}
   7469 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7470 			    sizeof(th.th_sum), &th.th_sum);
   7471 
   7472 			tcp_hlen = th.th_off << 2;
   7473 		} else {
   7474 			/*
   7475 			 * TCP/IP headers are in the first mbuf; we can do
   7476 			 * this the easy way.
   7477 			 */
   7478 			struct tcphdr *th;
   7479 
   7480 			if (v4) {
   7481 				struct ip *ip =
   7482 				    (void *)(mtod(m0, char *) + offset);
   7483 				th = (void *)(mtod(m0, char *) + hlen);
   7484 
   7485 				ip->ip_len = 0;
   7486 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7487 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7488 			} else {
   7489 				struct ip6_hdr *ip6 =
   7490 				    (void *)(mtod(m0, char *) + offset);
   7491 				th = (void *)(mtod(m0, char *) + hlen);
   7492 
   7493 				ip6->ip6_plen = 0;
   7494 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7495 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7496 			}
   7497 			tcp_hlen = th->th_off << 2;
   7498 		}
   7499 		hlen += tcp_hlen;
   7500 		*cmdlenp |= NQTX_CMD_TSE;
   7501 
   7502 		if (v4) {
   7503 			WM_Q_EVCNT_INCR(txq, txtso);
   7504 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7505 		} else {
   7506 			WM_Q_EVCNT_INCR(txq, txtso6);
   7507 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7508 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7511 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7512 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7513 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7514 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7515 	} else {
   7516 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7517 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7518 	}
   7519 
   7520 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7521 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7522 		cmdc |= NQTXC_CMD_IP4;
   7523 	}
   7524 
   7525 	if (m0->m_pkthdr.csum_flags &
   7526 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7527 		WM_Q_EVCNT_INCR(txq, txtusum);
   7528 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7529 			cmdc |= NQTXC_CMD_TCP;
   7530 		} else {
   7531 			cmdc |= NQTXC_CMD_UDP;
   7532 		}
   7533 		cmdc |= NQTXC_CMD_IP4;
   7534 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7535 	}
   7536 	if (m0->m_pkthdr.csum_flags &
   7537 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7538 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7539 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7540 			cmdc |= NQTXC_CMD_TCP;
   7541 		} else {
   7542 			cmdc |= NQTXC_CMD_UDP;
   7543 		}
   7544 		cmdc |= NQTXC_CMD_IP6;
   7545 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7546 	}
   7547 
   7548 	/*
	 * We don't have to write a context descriptor for every packet to
	 * NEWQUEUE controllers (82575, 82576, 82580, I350, I354, I210 and
	 * I211); writing one per Tx queue is enough for these controllers.
	 * Writing a context descriptor for every packet adds overhead, but
	 * it does not cause problems.
   7555 	 */
   7556 	/* Fill in the context descriptor. */
   7557 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7558 	    htole32(vl_len);
   7559 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7560 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7561 	    htole32(cmdc);
   7562 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7563 	    htole32(mssidx);
   7564 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7565 	DPRINTF(WM_DEBUG_TX,
   7566 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7567 	    txq->txq_next, 0, vl_len));
   7568 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7569 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7570 	txs->txs_ndesc++;
   7571 	return 0;
   7572 }
   7573 
   7574 /*
   7575  * wm_nq_start:		[ifnet interface function]
   7576  *
   7577  *	Start packet transmission on the interface for NEWQUEUE devices
   7578  */
   7579 static void
   7580 wm_nq_start(struct ifnet *ifp)
   7581 {
   7582 	struct wm_softc *sc = ifp->if_softc;
   7583 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7584 
   7585 #ifdef WM_MPSAFE
   7586 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7587 #endif
   7588 	/*
   7589 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7590 	 */
   7591 
   7592 	mutex_enter(txq->txq_lock);
   7593 	if (!txq->txq_stopping)
   7594 		wm_nq_start_locked(ifp);
   7595 	mutex_exit(txq->txq_lock);
   7596 }
   7597 
   7598 static void
   7599 wm_nq_start_locked(struct ifnet *ifp)
   7600 {
   7601 	struct wm_softc *sc = ifp->if_softc;
   7602 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7603 
   7604 	wm_nq_send_common_locked(ifp, txq, false);
   7605 }
   7606 
   7607 static int
   7608 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7609 {
   7610 	int qid;
   7611 	struct wm_softc *sc = ifp->if_softc;
   7612 	struct wm_txqueue *txq;
   7613 
   7614 	qid = wm_select_txqueue(ifp, m);
   7615 	txq = &sc->sc_queue[qid].wmq_txq;
   7616 
   7617 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7618 		m_freem(m);
   7619 		WM_Q_EVCNT_INCR(txq, txdrop);
   7620 		return ENOBUFS;
   7621 	}
   7622 
   7623 	/*
   7624 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7625 	 */
   7626 	ifp->if_obytes += m->m_pkthdr.len;
   7627 	if (m->m_flags & M_MCAST)
   7628 		ifp->if_omcasts++;
   7629 
   7630 	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * The same holds in case (2).
   7639 	 */
   7640 	if (mutex_tryenter(txq->txq_lock)) {
   7641 		if (!txq->txq_stopping)
   7642 			wm_nq_transmit_locked(ifp, txq);
   7643 		mutex_exit(txq->txq_lock);
   7644 	}
   7645 
   7646 	return 0;
   7647 }
   7648 
   7649 static void
   7650 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7651 {
   7652 
   7653 	wm_nq_send_common_locked(ifp, txq, true);
   7654 }
   7655 
   7656 static void
   7657 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7658     bool is_transmit)
   7659 {
   7660 	struct wm_softc *sc = ifp->if_softc;
   7661 	struct mbuf *m0;
   7662 	struct wm_txsoft *txs;
   7663 	bus_dmamap_t dmamap;
   7664 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7665 	bool do_csum, sent;
   7666 
   7667 	KASSERT(mutex_owned(txq->txq_lock));
   7668 
   7669 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7670 		return;
   7671 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7672 		return;
   7673 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7674 		return;
   7675 
   7676 	sent = false;
   7677 
   7678 	/*
   7679 	 * Loop through the send queue, setting up transmit descriptors
   7680 	 * until we drain the queue, or use up all available transmit
   7681 	 * descriptors.
   7682 	 */
   7683 	for (;;) {
   7684 		m0 = NULL;
   7685 
   7686 		/* Get a work queue entry. */
   7687 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7688 			wm_txeof(sc, txq);
   7689 			if (txq->txq_sfree == 0) {
   7690 				DPRINTF(WM_DEBUG_TX,
   7691 				    ("%s: TX: no free job descriptors\n",
   7692 					device_xname(sc->sc_dev)));
   7693 				WM_Q_EVCNT_INCR(txq, txsstall);
   7694 				break;
   7695 			}
   7696 		}
   7697 
   7698 		/* Grab a packet off the queue. */
   7699 		if (is_transmit)
   7700 			m0 = pcq_get(txq->txq_interq);
   7701 		else
   7702 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7703 		if (m0 == NULL)
   7704 			break;
   7705 
   7706 		DPRINTF(WM_DEBUG_TX,
   7707 		    ("%s: TX: have packet to transmit: %p\n",
   7708 		    device_xname(sc->sc_dev), m0));
   7709 
   7710 		txs = &txq->txq_soft[txq->txq_snext];
   7711 		dmamap = txs->txs_dmamap;
   7712 
   7713 		/*
   7714 		 * Load the DMA map.  If this fails, the packet either
   7715 		 * didn't fit in the allotted number of segments, or we
   7716 		 * were short on resources.  For the too-many-segments
   7717 		 * case, we simply report an error and drop the packet,
   7718 		 * since we can't sanely copy a jumbo packet to a single
   7719 		 * buffer.
   7720 		 */
   7721 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7722 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7723 		if (error) {
   7724 			if (error == EFBIG) {
   7725 				WM_Q_EVCNT_INCR(txq, txdrop);
   7726 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7727 				    "DMA segments, dropping...\n",
   7728 				    device_xname(sc->sc_dev));
   7729 				wm_dump_mbuf_chain(sc, m0);
   7730 				m_freem(m0);
   7731 				continue;
   7732 			}
   7733 			/* Short on resources, just stop for now. */
   7734 			DPRINTF(WM_DEBUG_TX,
   7735 			    ("%s: TX: dmamap load failed: %d\n",
   7736 			    device_xname(sc->sc_dev), error));
   7737 			break;
   7738 		}
   7739 
   7740 		segs_needed = dmamap->dm_nsegs;
   7741 
   7742 		/*
   7743 		 * Ensure we have enough descriptors free to describe
   7744 		 * the packet.  Note, we always reserve one descriptor
   7745 		 * at the end of the ring due to the semantics of the
   7746 		 * TDT register, plus one more in the event we need
   7747 		 * to load offload context.
   7748 		 */
   7749 		if (segs_needed > txq->txq_free - 2) {
   7750 			/*
   7751 			 * Not enough free descriptors to transmit this
   7752 			 * packet.  We haven't committed anything yet,
   7753 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7755 			 * layer that there are no more slots left.
   7756 			 */
   7757 			DPRINTF(WM_DEBUG_TX,
   7758 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7759 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7760 			    segs_needed, txq->txq_free - 1));
   7761 			if (!is_transmit)
   7762 				ifp->if_flags |= IFF_OACTIVE;
   7763 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7764 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7765 			WM_Q_EVCNT_INCR(txq, txdstall);
   7766 			break;
   7767 		}
   7768 
   7769 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7770 
   7771 		DPRINTF(WM_DEBUG_TX,
   7772 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7773 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7774 
   7775 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7776 
   7777 		/*
   7778 		 * Store a pointer to the packet so that we can free it
   7779 		 * later.
   7780 		 *
   7781 		 * Initially, we consider the number of descriptors the
   7782 		 * packet uses the number of DMA segments.  This may be
   7783 		 * incremented by 1 if we do checksum offload (a descriptor
   7784 		 * is used to set the checksum context).
   7785 		 */
   7786 		txs->txs_mbuf = m0;
   7787 		txs->txs_firstdesc = txq->txq_next;
   7788 		txs->txs_ndesc = segs_needed;
   7789 
   7790 		/* Set up offload parameters for this packet. */
   7791 		uint32_t cmdlen, fields, dcmdlen;
   7792 		if (m0->m_pkthdr.csum_flags &
   7793 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7794 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7795 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7796 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7797 			    &do_csum) != 0) {
   7798 				/* Error message already displayed. */
   7799 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7800 				continue;
   7801 			}
   7802 		} else {
   7803 			do_csum = false;
   7804 			cmdlen = 0;
   7805 			fields = 0;
   7806 		}
   7807 
   7808 		/* Sync the DMA map. */
   7809 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7810 		    BUS_DMASYNC_PREWRITE);
   7811 
   7812 		/* Initialize the first transmit descriptor. */
   7813 		nexttx = txq->txq_next;
   7814 		if (!do_csum) {
   7815 			/* setup a legacy descriptor */
   7816 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7817 			    dmamap->dm_segs[0].ds_addr);
   7818 			txq->txq_descs[nexttx].wtx_cmdlen =
   7819 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7820 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7821 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7822 			if (vlan_has_tag(m0)) {
   7823 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7824 				    htole32(WTX_CMD_VLE);
   7825 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7826 				    htole16(vlan_get_tag(m0));
   7827 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7829 			}
   7830 			dcmdlen = 0;
   7831 		} else {
   7832 			/* setup an advanced data descriptor */
   7833 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7834 			    htole64(dmamap->dm_segs[0].ds_addr);
   7835 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7836 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7838 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7839 			    htole32(fields);
   7840 			DPRINTF(WM_DEBUG_TX,
   7841 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7842 			    device_xname(sc->sc_dev), nexttx,
   7843 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7844 			DPRINTF(WM_DEBUG_TX,
   7845 			    ("\t 0x%08x%08x\n", fields,
   7846 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7847 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7848 		}
   7849 
   7850 		lasttx = nexttx;
   7851 		nexttx = WM_NEXTTX(txq, nexttx);
   7852 		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from here on.
   7855 		 */
   7856 		for (seg = 1; seg < dmamap->dm_nsegs;
   7857 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7858 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7859 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7860 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7861 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7862 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7863 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7864 			lasttx = nexttx;
   7865 
   7866 			DPRINTF(WM_DEBUG_TX,
   7867 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7868 			     "len %#04zx\n",
   7869 			    device_xname(sc->sc_dev), nexttx,
   7870 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7871 			    dmamap->dm_segs[seg].ds_len));
   7872 		}
   7873 
   7874 		KASSERT(lasttx != -1);
   7875 
   7876 		/*
   7877 		 * Set up the command byte on the last descriptor of
   7878 		 * the packet.  If we're in the interrupt delay window,
   7879 		 * delay the interrupt.
   7880 		 */
   7881 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7882 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7883 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7884 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7885 
   7886 		txs->txs_lastdesc = lasttx;
   7887 
   7888 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7889 		    device_xname(sc->sc_dev),
   7890 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7891 
   7892 		/* Sync the descriptors we're using. */
   7893 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7894 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7895 
   7896 		/* Give the packet to the chip. */
   7897 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7898 		sent = true;
   7899 
   7900 		DPRINTF(WM_DEBUG_TX,
   7901 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7902 
   7903 		DPRINTF(WM_DEBUG_TX,
   7904 		    ("%s: TX: finished transmitting packet, job %d\n",
   7905 		    device_xname(sc->sc_dev), txq->txq_snext));
   7906 
   7907 		/* Advance the tx pointer. */
   7908 		txq->txq_free -= txs->txs_ndesc;
   7909 		txq->txq_next = nexttx;
   7910 
   7911 		txq->txq_sfree--;
   7912 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7913 
   7914 		/* Pass the packet to any BPF listeners. */
   7915 		bpf_mtap(ifp, m0);
   7916 	}
   7917 
   7918 	if (m0 != NULL) {
   7919 		if (!is_transmit)
   7920 			ifp->if_flags |= IFF_OACTIVE;
   7921 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7922 		WM_Q_EVCNT_INCR(txq, txdrop);
   7923 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7924 			__func__));
   7925 		m_freem(m0);
   7926 	}
   7927 
   7928 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7929 		/* No more slots; notify upper layer. */
   7930 		if (!is_transmit)
   7931 			ifp->if_flags |= IFF_OACTIVE;
   7932 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7933 	}
   7934 
   7935 	if (sent) {
   7936 		/* Set a watchdog timer in case the chip flakes out. */
   7937 		ifp->if_timer = 5;
   7938 	}
   7939 }
   7940 
   7941 static void
   7942 wm_deferred_start_locked(struct wm_txqueue *txq)
   7943 {
   7944 	struct wm_softc *sc = txq->txq_sc;
   7945 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7946 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7947 	int qid = wmq->wmq_id;
   7948 
   7949 	KASSERT(mutex_owned(txq->txq_lock));
   7950 
   7951 	if (txq->txq_stopping) {
   7952 		mutex_exit(txq->txq_lock);
   7953 		return;
   7954 	}
   7955 
   7956 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   7958 		if (qid == 0)
   7959 			wm_nq_start_locked(ifp);
   7960 		wm_nq_transmit_locked(ifp, txq);
   7961 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   7963 		if (qid == 0)
   7964 			wm_start_locked(ifp);
   7965 		wm_transmit_locked(ifp, txq);
   7966 	}
   7967 }
   7968 
   7969 /* Interrupt */
   7970 
   7971 /*
   7972  * wm_txeof:
   7973  *
   7974  *	Helper; handle transmit interrupts.
   7975  */
   7976 static int
   7977 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7978 {
   7979 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7980 	struct wm_txsoft *txs;
   7981 	bool processed = false;
   7982 	int count = 0;
   7983 	int i;
   7984 	uint8_t status;
   7985 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7986 
   7987 	KASSERT(mutex_owned(txq->txq_lock));
   7988 
   7989 	if (txq->txq_stopping)
   7990 		return 0;
   7991 
   7992 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7994 	if (wmq->wmq_id == 0)
   7995 		ifp->if_flags &= ~IFF_OACTIVE;
   7996 
   7997 	/*
   7998 	 * Go through the Tx list and free mbufs for those
   7999 	 * frames which have been transmitted.
   8000 	 */
   8001 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8002 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8003 		txs = &txq->txq_soft[i];
   8004 
   8005 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8006 			device_xname(sc->sc_dev), i));
   8007 
   8008 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8009 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8010 
   8011 		status =
   8012 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8013 		if ((status & WTX_ST_DD) == 0) {
   8014 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8015 			    BUS_DMASYNC_PREREAD);
   8016 			break;
   8017 		}
   8018 
   8019 		processed = true;
   8020 		count++;
   8021 		DPRINTF(WM_DEBUG_TX,
   8022 		    ("%s: TX: job %d done: descs %d..%d\n",
   8023 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8024 		    txs->txs_lastdesc));
   8025 
   8026 		/*
   8027 		 * XXX We should probably be using the statistics
   8028 		 * XXX registers, but I don't know if they exist
   8029 		 * XXX on chips before the i82544.
   8030 		 */
   8031 
   8032 #ifdef WM_EVENT_COUNTERS
   8033 		if (status & WTX_ST_TU)
   8034 			WM_Q_EVCNT_INCR(txq, tu);
   8035 #endif /* WM_EVENT_COUNTERS */
   8036 
   8037 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8038 			ifp->if_oerrors++;
   8039 			if (status & WTX_ST_LC)
   8040 				log(LOG_WARNING, "%s: late collision\n",
   8041 				    device_xname(sc->sc_dev));
   8042 			else if (status & WTX_ST_EC) {
   8043 				ifp->if_collisions += 16;
   8044 				log(LOG_WARNING, "%s: excessive collisions\n",
   8045 				    device_xname(sc->sc_dev));
   8046 			}
   8047 		} else
   8048 			ifp->if_opackets++;
   8049 
   8050 		txq->txq_packets++;
   8051 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8052 
   8053 		txq->txq_free += txs->txs_ndesc;
   8054 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8055 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8056 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8057 		m_freem(txs->txs_mbuf);
   8058 		txs->txs_mbuf = NULL;
   8059 	}
   8060 
   8061 	/* Update the dirty transmit buffer pointer. */
   8062 	txq->txq_sdirty = i;
   8063 	DPRINTF(WM_DEBUG_TX,
   8064 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8065 
   8066 	if (count != 0)
   8067 		rnd_add_uint32(&sc->rnd_source, count);
   8068 
   8069 	/*
   8070 	 * If there are no more pending transmissions, cancel the watchdog
   8071 	 * timer.
   8072 	 */
   8073 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8074 		ifp->if_timer = 0;
   8075 
   8076 	return processed;
   8077 }
   8078 
   8079 static inline uint32_t
   8080 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8081 {
   8082 	struct wm_softc *sc = rxq->rxq_sc;
   8083 
   8084 	if (sc->sc_type == WM_T_82574)
   8085 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8086 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8087 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8088 	else
   8089 		return rxq->rxq_descs[idx].wrx_status;
   8090 }
   8091 
   8092 static inline uint32_t
   8093 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8094 {
   8095 	struct wm_softc *sc = rxq->rxq_sc;
   8096 
   8097 	if (sc->sc_type == WM_T_82574)
   8098 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8099 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8100 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8101 	else
   8102 		return rxq->rxq_descs[idx].wrx_errors;
   8103 }
   8104 
   8105 static inline uint16_t
   8106 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8107 {
   8108 	struct wm_softc *sc = rxq->rxq_sc;
   8109 
   8110 	if (sc->sc_type == WM_T_82574)
   8111 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8112 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8113 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8114 	else
   8115 		return rxq->rxq_descs[idx].wrx_special;
   8116 }
   8117 
   8118 static inline int
   8119 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8120 {
   8121 	struct wm_softc *sc = rxq->rxq_sc;
   8122 
   8123 	if (sc->sc_type == WM_T_82574)
   8124 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8125 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8126 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8127 	else
   8128 		return rxq->rxq_descs[idx].wrx_len;
   8129 }
   8130 
   8131 #ifdef WM_DEBUG
   8132 static inline uint32_t
   8133 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8134 {
   8135 	struct wm_softc *sc = rxq->rxq_sc;
   8136 
   8137 	if (sc->sc_type == WM_T_82574)
   8138 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8139 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8140 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8141 	else
   8142 		return 0;
   8143 }
   8144 
   8145 static inline uint8_t
   8146 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8147 {
   8148 	struct wm_softc *sc = rxq->rxq_sc;
   8149 
   8150 	if (sc->sc_type == WM_T_82574)
   8151 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8152 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8153 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8154 	else
   8155 		return 0;
   8156 }
   8157 #endif /* WM_DEBUG */
   8158 
   8159 static inline bool
   8160 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8161     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8162 {
   8163 
   8164 	if (sc->sc_type == WM_T_82574)
   8165 		return (status & ext_bit) != 0;
   8166 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8167 		return (status & nq_bit) != 0;
   8168 	else
   8169 		return (status & legacy_bit) != 0;
   8170 }
   8171 
   8172 static inline bool
   8173 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8174     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8175 {
   8176 
   8177 	if (sc->sc_type == WM_T_82574)
   8178 		return (error & ext_bit) != 0;
   8179 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8180 		return (error & nq_bit) != 0;
   8181 	else
   8182 		return (error & legacy_bit) != 0;
   8183 }
   8184 
   8185 static inline bool
   8186 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8187 {
   8188 
   8189 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8190 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8191 		return true;
   8192 	else
   8193 		return false;
   8194 }
   8195 
   8196 static inline bool
   8197 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8198 {
   8199 	struct wm_softc *sc = rxq->rxq_sc;
   8200 
	/* XXX missing error bit for newqueue? */
	if (wm_rxdesc_is_set_error(sc, errors,
		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
		NQRXC_ERROR_RXE)) {
		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
			EXTRXC_ERROR_SE, 0))
			log(LOG_WARNING, "%s: symbol error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
			EXTRXC_ERROR_SEQ, 0))
			log(LOG_WARNING, "%s: receive sequence error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
			EXTRXC_ERROR_CE, 0))
			log(LOG_WARNING, "%s: CRC error\n",
			    device_xname(sc->sc_dev));
   8215 		return true;
   8216 	}
   8217 
   8218 	return false;
   8219 }
   8220 
   8221 static inline bool
   8222 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8223 {
   8224 	struct wm_softc *sc = rxq->rxq_sc;
   8225 
   8226 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8227 		NQRXC_STATUS_DD)) {
   8228 		/* We have processed all of the receive descriptors. */
   8229 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8230 		return false;
   8231 	}
   8232 
   8233 	return true;
   8234 }
   8235 
   8236 static inline bool
   8237 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8238     struct mbuf *m)
   8239 {
   8240 
   8241 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8242 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8243 		vlan_set_tag(m, le16toh(vlantag));
   8244 	}
   8245 
   8246 	return true;
   8247 }
   8248 
   8249 static inline void
   8250 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8251     uint32_t errors, struct mbuf *m)
   8252 {
   8253 	struct wm_softc *sc = rxq->rxq_sc;
   8254 
   8255 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8256 		if (wm_rxdesc_is_set_status(sc, status,
   8257 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8258 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8259 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8260 			if (wm_rxdesc_is_set_error(sc, errors,
   8261 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8262 				m->m_pkthdr.csum_flags |=
   8263 					M_CSUM_IPv4_BAD;
   8264 		}
   8265 		if (wm_rxdesc_is_set_status(sc, status,
   8266 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8267 			/*
   8268 			 * Note: we don't know if this was TCP or UDP,
   8269 			 * so we just set both bits, and expect the
   8270 			 * upper layers to deal.
   8271 			 */
   8272 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8273 			m->m_pkthdr.csum_flags |=
   8274 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8275 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8276 			if (wm_rxdesc_is_set_error(sc, errors,
   8277 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8278 				m->m_pkthdr.csum_flags |=
   8279 					M_CSUM_TCP_UDP_BAD;
   8280 		}
   8281 	}
   8282 }
   8283 
   8284 /*
   8285  * wm_rxeof:
   8286  *
   8287  *	Helper; handle receive interrupts.
   8288  */
   8289 static void
   8290 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8291 {
   8292 	struct wm_softc *sc = rxq->rxq_sc;
   8293 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8294 	struct wm_rxsoft *rxs;
   8295 	struct mbuf *m;
   8296 	int i, len;
   8297 	int count = 0;
   8298 	uint32_t status, errors;
   8299 	uint16_t vlantag;
   8300 
   8301 	KASSERT(mutex_owned(rxq->rxq_lock));
   8302 
   8303 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8304 		if (limit-- == 0) {
   8305 			rxq->rxq_ptr = i;
   8306 			break;
   8307 		}
   8308 
   8309 		rxs = &rxq->rxq_soft[i];
   8310 
   8311 		DPRINTF(WM_DEBUG_RX,
   8312 		    ("%s: RX: checking descriptor %d\n",
   8313 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8315 
   8316 		status = wm_rxdesc_get_status(rxq, i);
   8317 		errors = wm_rxdesc_get_errors(rxq, i);
   8318 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8319 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8320 #ifdef WM_DEBUG
   8321 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8322 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8323 #endif
   8324 
   8325 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while still
			 * holding rxq_lock, so it stays consistent
			 * with the counters.
			 */
   8330 			rxq->rxq_ptr = i;
   8331 			break;
   8332 		}
   8333 
   8334 		count++;
   8335 		if (__predict_false(rxq->rxq_discard)) {
   8336 			DPRINTF(WM_DEBUG_RX,
   8337 			    ("%s: RX: discarding contents of descriptor %d\n",
   8338 			    device_xname(sc->sc_dev), i));
   8339 			wm_init_rxdesc(rxq, i);
   8340 			if (wm_rxdesc_is_eop(rxq, status)) {
   8341 				/* Reset our state. */
   8342 				DPRINTF(WM_DEBUG_RX,
   8343 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8344 				    device_xname(sc->sc_dev)));
   8345 				rxq->rxq_discard = 0;
   8346 			}
   8347 			continue;
   8348 		}
   8349 
   8350 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8351 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8352 
   8353 		m = rxs->rxs_mbuf;
   8354 
   8355 		/*
   8356 		 * Add a new receive buffer to the ring, unless of
   8357 		 * course the length is zero. Treat the latter as a
   8358 		 * failed mapping.
   8359 		 */
   8360 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8361 			/*
   8362 			 * Failed, throw away what we've done so
   8363 			 * far, and discard the rest of the packet.
   8364 			 */
   8365 			ifp->if_ierrors++;
   8366 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8367 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8368 			wm_init_rxdesc(rxq, i);
   8369 			if (!wm_rxdesc_is_eop(rxq, status))
   8370 				rxq->rxq_discard = 1;
   8371 			if (rxq->rxq_head != NULL)
   8372 				m_freem(rxq->rxq_head);
   8373 			WM_RXCHAIN_RESET(rxq);
   8374 			DPRINTF(WM_DEBUG_RX,
   8375 			    ("%s: RX: Rx buffer allocation failed, "
   8376 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8377 			    rxq->rxq_discard ? " (discard)" : ""));
   8378 			continue;
   8379 		}
   8380 
   8381 		m->m_len = len;
   8382 		rxq->rxq_len += len;
   8383 		DPRINTF(WM_DEBUG_RX,
   8384 		    ("%s: RX: buffer at %p len %d\n",
   8385 		    device_xname(sc->sc_dev), m->m_data, len));
   8386 
   8387 		/* If this is not the end of the packet, keep looking. */
   8388 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8389 			WM_RXCHAIN_LINK(rxq, m);
   8390 			DPRINTF(WM_DEBUG_RX,
   8391 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8392 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8393 			continue;
   8394 		}
   8395 
		/*
		 * Okay, we have the entire packet now.  The chip
		 * includes the FCS except on I350, I354 and I21[01]
		 * (not all chips can be configured to strip it),
		 * so we need to trim it.  We may also need to adjust
		 * the length of the previous mbuf in the chain if the
		 * current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so we don't trim
		 * the FCS there.
		 */
   8406 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8407 		    && (sc->sc_type != WM_T_I210)
   8408 		    && (sc->sc_type != WM_T_I211)) {
   8409 			if (m->m_len < ETHER_CRC_LEN) {
   8410 				rxq->rxq_tail->m_len
   8411 				    -= (ETHER_CRC_LEN - m->m_len);
   8412 				m->m_len = 0;
   8413 			} else
   8414 				m->m_len -= ETHER_CRC_LEN;
   8415 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8416 		} else
   8417 			len = rxq->rxq_len;
   8418 
   8419 		WM_RXCHAIN_LINK(rxq, m);
   8420 
   8421 		*rxq->rxq_tailp = NULL;
   8422 		m = rxq->rxq_head;
   8423 
   8424 		WM_RXCHAIN_RESET(rxq);
   8425 
   8426 		DPRINTF(WM_DEBUG_RX,
   8427 		    ("%s: RX: have entire packet, len -> %d\n",
   8428 		    device_xname(sc->sc_dev), len));
   8429 
   8430 		/* If an error occurred, update stats and drop the packet. */
   8431 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8432 			m_freem(m);
   8433 			continue;
   8434 		}
   8435 
   8436 		/* No errors.  Receive the packet. */
   8437 		m_set_rcvif(m, ifp);
   8438 		m->m_pkthdr.len = len;
		/*
		 * TODO: the RSS hash and RSS type should be saved to
		 * this mbuf.
		 */
   8443 		DPRINTF(WM_DEBUG_RX,
   8444 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8445 			device_xname(sc->sc_dev), rsstype, rsshash));
   8446 
   8447 		/*
   8448 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8449 		 * for us.  Associate the tag with the packet.
   8450 		 */
   8451 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8452 			continue;
   8453 
   8454 		/* Set up checksum info for this packet. */
   8455 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer and counters while still
		 * holding rxq_lock, before dropping it to pass the
		 * packet up the stack.
		 */
   8460 		rxq->rxq_ptr = i;
   8461 		rxq->rxq_packets++;
   8462 		rxq->rxq_bytes += len;
   8463 		mutex_exit(rxq->rxq_lock);
   8464 
   8465 		/* Pass it on. */
   8466 		if_percpuq_enqueue(sc->sc_ipq, m);
   8467 
   8468 		mutex_enter(rxq->rxq_lock);
   8469 
   8470 		if (rxq->rxq_stopping)
   8471 			break;
   8472 	}
   8473 
   8474 	if (count != 0)
   8475 		rnd_add_uint32(&sc->rnd_source, count);
   8476 
   8477 	DPRINTF(WM_DEBUG_RX,
   8478 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8479 }
   8480 
   8481 /*
   8482  * wm_linkintr_gmii:
   8483  *
   8484  *	Helper; handle link interrupts for GMII.
   8485  */
   8486 static void
   8487 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8488 {
   8489 
   8490 	KASSERT(WM_CORE_LOCKED(sc));
   8491 
   8492 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8493 		__func__));
   8494 
   8495 	if (icr & ICR_LSC) {
   8496 		uint32_t reg;
   8497 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8498 
   8499 		if ((status & STATUS_LU) != 0) {
   8500 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8501 				device_xname(sc->sc_dev),
   8502 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8503 		} else {
   8504 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8505 				device_xname(sc->sc_dev)));
   8506 		}
   8507 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8508 			wm_gig_downshift_workaround_ich8lan(sc);
   8509 
   8510 		if ((sc->sc_type == WM_T_ICH8)
   8511 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8512 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8513 		}
   8514 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8515 			device_xname(sc->sc_dev)));
   8516 		mii_pollstat(&sc->sc_mii);
   8517 		if (sc->sc_type == WM_T_82543) {
   8518 			int miistatus, active;
   8519 
   8520 			/*
   8521 			 * With 82543, we need to force speed and
   8522 			 * duplex on the MAC equal to what the PHY
   8523 			 * speed and duplex configuration is.
   8524 			 */
   8525 			miistatus = sc->sc_mii.mii_media_status;
   8526 
   8527 			if (miistatus & IFM_ACTIVE) {
   8528 				active = sc->sc_mii.mii_media_active;
   8529 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8530 				switch (IFM_SUBTYPE(active)) {
   8531 				case IFM_10_T:
   8532 					sc->sc_ctrl |= CTRL_SPEED_10;
   8533 					break;
   8534 				case IFM_100_TX:
   8535 					sc->sc_ctrl |= CTRL_SPEED_100;
   8536 					break;
   8537 				case IFM_1000_T:
   8538 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8539 					break;
   8540 				default:
					/*
					 * Fiber?
					 * Should not enter here.
					 */
   8545 					printf("unknown media (%x)\n", active);
   8546 					break;
   8547 				}
   8548 				if (active & IFM_FDX)
   8549 					sc->sc_ctrl |= CTRL_FD;
   8550 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8551 			}
   8552 		} else if (sc->sc_type == WM_T_PCH) {
   8553 			wm_k1_gig_workaround_hv(sc,
   8554 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8555 		}
   8556 
   8557 		if ((sc->sc_phytype == WMPHY_82578)
   8558 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8559 			== IFM_1000_T)) {
   8561 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8562 				delay(200*1000); /* XXX too big */
   8563 
   8564 				/* Link stall fix for link up */
   8565 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8566 				    HV_MUX_DATA_CTRL,
   8567 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8568 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8569 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8570 				    HV_MUX_DATA_CTRL,
   8571 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8572 			}
   8573 		}
   8574 		/*
   8575 		 * I217 Packet Loss issue:
   8576 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8577 		 * on power up.
   8578 		 * Set the Beacon Duration for I217 to 8 usec
   8579 		 */
   8580 		if ((sc->sc_type == WM_T_PCH_LPT)
   8581 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8582 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8583 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8584 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8585 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8586 		}
   8587 
   8588 		/* XXX Work-around I218 hang issue */
   8589 		/* e1000_k1_workaround_lpt_lp() */
   8590 
   8591 		if ((sc->sc_type == WM_T_PCH_LPT)
   8592 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8593 			/*
   8594 			 * Set platform power management values for Latency
   8595 			 * Tolerance Reporting (LTR)
   8596 			 */
   8597 			wm_platform_pm_pch_lpt(sc,
   8598 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8599 				    != 0));
   8600 		}
   8601 
   8602 		/* FEXTNVM6 K1-off workaround */
   8603 		if (sc->sc_type == WM_T_PCH_SPT) {
   8604 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8605 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8606 			    & FEXTNVM6_K1_OFF_ENABLE)
   8607 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8608 			else
   8609 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8610 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8611 		}
   8612 	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8614 			device_xname(sc->sc_dev)));
   8615 	}
   8616 }
   8617 
   8618 /*
   8619  * wm_linkintr_tbi:
   8620  *
   8621  *	Helper; handle link interrupts for TBI mode.
   8622  */
   8623 static void
   8624 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8625 {
   8626 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8627 	uint32_t status;
   8628 
   8629 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8630 		__func__));
   8631 
   8632 	status = CSR_READ(sc, WMREG_STATUS);
   8633 	if (icr & ICR_LSC) {
   8634 		if (status & STATUS_LU) {
   8635 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8636 			    device_xname(sc->sc_dev),
   8637 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8638 			/*
   8639 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8640 			 * so we should update sc->sc_ctrl
   8641 			 */
   8642 
   8643 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8644 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8645 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8646 			if (status & STATUS_FD)
   8647 				sc->sc_tctl |=
   8648 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8649 			else
   8650 				sc->sc_tctl |=
   8651 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8652 			if (sc->sc_ctrl & CTRL_TFCE)
   8653 				sc->sc_fcrtl |= FCRTL_XONE;
   8654 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8655 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8656 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8657 				      sc->sc_fcrtl);
   8658 			sc->sc_tbi_linkup = 1;
   8659 			if_link_state_change(ifp, LINK_STATE_UP);
   8660 		} else {
   8661 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8662 			    device_xname(sc->sc_dev)));
   8663 			sc->sc_tbi_linkup = 0;
   8664 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8665 		}
   8666 		/* Update LED */
   8667 		wm_tbi_serdes_set_linkled(sc);
   8668 	} else if (icr & ICR_RXSEQ) {
   8669 		DPRINTF(WM_DEBUG_LINK,
   8670 		    ("%s: LINK: Receive sequence error\n",
   8671 		    device_xname(sc->sc_dev)));
   8672 	}
   8673 }
   8674 
   8675 /*
   8676  * wm_linkintr_serdes:
   8677  *
 *	Helper; handle link interrupts for SERDES mode.
   8679  */
   8680 static void
   8681 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8682 {
   8683 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8684 	struct mii_data *mii = &sc->sc_mii;
   8685 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8686 	uint32_t pcs_adv, pcs_lpab, reg;
   8687 
   8688 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8689 		__func__));
   8690 
   8691 	if (icr & ICR_LSC) {
   8692 		/* Check PCS */
   8693 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8694 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8695 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8696 				device_xname(sc->sc_dev)));
   8697 			mii->mii_media_status |= IFM_ACTIVE;
   8698 			sc->sc_tbi_linkup = 1;
   8699 			if_link_state_change(ifp, LINK_STATE_UP);
   8700 		} else {
   8701 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8702 				device_xname(sc->sc_dev)));
   8703 			mii->mii_media_status |= IFM_NONE;
   8704 			sc->sc_tbi_linkup = 0;
   8705 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8706 			wm_tbi_serdes_set_linkled(sc);
   8707 			return;
   8708 		}
   8709 		mii->mii_media_active |= IFM_1000_SX;
   8710 		if ((reg & PCS_LSTS_FDX) != 0)
   8711 			mii->mii_media_active |= IFM_FDX;
   8712 		else
   8713 			mii->mii_media_active |= IFM_HDX;
   8714 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8715 			/* Check flow */
   8716 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8717 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8718 				DPRINTF(WM_DEBUG_LINK,
   8719 				    ("XXX LINKOK but not ACOMP\n"));
   8720 				return;
   8721 			}
   8722 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8723 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8724 			DPRINTF(WM_DEBUG_LINK,
   8725 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8726 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8727 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8728 				mii->mii_media_active |= IFM_FLOW
   8729 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8730 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8731 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8732 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8733 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8734 				mii->mii_media_active |= IFM_FLOW
   8735 				    | IFM_ETH_TXPAUSE;
   8736 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8737 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8738 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8739 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8740 				mii->mii_media_active |= IFM_FLOW
   8741 				    | IFM_ETH_RXPAUSE;
   8742 		}
   8743 		/* Update LED */
   8744 		wm_tbi_serdes_set_linkled(sc);
   8745 	} else {
   8746 		DPRINTF(WM_DEBUG_LINK,
   8747 		    ("%s: LINK: Receive sequence error\n",
   8748 		    device_xname(sc->sc_dev)));
   8749 	}
   8750 }
   8751 
   8752 /*
   8753  * wm_linkintr:
   8754  *
   8755  *	Helper; handle link interrupts.
   8756  */
   8757 static void
   8758 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8759 {
   8760 
   8761 	KASSERT(WM_CORE_LOCKED(sc));
   8762 
   8763 	if (sc->sc_flags & WM_F_HAS_MII)
   8764 		wm_linkintr_gmii(sc, icr);
   8765 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8766 	    && (sc->sc_type >= WM_T_82575))
   8767 		wm_linkintr_serdes(sc, icr);
   8768 	else
   8769 		wm_linkintr_tbi(sc, icr);
   8770 }
   8771 
   8772 /*
   8773  * wm_intr_legacy:
   8774  *
   8775  *	Interrupt service routine for INTx and MSI.
   8776  */
   8777 static int
   8778 wm_intr_legacy(void *arg)
   8779 {
   8780 	struct wm_softc *sc = arg;
   8781 	struct wm_queue *wmq = &sc->sc_queue[0];
   8782 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8783 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8784 	uint32_t icr, rndval = 0;
   8785 	int handled = 0;
   8786 
   8787 	while (1 /* CONSTCOND */) {
   8788 		icr = CSR_READ(sc, WMREG_ICR);
   8789 		if ((icr & sc->sc_icr) == 0)
   8790 			break;
   8791 		if (handled == 0) {
   8792 			DPRINTF(WM_DEBUG_TX,
   8793 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8794 		}
   8795 		if (rndval == 0)
   8796 			rndval = icr;
   8797 
   8798 		mutex_enter(rxq->rxq_lock);
   8799 
   8800 		if (rxq->rxq_stopping) {
   8801 			mutex_exit(rxq->rxq_lock);
   8802 			break;
   8803 		}
   8804 
   8805 		handled = 1;
   8806 
   8807 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8808 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8809 			DPRINTF(WM_DEBUG_RX,
   8810 			    ("%s: RX: got Rx intr 0x%08x\n",
   8811 			    device_xname(sc->sc_dev),
   8812 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8813 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8814 		}
   8815 #endif
   8816 		/*
   8817 		 * wm_rxeof() does *not* call upper layer functions directly,
		 * as if_percpuq_enqueue() just calls softint_schedule().
   8819 		 * So, we can call wm_rxeof() in interrupt context.
   8820 		 */
   8821 		wm_rxeof(rxq, UINT_MAX);
   8822 
   8823 		mutex_exit(rxq->rxq_lock);
   8824 		mutex_enter(txq->txq_lock);
   8825 
   8826 		if (txq->txq_stopping) {
   8827 			mutex_exit(txq->txq_lock);
   8828 			break;
   8829 		}
   8830 
   8831 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8832 		if (icr & ICR_TXDW) {
   8833 			DPRINTF(WM_DEBUG_TX,
   8834 			    ("%s: TX: got TXDW interrupt\n",
   8835 			    device_xname(sc->sc_dev)));
   8836 			WM_Q_EVCNT_INCR(txq, txdw);
   8837 		}
   8838 #endif
   8839 		wm_txeof(sc, txq);
   8840 
   8841 		mutex_exit(txq->txq_lock);
   8842 		WM_CORE_LOCK(sc);
   8843 
   8844 		if (sc->sc_core_stopping) {
   8845 			WM_CORE_UNLOCK(sc);
   8846 			break;
   8847 		}
   8848 
   8849 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8850 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8851 			wm_linkintr(sc, icr);
   8852 		}
   8853 
   8854 		WM_CORE_UNLOCK(sc);
   8855 
   8856 		if (icr & ICR_RXO) {
   8857 #if defined(WM_DEBUG)
   8858 			log(LOG_WARNING, "%s: Receive overrun\n",
   8859 			    device_xname(sc->sc_dev));
   8860 #endif /* defined(WM_DEBUG) */
   8861 		}
   8862 	}
   8863 
   8864 	rnd_add_uint32(&sc->rnd_source, rndval);
   8865 
   8866 	if (handled) {
   8867 		/* Try to get more packets going. */
   8868 		softint_schedule(wmq->wmq_si);
   8869 	}
   8870 
   8871 	return handled;
   8872 }
   8873 
   8874 static inline void
   8875 wm_txrxintr_disable(struct wm_queue *wmq)
   8876 {
   8877 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8878 
   8879 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8881 	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8883 	else
   8884 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8885 }
   8886 
   8887 static inline void
   8888 wm_txrxintr_enable(struct wm_queue *wmq)
   8889 {
   8890 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8891 
   8892 	wm_itrs_calculate(sc, wmq);
   8893 
   8894 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8896 	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8898 	else
   8899 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8900 }
   8901 
   8902 static int
   8903 wm_txrxintr_msix(void *arg)
   8904 {
   8905 	struct wm_queue *wmq = arg;
   8906 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8907 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8908 	struct wm_softc *sc = txq->txq_sc;
   8909 	u_int limit = sc->sc_rx_intr_process_limit;
   8910 
   8911 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8912 
   8913 	DPRINTF(WM_DEBUG_TX,
   8914 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8915 
   8916 	wm_txrxintr_disable(wmq);
   8917 
   8918 	mutex_enter(txq->txq_lock);
   8919 
   8920 	if (txq->txq_stopping) {
   8921 		mutex_exit(txq->txq_lock);
   8922 		return 0;
   8923 	}
   8924 
   8925 	WM_Q_EVCNT_INCR(txq, txdw);
   8926 	wm_txeof(sc, txq);
	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8928 	mutex_exit(txq->txq_lock);
   8929 
   8930 	DPRINTF(WM_DEBUG_RX,
   8931 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8932 	mutex_enter(rxq->rxq_lock);
   8933 
   8934 	if (rxq->rxq_stopping) {
   8935 		mutex_exit(rxq->rxq_lock);
   8936 		return 0;
   8937 	}
   8938 
   8939 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8940 	wm_rxeof(rxq, limit);
   8941 	mutex_exit(rxq->rxq_lock);
   8942 
   8943 	wm_itrs_writereg(sc, wmq);
   8944 
   8945 	softint_schedule(wmq->wmq_si);
   8946 
   8947 	return 1;
   8948 }
   8949 
   8950 static void
   8951 wm_handle_queue(void *arg)
   8952 {
   8953 	struct wm_queue *wmq = arg;
   8954 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8955 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8956 	struct wm_softc *sc = txq->txq_sc;
   8957 	u_int limit = sc->sc_rx_process_limit;
   8958 
   8959 	mutex_enter(txq->txq_lock);
   8960 	if (txq->txq_stopping) {
   8961 		mutex_exit(txq->txq_lock);
   8962 		return;
   8963 	}
   8964 	wm_txeof(sc, txq);
   8965 	wm_deferred_start_locked(txq);
   8966 	mutex_exit(txq->txq_lock);
   8967 
   8968 	mutex_enter(rxq->rxq_lock);
   8969 	if (rxq->rxq_stopping) {
   8970 		mutex_exit(rxq->rxq_lock);
   8971 		return;
   8972 	}
   8973 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8974 	wm_rxeof(rxq, limit);
   8975 	mutex_exit(rxq->rxq_lock);
   8976 
   8977 	wm_txrxintr_enable(wmq);
   8978 }
   8979 
   8980 /*
   8981  * wm_linkintr_msix:
   8982  *
   8983  *	Interrupt service routine for link status change for MSI-X.
   8984  */
   8985 static int
   8986 wm_linkintr_msix(void *arg)
   8987 {
   8988 	struct wm_softc *sc = arg;
   8989 	uint32_t reg;
   8990 
   8991 	DPRINTF(WM_DEBUG_LINK,
   8992 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8993 
   8994 	reg = CSR_READ(sc, WMREG_ICR);
   8995 	WM_CORE_LOCK(sc);
   8996 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8997 		goto out;
   8998 
   8999 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9000 	wm_linkintr(sc, ICR_LSC);
   9001 
   9002 out:
   9003 	WM_CORE_UNLOCK(sc);
   9004 
   9005 	if (sc->sc_type == WM_T_82574)
   9006 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9007 	else if (sc->sc_type == WM_T_82575)
   9008 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9009 	else
   9010 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9011 
   9012 	return 1;
   9013 }
   9014 
   9015 /*
   9016  * Media related.
   9017  * GMII, SGMII, TBI (and SERDES)
   9018  */
   9019 
   9020 /* Common */
   9021 
   9022 /*
   9023  * wm_tbi_serdes_set_linkled:
   9024  *
   9025  *	Update the link LED on TBI and SERDES devices.
   9026  */
   9027 static void
   9028 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9029 {
   9030 
   9031 	if (sc->sc_tbi_linkup)
   9032 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9033 	else
   9034 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9035 
   9036 	/* 82540 or newer devices are active low */
   9037 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9038 
   9039 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9040 }
   9041 
   9042 /* GMII related */
   9043 
   9044 /*
   9045  * wm_gmii_reset:
   9046  *
   9047  *	Reset the PHY.
   9048  */
   9049 static void
   9050 wm_gmii_reset(struct wm_softc *sc)
   9051 {
   9052 	uint32_t reg;
   9053 	int rv;
   9054 
   9055 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9056 		device_xname(sc->sc_dev), __func__));
   9057 
   9058 	rv = sc->phy.acquire(sc);
   9059 	if (rv != 0) {
   9060 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9061 		    __func__);
   9062 		return;
   9063 	}
   9064 
   9065 	switch (sc->sc_type) {
   9066 	case WM_T_82542_2_0:
   9067 	case WM_T_82542_2_1:
   9068 		/* null */
   9069 		break;
   9070 	case WM_T_82543:
   9071 		/*
   9072 		 * With 82543, we need to force speed and duplex on the MAC
   9073 		 * equal to what the PHY speed and duplex configuration is.
   9074 		 * In addition, we need to perform a hardware reset on the PHY
   9075 		 * to take it out of reset.
   9076 		 */
   9077 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9078 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9079 
   9080 		/* The PHY reset pin is active-low. */
   9081 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9082 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9083 		    CTRL_EXT_SWDPIN(4));
   9084 		reg |= CTRL_EXT_SWDPIO(4);
   9085 
   9086 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9087 		CSR_WRITE_FLUSH(sc);
   9088 		delay(10*1000);
   9089 
   9090 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9091 		CSR_WRITE_FLUSH(sc);
   9092 		delay(150);
   9093 #if 0
   9094 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9095 #endif
   9096 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9097 		break;
   9098 	case WM_T_82544:	/* reset 10000us */
   9099 	case WM_T_82540:
   9100 	case WM_T_82545:
   9101 	case WM_T_82545_3:
   9102 	case WM_T_82546:
   9103 	case WM_T_82546_3:
   9104 	case WM_T_82541:
   9105 	case WM_T_82541_2:
   9106 	case WM_T_82547:
   9107 	case WM_T_82547_2:
   9108 	case WM_T_82571:	/* reset 100us */
   9109 	case WM_T_82572:
   9110 	case WM_T_82573:
   9111 	case WM_T_82574:
   9112 	case WM_T_82575:
   9113 	case WM_T_82576:
   9114 	case WM_T_82580:
   9115 	case WM_T_I350:
   9116 	case WM_T_I354:
   9117 	case WM_T_I210:
   9118 	case WM_T_I211:
   9119 	case WM_T_82583:
   9120 	case WM_T_80003:
   9121 		/* generic reset */
   9122 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9123 		CSR_WRITE_FLUSH(sc);
   9124 		delay(20000);
   9125 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9126 		CSR_WRITE_FLUSH(sc);
   9127 		delay(20000);
   9128 
   9129 		if ((sc->sc_type == WM_T_82541)
   9130 		    || (sc->sc_type == WM_T_82541_2)
   9131 		    || (sc->sc_type == WM_T_82547)
   9132 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   9134 			/* XXX add code to set LED after phy reset */
   9135 		}
   9136 		break;
   9137 	case WM_T_ICH8:
   9138 	case WM_T_ICH9:
   9139 	case WM_T_ICH10:
   9140 	case WM_T_PCH:
   9141 	case WM_T_PCH2:
   9142 	case WM_T_PCH_LPT:
   9143 	case WM_T_PCH_SPT:
   9144 		/* generic reset */
   9145 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9146 		CSR_WRITE_FLUSH(sc);
   9147 		delay(100);
   9148 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9149 		CSR_WRITE_FLUSH(sc);
   9150 		delay(150);
   9151 		break;
   9152 	default:
   9153 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9154 		    __func__);
   9155 		break;
   9156 	}
   9157 
   9158 	sc->phy.release(sc);
   9159 
   9160 	/* get_cfg_done */
   9161 	wm_get_cfg_done(sc);
   9162 
   9163 	/* extra setup */
   9164 	switch (sc->sc_type) {
   9165 	case WM_T_82542_2_0:
   9166 	case WM_T_82542_2_1:
   9167 	case WM_T_82543:
   9168 	case WM_T_82544:
   9169 	case WM_T_82540:
   9170 	case WM_T_82545:
   9171 	case WM_T_82545_3:
   9172 	case WM_T_82546:
   9173 	case WM_T_82546_3:
   9174 	case WM_T_82541_2:
   9175 	case WM_T_82547_2:
   9176 	case WM_T_82571:
   9177 	case WM_T_82572:
   9178 	case WM_T_82573:
   9179 	case WM_T_82574:
   9180 	case WM_T_82583:
   9181 	case WM_T_82575:
   9182 	case WM_T_82576:
   9183 	case WM_T_82580:
   9184 	case WM_T_I350:
   9185 	case WM_T_I354:
   9186 	case WM_T_I210:
   9187 	case WM_T_I211:
   9188 	case WM_T_80003:
   9189 		/* null */
   9190 		break;
   9191 	case WM_T_82541:
   9192 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   9194 		break;
   9195 	case WM_T_ICH8:
   9196 	case WM_T_ICH9:
   9197 	case WM_T_ICH10:
   9198 	case WM_T_PCH:
   9199 	case WM_T_PCH2:
   9200 	case WM_T_PCH_LPT:
   9201 	case WM_T_PCH_SPT:
   9202 		wm_phy_post_reset(sc);
   9203 		break;
   9204 	default:
   9205 		panic("%s: unknown type\n", __func__);
   9206 		break;
   9207 	}
   9208 }
   9209 
/*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select the correct read/write functions we can only
 * rely on the PCI ID or the MAC type, since the PHY registers cannot
 * be accessed yet.
 *
 *  On the first call of this function, the PHY ID is not known yet, so
 * check the PCI ID or the MAC type.  The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type.  This might still not be perfect because of missing
 * entries in the comparison table, but it is better than the first
 * call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
 */
   9228 static void
   9229 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9230     uint16_t phy_model)
   9231 {
   9232 	device_t dev = sc->sc_dev;
   9233 	struct mii_data *mii = &sc->sc_mii;
   9234 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9235 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9236 	mii_readreg_t new_readreg;
   9237 	mii_writereg_t new_writereg;
   9238 
   9239 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9240 		device_xname(sc->sc_dev), __func__));
   9241 
   9242 	if (mii->mii_readreg == NULL) {
   9243 		/*
   9244 		 *  This is the first call of this function. For ICH and PCH
   9245 		 * variants, it's difficult to determine the PHY access method
   9246 		 * by sc_type, so use the PCI product ID for some devices.
   9247 		 */
   9248 
   9249 		switch (sc->sc_pcidevid) {
   9250 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9251 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9252 			/* 82577 */
   9253 			new_phytype = WMPHY_82577;
   9254 			break;
   9255 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9256 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9257 			/* 82578 */
   9258 			new_phytype = WMPHY_82578;
   9259 			break;
   9260 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9261 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9262 			/* 82579 */
   9263 			new_phytype = WMPHY_82579;
   9264 			break;
   9265 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9266 		case PCI_PRODUCT_INTEL_82801I_BM:
   9267 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9268 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9269 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9270 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9271 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9272 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9273 			/* ICH8, 9, 10 with 82567 */
   9274 			new_phytype = WMPHY_BM;
   9275 			break;
   9276 		default:
   9277 			break;
   9278 		}
   9279 	} else {
   9280 		/* It's not the first call. Use PHY OUI and model */
   9281 		switch (phy_oui) {
   9282 		case MII_OUI_ATHEROS: /* XXX ??? */
   9283 			switch (phy_model) {
   9284 			case 0x0004: /* XXX */
   9285 				new_phytype = WMPHY_82578;
   9286 				break;
   9287 			default:
   9288 				break;
   9289 			}
   9290 			break;
   9291 		case MII_OUI_xxMARVELL:
   9292 			switch (phy_model) {
   9293 			case MII_MODEL_xxMARVELL_I210:
   9294 				new_phytype = WMPHY_I210;
   9295 				break;
   9296 			case MII_MODEL_xxMARVELL_E1011:
   9297 			case MII_MODEL_xxMARVELL_E1000_3:
   9298 			case MII_MODEL_xxMARVELL_E1000_5:
   9299 			case MII_MODEL_xxMARVELL_E1112:
   9300 				new_phytype = WMPHY_M88;
   9301 				break;
   9302 			case MII_MODEL_xxMARVELL_E1149:
   9303 				new_phytype = WMPHY_BM;
   9304 				break;
   9305 			case MII_MODEL_xxMARVELL_E1111:
   9306 			case MII_MODEL_xxMARVELL_I347:
   9307 			case MII_MODEL_xxMARVELL_E1512:
   9308 			case MII_MODEL_xxMARVELL_E1340M:
   9309 			case MII_MODEL_xxMARVELL_E1543:
   9310 				new_phytype = WMPHY_M88;
   9311 				break;
   9312 			case MII_MODEL_xxMARVELL_I82563:
   9313 				new_phytype = WMPHY_GG82563;
   9314 				break;
   9315 			default:
   9316 				break;
   9317 			}
   9318 			break;
   9319 		case MII_OUI_INTEL:
   9320 			switch (phy_model) {
   9321 			case MII_MODEL_INTEL_I82577:
   9322 				new_phytype = WMPHY_82577;
   9323 				break;
   9324 			case MII_MODEL_INTEL_I82579:
   9325 				new_phytype = WMPHY_82579;
   9326 				break;
   9327 			case MII_MODEL_INTEL_I217:
   9328 				new_phytype = WMPHY_I217;
   9329 				break;
   9330 			case MII_MODEL_INTEL_I82580:
   9331 			case MII_MODEL_INTEL_I350:
   9332 				new_phytype = WMPHY_82580;
   9333 				break;
   9334 			default:
   9335 				break;
   9336 			}
   9337 			break;
   9338 		case MII_OUI_yyINTEL:
   9339 			switch (phy_model) {
   9340 			case MII_MODEL_yyINTEL_I82562G:
   9341 			case MII_MODEL_yyINTEL_I82562EM:
   9342 			case MII_MODEL_yyINTEL_I82562ET:
   9343 				new_phytype = WMPHY_IFE;
   9344 				break;
   9345 			case MII_MODEL_yyINTEL_IGP01E1000:
   9346 				new_phytype = WMPHY_IGP;
   9347 				break;
   9348 			case MII_MODEL_yyINTEL_I82566:
   9349 				new_phytype = WMPHY_IGP_3;
   9350 				break;
   9351 			default:
   9352 				break;
   9353 			}
   9354 			break;
   9355 		default:
   9356 			break;
   9357 		}
   9358 		if (new_phytype == WMPHY_UNKNOWN)
   9359 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9360 			    __func__);
   9361 
   9362 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
   9366 			    sc->sc_phytype, new_phytype);
   9367 		}
   9368 	}
   9369 
   9370 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9371 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9372 		/* SGMII */
   9373 		new_readreg = wm_sgmii_readreg;
   9374 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   9376 		/* BM2 (phyaddr == 1) */
   9377 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9378 		    && (new_phytype != WMPHY_BM)
   9379 		    && (new_phytype != WMPHY_UNKNOWN))
   9380 			doubt_phytype = new_phytype;
   9381 		new_phytype = WMPHY_BM;
   9382 		new_readreg = wm_gmii_bm_readreg;
   9383 		new_writereg = wm_gmii_bm_writereg;
   9384 	} else if (sc->sc_type >= WM_T_PCH) {
   9385 		/* All PCH* use _hv_ */
   9386 		new_readreg = wm_gmii_hv_readreg;
   9387 		new_writereg = wm_gmii_hv_writereg;
   9388 	} else if (sc->sc_type >= WM_T_ICH8) {
   9389 		/* non-82567 ICH8, 9 and 10 */
   9390 		new_readreg = wm_gmii_i82544_readreg;
   9391 		new_writereg = wm_gmii_i82544_writereg;
   9392 	} else if (sc->sc_type >= WM_T_80003) {
   9393 		/* 80003 */
   9394 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9395 		    && (new_phytype != WMPHY_GG82563)
   9396 		    && (new_phytype != WMPHY_UNKNOWN))
   9397 			doubt_phytype = new_phytype;
   9398 		new_phytype = WMPHY_GG82563;
   9399 		new_readreg = wm_gmii_i80003_readreg;
   9400 		new_writereg = wm_gmii_i80003_writereg;
   9401 	} else if (sc->sc_type >= WM_T_I210) {
   9402 		/* I210 and I211 */
   9403 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9404 		    && (new_phytype != WMPHY_I210)
   9405 		    && (new_phytype != WMPHY_UNKNOWN))
   9406 			doubt_phytype = new_phytype;
   9407 		new_phytype = WMPHY_I210;
   9408 		new_readreg = wm_gmii_gs40g_readreg;
   9409 		new_writereg = wm_gmii_gs40g_writereg;
   9410 	} else if (sc->sc_type >= WM_T_82580) {
   9411 		/* 82580, I350 and I354 */
   9412 		new_readreg = wm_gmii_82580_readreg;
   9413 		new_writereg = wm_gmii_82580_writereg;
   9414 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9416 		new_readreg = wm_gmii_i82544_readreg;
   9417 		new_writereg = wm_gmii_i82544_writereg;
   9418 	} else {
   9419 		new_readreg = wm_gmii_i82543_readreg;
   9420 		new_writereg = wm_gmii_i82543_writereg;
   9421 	}
   9422 
   9423 	if (new_phytype == WMPHY_BM) {
   9424 		/* All BM use _bm_ */
   9425 		new_readreg = wm_gmii_bm_readreg;
   9426 		new_writereg = wm_gmii_bm_writereg;
   9427 	}
   9428 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9429 		/* All PCH* use _hv_ */
   9430 		new_readreg = wm_gmii_hv_readreg;
   9431 		new_writereg = wm_gmii_hv_writereg;
   9432 	}
   9433 
   9434 	/* Diag output */
   9435 	if (doubt_phytype != WMPHY_UNKNOWN)
   9436 		aprint_error_dev(dev, "Assumed new PHY type was "
   9437 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9438 		    new_phytype);
   9439 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
		    "was incorrect. New PHY type = %u\n",
   9443 		    sc->sc_phytype, new_phytype);
   9444 
   9445 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9446 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9447 
   9448 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9449 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9450 		    "function was incorrect.\n");
   9451 
   9452 	/* Update now */
   9453 	sc->sc_phytype = new_phytype;
   9454 	mii->mii_readreg = new_readreg;
   9455 	mii->mii_writereg = new_writereg;
   9456 }
   9457 
   9458 /*
   9459  * wm_get_phy_id_82575:
   9460  *
   9461  * Return PHY ID. Return -1 if it failed.
   9462  */
   9463 static int
   9464 wm_get_phy_id_82575(struct wm_softc *sc)
   9465 {
   9466 	uint32_t reg;
   9467 	int phyid = -1;
   9468 
   9469 	/* XXX */
   9470 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9471 		return -1;
   9472 
   9473 	if (wm_sgmii_uses_mdio(sc)) {
   9474 		switch (sc->sc_type) {
   9475 		case WM_T_82575:
   9476 		case WM_T_82576:
   9477 			reg = CSR_READ(sc, WMREG_MDIC);
   9478 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9479 			break;
   9480 		case WM_T_82580:
   9481 		case WM_T_I350:
   9482 		case WM_T_I354:
   9483 		case WM_T_I210:
   9484 		case WM_T_I211:
   9485 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9486 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9487 			break;
   9488 		default:
   9489 			return -1;
   9490 		}
   9491 	}
   9492 
   9493 	return phyid;
   9494 }
   9495 
   9496 
   9497 /*
   9498  * wm_gmii_mediainit:
   9499  *
   9500  *	Initialize media for use on 1000BASE-T devices.
   9501  */
   9502 static void
   9503 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9504 {
   9505 	device_t dev = sc->sc_dev;
   9506 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9507 	struct mii_data *mii = &sc->sc_mii;
   9508 	uint32_t reg;
   9509 
   9510 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9511 		device_xname(sc->sc_dev), __func__));
   9512 
   9513 	/* We have GMII. */
   9514 	sc->sc_flags |= WM_F_HAS_MII;
   9515 
   9516 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9518 	else
   9519 		sc->sc_tipg = TIPG_1000T_DFLT;
   9520 
   9521 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9522 	if ((sc->sc_type == WM_T_82580)
   9523 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9524 	    || (sc->sc_type == WM_T_I211)) {
   9525 		reg = CSR_READ(sc, WMREG_PHPM);
   9526 		reg &= ~PHPM_GO_LINK_D;
   9527 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9528 	}
   9529 
   9530 	/*
   9531 	 * Let the chip set speed/duplex on its own based on
   9532 	 * signals from the PHY.
   9533 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9534 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9535 	 */
   9536 	sc->sc_ctrl |= CTRL_SLU;
   9537 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9538 
   9539 	/* Initialize our media structures and probe the GMII. */
   9540 	mii->mii_ifp = ifp;
   9541 
   9542 	mii->mii_statchg = wm_gmii_statchg;
   9543 
   9544 	/* get PHY control from SMBus to PCIe */
   9545 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9546 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9547 		wm_smbustopci(sc);
   9548 
   9549 	wm_gmii_reset(sc);
   9550 
   9551 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9552 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9553 	    wm_gmii_mediastatus);
   9554 
   9555 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9556 	    || (sc->sc_type == WM_T_82580)
   9557 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9558 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9559 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9560 			/* Attach only one port */
   9561 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9562 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9563 		} else {
   9564 			int i, id;
   9565 			uint32_t ctrl_ext;
   9566 
   9567 			id = wm_get_phy_id_82575(sc);
   9568 			if (id != -1) {
   9569 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9570 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9571 			}
   9572 			if ((id == -1)
   9573 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9574 				/* Power on the SGMII PHY if it is disabled */
   9575 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9576 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9577 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9578 				CSR_WRITE_FLUSH(sc);
   9579 				delay(300*1000); /* XXX too long */
   9580 
    9581 				/* Try PHY addresses from 1 to 7 */
   9582 				for (i = 1; i < 8; i++)
   9583 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9584 					    0xffffffff, i, MII_OFFSET_ANY,
   9585 					    MIIF_DOPAUSE);
   9586 
    9587 				/* Restore the previous SFP cage power state */
   9588 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9589 			}
   9590 		}
   9591 	} else {
   9592 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9593 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9594 	}
   9595 
   9596 	/*
    9597 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, call
    9598 	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
   9599 	 */
   9600 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9601 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9602 		wm_set_mdio_slow_mode_hv(sc);
   9603 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9604 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9605 	}
   9606 
   9607 	/*
   9608 	 * (For ICH8 variants)
   9609 	 * If PHY detection failed, use BM's r/w function and retry.
   9610 	 */
   9611 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9612 		/* If detection failed, retry with the *_bm_* functions */
    9613 		aprint_verbose_dev(dev, "Assumed PHY access function "
    9614 		    "(type = %d) might be incorrect. Using BM and retrying.\n",
   9615 		    sc->sc_phytype);
   9616 		sc->sc_phytype = WMPHY_BM;
   9617 		mii->mii_readreg = wm_gmii_bm_readreg;
   9618 		mii->mii_writereg = wm_gmii_bm_writereg;
   9619 
   9620 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9621 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9622 	}
   9623 
   9624 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9625 		/* No PHY was found */
   9626 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9627 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9628 		sc->sc_phytype = WMPHY_NONE;
   9629 	} else {
   9630 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9631 
   9632 		/*
    9633 		 * A PHY was found. Check the PHY type again with a second
    9634 		 * call to wm_gmii_setup_phytype().
   9635 		 */
   9636 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9637 		    child->mii_mpd_model);
   9638 
   9639 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9640 	}
   9641 }
   9642 
   9643 /*
   9644  * wm_gmii_mediachange:	[ifmedia interface function]
   9645  *
   9646  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9647  */
   9648 static int
   9649 wm_gmii_mediachange(struct ifnet *ifp)
   9650 {
   9651 	struct wm_softc *sc = ifp->if_softc;
   9652 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9653 	int rc;
   9654 
   9655 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9656 		device_xname(sc->sc_dev), __func__));
   9657 	if ((ifp->if_flags & IFF_UP) == 0)
   9658 		return 0;
   9659 
   9660 	/* Disable D0 LPLU. */
   9661 	wm_lplu_d0_disable(sc);
   9662 
   9663 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9664 	sc->sc_ctrl |= CTRL_SLU;
   9665 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9666 	    || (sc->sc_type > WM_T_82543)) {
   9667 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9668 	} else {
   9669 		sc->sc_ctrl &= ~CTRL_ASDE;
   9670 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9671 		if (ife->ifm_media & IFM_FDX)
   9672 			sc->sc_ctrl |= CTRL_FD;
   9673 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9674 		case IFM_10_T:
   9675 			sc->sc_ctrl |= CTRL_SPEED_10;
   9676 			break;
   9677 		case IFM_100_TX:
   9678 			sc->sc_ctrl |= CTRL_SPEED_100;
   9679 			break;
   9680 		case IFM_1000_T:
   9681 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9682 			break;
   9683 		default:
   9684 			panic("wm_gmii_mediachange: bad media 0x%x",
   9685 			    ife->ifm_media);
   9686 		}
   9687 	}
   9688 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9689 	CSR_WRITE_FLUSH(sc);
   9690 	if (sc->sc_type <= WM_T_82543)
   9691 		wm_gmii_reset(sc);
   9692 
   9693 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9694 		return 0;
   9695 	return rc;
   9696 }
   9697 
   9698 /*
   9699  * wm_gmii_mediastatus:	[ifmedia interface function]
   9700  *
   9701  *	Get the current interface media status on a 1000BASE-T device.
   9702  */
   9703 static void
   9704 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9705 {
   9706 	struct wm_softc *sc = ifp->if_softc;
   9707 
   9708 	ether_mediastatus(ifp, ifmr);
   9709 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9710 	    | sc->sc_flowflags;
   9711 }
   9712 
   9713 #define	MDI_IO		CTRL_SWDPIN(2)
   9714 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9715 #define	MDI_CLK		CTRL_SWDPIN(3)
   9716 
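          /*
           * On the i82543 the MII management interface is bit-banged through
           * software-controllable pins in the CTRL register: MDI_IO carries
           * the data bit, MDI_CLK is toggled by hand and MDI_DIR selects the
           * host -> PHY direction.  Each bit is presented on MDI_IO and
           * clocked with a low-high-low pulse on MDI_CLK, with ~10us of
           * settle time per phase.
           */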
   9717 static void
   9718 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9719 {
   9720 	uint32_t i, v;
   9721 
   9722 	v = CSR_READ(sc, WMREG_CTRL);
   9723 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9724 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9725 
   9726 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9727 		if (data & i)
   9728 			v |= MDI_IO;
   9729 		else
   9730 			v &= ~MDI_IO;
   9731 		CSR_WRITE(sc, WMREG_CTRL, v);
   9732 		CSR_WRITE_FLUSH(sc);
   9733 		delay(10);
   9734 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9735 		CSR_WRITE_FLUSH(sc);
   9736 		delay(10);
   9737 		CSR_WRITE(sc, WMREG_CTRL, v);
   9738 		CSR_WRITE_FLUSH(sc);
   9739 		delay(10);
   9740 	}
   9741 }
   9742 
   9743 static uint32_t
   9744 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9745 {
   9746 	uint32_t v, i, data = 0;
   9747 
   9748 	v = CSR_READ(sc, WMREG_CTRL);
   9749 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9750 	v |= CTRL_SWDPIO(3);
   9751 
   9752 	CSR_WRITE(sc, WMREG_CTRL, v);
   9753 	CSR_WRITE_FLUSH(sc);
   9754 	delay(10);
   9755 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9756 	CSR_WRITE_FLUSH(sc);
   9757 	delay(10);
   9758 	CSR_WRITE(sc, WMREG_CTRL, v);
   9759 	CSR_WRITE_FLUSH(sc);
   9760 	delay(10);
   9761 
   9762 	for (i = 0; i < 16; i++) {
   9763 		data <<= 1;
   9764 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9765 		CSR_WRITE_FLUSH(sc);
   9766 		delay(10);
   9767 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9768 			data |= 1;
   9769 		CSR_WRITE(sc, WMREG_CTRL, v);
   9770 		CSR_WRITE_FLUSH(sc);
   9771 		delay(10);
   9772 	}
   9773 
   9774 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9775 	CSR_WRITE_FLUSH(sc);
   9776 	delay(10);
   9777 	CSR_WRITE(sc, WMREG_CTRL, v);
   9778 	CSR_WRITE_FLUSH(sc);
   9779 	delay(10);
   9780 
   9781 	return data;
   9782 }
   9783 
   9784 #undef MDI_IO
   9785 #undef MDI_DIR
   9786 #undef MDI_CLK
   9787 
   9788 /*
   9789  * wm_gmii_i82543_readreg:	[mii interface function]
   9790  *
   9791  *	Read a PHY register on the GMII (i82543 version).
   9792  */
   9793 static int
   9794 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9795 {
   9796 	struct wm_softc *sc = device_private(dev);
   9797 	int rv;
   9798 
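          	/*
          	 * Clock out an IEEE 802.3 Clause 22 read frame by hand:
          	 *
          	 *	<32 x 1> 01 10 PPPPP RRRRR
          	 *
          	 * i.e. preamble, start, read opcode, 5-bit PHY address and
          	 * 5-bit register address; the PHY then drives the 16 data bits
          	 * that wm_i82543_mii_recvbits() samples.
          	 */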
   9799 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9800 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9801 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9802 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9803 
   9804 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9805 	    device_xname(dev), phy, reg, rv));
   9806 
   9807 	return rv;
   9808 }
   9809 
   9810 /*
   9811  * wm_gmii_i82543_writereg:	[mii interface function]
   9812  *
   9813  *	Write a PHY register on the GMII (i82543 version).
   9814  */
   9815 static void
   9816 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9817 {
   9818 	struct wm_softc *sc = device_private(dev);
   9819 
   9820 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9821 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9822 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9823 	    (MII_COMMAND_START << 30), 32);
   9824 }
   9825 
   9826 /*
   9827  * wm_gmii_mdic_readreg:	[mii interface function]
   9828  *
   9829  *	Read a PHY register on the GMII.
   9830  */
   9831 static int
   9832 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9833 {
   9834 	struct wm_softc *sc = device_private(dev);
   9835 	uint32_t mdic = 0;
   9836 	int i, rv;
   9837 
   9838 	if (reg > MII_ADDRMASK) {
   9839 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9840 		    __func__, sc->sc_phytype, reg);
   9841 		reg &= MII_ADDRMASK;
   9842 	}
   9843 
   9844 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9845 	    MDIC_REGADD(reg));
   9846 
   9847 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9848 		mdic = CSR_READ(sc, WMREG_MDIC);
   9849 		if (mdic & MDIC_READY)
   9850 			break;
   9851 		delay(50);
   9852 	}
   9853 
   9854 	if ((mdic & MDIC_READY) == 0) {
   9855 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9856 		    device_xname(dev), phy, reg);
   9857 		rv = 0;
   9858 	} else if (mdic & MDIC_E) {
   9859 #if 0 /* This is normal if no PHY is present. */
   9860 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9861 		    device_xname(dev), phy, reg);
   9862 #endif
   9863 		rv = 0;
   9864 	} else {
   9865 		rv = MDIC_DATA(mdic);
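          		/* All-ones data usually means no PHY answered; map to 0 */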
   9866 		if (rv == 0xffff)
   9867 			rv = 0;
   9868 	}
   9869 
   9870 	return rv;
   9871 }
   9872 
   9873 /*
   9874  * wm_gmii_mdic_writereg:	[mii interface function]
   9875  *
   9876  *	Write a PHY register on the GMII.
   9877  */
   9878 static void
   9879 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9880 {
   9881 	struct wm_softc *sc = device_private(dev);
   9882 	uint32_t mdic = 0;
   9883 	int i;
   9884 
   9885 	if (reg > MII_ADDRMASK) {
   9886 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9887 		    __func__, sc->sc_phytype, reg);
   9888 		reg &= MII_ADDRMASK;
   9889 	}
   9890 
   9891 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9892 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9893 
   9894 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9895 		mdic = CSR_READ(sc, WMREG_MDIC);
   9896 		if (mdic & MDIC_READY)
   9897 			break;
   9898 		delay(50);
   9899 	}
   9900 
   9901 	if ((mdic & MDIC_READY) == 0)
   9902 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9903 		    device_xname(dev), phy, reg);
   9904 	else if (mdic & MDIC_E)
   9905 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9906 		    device_xname(dev), phy, reg);
   9907 }
   9908 
   9909 /*
   9910  * wm_gmii_i82544_readreg:	[mii interface function]
   9911  *
   9912  *	Read a PHY register on the GMII.
   9913  */
   9914 static int
   9915 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9916 {
   9917 	struct wm_softc *sc = device_private(dev);
   9918 	int rv;
   9919 
   9920 	if (sc->phy.acquire(sc)) {
   9921 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9922 		return 0;
   9923 	}
   9924 
   9925 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9926 		switch (sc->sc_phytype) {
   9927 		case WMPHY_IGP:
   9928 		case WMPHY_IGP_2:
   9929 		case WMPHY_IGP_3:
   9930 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9931 			break;
   9932 		default:
   9933 #ifdef WM_DEBUG
   9934 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9935 			    __func__, sc->sc_phytype, reg);
   9936 #endif
   9937 			break;
   9938 		}
   9939 	}
   9940 
   9941 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9942 	sc->phy.release(sc);
   9943 
   9944 	return rv;
   9945 }
   9946 
   9947 /*
   9948  * wm_gmii_i82544_writereg:	[mii interface function]
   9949  *
   9950  *	Write a PHY register on the GMII.
   9951  */
   9952 static void
   9953 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9954 {
   9955 	struct wm_softc *sc = device_private(dev);
   9956 
   9957 	if (sc->phy.acquire(sc)) {
   9958 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9959 		return;
   9960 	}
   9961 
   9962 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9963 		switch (sc->sc_phytype) {
   9964 		case WMPHY_IGP:
   9965 		case WMPHY_IGP_2:
   9966 		case WMPHY_IGP_3:
   9967 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9968 			break;
   9969 		default:
   9970 #ifdef WM_DEBUG
    9971 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9972 			    __func__, sc->sc_phytype, reg);
   9973 #endif
   9974 			break;
   9975 		}
   9976 	}
   9977 
   9978 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9979 	sc->phy.release(sc);
   9980 }
   9981 
   9982 /*
   9983  * wm_gmii_i80003_readreg:	[mii interface function]
   9984  *
    9985  *	Read a PHY register on the Kumeran interface.
    9986  * This could be handled by the PHY layer if we didn't have to lock the
    9987  * resource ...
   9988  */
   9989 static int
   9990 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9991 {
   9992 	struct wm_softc *sc = device_private(dev);
   9993 	int page_select, temp;
   9994 	int rv;
   9995 
    9996 	if (phy != 1) /* only one PHY on the Kumeran bus */
   9997 		return 0;
   9998 
   9999 	if (sc->phy.acquire(sc)) {
   10000 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10001 		return 0;
   10002 	}
   10003 
   10004 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10005 		page_select = GG82563_PHY_PAGE_SELECT;
   10006 	else {
   10007 		/*
   10008 		 * Use Alternative Page Select register to access registers
   10009 		 * 30 and 31.
   10010 		 */
   10011 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10012 	}
   10013 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10014 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10015 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10016 		/*
    10017 		 * Wait another 200us to work around a bug with the ready
    10018 		 * bit in the MDIC register.
   10019 		 */
   10020 		delay(200);
   10021 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10022 			device_printf(dev, "%s failed\n", __func__);
   10023 			rv = 0; /* XXX */
   10024 			goto out;
   10025 		}
   10026 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10027 		delay(200);
   10028 	} else
   10029 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10030 
   10031 out:
   10032 	sc->phy.release(sc);
   10033 	return rv;
   10034 }
   10035 
   10036 /*
   10037  * wm_gmii_i80003_writereg:	[mii interface function]
   10038  *
    10039  *	Write a PHY register on the Kumeran interface.
    10040  * This could be handled by the PHY layer if we didn't have to lock the
    10041  * resource ...
   10042  */
   10043 static void
   10044 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10045 {
   10046 	struct wm_softc *sc = device_private(dev);
   10047 	int page_select, temp;
   10048 
    10049 	if (phy != 1) /* only one PHY on the Kumeran bus */
   10050 		return;
   10051 
   10052 	if (sc->phy.acquire(sc)) {
   10053 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10054 		return;
   10055 	}
   10056 
   10057 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10058 		page_select = GG82563_PHY_PAGE_SELECT;
   10059 	else {
   10060 		/*
   10061 		 * Use Alternative Page Select register to access registers
   10062 		 * 30 and 31.
   10063 		 */
   10064 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10065 	}
   10066 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10067 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10068 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10069 		/*
    10070 		 * Wait another 200us to work around a bug with the ready
    10071 		 * bit in the MDIC register.
   10072 		 */
   10073 		delay(200);
   10074 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10075 			device_printf(dev, "%s failed\n", __func__);
   10076 			goto out;
   10077 		}
   10078 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10079 		delay(200);
   10080 	} else
   10081 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10082 
   10083 out:
   10084 	sc->phy.release(sc);
   10085 }
   10086 
   10087 /*
   10088  * wm_gmii_bm_readreg:	[mii interface function]
   10089  *
    10090  *	Read a PHY register on the BM PHY.
    10091  * This could be handled by the PHY layer if we didn't have to lock the
    10092  * resource ...
   10093  */
   10094 static int
   10095 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10096 {
   10097 	struct wm_softc *sc = device_private(dev);
   10098 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10099 	uint16_t val;
   10100 	int rv;
   10101 
   10102 	if (sc->phy.acquire(sc)) {
   10103 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10104 		return 0;
   10105 	}
   10106 
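          	/*
          	 * On BM PHYs (other than the 82574/82583), accesses to the
          	 * wakeup pages (>= 768) and to the page-select registers
          	 * (register 25 on page 0, and register 31) go to PHY address 1.
          	 */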
   10107 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10108 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10109 		    || (reg == 31)) ? 1 : phy;
   10110 	/* Page 800 works differently than the rest so it has its own func */
   10111 	if (page == BM_WUC_PAGE) {
   10112 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10113 		rv = val;
   10114 		goto release;
   10115 	}
   10116 
   10117 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10118 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10119 		    && (sc->sc_type != WM_T_82583))
   10120 			wm_gmii_mdic_writereg(dev, phy,
   10121 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10122 		else
   10123 			wm_gmii_mdic_writereg(dev, phy,
   10124 			    BME1000_PHY_PAGE_SELECT, page);
   10125 	}
   10126 
   10127 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10128 
   10129 release:
   10130 	sc->phy.release(sc);
   10131 	return rv;
   10132 }
   10133 
   10134 /*
   10135  * wm_gmii_bm_writereg:	[mii interface function]
   10136  *
    10137  *	Write a PHY register on the BM PHY.
    10138  * This could be handled by the PHY layer if we didn't have to lock the
    10139  * resource ...
   10140  */
   10141 static void
   10142 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10143 {
   10144 	struct wm_softc *sc = device_private(dev);
   10145 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10146 
   10147 	if (sc->phy.acquire(sc)) {
   10148 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10149 		return;
   10150 	}
   10151 
   10152 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10153 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10154 		    || (reg == 31)) ? 1 : phy;
   10155 	/* Page 800 works differently than the rest so it has its own func */
   10156 	if (page == BM_WUC_PAGE) {
   10157 		uint16_t tmp;
   10158 
   10159 		tmp = val;
   10160 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10161 		goto release;
   10162 	}
   10163 
   10164 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10165 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10166 		    && (sc->sc_type != WM_T_82583))
   10167 			wm_gmii_mdic_writereg(dev, phy,
   10168 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10169 		else
   10170 			wm_gmii_mdic_writereg(dev, phy,
   10171 			    BME1000_PHY_PAGE_SELECT, page);
   10172 	}
   10173 
   10174 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10175 
   10176 release:
   10177 	sc->phy.release(sc);
   10178 }
   10179 
   10180 static void
    10181 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10182 {
   10183 	struct wm_softc *sc = device_private(dev);
   10184 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10185 	uint16_t wuce, reg;
   10186 
   10187 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10188 		device_xname(dev), __func__));
   10189 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10190 	if (sc->sc_type == WM_T_PCH) {
    10191 		/* XXX The e1000 driver does nothing here... why? */
   10192 	}
   10193 
   10194 	/*
   10195 	 * 1) Enable PHY wakeup register first.
   10196 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10197 	 */
   10198 
   10199 	/* Set page 769 */
   10200 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10201 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10202 
   10203 	/* Read WUCE and save it */
   10204 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10205 
   10206 	reg = wuce | BM_WUC_ENABLE_BIT;
   10207 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10208 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10209 
   10210 	/* Select page 800 */
   10211 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10212 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10213 
   10214 	/*
   10215 	 * 2) Access PHY wakeup register.
   10216 	 * See e1000_access_phy_wakeup_reg_bm.
   10217 	 */
   10218 
    10219 	/* Select the wakeup register by writing its page 800 offset */
   10220 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10221 
   10222 	if (rd)
   10223 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10224 	else
   10225 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10226 
   10227 	/*
   10228 	 * 3) Disable PHY wakeup register.
   10229 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10230 	 */
   10231 	/* Set page 769 */
   10232 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10233 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10234 
   10235 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10236 }
   10237 
   10238 /*
   10239  * wm_gmii_hv_readreg:	[mii interface function]
   10240  *
    10241  *	Read a PHY register on the HV PHY.
    10242  * This could be handled by the PHY layer if we didn't have to lock the
    10243  * resource ...
   10244  */
   10245 static int
   10246 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10247 {
   10248 	struct wm_softc *sc = device_private(dev);
   10249 	int rv;
   10250 
   10251 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10252 		device_xname(dev), __func__));
   10253 	if (sc->phy.acquire(sc)) {
   10254 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10255 		return 0;
   10256 	}
   10257 
   10258 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10259 	sc->phy.release(sc);
   10260 	return rv;
   10261 }
   10262 
   10263 static int
   10264 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10265 {
   10266 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10267 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10268 	uint16_t val;
   10269 	int rv;
   10270 
   10271 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10272 
   10273 	/* Page 800 works differently than the rest so it has its own func */
   10274 	if (page == BM_WUC_PAGE) {
   10275 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10276 		return val;
   10277 	}
   10278 
   10279 	/*
    10280 	 * Pages lower than 768 work differently from the rest and would
    10281 	 * need their own function; that is not implemented here.
   10282 	 */
   10283 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    10284 		device_printf(dev, "%s: page %d is not supported\n",
          		    __func__, page);
   10285 		return 0;
   10286 	}
   10287 
   10288 	/*
   10289 	 * XXX I21[789] documents say that the SMBus Address register is at
   10290 	 * PHY address 01, Page 0 (not 768), Register 26.
   10291 	 */
   10292 	if (page == HV_INTC_FC_PAGE_START)
   10293 		page = 0;
   10294 
   10295 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10296 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10297 		    page << BME1000_PAGE_SHIFT);
   10298 	}
   10299 
   10300 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10301 	return rv;
   10302 }
   10303 
   10304 /*
   10305  * wm_gmii_hv_writereg:	[mii interface function]
   10306  *
    10307  *	Write a PHY register on the HV PHY.
    10308  * This could be handled by the PHY layer if we didn't have to lock the
    10309  * resource ...
   10310  */
   10311 static void
   10312 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10313 {
   10314 	struct wm_softc *sc = device_private(dev);
   10315 
   10316 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10317 		device_xname(dev), __func__));
   10318 
   10319 	if (sc->phy.acquire(sc)) {
   10320 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10321 		return;
   10322 	}
   10323 
   10324 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10325 	sc->phy.release(sc);
   10326 }
   10327 
   10328 static void
   10329 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10330 {
   10331 	struct wm_softc *sc = device_private(dev);
   10332 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10333 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10334 
   10335 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10336 
   10337 	/* Page 800 works differently than the rest so it has its own func */
   10338 	if (page == BM_WUC_PAGE) {
   10339 		uint16_t tmp;
   10340 
   10341 		tmp = val;
   10342 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10343 		return;
   10344 	}
   10345 
   10346 	/*
    10347 	 * Pages lower than 768 work differently from the rest and would
    10348 	 * need their own function; that is not implemented here.
   10349 	 */
   10350 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    10351 		device_printf(dev, "%s: page %d is not supported\n",
          		    __func__, page);
   10352 		return;
   10353 	}
   10354 
   10355 	{
   10356 		/*
   10357 		 * XXX I21[789] documents say that the SMBus Address register
   10358 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10359 		 */
   10360 		if (page == HV_INTC_FC_PAGE_START)
   10361 			page = 0;
   10362 
   10363 		/*
    10364 		 * XXX Work around MDIO accesses being disabled after
    10365 		 * entering IEEE Power Down (whenever bit 11 of the PHY
    10366 		 * control register is set).
   10367 		 */
   10368 		if (sc->sc_phytype == WMPHY_82578) {
   10369 			struct mii_softc *child;
   10370 
   10371 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10372 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10373 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10374 			    && ((val & (1 << 11)) != 0)) {
   10375 				printf("XXX need workaround\n");
   10376 			}
   10377 		}
   10378 
   10379 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10380 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10381 			    page << BME1000_PAGE_SHIFT);
   10382 		}
   10383 	}
   10384 
   10385 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10386 }
   10387 
   10388 /*
   10389  * wm_gmii_82580_readreg:	[mii interface function]
   10390  *
   10391  *	Read a PHY register on the 82580 and I350.
   10392  * This could be handled by the PHY layer if we didn't have to lock the
    10393  * resource ...
   10394  */
   10395 static int
   10396 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10397 {
   10398 	struct wm_softc *sc = device_private(dev);
   10399 	int rv;
   10400 
   10401 	if (sc->phy.acquire(sc) != 0) {
   10402 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10403 		return 0;
   10404 	}
   10405 
   10406 #ifdef DIAGNOSTIC
   10407 	if (reg > MII_ADDRMASK) {
   10408 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10409 		    __func__, sc->sc_phytype, reg);
   10410 		reg &= MII_ADDRMASK;
   10411 	}
   10412 #endif
   10413 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10414 
   10415 	sc->phy.release(sc);
   10416 	return rv;
   10417 }
   10418 
   10419 /*
   10420  * wm_gmii_82580_writereg:	[mii interface function]
   10421  *
   10422  *	Write a PHY register on the 82580 and I350.
   10423  * This could be handled by the PHY layer if we didn't have to lock the
    10424  * resource ...
   10425  */
   10426 static void
   10427 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10428 {
   10429 	struct wm_softc *sc = device_private(dev);
   10430 
   10431 	if (sc->phy.acquire(sc) != 0) {
   10432 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10433 		return;
   10434 	}
   10435 
   10436 #ifdef DIAGNOSTIC
   10437 	if (reg > MII_ADDRMASK) {
   10438 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10439 		    __func__, sc->sc_phytype, reg);
   10440 		reg &= MII_ADDRMASK;
   10441 	}
   10442 #endif
   10443 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10444 
   10445 	sc->phy.release(sc);
   10446 }
   10447 
   10448 /*
   10449  * wm_gmii_gs40g_readreg:	[mii interface function]
   10450  *
    10451  *	Read a PHY register on the I210 and I211.
    10452  * This could be handled by the PHY layer if we didn't have to lock the
    10453  * resource ...
   10454  */
   10455 static int
   10456 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10457 {
   10458 	struct wm_softc *sc = device_private(dev);
   10459 	int page, offset;
   10460 	int rv;
   10461 
   10462 	/* Acquire semaphore */
   10463 	if (sc->phy.acquire(sc)) {
   10464 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10465 		return 0;
   10466 	}
   10467 
   10468 	/* Page select */
   10469 	page = reg >> GS40G_PAGE_SHIFT;
   10470 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10471 
   10472 	/* Read reg */
   10473 	offset = reg & GS40G_OFFSET_MASK;
   10474 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10475 
   10476 	sc->phy.release(sc);
   10477 	return rv;
   10478 }
   10479 
   10480 /*
   10481  * wm_gmii_gs40g_writereg:	[mii interface function]
   10482  *
   10483  *	Write a PHY register on the I210 and I211.
   10484  * This could be handled by the PHY layer if we didn't have to lock the
    10485  * resource ...
   10486  */
   10487 static void
   10488 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10489 {
   10490 	struct wm_softc *sc = device_private(dev);
   10491 	int page, offset;
   10492 
   10493 	/* Acquire semaphore */
   10494 	if (sc->phy.acquire(sc)) {
   10495 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10496 		return;
   10497 	}
   10498 
   10499 	/* Page select */
   10500 	page = reg >> GS40G_PAGE_SHIFT;
   10501 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10502 
   10503 	/* Write reg */
   10504 	offset = reg & GS40G_OFFSET_MASK;
   10505 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10506 
   10507 	/* Release semaphore */
   10508 	sc->phy.release(sc);
   10509 }
   10510 
   10511 /*
   10512  * wm_gmii_statchg:	[mii interface function]
   10513  *
   10514  *	Callback from MII layer when media changes.
   10515  */
   10516 static void
   10517 wm_gmii_statchg(struct ifnet *ifp)
   10518 {
   10519 	struct wm_softc *sc = ifp->if_softc;
   10520 	struct mii_data *mii = &sc->sc_mii;
   10521 
   10522 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10523 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10524 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10525 
   10526 	/*
   10527 	 * Get flow control negotiation result.
   10528 	 */
   10529 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10530 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10531 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10532 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10533 	}
   10534 
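          	/*
          	 * Apply the resolved pause flags to the MAC: TXPAUSE enables
          	 * transmit flow control (CTRL_TFCE) plus XON frames
          	 * (FCRTL_XONE), and RXPAUSE makes the MAC honour received
          	 * pause frames (CTRL_RFCE).
          	 */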
   10535 	if (sc->sc_flowflags & IFM_FLOW) {
   10536 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10537 			sc->sc_ctrl |= CTRL_TFCE;
   10538 			sc->sc_fcrtl |= FCRTL_XONE;
   10539 		}
   10540 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10541 			sc->sc_ctrl |= CTRL_RFCE;
   10542 	}
   10543 
   10544 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10545 		DPRINTF(WM_DEBUG_LINK,
   10546 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10547 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10548 	} else {
   10549 		DPRINTF(WM_DEBUG_LINK,
   10550 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10551 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10552 	}
   10553 
   10554 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10555 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10556 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10557 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10558 	if (sc->sc_type == WM_T_80003) {
   10559 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10560 		case IFM_1000_T:
   10561 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10562 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10563 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10564 			break;
   10565 		default:
   10566 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10567 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10568 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10569 			break;
   10570 		}
   10571 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10572 	}
   10573 }
   10574 
    10575 /* Kumeran related (80003, ICH* and PCH*) */
   10576 
   10577 /*
   10578  * wm_kmrn_readreg:
   10579  *
    10580  *	Read a Kumeran register
   10581  */
   10582 static int
   10583 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10584 {
   10585 	int rv;
   10586 
   10587 	if (sc->sc_type == WM_T_80003)
   10588 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10589 	else
   10590 		rv = sc->phy.acquire(sc);
   10591 	if (rv != 0) {
   10592 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10593 		    __func__);
   10594 		return rv;
   10595 	}
   10596 
   10597 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10598 
   10599 	if (sc->sc_type == WM_T_80003)
   10600 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10601 	else
   10602 		sc->phy.release(sc);
   10603 
   10604 	return rv;
   10605 }
   10606 
   10607 static int
   10608 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10609 {
   10610 
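          	/*
          	 * A Kumeran read is a two-step handshake: write the register
          	 * offset with the read-enable (REN) bit set, give the hardware
          	 * a moment to latch the data, then read it back from the same
          	 * KUMCTRLSTA register.
          	 */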
   10611 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10612 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10613 	    KUMCTRLSTA_REN);
   10614 	CSR_WRITE_FLUSH(sc);
   10615 	delay(2);
   10616 
   10617 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10618 
   10619 	return 0;
   10620 }
   10621 
   10622 /*
   10623  * wm_kmrn_writereg:
   10624  *
    10625  *	Write a Kumeran register
   10626  */
   10627 static int
   10628 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10629 {
   10630 	int rv;
   10631 
   10632 	if (sc->sc_type == WM_T_80003)
   10633 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10634 	else
   10635 		rv = sc->phy.acquire(sc);
   10636 	if (rv != 0) {
   10637 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10638 		    __func__);
   10639 		return rv;
   10640 	}
   10641 
   10642 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10643 
   10644 	if (sc->sc_type == WM_T_80003)
   10645 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10646 	else
   10647 		sc->phy.release(sc);
   10648 
   10649 	return rv;
   10650 }
   10651 
   10652 static int
   10653 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10654 {
   10655 
   10656 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10657 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10658 
   10659 	return 0;
   10660 }
   10661 
   10662 /* SGMII related */
   10663 
   10664 /*
   10665  * wm_sgmii_uses_mdio
   10666  *
   10667  * Check whether the transaction is to the internal PHY or the external
   10668  * MDIO interface. Return true if it's MDIO.
   10669  */
   10670 static bool
   10671 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10672 {
   10673 	uint32_t reg;
   10674 	bool ismdio = false;
   10675 
   10676 	switch (sc->sc_type) {
   10677 	case WM_T_82575:
   10678 	case WM_T_82576:
   10679 		reg = CSR_READ(sc, WMREG_MDIC);
   10680 		ismdio = ((reg & MDIC_DEST) != 0);
   10681 		break;
   10682 	case WM_T_82580:
   10683 	case WM_T_I350:
   10684 	case WM_T_I354:
   10685 	case WM_T_I210:
   10686 	case WM_T_I211:
   10687 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10688 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10689 		break;
   10690 	default:
   10691 		break;
   10692 	}
   10693 
   10694 	return ismdio;
   10695 }
   10696 
   10697 /*
   10698  * wm_sgmii_readreg:	[mii interface function]
   10699  *
   10700  *	Read a PHY register on the SGMII
   10701  * This could be handled by the PHY layer if we didn't have to lock the
    10702  * resource ...
   10703  */
   10704 static int
   10705 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10706 {
   10707 	struct wm_softc *sc = device_private(dev);
   10708 	uint32_t i2ccmd;
   10709 	int i, rv;
   10710 
   10711 	if (sc->phy.acquire(sc)) {
   10712 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10713 		return 0;
   10714 	}
   10715 
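          	/*
          	 * In SGMII mode, PHY registers are accessed through the I2CCMD
          	 * interface: encode the register and PHY address into a single
          	 * command word, then poll the ready bit for completion.
          	 */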
   10716 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10717 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10718 	    | I2CCMD_OPCODE_READ;
   10719 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10720 
   10721 	/* Poll the ready bit */
   10722 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10723 		delay(50);
   10724 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10725 		if (i2ccmd & I2CCMD_READY)
   10726 			break;
   10727 	}
   10728 	if ((i2ccmd & I2CCMD_READY) == 0)
   10729 		device_printf(dev, "I2CCMD Read did not complete\n");
   10730 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10731 		device_printf(dev, "I2CCMD Error bit set\n");
   10732 
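          	/* The data bytes come back swapped on I2C; restore host order */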
   10733 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10734 
   10735 	sc->phy.release(sc);
   10736 	return rv;
   10737 }
   10738 
   10739 /*
   10740  * wm_sgmii_writereg:	[mii interface function]
   10741  *
   10742  *	Write a PHY register on the SGMII.
   10743  * This could be handled by the PHY layer if we didn't have to lock the
    10744  * resource ...
   10745  */
   10746 static void
   10747 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10748 {
   10749 	struct wm_softc *sc = device_private(dev);
   10750 	uint32_t i2ccmd;
   10751 	int i;
   10752 	int val_swapped;
   10753 
   10754 	if (sc->phy.acquire(sc) != 0) {
   10755 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10756 		return;
   10757 	}
   10758 	/* Swap the data bytes for the I2C interface */
   10759 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10760 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10761 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10762 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10763 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10764 
   10765 	/* Poll the ready bit */
   10766 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10767 		delay(50);
   10768 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10769 		if (i2ccmd & I2CCMD_READY)
   10770 			break;
   10771 	}
   10772 	if ((i2ccmd & I2CCMD_READY) == 0)
   10773 		device_printf(dev, "I2CCMD Write did not complete\n");
   10774 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10775 		device_printf(dev, "I2CCMD Error bit set\n");
   10776 
   10777 	sc->phy.release(sc);
   10778 }
   10779 
   10780 /* TBI related */
   10781 
   10782 /*
   10783  * wm_tbi_mediainit:
   10784  *
   10785  *	Initialize media for use on 1000BASE-X devices.
   10786  */
   10787 static void
   10788 wm_tbi_mediainit(struct wm_softc *sc)
   10789 {
   10790 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10791 	const char *sep = "";
   10792 
   10793 	if (sc->sc_type < WM_T_82543)
   10794 		sc->sc_tipg = TIPG_WM_DFLT;
   10795 	else
   10796 		sc->sc_tipg = TIPG_LG_DFLT;
   10797 
   10798 	sc->sc_tbi_serdes_anegticks = 5;
   10799 
   10800 	/* Initialize our media structures */
   10801 	sc->sc_mii.mii_ifp = ifp;
   10802 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10803 
   10804 	if ((sc->sc_type >= WM_T_82575)
   10805 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10806 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10807 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10808 	else
   10809 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10810 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10811 
   10812 	/*
   10813 	 * SWD Pins:
   10814 	 *
   10815 	 *	0 = Link LED (output)
   10816 	 *	1 = Loss Of Signal (input)
   10817 	 */
   10818 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10819 
   10820 	/* XXX Perhaps this is only for TBI */
   10821 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10822 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10823 
   10824 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10825 		sc->sc_ctrl &= ~CTRL_LRST;
   10826 
   10827 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10828 
   10829 #define	ADD(ss, mm, dd)							\
   10830 do {									\
   10831 	aprint_normal("%s%s", sep, ss);					\
   10832 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10833 	sep = ", ";							\
   10834 } while (/*CONSTCOND*/0)
   10835 
   10836 	aprint_normal_dev(sc->sc_dev, "");
   10837 
   10838 	if (sc->sc_type == WM_T_I354) {
   10839 		uint32_t status;
   10840 
   10841 		status = CSR_READ(sc, WMREG_STATUS);
   10842 		if (((status & STATUS_2P5_SKU) != 0)
   10843 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10844 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    10845 		} else
    10846 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10847 	} else if (sc->sc_type == WM_T_82545) {
   10848 		/* Only 82545 is LX (XXX except SFP) */
   10849 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10850 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10851 	} else {
   10852 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10853 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10854 	}
   10855 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10856 	aprint_normal("\n");
   10857 
   10858 #undef ADD
   10859 
   10860 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10861 }
   10862 
   10863 /*
   10864  * wm_tbi_mediachange:	[ifmedia interface function]
   10865  *
   10866  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10867  */
   10868 static int
   10869 wm_tbi_mediachange(struct ifnet *ifp)
   10870 {
   10871 	struct wm_softc *sc = ifp->if_softc;
   10872 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10873 	uint32_t status;
   10874 	int i;
   10875 
   10876 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10877 		/* XXX need some work for >= 82571 and < 82575 */
   10878 		if (sc->sc_type < WM_T_82575)
   10879 			return 0;
   10880 	}
   10881 
   10882 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10883 	    || (sc->sc_type >= WM_T_82575))
   10884 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10885 
   10886 	sc->sc_ctrl &= ~CTRL_LRST;
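          	/*
          	 * Build the TXCW autonegotiation advertisement: enable ANE and
          	 * advertise the duplex abilities (and, below, the pause modes)
          	 * that match the selected media.
          	 */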
   10887 	sc->sc_txcw = TXCW_ANE;
   10888 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10889 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10890 	else if (ife->ifm_media & IFM_FDX)
   10891 		sc->sc_txcw |= TXCW_FD;
   10892 	else
   10893 		sc->sc_txcw |= TXCW_HD;
   10894 
   10895 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10896 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10897 
   10898 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10899 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10900 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10901 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10902 	CSR_WRITE_FLUSH(sc);
   10903 	delay(1000);
   10904 
   10905 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10906 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10907 
   10908 	/*
    10909 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    10910 	 * optics detect a signal; on older chips the sense is inverted.
   10911 	 */
   10912 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10913 		/* Have signal; wait for the link to come up. */
   10914 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10915 			delay(10000);
   10916 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10917 				break;
   10918 		}
   10919 
   10920 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10921 			    device_xname(sc->sc_dev),i));
   10922 
   10923 		status = CSR_READ(sc, WMREG_STATUS);
   10924 		DPRINTF(WM_DEBUG_LINK,
   10925 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10926 			device_xname(sc->sc_dev),status, STATUS_LU));
   10927 		if (status & STATUS_LU) {
   10928 			/* Link is up. */
   10929 			DPRINTF(WM_DEBUG_LINK,
   10930 			    ("%s: LINK: set media -> link up %s\n",
   10931 			    device_xname(sc->sc_dev),
   10932 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10933 
   10934 			/*
    10935 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
    10936 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   10937 			 */
   10938 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10939 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10940 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10941 			if (status & STATUS_FD)
   10942 				sc->sc_tctl |=
   10943 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10944 			else
   10945 				sc->sc_tctl |=
   10946 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10947 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10948 				sc->sc_fcrtl |= FCRTL_XONE;
   10949 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10950 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10951 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10952 				      sc->sc_fcrtl);
   10953 			sc->sc_tbi_linkup = 1;
   10954 		} else {
   10955 			if (i == WM_LINKUP_TIMEOUT)
   10956 				wm_check_for_link(sc);
   10957 			/* Link is down. */
   10958 			DPRINTF(WM_DEBUG_LINK,
   10959 			    ("%s: LINK: set media -> link down\n",
   10960 			    device_xname(sc->sc_dev)));
   10961 			sc->sc_tbi_linkup = 0;
   10962 		}
   10963 	} else {
   10964 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10965 		    device_xname(sc->sc_dev)));
   10966 		sc->sc_tbi_linkup = 0;
   10967 	}
   10968 
   10969 	wm_tbi_serdes_set_linkled(sc);
   10970 
   10971 	return 0;
   10972 }
   10973 
   10974 /*
   10975  * wm_tbi_mediastatus:	[ifmedia interface function]
   10976  *
   10977  *	Get the current interface media status on a 1000BASE-X device.
   10978  */
   10979 static void
   10980 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10981 {
   10982 	struct wm_softc *sc = ifp->if_softc;
   10983 	uint32_t ctrl, status;
   10984 
   10985 	ifmr->ifm_status = IFM_AVALID;
   10986 	ifmr->ifm_active = IFM_ETHER;
   10987 
   10988 	status = CSR_READ(sc, WMREG_STATUS);
   10989 	if ((status & STATUS_LU) == 0) {
   10990 		ifmr->ifm_active |= IFM_NONE;
   10991 		return;
   10992 	}
   10993 
   10994 	ifmr->ifm_status |= IFM_ACTIVE;
   10995 	/* Only 82545 is LX */
   10996 	if (sc->sc_type == WM_T_82545)
   10997 		ifmr->ifm_active |= IFM_1000_LX;
   10998 	else
   10999 		ifmr->ifm_active |= IFM_1000_SX;
   11000 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11001 		ifmr->ifm_active |= IFM_FDX;
   11002 	else
   11003 		ifmr->ifm_active |= IFM_HDX;
   11004 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11005 	if (ctrl & CTRL_RFCE)
   11006 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11007 	if (ctrl & CTRL_TFCE)
   11008 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11009 }
   11010 
   11011 /* XXX TBI only */
   11012 static int
   11013 wm_check_for_link(struct wm_softc *sc)
   11014 {
   11015 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11016 	uint32_t rxcw;
   11017 	uint32_t ctrl;
   11018 	uint32_t status;
   11019 	uint32_t sig;
   11020 
   11021 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11022 		/* XXX need some work for >= 82571 */
   11023 		if (sc->sc_type >= WM_T_82571) {
   11024 			sc->sc_tbi_linkup = 1;
   11025 			return 0;
   11026 		}
   11027 	}
   11028 
   11029 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11030 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11031 	status = CSR_READ(sc, WMREG_STATUS);
   11032 
   11033 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11034 
   11035 	DPRINTF(WM_DEBUG_LINK,
   11036 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11037 		device_xname(sc->sc_dev), __func__,
   11038 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11039 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11040 
   11041 	/*
   11042 	 * SWDPIN   LU RXCW
   11043 	 *      0    0    0
   11044 	 *      0    0    1	(should not happen)
   11045 	 *      0    1    0	(should not happen)
   11046 	 *      0    1    1	(should not happen)
   11047 	 *      1    0    0	Disable autonego and force linkup
   11048 	 *      1    0    1	got /C/ but not linkup yet
   11049 	 *      1    1    0	(linkup)
   11050 	 *      1    1    1	If IFM_AUTO, back to autonego
   11051 	 *
   11052 	 */
   11053 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11054 	    && ((status & STATUS_LU) == 0)
   11055 	    && ((rxcw & RXCW_C) == 0)) {
   11056 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11057 			__func__));
   11058 		sc->sc_tbi_linkup = 0;
   11059 		/* Disable auto-negotiation in the TXCW register */
   11060 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11061 
   11062 		/*
   11063 		 * Force link-up and also force full-duplex.
   11064 		 *
    11065 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
    11066 		 * automatically, so rebuild sc->sc_ctrl from the fresh read.
   11067 		 */
   11068 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11069 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11070 	} else if (((status & STATUS_LU) != 0)
   11071 	    && ((rxcw & RXCW_C) != 0)
   11072 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11073 		sc->sc_tbi_linkup = 1;
   11074 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11075 			__func__));
   11076 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11077 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11078 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11079 	    && ((rxcw & RXCW_C) != 0)) {
   11080 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11081 	} else {
   11082 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11083 			status));
   11084 	}
   11085 
   11086 	return 0;
   11087 }
   11088 
   11089 /*
   11090  * wm_tbi_tick:
   11091  *
   11092  *	Check the link on TBI devices.
   11093  *	This function acts as mii_tick().
   11094  */
   11095 static void
   11096 wm_tbi_tick(struct wm_softc *sc)
   11097 {
   11098 	struct mii_data *mii = &sc->sc_mii;
   11099 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11100 	uint32_t status;
   11101 
   11102 	KASSERT(WM_CORE_LOCKED(sc));
   11103 
   11104 	status = CSR_READ(sc, WMREG_STATUS);
   11105 
   11106 	/* XXX is this needed? */
   11107 	(void)CSR_READ(sc, WMREG_RXCW);
   11108 	(void)CSR_READ(sc, WMREG_CTRL);
   11109 
   11110 	/* set link status */
   11111 	if ((status & STATUS_LU) == 0) {
   11112 		DPRINTF(WM_DEBUG_LINK,
   11113 		    ("%s: LINK: checklink -> down\n",
   11114 			device_xname(sc->sc_dev)));
   11115 		sc->sc_tbi_linkup = 0;
   11116 	} else if (sc->sc_tbi_linkup == 0) {
   11117 		DPRINTF(WM_DEBUG_LINK,
   11118 		    ("%s: LINK: checklink -> up %s\n",
   11119 			device_xname(sc->sc_dev),
   11120 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11121 		sc->sc_tbi_linkup = 1;
   11122 		sc->sc_tbi_serdes_ticks = 0;
   11123 	}
   11124 
   11125 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11126 		goto setled;
   11127 
   11128 	if ((status & STATUS_LU) == 0) {
   11129 		sc->sc_tbi_linkup = 0;
   11130 		/* If the timer expired, retry autonegotiation */
   11131 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11132 		    && (++sc->sc_tbi_serdes_ticks
   11133 			>= sc->sc_tbi_serdes_anegticks)) {
   11134 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11135 			sc->sc_tbi_serdes_ticks = 0;
   11136 			/*
   11137 			 * Reset the link, and let autonegotiation do
   11138 			 * its thing
   11139 			 */
   11140 			sc->sc_ctrl |= CTRL_LRST;
   11141 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11142 			CSR_WRITE_FLUSH(sc);
   11143 			delay(1000);
   11144 			sc->sc_ctrl &= ~CTRL_LRST;
   11145 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11146 			CSR_WRITE_FLUSH(sc);
   11147 			delay(1000);
   11148 			CSR_WRITE(sc, WMREG_TXCW,
   11149 			    sc->sc_txcw & ~TXCW_ANE);
   11150 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11151 		}
   11152 	}
   11153 
   11154 setled:
   11155 	wm_tbi_serdes_set_linkled(sc);
   11156 }
   11157 
   11158 /* SERDES related */
   11159 static void
   11160 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11161 {
   11162 	uint32_t reg;
   11163 
   11164 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11165 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11166 		return;
   11167 
   11168 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11169 	reg |= PCS_CFG_PCS_EN;
   11170 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11171 
   11172 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11173 	reg &= ~CTRL_EXT_SWDPIN(3);
   11174 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11175 	CSR_WRITE_FLUSH(sc);
   11176 }
   11177 
   11178 static int
   11179 wm_serdes_mediachange(struct ifnet *ifp)
   11180 {
   11181 	struct wm_softc *sc = ifp->if_softc;
   11182 	bool pcs_autoneg = true; /* XXX */
   11183 	uint32_t ctrl_ext, pcs_lctl, reg;
   11184 
   11185 	/* XXX Currently, this function is not called on 8257[12] */
   11186 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11187 	    || (sc->sc_type >= WM_T_82575))
   11188 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11189 
   11190 	wm_serdes_power_up_link_82575(sc);
   11191 
   11192 	sc->sc_ctrl |= CTRL_SLU;
   11193 
   11194 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11195 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11196 
   11197 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11198 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
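          	/*
          	 * Choose between PCS autonegotiation and a forced 1000/full
          	 * link: SGMII always autonegotiates, 1000BASE-KX is always
          	 * forced, and on the 82575/82576 autonegotiation may also be
          	 * disabled by the WM_F_PCS_DIS_AUTONEGO flag.
          	 */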
   11199 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11200 	case CTRL_EXT_LINK_MODE_SGMII:
   11201 		pcs_autoneg = true;
   11202 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11203 		break;
   11204 	case CTRL_EXT_LINK_MODE_1000KX:
   11205 		pcs_autoneg = false;
   11206 		/* FALLTHROUGH */
   11207 	default:
   11208 		if ((sc->sc_type == WM_T_82575)
   11209 		    || (sc->sc_type == WM_T_82576)) {
   11210 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11211 				pcs_autoneg = false;
   11212 		}
   11213 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11214 		    | CTRL_FRCFDX;
   11215 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11216 	}
   11217 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11218 
   11219 	if (pcs_autoneg) {
   11220 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11221 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11222 
   11223 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11224 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11225 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11226 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11227 	} else
   11228 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11229 
   11230 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11231 
   11233 	return 0;
   11234 }
   11235 
   11236 static void
   11237 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11238 {
   11239 	struct wm_softc *sc = ifp->if_softc;
   11240 	struct mii_data *mii = &sc->sc_mii;
   11241 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11242 	uint32_t pcs_adv, pcs_lpab, reg;
   11243 
   11244 	ifmr->ifm_status = IFM_AVALID;
   11245 	ifmr->ifm_active = IFM_ETHER;
   11246 
   11247 	/* Check PCS */
   11248 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11249 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11250 		ifmr->ifm_active |= IFM_NONE;
   11251 		sc->sc_tbi_linkup = 0;
   11252 		goto setled;
   11253 	}
   11254 
   11255 	sc->sc_tbi_linkup = 1;
   11256 	ifmr->ifm_status |= IFM_ACTIVE;
   11257 	if (sc->sc_type == WM_T_I354) {
   11258 		uint32_t status;
   11259 
   11260 		status = CSR_READ(sc, WMREG_STATUS);
   11261 		if (((status & STATUS_2P5_SKU) != 0)
   11262 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11263 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11264 		} else
   11265 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11266 	} else {
   11267 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11268 		case PCS_LSTS_SPEED_10:
   11269 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11270 			break;
   11271 		case PCS_LSTS_SPEED_100:
   11272 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11273 			break;
   11274 		case PCS_LSTS_SPEED_1000:
   11275 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11276 			break;
   11277 		default:
   11278 			device_printf(sc->sc_dev, "Unknown speed\n");
   11279 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11280 			break;
   11281 		}
   11282 	}
   11283 	if ((reg & PCS_LSTS_FDX) != 0)
   11284 		ifmr->ifm_active |= IFM_FDX;
   11285 	else
   11286 		ifmr->ifm_active |= IFM_HDX;
   11287 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11288 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11289 		/* Check flow */
   11290 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11291 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11292 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11293 			goto setled;
   11294 		}
   11295 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11296 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11297 		DPRINTF(WM_DEBUG_LINK,
   11298 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
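		/*
		 * Resolve flow control per IEEE 802.3 annex 28B: a pause
		 * direction is enabled only when the local and link partner
		 * SYM/ASYM pause advertisements form a compatible pair.
		 */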
   11299 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11300 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11301 			mii->mii_media_active |= IFM_FLOW
   11302 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11303 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11304 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11305 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11306 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11307 			mii->mii_media_active |= IFM_FLOW
   11308 			    | IFM_ETH_TXPAUSE;
   11309 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11310 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11311 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11312 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11313 			mii->mii_media_active |= IFM_FLOW
   11314 			    | IFM_ETH_RXPAUSE;
   11315 		}
   11316 	}
   11317 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11318 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11319 setled:
   11320 	wm_tbi_serdes_set_linkled(sc);
   11321 }
   11322 
   11323 /*
   11324  * wm_serdes_tick:
   11325  *
   11326  *	Check the link on serdes devices.
   11327  */
   11328 static void
   11329 wm_serdes_tick(struct wm_softc *sc)
   11330 {
   11331 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11332 	struct mii_data *mii = &sc->sc_mii;
   11333 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11334 	uint32_t reg;
   11335 
   11336 	KASSERT(WM_CORE_LOCKED(sc));
   11337 
   11338 	mii->mii_media_status = IFM_AVALID;
   11339 	mii->mii_media_active = IFM_ETHER;
   11340 
   11341 	/* Check PCS */
   11342 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11343 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11344 		mii->mii_media_status |= IFM_ACTIVE;
   11345 		sc->sc_tbi_linkup = 1;
   11346 		sc->sc_tbi_serdes_ticks = 0;
   11347 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11348 		if ((reg & PCS_LSTS_FDX) != 0)
   11349 			mii->mii_media_active |= IFM_FDX;
   11350 		else
   11351 			mii->mii_media_active |= IFM_HDX;
   11352 	} else {
   11353 		mii->mii_media_status |= IFM_NONE;
   11354 		sc->sc_tbi_linkup = 0;
   11355 		/* If the timer expired, retry autonegotiation */
   11356 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11357 		    && (++sc->sc_tbi_serdes_ticks
   11358 			>= sc->sc_tbi_serdes_anegticks)) {
   11359 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11360 			sc->sc_tbi_serdes_ticks = 0;
   11361 			/* XXX */
   11362 			wm_serdes_mediachange(ifp);
   11363 		}
   11364 	}
   11365 
   11366 	wm_tbi_serdes_set_linkled(sc);
   11367 }
   11368 
   11369 /* SFP related */
   11370 
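/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM through the I2CCMD
 *	register interface.
 */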
   11371 static int
   11372 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11373 {
   11374 	uint32_t i2ccmd;
   11375 	int i;
   11376 
   11377 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11378 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11379 
   11380 	/* Poll the ready bit */
   11381 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11382 		delay(50);
   11383 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11384 		if (i2ccmd & I2CCMD_READY)
   11385 			break;
   11386 	}
   11387 	if ((i2ccmd & I2CCMD_READY) == 0)
   11388 		return -1;
   11389 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11390 		return -1;
   11391 
   11392 	*data = i2ccmd & 0x00ff;
   11393 
   11394 	return 0;
   11395 }
   11396 
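/*
 * wm_sfp_get_media_type:
 *
 *	Identify the SFP module from its SFF ID and Ethernet compliance
 *	bytes and map it to a WM_MEDIATYPE_* value.
 */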
   11397 static uint32_t
   11398 wm_sfp_get_media_type(struct wm_softc *sc)
   11399 {
   11400 	uint32_t ctrl_ext;
   11401 	uint8_t val = 0;
   11402 	int timeout = 3;
   11403 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11404 	int rv = -1;
   11405 
   11406 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11407 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11408 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11409 	CSR_WRITE_FLUSH(sc);
   11410 
   11411 	/* Read SFP module data */
   11412 	while (timeout) {
   11413 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11414 		if (rv == 0)
   11415 			break;
   11416 		delay(100*1000); /* XXX too big */
   11417 		timeout--;
   11418 	}
   11419 	if (rv != 0)
   11420 		goto out;
   11421 	switch (val) {
   11422 	case SFF_SFP_ID_SFF:
   11423 		aprint_normal_dev(sc->sc_dev,
   11424 		    "Module/Connector soldered to board\n");
   11425 		break;
   11426 	case SFF_SFP_ID_SFP:
   11427 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11428 		break;
   11429 	case SFF_SFP_ID_UNKNOWN:
   11430 		goto out;
   11431 	default:
   11432 		break;
   11433 	}
   11434 
   11435 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11436 	if (rv != 0) {
   11437 		goto out;
   11438 	}
   11439 
   11440 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11441 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11443 		sc->sc_flags |= WM_F_SGMII;
   11444 		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11446 		sc->sc_flags |= WM_F_SGMII;
   11447 		mediatype = WM_MEDIATYPE_SERDES;
   11448 	}
   11449 
   11450 out:
   11451 	/* Restore I2C interface setting */
   11452 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11453 
   11454 	return mediatype;
   11455 }
   11456 
   11457 /*
   11458  * NVM related.
   11459  * Microwire, SPI (w/wo EERD) and Flash.
   11460  */
   11461 
   11462 /* Both spi and uwire */
   11463 
   11464 /*
   11465  * wm_eeprom_sendbits:
   11466  *
   11467  *	Send a series of bits to the EEPROM.
   11468  */
   11469 static void
   11470 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11471 {
   11472 	uint32_t reg;
   11473 	int x;
   11474 
   11475 	reg = CSR_READ(sc, WMREG_EECD);
   11476 
   11477 	for (x = nbits; x > 0; x--) {
   11478 		if (bits & (1U << (x - 1)))
   11479 			reg |= EECD_DI;
   11480 		else
   11481 			reg &= ~EECD_DI;
   11482 		CSR_WRITE(sc, WMREG_EECD, reg);
   11483 		CSR_WRITE_FLUSH(sc);
   11484 		delay(2);
   11485 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11486 		CSR_WRITE_FLUSH(sc);
   11487 		delay(2);
   11488 		CSR_WRITE(sc, WMREG_EECD, reg);
   11489 		CSR_WRITE_FLUSH(sc);
   11490 		delay(2);
   11491 	}
   11492 }
   11493 
   11494 /*
   11495  * wm_eeprom_recvbits:
   11496  *
   11497  *	Receive a series of bits from the EEPROM.
   11498  */
   11499 static void
   11500 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11501 {
   11502 	uint32_t reg, val;
   11503 	int x;
   11504 
   11505 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11506 
   11507 	val = 0;
   11508 	for (x = nbits; x > 0; x--) {
   11509 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11510 		CSR_WRITE_FLUSH(sc);
   11511 		delay(2);
   11512 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11513 			val |= (1U << (x - 1));
   11514 		CSR_WRITE(sc, WMREG_EECD, reg);
   11515 		CSR_WRITE_FLUSH(sc);
   11516 		delay(2);
   11517 	}
   11518 	*valp = val;
   11519 }
   11520 
   11521 /* Microwire */
   11522 
   11523 /*
   11524  * wm_nvm_read_uwire:
   11525  *
   11526  *	Read a word from the EEPROM using the MicroWire protocol.
   11527  */
   11528 static int
   11529 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11530 {
   11531 	uint32_t reg, val;
   11532 	int i;
   11533 
   11534 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11535 		device_xname(sc->sc_dev), __func__));
   11536 
   11537 	if (sc->nvm.acquire(sc) != 0)
   11538 		return -1;
   11539 
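	/*
	 * Each word is fetched with a full Microwire transaction:
	 * raise CS, shift out the 3-bit READ opcode and the word
	 * address, shift in 16 data bits, then drop CS again.
	 */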
   11540 	for (i = 0; i < wordcnt; i++) {
   11541 		/* Clear SK and DI. */
   11542 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11543 		CSR_WRITE(sc, WMREG_EECD, reg);
   11544 
   11545 		/*
   11546 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11547 		 * and Xen.
   11548 		 *
   11549 		 * We use this workaround only for 82540 because qemu's
   11550 		 * e1000 act as 82540.
   11551 		 */
   11552 		if (sc->sc_type == WM_T_82540) {
   11553 			reg |= EECD_SK;
   11554 			CSR_WRITE(sc, WMREG_EECD, reg);
   11555 			reg &= ~EECD_SK;
   11556 			CSR_WRITE(sc, WMREG_EECD, reg);
   11557 			CSR_WRITE_FLUSH(sc);
   11558 			delay(2);
   11559 		}
   11560 		/* XXX: end of workaround */
   11561 
   11562 		/* Set CHIP SELECT. */
   11563 		reg |= EECD_CS;
   11564 		CSR_WRITE(sc, WMREG_EECD, reg);
   11565 		CSR_WRITE_FLUSH(sc);
   11566 		delay(2);
   11567 
   11568 		/* Shift in the READ command. */
   11569 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11570 
   11571 		/* Shift in address. */
   11572 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11573 
   11574 		/* Shift out the data. */
   11575 		wm_eeprom_recvbits(sc, &val, 16);
   11576 		data[i] = val & 0xffff;
   11577 
   11578 		/* Clear CHIP SELECT. */
   11579 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11580 		CSR_WRITE(sc, WMREG_EECD, reg);
   11581 		CSR_WRITE_FLUSH(sc);
   11582 		delay(2);
   11583 	}
   11584 
   11585 	sc->nvm.release(sc);
   11586 	return 0;
   11587 }
   11588 
   11589 /* SPI */
   11590 
   11591 /*
   11592  * Set SPI and FLASH related information from the EECD register.
   11593  * For 82541 and 82547, the word size is taken from EEPROM.
   11594  */
   11595 static int
   11596 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11597 {
   11598 	int size;
   11599 	uint32_t reg;
   11600 	uint16_t data;
   11601 
   11602 	reg = CSR_READ(sc, WMREG_EECD);
   11603 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11604 
   11605 	/* Read the size of NVM from EECD by default */
   11606 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11607 	switch (sc->sc_type) {
   11608 	case WM_T_82541:
   11609 	case WM_T_82541_2:
   11610 	case WM_T_82547:
   11611 	case WM_T_82547_2:
   11612 		/* Set dummy value to access EEPROM */
   11613 		sc->sc_nvm_wordsize = 64;
		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to read EEPROM size\n", __func__);
			return -1;
		}
   11618 		reg = data;
   11619 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11620 		if (size == 0)
   11621 			size = 6; /* 64 word size */
   11622 		else
   11623 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11624 		break;
   11625 	case WM_T_80003:
   11626 	case WM_T_82571:
   11627 	case WM_T_82572:
   11628 	case WM_T_82573: /* SPI case */
   11629 	case WM_T_82574: /* SPI case */
   11630 	case WM_T_82583: /* SPI case */
   11631 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11632 		if (size > 14)
   11633 			size = 14;
   11634 		break;
   11635 	case WM_T_82575:
   11636 	case WM_T_82576:
   11637 	case WM_T_82580:
   11638 	case WM_T_I350:
   11639 	case WM_T_I354:
   11640 	case WM_T_I210:
   11641 	case WM_T_I211:
   11642 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11643 		if (size > 15)
   11644 			size = 15;
   11645 		break;
   11646 	default:
   11647 		aprint_error_dev(sc->sc_dev,
   11648 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11649 		return -1;
   11651 	}
   11652 
   11653 	sc->sc_nvm_wordsize = 1 << size;
   11654 
   11655 	return 0;
   11656 }
   11657 
   11658 /*
   11659  * wm_nvm_ready_spi:
   11660  *
   11661  *	Wait for a SPI EEPROM to be ready for commands.
   11662  */
   11663 static int
   11664 wm_nvm_ready_spi(struct wm_softc *sc)
   11665 {
   11666 	uint32_t val;
   11667 	int usec;
   11668 
   11669 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11670 		device_xname(sc->sc_dev), __func__));
   11671 
   11672 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11673 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11674 		wm_eeprom_recvbits(sc, &val, 8);
   11675 		if ((val & SPI_SR_RDY) == 0)
   11676 			break;
   11677 	}
   11678 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11680 		return -1;
   11681 	}
   11682 	return 0;
   11683 }
   11684 
   11685 /*
   11686  * wm_nvm_read_spi:
   11687  *
 *	Read a word from the EEPROM using the SPI protocol.
   11689  */
   11690 static int
   11691 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11692 {
   11693 	uint32_t reg, val;
   11694 	int i;
   11695 	uint8_t opc;
   11696 	int rv = 0;
   11697 
   11698 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11699 		device_xname(sc->sc_dev), __func__));
   11700 
   11701 	if (sc->nvm.acquire(sc) != 0)
   11702 		return -1;
   11703 
   11704 	/* Clear SK and CS. */
   11705 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11706 	CSR_WRITE(sc, WMREG_EECD, reg);
   11707 	CSR_WRITE_FLUSH(sc);
   11708 	delay(2);
   11709 
   11710 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11711 		goto out;
   11712 
   11713 	/* Toggle CS to flush commands. */
   11714 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11715 	CSR_WRITE_FLUSH(sc);
   11716 	delay(2);
   11717 	CSR_WRITE(sc, WMREG_EECD, reg);
   11718 	CSR_WRITE_FLUSH(sc);
   11719 	delay(2);
   11720 
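	/*
	 * Small SPI EEPROMs with 8-bit addressing carry the ninth
	 * address bit (A8) in the opcode byte, so it must be set for
	 * words at and above byte offset 0x100.
	 */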
   11721 	opc = SPI_OPC_READ;
   11722 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11723 		opc |= SPI_OPC_A8;
   11724 
   11725 	wm_eeprom_sendbits(sc, opc, 8);
   11726 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11727 
   11728 	for (i = 0; i < wordcnt; i++) {
   11729 		wm_eeprom_recvbits(sc, &val, 16);
   11730 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11731 	}
   11732 
   11733 	/* Raise CS and clear SK. */
   11734 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11735 	CSR_WRITE(sc, WMREG_EECD, reg);
   11736 	CSR_WRITE_FLUSH(sc);
   11737 	delay(2);
   11738 
   11739 out:
   11740 	sc->nvm.release(sc);
   11741 	return rv;
   11742 }
   11743 
/* Reading via the EERD register */
   11745 
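/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll until an EERD (read) or EEWR (write) operation completes.
 */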
   11746 static int
   11747 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11748 {
   11749 	uint32_t attempts = 100000;
   11750 	uint32_t i, reg = 0;
   11751 	int32_t done = -1;
   11752 
   11753 	for (i = 0; i < attempts; i++) {
   11754 		reg = CSR_READ(sc, rw);
   11755 
   11756 		if (reg & EERD_DONE) {
   11757 			done = 0;
   11758 			break;
   11759 		}
   11760 		delay(5);
   11761 	}
   11762 
   11763 	return done;
   11764 }
   11765 
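/*
 * wm_nvm_read_eerd:
 *
 *	Read words from the EEPROM using the EERD register.
 */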
   11766 static int
   11767 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11768     uint16_t *data)
   11769 {
   11770 	int i, eerd = 0;
   11771 	int rv = 0;
   11772 
   11773 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11774 		device_xname(sc->sc_dev), __func__));
   11775 
   11776 	if (sc->nvm.acquire(sc) != 0)
   11777 		return -1;
   11778 
   11779 	for (i = 0; i < wordcnt; i++) {
   11780 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11781 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11782 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11783 		if (rv != 0) {
   11784 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11785 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11786 			break;
   11787 		}
   11788 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11789 	}
   11790 
   11791 	sc->nvm.release(sc);
   11792 	return rv;
   11793 }
   11794 
   11795 /* Flash */
   11796 
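/*
 * wm_nvm_valid_bank_detect_ich8lan:
 *
 *	Detect which of the two flash banks holds the valid NVM image,
 *	using the CTRL_EXT register (PCH_SPT), the EECD SEC1VAL bit
 *	(ICH8/9) or the signature byte stored in each bank.
 */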
   11797 static int
   11798 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11799 {
   11800 	uint32_t eecd;
   11801 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11802 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11803 	uint8_t sig_byte = 0;
   11804 
   11805 	switch (sc->sc_type) {
   11806 	case WM_T_PCH_SPT:
   11807 		/*
   11808 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11809 		 * sector valid bits from the NVM.
   11810 		 */
   11811 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11812 		if ((*bank == 0) || (*bank == 1)) {
   11813 			aprint_error_dev(sc->sc_dev,
   11814 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11815 				*bank);
   11816 			return -1;
   11817 		} else {
   11818 			*bank = *bank - 2;
   11819 			return 0;
   11820 		}
   11821 	case WM_T_ICH8:
   11822 	case WM_T_ICH9:
   11823 		eecd = CSR_READ(sc, WMREG_EECD);
   11824 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11825 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11826 			return 0;
   11827 		}
   11828 		/* FALLTHROUGH */
   11829 	default:
   11830 		/* Default to 0 */
   11831 		*bank = 0;
   11832 
   11833 		/* Check bank 0 */
   11834 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11835 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11836 			*bank = 0;
   11837 			return 0;
   11838 		}
   11839 
   11840 		/* Check bank 1 */
   11841 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11842 		    &sig_byte);
   11843 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11844 			*bank = 1;
   11845 			return 0;
   11846 		}
   11847 	}
   11848 
   11849 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11850 		device_xname(sc->sc_dev)));
   11851 	return -1;
   11852 }
   11853 
   11854 /******************************************************************************
   11855  * This function does initial flash setup so that a new read/write/erase cycle
   11856  * can be started.
   11857  *
   11858  * sc - The pointer to the hw structure
   11859  ****************************************************************************/
   11860 static int32_t
   11861 wm_ich8_cycle_init(struct wm_softc *sc)
   11862 {
   11863 	uint16_t hsfsts;
   11864 	int32_t error = 1;
   11865 	int32_t i     = 0;
   11866 
   11867 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11868 
	/* Check the Flash Descriptor Valid bit in Hw status */
   11870 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11871 		return error;
   11872 	}
   11873 
	/* Clear FCERR and DAEL in Hw status by writing 1s */
   11876 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11877 
   11878 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11879 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used as an indication of whether a cycle
	 * is in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the cycle-in-
	 * progress bit so that accesses to those bits by two threads are
	 * serialized, or some way to keep two threads from starting a
	 * cycle at the same time.
	 */
   11890 
   11891 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11892 		/*
   11893 		 * There is no cycle running at present, so we can start a
   11894 		 * cycle
   11895 		 */
   11896 
   11897 		/* Begin by setting Flash Cycle Done. */
   11898 		hsfsts |= HSFSTS_DONE;
   11899 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11900 		error = 0;
   11901 	} else {
		/*
		 * Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up.
		 */
   11906 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11907 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11908 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11909 				error = 0;
   11910 				break;
   11911 			}
   11912 			delay(1);
   11913 		}
   11914 		if (error == 0) {
			/*
			 * The previous cycle ended in time; now set the
			 * Flash Cycle Done bit.
			 */
   11919 			hsfsts |= HSFSTS_DONE;
   11920 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11921 		}
   11922 	}
   11923 	return error;
   11924 }
   11925 
   11926 /******************************************************************************
   11927  * This function starts a flash cycle and waits for its completion
   11928  *
   11929  * sc - The pointer to the hw structure
   11930  ****************************************************************************/
   11931 static int32_t
   11932 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11933 {
   11934 	uint16_t hsflctl;
   11935 	uint16_t hsfsts;
   11936 	int32_t error = 1;
   11937 	uint32_t i = 0;
   11938 
   11939 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11940 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11941 	hsflctl |= HSFCTL_GO;
   11942 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11943 
   11944 	/* Wait till FDONE bit is set to 1 */
   11945 	do {
   11946 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11947 		if (hsfsts & HSFSTS_DONE)
   11948 			break;
   11949 		delay(1);
   11950 		i++;
   11951 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11953 		error = 0;
   11954 
   11955 	return error;
   11956 }
   11957 
   11958 /******************************************************************************
   11959  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11960  *
   11961  * sc - The pointer to the hw structure
   11962  * index - The index of the byte or word to read.
   11963  * size - Size of data to read, 1=byte 2=word, 4=dword
   11964  * data - Pointer to the word to store the value read.
   11965  *****************************************************************************/
   11966 static int32_t
   11967 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11968     uint32_t size, uint32_t *data)
   11969 {
   11970 	uint16_t hsfsts;
   11971 	uint16_t hsflctl;
   11972 	uint32_t flash_linear_address;
   11973 	uint32_t flash_data = 0;
   11974 	int32_t error = 1;
   11975 	int32_t count = 0;
   11976 
	if (size < 1 || size > 4 || data == NULL ||
   11978 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11979 		return error;
   11980 
   11981 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11982 	    sc->sc_ich8_flash_base;
   11983 
   11984 	do {
   11985 		delay(1);
   11986 		/* Steps */
   11987 		error = wm_ich8_cycle_init(sc);
   11988 		if (error)
   11989 			break;
   11990 
   11991 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/*
		 * The byte count field holds size - 1:
		 * 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes (dword).
		 */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
		    & HSFCTL_BCOUNT_MASK;
   11995 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11996 		if (sc->sc_type == WM_T_PCH_SPT) {
   11997 			/*
   11998 			 * In SPT, This register is in Lan memory space, not
   11999 			 * flash. Therefore, only 32 bit access is supported.
   12000 			 */
   12001 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12002 			    (uint32_t)hsflctl);
   12003 		} else
   12004 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12005 
   12006 		/*
   12007 		 * Write the last 24 bits of index into Flash Linear address
   12008 		 * field in Flash Address
   12009 		 */
		/* TODO: maybe check the index against the size of the flash */
   12011 
   12012 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12013 
   12014 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12015 
		/*
		 * If FCERR is set, clear it and retry the whole sequence up
		 * to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read the
		 * value from Flash Data0, least significant byte first.
		 */
   12022 		if (error == 0) {
   12023 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12024 			if (size == 1)
   12025 				*data = (uint8_t)(flash_data & 0x000000FF);
   12026 			else if (size == 2)
   12027 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12028 			else if (size == 4)
   12029 				*data = (uint32_t)flash_data;
   12030 			break;
   12031 		} else {
   12032 			/*
   12033 			 * If we've gotten here, then things are probably
   12034 			 * completely hosed, but if the error condition is
   12035 			 * detected, it won't hurt to give it another try...
   12036 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12037 			 */
   12038 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12039 			if (hsfsts & HSFSTS_ERR) {
   12040 				/* Repeat for some time before giving up. */
   12041 				continue;
   12042 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12043 				break;
   12044 		}
   12045 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12046 
   12047 	return error;
   12048 }
   12049 
   12050 /******************************************************************************
   12051  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12052  *
   12053  * sc - pointer to wm_hw structure
   12054  * index - The index of the byte to read.
   12055  * data - Pointer to a byte to store the value read.
   12056  *****************************************************************************/
   12057 static int32_t
   12058 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12059 {
   12060 	int32_t status;
   12061 	uint32_t word = 0;
   12062 
   12063 	status = wm_read_ich8_data(sc, index, 1, &word);
   12064 	if (status == 0)
   12065 		*data = (uint8_t)word;
   12066 	else
   12067 		*data = 0;
   12068 
   12069 	return status;
   12070 }
   12071 
   12072 /******************************************************************************
   12073  * Reads a word from the NVM using the ICH8 flash access registers.
   12074  *
   12075  * sc - pointer to wm_hw structure
   12076  * index - The starting byte index of the word to read.
   12077  * data - Pointer to a word to store the value read.
   12078  *****************************************************************************/
   12079 static int32_t
   12080 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12081 {
   12082 	int32_t status;
   12083 	uint32_t word = 0;
   12084 
   12085 	status = wm_read_ich8_data(sc, index, 2, &word);
   12086 	if (status == 0)
   12087 		*data = (uint16_t)word;
   12088 	else
   12089 		*data = 0;
   12090 
   12091 	return status;
   12092 }
   12093 
   12094 /******************************************************************************
   12095  * Reads a dword from the NVM using the ICH8 flash access registers.
   12096  *
   12097  * sc - pointer to wm_hw structure
   12098  * index - The starting byte index of the word to read.
   12099  * data - Pointer to a word to store the value read.
   12100  *****************************************************************************/
   12101 static int32_t
   12102 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12103 {
   12104 	int32_t status;
   12105 
   12106 	status = wm_read_ich8_data(sc, index, 4, data);
   12107 	return status;
   12108 }
   12109 
   12110 /******************************************************************************
   12111  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12112  * register.
   12113  *
   12114  * sc - Struct containing variables accessed by shared code
   12115  * offset - offset of word in the EEPROM to read
   12116  * data - word read from the EEPROM
   12117  * words - number of words to read
   12118  *****************************************************************************/
   12119 static int
   12120 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12121 {
   12122 	int32_t  rv = 0;
   12123 	uint32_t flash_bank = 0;
   12124 	uint32_t act_offset = 0;
   12125 	uint32_t bank_offset = 0;
   12126 	uint16_t word = 0;
   12127 	uint16_t i = 0;
   12128 
   12129 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12130 		device_xname(sc->sc_dev), __func__));
   12131 
   12132 	if (sc->nvm.acquire(sc) != 0)
   12133 		return -1;
   12134 
   12135 	/*
   12136 	 * We need to know which is the valid flash bank.  In the event
   12137 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12138 	 * managing flash_bank.  So it cannot be trusted and needs
   12139 	 * to be updated with each read.
   12140 	 */
   12141 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12142 	if (rv) {
   12143 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12144 			device_xname(sc->sc_dev)));
   12145 		flash_bank = 0;
   12146 	}
   12147 
   12148 	/*
   12149 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12150 	 * size
   12151 	 */
   12152 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12153 
   12154 	for (i = 0; i < words; i++) {
   12155 		/* The NVM part needs a byte offset, hence * 2 */
   12156 		act_offset = bank_offset + ((offset + i) * 2);
   12157 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12158 		if (rv) {
   12159 			aprint_error_dev(sc->sc_dev,
   12160 			    "%s: failed to read NVM\n", __func__);
   12161 			break;
   12162 		}
   12163 		data[i] = word;
   12164 	}
   12165 
   12166 	sc->nvm.release(sc);
   12167 	return rv;
   12168 }
   12169 
   12170 /******************************************************************************
   12171  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12172  * register.
   12173  *
   12174  * sc - Struct containing variables accessed by shared code
   12175  * offset - offset of word in the EEPROM to read
   12176  * data - word read from the EEPROM
   12177  * words - number of words to read
   12178  *****************************************************************************/
   12179 static int
   12180 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12181 {
   12182 	int32_t  rv = 0;
   12183 	uint32_t flash_bank = 0;
   12184 	uint32_t act_offset = 0;
   12185 	uint32_t bank_offset = 0;
   12186 	uint32_t dword = 0;
   12187 	uint16_t i = 0;
   12188 
   12189 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12190 		device_xname(sc->sc_dev), __func__));
   12191 
   12192 	if (sc->nvm.acquire(sc) != 0)
   12193 		return -1;
   12194 
   12195 	/*
   12196 	 * We need to know which is the valid flash bank.  In the event
   12197 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12198 	 * managing flash_bank.  So it cannot be trusted and needs
   12199 	 * to be updated with each read.
   12200 	 */
   12201 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12202 	if (rv) {
   12203 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12204 			device_xname(sc->sc_dev)));
   12205 		flash_bank = 0;
   12206 	}
   12207 
   12208 	/*
   12209 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12210 	 * size
   12211 	 */
   12212 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12213 
   12214 	for (i = 0; i < words; i++) {
   12215 		/* The NVM part needs a byte offset, hence * 2 */
   12216 		act_offset = bank_offset + ((offset + i) * 2);
   12217 		/* but we must read dword aligned, so mask ... */
   12218 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12219 		if (rv) {
   12220 			aprint_error_dev(sc->sc_dev,
   12221 			    "%s: failed to read NVM\n", __func__);
   12222 			break;
   12223 		}
   12224 		/* ... and pick out low or high word */
   12225 		if ((act_offset & 0x2) == 0)
   12226 			data[i] = (uint16_t)(dword & 0xFFFF);
   12227 		else
   12228 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12229 	}
   12230 
   12231 	sc->nvm.release(sc);
   12232 	return rv;
   12233 }
   12234 
   12235 /* iNVM */
   12236 
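/*
 * wm_nvm_read_word_invm:
 *
 *	Scan the iNVM (the I210/I211 integrated NVM) for a word autoload
 *	record matching the given word address.
 */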
   12237 static int
   12238 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12239 {
	int32_t  rv = -1;
   12241 	uint32_t invm_dword;
   12242 	uint16_t i;
   12243 	uint8_t record_type, word_address;
   12244 
   12245 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12246 		device_xname(sc->sc_dev), __func__));
   12247 
   12248 	for (i = 0; i < INVM_SIZE; i++) {
   12249 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12250 		/* Get record type */
   12251 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12252 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12253 			break;
   12254 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12255 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12256 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12257 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12258 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12259 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12260 			if (word_address == address) {
   12261 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12262 				rv = 0;
   12263 				break;
   12264 			}
   12265 		}
   12266 	}
   12267 
   12268 	return rv;
   12269 }
   12270 
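/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM, substituting documented default values
 *	for well-known words that are not programmed.
 */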
   12271 static int
   12272 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12273 {
   12274 	int rv = 0;
   12275 	int i;
   12276 
   12277 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12278 		device_xname(sc->sc_dev), __func__));
   12279 
   12280 	if (sc->nvm.acquire(sc) != 0)
   12281 		return -1;
   12282 
   12283 	for (i = 0; i < words; i++) {
   12284 		switch (offset + i) {
   12285 		case NVM_OFF_MACADDR:
   12286 		case NVM_OFF_MACADDR1:
   12287 		case NVM_OFF_MACADDR2:
   12288 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12289 			if (rv != 0) {
   12290 				data[i] = 0xffff;
   12291 				rv = -1;
   12292 			}
   12293 			break;
   12294 		case NVM_OFF_CFG2:
   12295 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12296 			if (rv != 0) {
   12297 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12298 				rv = 0;
   12299 			}
   12300 			break;
   12301 		case NVM_OFF_CFG4:
   12302 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12303 			if (rv != 0) {
   12304 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12305 				rv = 0;
   12306 			}
   12307 			break;
   12308 		case NVM_OFF_LED_1_CFG:
   12309 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12310 			if (rv != 0) {
   12311 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12312 				rv = 0;
   12313 			}
   12314 			break;
   12315 		case NVM_OFF_LED_0_2_CFG:
   12316 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12317 			if (rv != 0) {
   12318 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12319 				rv = 0;
   12320 			}
   12321 			break;
   12322 		case NVM_OFF_ID_LED_SETTINGS:
   12323 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12324 			if (rv != 0) {
   12325 				*data = ID_LED_RESERVED_FFFF;
   12326 				rv = 0;
   12327 			}
   12328 			break;
   12329 		default:
   12330 			DPRINTF(WM_DEBUG_NVM,
   12331 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12332 			*data = NVM_RESERVED_WORD;
   12333 			break;
   12334 		}
   12335 	}
   12336 
   12337 	sc->nvm.release(sc);
   12338 	return rv;
   12339 }
   12340 
/* Locking, NVM type detection, checksum validation, version and read */
   12342 
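/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM; return 0 if it is
 *	Flash (82573/82574/82583 only).
 */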
   12343 static int
   12344 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12345 {
   12346 	uint32_t eecd = 0;
   12347 
   12348 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12349 	    || sc->sc_type == WM_T_82583) {
   12350 		eecd = CSR_READ(sc, WMREG_EECD);
   12351 
   12352 		/* Isolate bits 15 & 16 */
   12353 		eecd = ((eecd >> 15) & 0x03);
   12354 
   12355 		/* If both bits are set, device is Flash type */
   12356 		if (eecd == 0x03)
   12357 			return 0;
   12358 	}
   12359 	return 1;
   12360 }
   12361 
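/*
 * wm_nvm_get_flash_presence_i210:
 *
 *	Return 1 if an external Flash is detected on I210/I211, else 0.
 */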
   12362 static int
   12363 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12364 {
   12365 	uint32_t eec;
   12366 
   12367 	eec = CSR_READ(sc, WMREG_EEC);
   12368 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12369 		return 1;
   12370 
   12371 	return 0;
   12372 }
   12373 
   12374 /*
   12375  * wm_nvm_validate_checksum
   12376  *
 * The checksum is defined as the sum of the first 64 (16 bit) words,
 * which must add up to NVM_CHECKSUM.
   12378  */
   12379 static int
   12380 wm_nvm_validate_checksum(struct wm_softc *sc)
   12381 {
   12382 	uint16_t checksum;
   12383 	uint16_t eeprom_data;
   12384 #ifdef WM_DEBUG
   12385 	uint16_t csum_wordaddr, valid_checksum;
   12386 #endif
   12387 	int i;
   12388 
   12389 	checksum = 0;
   12390 
   12391 	/* Don't check for I211 */
   12392 	if (sc->sc_type == WM_T_I211)
   12393 		return 0;
   12394 
   12395 #ifdef WM_DEBUG
   12396 	if (sc->sc_type == WM_T_PCH_LPT) {
   12397 		csum_wordaddr = NVM_OFF_COMPAT;
   12398 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12399 	} else {
   12400 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12401 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12402 	}
   12403 
   12404 	/* Dump EEPROM image for debug */
   12405 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12406 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12407 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12408 		/* XXX PCH_SPT? */
   12409 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12410 		if ((eeprom_data & valid_checksum) == 0) {
   12411 			DPRINTF(WM_DEBUG_NVM,
   12412 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12413 				device_xname(sc->sc_dev), eeprom_data,
   12414 				    valid_checksum));
   12415 		}
   12416 	}
   12417 
   12418 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12419 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12420 		for (i = 0; i < NVM_SIZE; i++) {
   12421 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12422 				printf("XXXX ");
   12423 			else
   12424 				printf("%04hx ", eeprom_data);
   12425 			if (i % 8 == 7)
   12426 				printf("\n");
   12427 		}
   12428 	}
   12429 
   12430 #endif /* WM_DEBUG */
   12431 
   12432 	for (i = 0; i < NVM_SIZE; i++) {
   12433 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12434 			return 1;
   12435 		checksum += eeprom_data;
   12436 	}
   12437 
   12438 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12439 #ifdef WM_DEBUG
   12440 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12441 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12442 #endif
   12443 	}
   12444 
   12445 	return 0;
   12446 }
   12447 
   12448 static void
   12449 wm_nvm_version_invm(struct wm_softc *sc)
   12450 {
   12451 	uint32_t dword;
   12452 
   12453 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the documentation
	 * describes.  Perhaps it's not perfect, though...
   12457 	 *
   12458 	 * Example:
   12459 	 *
   12460 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12461 	 */
   12462 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12463 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12464 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12465 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12466 }
   12467 
   12468 static void
   12469 wm_nvm_version(struct wm_softc *sc)
   12470 {
   12471 	uint16_t major, minor, build, patch;
   12472 	uint16_t uid0, uid1;
   12473 	uint16_t nvm_data;
   12474 	uint16_t off;
   12475 	bool check_version = false;
   12476 	bool check_optionrom = false;
   12477 	bool have_build = false;
   12478 	bool have_uid = true;
   12479 
   12480 	/*
   12481 	 * Version format:
   12482 	 *
   12483 	 * XYYZ
   12484 	 * X0YZ
   12485 	 * X0YY
   12486 	 *
   12487 	 * Example:
   12488 	 *
   12489 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12490 	 *	82571	0x50a6	5.10.6?
   12491 	 *	82572	0x506a	5.6.10?
   12492 	 *	82572EI	0x5069	5.6.9?
   12493 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12494 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12496 	 */
   12497 
   12498 	/*
   12499 	 * XXX
	 * Qemu's e1000e emulation (82574L) has an SPI EEPROM of only 64
	 * words.  I've never seen real 82574 hardware with such a small
	 * SPI ROM.
   12502 	 */
   12503 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12504 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12505 		have_uid = false;
   12506 
   12507 	switch (sc->sc_type) {
   12508 	case WM_T_82571:
   12509 	case WM_T_82572:
   12510 	case WM_T_82574:
   12511 	case WM_T_82583:
   12512 		check_version = true;
   12513 		check_optionrom = true;
   12514 		have_build = true;
   12515 		break;
   12516 	case WM_T_82575:
   12517 	case WM_T_82576:
   12518 	case WM_T_82580:
   12519 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12520 			check_version = true;
   12521 		break;
   12522 	case WM_T_I211:
   12523 		wm_nvm_version_invm(sc);
   12524 		have_uid = false;
   12525 		goto printver;
   12526 	case WM_T_I210:
   12527 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12528 			wm_nvm_version_invm(sc);
   12529 			have_uid = false;
   12530 			goto printver;
   12531 		}
   12532 		/* FALLTHROUGH */
   12533 	case WM_T_I350:
   12534 	case WM_T_I354:
   12535 		check_version = true;
   12536 		check_optionrom = true;
   12537 		break;
   12538 	default:
   12539 		return;
   12540 	}
   12541 	if (check_version
   12542 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12543 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12544 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12545 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12546 			build = nvm_data & NVM_BUILD_MASK;
   12547 			have_build = true;
   12548 		} else
   12549 			minor = nvm_data & 0x00ff;
   12550 
   12551 		/* Decimal */
   12552 		minor = (minor / 16) * 10 + (minor % 16);
   12553 		sc->sc_nvm_ver_major = major;
   12554 		sc->sc_nvm_ver_minor = minor;
   12555 
   12556 printver:
   12557 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12558 		    sc->sc_nvm_ver_minor);
   12559 		if (have_build) {
   12560 			sc->sc_nvm_ver_build = build;
   12561 			aprint_verbose(".%d", build);
   12562 		}
   12563 	}
   12564 
	/* Assume the Option ROM area is above NVM_SIZE */
   12566 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12567 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12568 		/* Option ROM Version */
   12569 		if ((off != 0x0000) && (off != 0xffff)) {
   12570 			int rv;
   12571 
   12572 			off += NVM_COMBO_VER_OFF;
   12573 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12574 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12575 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12576 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12577 				/* 16bits */
   12578 				major = uid0 >> 8;
   12579 				build = (uid0 << 8) | (uid1 >> 8);
   12580 				patch = uid1 & 0x00ff;
   12581 				aprint_verbose(", option ROM Version %d.%d.%d",
   12582 				    major, build, patch);
   12583 			}
   12584 		}
   12585 	}
   12586 
   12587 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12588 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12589 }
   12590 
   12591 /*
   12592  * wm_nvm_read:
   12593  *
   12594  *	Read data from the serial EEPROM.
   12595  */
   12596 static int
   12597 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12598 {
   12599 	int rv;
   12600 
   12601 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12602 		device_xname(sc->sc_dev), __func__));
   12603 
   12604 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12605 		return -1;
   12606 
   12607 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12608 
   12609 	return rv;
   12610 }
   12611 
/*
 * Hardware semaphores.
 * Very complex; each hardware family has its own acquire/release pair.
 */
   12616 
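/* No-op acquire/release for devices that need no NVM/PHY locking. */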
   12617 static int
   12618 wm_get_null(struct wm_softc *sc)
   12619 {
   12620 
   12621 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12622 		device_xname(sc->sc_dev), __func__));
   12623 	return 0;
   12624 }
   12625 
   12626 static void
   12627 wm_put_null(struct wm_softc *sc)
   12628 {
   12629 
   12630 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12631 		device_xname(sc->sc_dev), __func__));
   12632 	return;
   12633 }
   12634 
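/*
 * wm_get_eecd:
 *
 *	Request direct EEPROM access via the EECD register and wait for
 *	the grant bit.
 */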
   12635 static int
   12636 wm_get_eecd(struct wm_softc *sc)
   12637 {
   12638 	uint32_t reg;
   12639 	int x;
   12640 
   12641 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12642 		device_xname(sc->sc_dev), __func__));
   12643 
   12644 	reg = CSR_READ(sc, WMREG_EECD);
   12645 
   12646 	/* Request EEPROM access. */
   12647 	reg |= EECD_EE_REQ;
   12648 	CSR_WRITE(sc, WMREG_EECD, reg);
   12649 
   12650 	/* ..and wait for it to be granted. */
   12651 	for (x = 0; x < 1000; x++) {
   12652 		reg = CSR_READ(sc, WMREG_EECD);
   12653 		if (reg & EECD_EE_GNT)
   12654 			break;
   12655 		delay(5);
   12656 	}
   12657 	if ((reg & EECD_EE_GNT) == 0) {
   12658 		aprint_error_dev(sc->sc_dev,
   12659 		    "could not acquire EEPROM GNT\n");
   12660 		reg &= ~EECD_EE_REQ;
   12661 		CSR_WRITE(sc, WMREG_EECD, reg);
   12662 		return -1;
   12663 	}
   12664 
   12665 	return 0;
   12666 }
   12667 
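/* Bit-bang helpers to raise and lower the EEPROM clock (SK) line. */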
   12668 static void
   12669 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12670 {
   12671 
   12672 	*eecd |= EECD_SK;
   12673 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12674 	CSR_WRITE_FLUSH(sc);
   12675 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12676 		delay(1);
   12677 	else
   12678 		delay(50);
   12679 }
   12680 
   12681 static void
   12682 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12683 {
   12684 
   12685 	*eecd &= ~EECD_SK;
   12686 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12687 	CSR_WRITE_FLUSH(sc);
   12688 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12689 		delay(1);
   12690 	else
   12691 		delay(50);
   12692 }
   12693 
   12694 static void
   12695 wm_put_eecd(struct wm_softc *sc)
   12696 {
   12697 	uint32_t reg;
   12698 
   12699 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12700 		device_xname(sc->sc_dev), __func__));
   12701 
   12702 	/* Stop nvm */
   12703 	reg = CSR_READ(sc, WMREG_EECD);
   12704 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12705 		/* Pull CS high */
   12706 		reg |= EECD_CS;
   12707 		wm_nvm_eec_clock_lower(sc, &reg);
   12708 	} else {
   12709 		/* CS on Microwire is active-high */
   12710 		reg &= ~(EECD_CS | EECD_DI);
   12711 		CSR_WRITE(sc, WMREG_EECD, reg);
   12712 		wm_nvm_eec_clock_raise(sc, &reg);
   12713 		wm_nvm_eec_clock_lower(sc, &reg);
   12714 	}
   12715 
   12716 	reg = CSR_READ(sc, WMREG_EECD);
   12717 	reg &= ~EECD_EE_REQ;
   12718 	CSR_WRITE(sc, WMREG_EECD, reg);
   12719 
   12720 	return;
   12721 }
   12722 
   12723 /*
   12724  * Get hardware semaphore.
   12725  * Same as e1000_get_hw_semaphore_generic()
   12726  */
   12727 static int
   12728 wm_get_swsm_semaphore(struct wm_softc *sc)
   12729 {
   12730 	int32_t timeout;
   12731 	uint32_t swsm;
   12732 
   12733 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12734 		device_xname(sc->sc_dev), __func__));
   12735 	KASSERT(sc->sc_nvm_wordsize > 0);
   12736 
   12737 retry:
   12738 	/* Get the SW semaphore. */
   12739 	timeout = sc->sc_nvm_wordsize + 1;
   12740 	while (timeout) {
   12741 		swsm = CSR_READ(sc, WMREG_SWSM);
   12742 
   12743 		if ((swsm & SWSM_SMBI) == 0)
   12744 			break;
   12745 
   12746 		delay(50);
   12747 		timeout--;
   12748 	}
   12749 
   12750 	if (timeout == 0) {
   12751 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12752 			/*
   12753 			 * In rare circumstances, the SW semaphore may already
   12754 			 * be held unintentionally. Clear the semaphore once
   12755 			 * before giving up.
   12756 			 */
   12757 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12758 			wm_put_swsm_semaphore(sc);
   12759 			goto retry;
   12760 		}
   12761 		aprint_error_dev(sc->sc_dev,
   12762 		    "could not acquire SWSM SMBI\n");
   12763 		return 1;
   12764 	}
   12765 
   12766 	/* Get the FW semaphore. */
   12767 	timeout = sc->sc_nvm_wordsize + 1;
   12768 	while (timeout) {
   12769 		swsm = CSR_READ(sc, WMREG_SWSM);
   12770 		swsm |= SWSM_SWESMBI;
   12771 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12772 		/* If we managed to set the bit we got the semaphore. */
   12773 		swsm = CSR_READ(sc, WMREG_SWSM);
   12774 		if (swsm & SWSM_SWESMBI)
   12775 			break;
   12776 
   12777 		delay(50);
   12778 		timeout--;
   12779 	}
   12780 
   12781 	if (timeout == 0) {
   12782 		aprint_error_dev(sc->sc_dev,
   12783 		    "could not acquire SWSM SWESMBI\n");
   12784 		/* Release semaphores */
   12785 		wm_put_swsm_semaphore(sc);
   12786 		return 1;
   12787 	}
   12788 	return 0;
   12789 }
   12790 
   12791 /*
   12792  * Put hardware semaphore.
   12793  * Same as e1000_put_hw_semaphore_generic()
   12794  */
   12795 static void
   12796 wm_put_swsm_semaphore(struct wm_softc *sc)
   12797 {
   12798 	uint32_t swsm;
   12799 
   12800 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12801 		device_xname(sc->sc_dev), __func__));
   12802 
   12803 	swsm = CSR_READ(sc, WMREG_SWSM);
   12804 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12805 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12806 }
   12807 
   12808 /*
   12809  * Get SW/FW semaphore.
   12810  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12811  */
   12812 static int
   12813 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12814 {
   12815 	uint32_t swfw_sync;
   12816 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12817 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout, i;
   12819 
   12820 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12821 		device_xname(sc->sc_dev), __func__));
   12822 
   12823 	if (sc->sc_type == WM_T_80003)
   12824 		timeout = 50;
   12825 	else
   12826 		timeout = 200;
   12827 
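	/*
	 * SW_FW_SYNC holds one software bit and one firmware bit per
	 * resource; the resource is free only when both are clear.
	 */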
	for (i = 0; i < timeout; i++) {
   12829 		if (wm_get_swsm_semaphore(sc)) {
   12830 			aprint_error_dev(sc->sc_dev,
   12831 			    "%s: failed to get semaphore\n",
   12832 			    __func__);
   12833 			return 1;
   12834 		}
   12835 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12836 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12837 			swfw_sync |= swmask;
   12838 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12839 			wm_put_swsm_semaphore(sc);
   12840 			return 0;
   12841 		}
   12842 		wm_put_swsm_semaphore(sc);
   12843 		delay(5000);
   12844 	}
   12845 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12846 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12847 	return 1;
   12848 }
   12849 
   12850 static void
   12851 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12852 {
   12853 	uint32_t swfw_sync;
   12854 
   12855 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12856 		device_xname(sc->sc_dev), __func__));
   12857 
   12858 	while (wm_get_swsm_semaphore(sc) != 0)
   12859 		continue;
   12860 
   12861 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12862 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12863 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12864 
   12865 	wm_put_swsm_semaphore(sc);
   12866 }
   12867 
   12868 static int
   12869 wm_get_nvm_80003(struct wm_softc *sc)
   12870 {
   12871 	int rv;
   12872 
   12873 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12874 		device_xname(sc->sc_dev), __func__));
   12875 
   12876 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12877 		aprint_error_dev(sc->sc_dev,
   12878 		    "%s: failed to get semaphore(SWFW)\n",
   12879 		    __func__);
   12880 		return rv;
   12881 	}
   12882 
   12883 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12884 	    && (rv = wm_get_eecd(sc)) != 0) {
   12885 		aprint_error_dev(sc->sc_dev,
   12886 		    "%s: failed to get semaphore(EECD)\n",
   12887 		    __func__);
   12888 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12889 		return rv;
   12890 	}
   12891 
   12892 	return 0;
   12893 }
   12894 
   12895 static void
   12896 wm_put_nvm_80003(struct wm_softc *sc)
   12897 {
   12898 
   12899 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12900 		device_xname(sc->sc_dev), __func__));
   12901 
   12902 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12903 		wm_put_eecd(sc);
   12904 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12905 }
   12906 
   12907 static int
   12908 wm_get_nvm_82571(struct wm_softc *sc)
   12909 {
   12910 	int rv;
   12911 
   12912 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12913 		device_xname(sc->sc_dev), __func__));
   12914 
   12915 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12916 		return rv;
   12917 
   12918 	switch (sc->sc_type) {
   12919 	case WM_T_82573:
   12920 		break;
   12921 	default:
   12922 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12923 			rv = wm_get_eecd(sc);
   12924 		break;
   12925 	}
   12926 
   12927 	if (rv != 0) {
   12928 		aprint_error_dev(sc->sc_dev,
   12929 		    "%s: failed to get semaphore\n",
   12930 		    __func__);
   12931 		wm_put_swsm_semaphore(sc);
   12932 	}
   12933 
   12934 	return rv;
   12935 }
   12936 
   12937 static void
   12938 wm_put_nvm_82571(struct wm_softc *sc)
   12939 {
   12940 
   12941 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12942 		device_xname(sc->sc_dev), __func__));
   12943 
   12944 	switch (sc->sc_type) {
   12945 	case WM_T_82573:
   12946 		break;
   12947 	default:
   12948 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12949 			wm_put_eecd(sc);
   12950 		break;
   12951 	}
   12952 
   12953 	wm_put_swsm_semaphore(sc);
   12954 }
   12955 
   12956 static int
   12957 wm_get_phy_82575(struct wm_softc *sc)
   12958 {
   12959 
   12960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12961 		device_xname(sc->sc_dev), __func__));
   12962 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12963 }
   12964 
   12965 static void
   12966 wm_put_phy_82575(struct wm_softc *sc)
   12967 {
   12968 
   12969 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12970 		device_xname(sc->sc_dev), __func__));
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12972 }
   12973 
   12974 static int
   12975 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12976 {
   12977 	uint32_t ext_ctrl;
	int timeout;
   12979 
   12980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12981 		device_xname(sc->sc_dev), __func__));
   12982 
   12983 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12984 	for (timeout = 0; timeout < 200; timeout++) {
   12985 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12986 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12987 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12988 
   12989 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12990 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12991 			return 0;
   12992 		delay(5000);
   12993 	}
   12994 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12995 	    device_xname(sc->sc_dev), ext_ctrl);
   12996 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12997 	return 1;
   12998 }
   12999 
   13000 static void
   13001 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13002 {
   13003 	uint32_t ext_ctrl;
   13004 
   13005 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13006 		device_xname(sc->sc_dev), __func__));
   13007 
   13008 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13009 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13010 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13011 
   13012 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13013 }
   13014 
   13015 static int
   13016 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13017 {
   13018 	uint32_t ext_ctrl;
   13019 	int timeout;
   13020 
   13021 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13022 		device_xname(sc->sc_dev), __func__));
   13023 	mutex_enter(sc->sc_ich_phymtx);
   13024 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13025 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13026 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13027 			break;
   13028 		delay(1000);
   13029 	}
   13030 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13031 		printf("%s: SW has already locked the resource\n",
   13032 		    device_xname(sc->sc_dev));
   13033 		goto out;
   13034 	}
   13035 
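          	/* The owner bit is clear; now claim it and verify it sticks. */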
   13036 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13037 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13038 	for (timeout = 0; timeout < 1000; timeout++) {
   13039 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13040 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13041 			break;
   13042 		delay(1000);
   13043 	}
   13044 	if (timeout >= 1000) {
   13045 		printf("%s: failed to acquire semaphore\n",
   13046 		    device_xname(sc->sc_dev));
   13047 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13048 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13049 		goto out;
   13050 	}
   13051 	return 0;
   13052 
   13053 out:
   13054 	mutex_exit(sc->sc_ich_phymtx);
   13055 	return 1;
   13056 }
   13057 
   13058 static void
   13059 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13060 {
   13061 	uint32_t ext_ctrl;
   13062 
   13063 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13064 		device_xname(sc->sc_dev), __func__));
   13065 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13066 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13067 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13068 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13069 	} else {
   13070 		printf("%s: Semaphore unexpectedly released\n",
   13071 		    device_xname(sc->sc_dev));
   13072 	}
   13073 
   13074 	mutex_exit(sc->sc_ich_phymtx);
   13075 }
   13076 
   13077 static int
   13078 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13079 {
   13080 
   13081 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13082 		device_xname(sc->sc_dev), __func__));
   13083 	mutex_enter(sc->sc_ich_nvmmtx);
   13084 
   13085 	return 0;
   13086 }
   13087 
   13088 static void
   13089 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13090 {
   13091 
   13092 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13093 		device_xname(sc->sc_dev), __func__));
   13094 	mutex_exit(sc->sc_ich_nvmmtx);
   13095 }
   13096 
   13097 static int
   13098 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13099 {
   13100 	int i = 0;
   13101 	uint32_t reg;
   13102 
   13103 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13104 		device_xname(sc->sc_dev), __func__));
   13105 
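          	/*
          	 * Claim MDIO ownership in EXTCNFCTR, polling up to
          	 * WM_MDIO_OWNERSHIP_TIMEOUT times with 2ms between tries.
          	 */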
   13106 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13107 	do {
   13108 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13109 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13110 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13111 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13112 			break;
   13113 		delay(2*1000);
   13114 		i++;
   13115 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13116 
   13117 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13118 		wm_put_hw_semaphore_82573(sc);
   13119 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13120 		    device_xname(sc->sc_dev));
   13121 		return -1;
   13122 	}
   13123 
   13124 	return 0;
   13125 }
   13126 
   13127 static void
   13128 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13129 {
   13130 	uint32_t reg;
   13131 
   13132 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13133 		device_xname(sc->sc_dev), __func__));
   13134 
   13135 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13136 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13137 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13138 }
   13139 
   13140 /*
   13141  * Management mode and power management related subroutines.
   13142  * BMC, AMT, suspend/resume and EEE.
   13143  */
   13144 
   13145 #ifdef WM_WOL
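          /*
           * Check whether a management firmware agent (BMC/AMT) is active.
           * Returns 1 if so, 0 otherwise.
           */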
   13146 static int
   13147 wm_check_mng_mode(struct wm_softc *sc)
   13148 {
   13149 	int rv;
   13150 
   13151 	switch (sc->sc_type) {
   13152 	case WM_T_ICH8:
   13153 	case WM_T_ICH9:
   13154 	case WM_T_ICH10:
   13155 	case WM_T_PCH:
   13156 	case WM_T_PCH2:
   13157 	case WM_T_PCH_LPT:
   13158 	case WM_T_PCH_SPT:
   13159 		rv = wm_check_mng_mode_ich8lan(sc);
   13160 		break;
   13161 	case WM_T_82574:
   13162 	case WM_T_82583:
   13163 		rv = wm_check_mng_mode_82574(sc);
   13164 		break;
   13165 	case WM_T_82571:
   13166 	case WM_T_82572:
   13167 	case WM_T_82573:
   13168 	case WM_T_80003:
   13169 		rv = wm_check_mng_mode_generic(sc);
   13170 		break;
   13171 	default:
    13172 		/* nothing to do */
   13173 		rv = 0;
   13174 		break;
   13175 	}
   13176 
   13177 	return rv;
   13178 }
   13179 
   13180 static int
   13181 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13182 {
   13183 	uint32_t fwsm;
   13184 
   13185 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13186 
   13187 	if (((fwsm & FWSM_FW_VALID) != 0)
   13188 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13189 		return 1;
   13190 
   13191 	return 0;
   13192 }
   13193 
   13194 static int
   13195 wm_check_mng_mode_82574(struct wm_softc *sc)
   13196 {
   13197 	uint16_t data;
   13198 
   13199 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13200 
   13201 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13202 		return 1;
   13203 
   13204 	return 0;
   13205 }
   13206 
   13207 static int
   13208 wm_check_mng_mode_generic(struct wm_softc *sc)
   13209 {
   13210 	uint32_t fwsm;
   13211 
   13212 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13213 
   13214 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13215 		return 1;
   13216 
   13217 	return 0;
   13218 }
   13219 #endif /* WM_WOL */
   13220 
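          /*
           * Return 1 if management packets should be passed through to the
           * host, i.e. manageability firmware is present, TCO receive is
           * enabled and the mode bits request pass-through; 0 otherwise.
           */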
   13221 static int
   13222 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13223 {
   13224 	uint32_t manc, fwsm, factps;
   13225 
   13226 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13227 		return 0;
   13228 
   13229 	manc = CSR_READ(sc, WMREG_MANC);
   13230 
   13231 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13232 		device_xname(sc->sc_dev), manc));
   13233 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13234 		return 0;
   13235 
   13236 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13237 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13238 		factps = CSR_READ(sc, WMREG_FACTPS);
   13239 		if (((factps & FACTPS_MNGCG) == 0)
   13240 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13241 			return 1;
    13242 	} else if ((sc->sc_type == WM_T_82574)
          	    || (sc->sc_type == WM_T_82583)) {
   13243 		uint16_t data;
   13244 
   13245 		factps = CSR_READ(sc, WMREG_FACTPS);
   13246 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13247 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13248 			device_xname(sc->sc_dev), factps, data));
   13249 		if (((factps & FACTPS_MNGCG) == 0)
   13250 		    && ((data & NVM_CFG2_MNGM_MASK)
   13251 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13252 			return 1;
   13253 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13254 	    && ((manc & MANC_ASF_EN) == 0))
   13255 		return 1;
   13256 
   13257 	return 0;
   13258 }
   13259 
   13260 static bool
   13261 wm_phy_resetisblocked(struct wm_softc *sc)
   13262 {
   13263 	bool blocked = false;
   13264 	uint32_t reg;
   13265 	int i = 0;
   13266 
   13267 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13268 		device_xname(sc->sc_dev), __func__));
   13269 
   13270 	switch (sc->sc_type) {
   13271 	case WM_T_ICH8:
   13272 	case WM_T_ICH9:
   13273 	case WM_T_ICH10:
   13274 	case WM_T_PCH:
   13275 	case WM_T_PCH2:
   13276 	case WM_T_PCH_LPT:
   13277 	case WM_T_PCH_SPT:
   13278 		do {
   13279 			reg = CSR_READ(sc, WMREG_FWSM);
   13280 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13281 				blocked = true;
   13282 				delay(10*1000);
   13283 				continue;
   13284 			}
   13285 			blocked = false;
   13286 		} while (blocked && (i++ < 30));
    13287 		return blocked;
   13289 	case WM_T_82571:
   13290 	case WM_T_82572:
   13291 	case WM_T_82573:
   13292 	case WM_T_82574:
   13293 	case WM_T_82583:
   13294 	case WM_T_80003:
   13295 		reg = CSR_READ(sc, WMREG_MANC);
   13296 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13297 			return true;
   13298 		else
   13299 			return false;
   13300 		break;
   13301 	default:
   13302 		/* no problem */
   13303 		break;
   13304 	}
   13305 
   13306 	return false;
   13307 }
   13308 
   13309 static void
   13310 wm_get_hw_control(struct wm_softc *sc)
   13311 {
   13312 	uint32_t reg;
   13313 
   13314 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13315 		device_xname(sc->sc_dev), __func__));
   13316 
   13317 	if (sc->sc_type == WM_T_82573) {
   13318 		reg = CSR_READ(sc, WMREG_SWSM);
   13319 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13320 	} else if (sc->sc_type >= WM_T_82571) {
   13321 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13322 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13323 	}
   13324 }
   13325 
   13326 static void
   13327 wm_release_hw_control(struct wm_softc *sc)
   13328 {
   13329 	uint32_t reg;
   13330 
   13331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13332 		device_xname(sc->sc_dev), __func__));
   13333 
   13334 	if (sc->sc_type == WM_T_82573) {
   13335 		reg = CSR_READ(sc, WMREG_SWSM);
   13336 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13337 	} else if (sc->sc_type >= WM_T_82571) {
   13338 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13339 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13340 	}
   13341 }
   13342 
   13343 static void
   13344 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13345 {
   13346 	uint32_t reg;
   13347 
   13348 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13349 		device_xname(sc->sc_dev), __func__));
   13350 
   13351 	if (sc->sc_type < WM_T_PCH2)
   13352 		return;
   13353 
   13354 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13355 
   13356 	if (gate)
   13357 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13358 	else
   13359 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13360 
   13361 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13362 }
   13363 
   13364 static void
   13365 wm_smbustopci(struct wm_softc *sc)
   13366 {
   13367 	uint32_t fwsm, reg;
   13368 	int rv = 0;
   13369 
   13370 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13371 		device_xname(sc->sc_dev), __func__));
   13372 
   13373 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13374 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13375 
   13376 	/* Disable ULP */
   13377 	wm_ulp_disable(sc);
   13378 
   13379 	/* Acquire PHY semaphore */
   13380 	sc->phy.acquire(sc);
   13381 
   13382 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13383 	switch (sc->sc_type) {
   13384 	case WM_T_PCH_LPT:
   13385 	case WM_T_PCH_SPT:
   13386 		if (wm_phy_is_accessible_pchlan(sc))
   13387 			break;
   13388 
   13389 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13390 		reg |= CTRL_EXT_FORCE_SMBUS;
   13391 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13392 #if 0
   13393 		/* XXX Isn't this required??? */
   13394 		CSR_WRITE_FLUSH(sc);
   13395 #endif
   13396 		delay(50 * 1000);
   13397 		/* FALLTHROUGH */
   13398 	case WM_T_PCH2:
   13399 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13400 			break;
   13401 		/* FALLTHROUGH */
   13402 	case WM_T_PCH:
    13403 		/* On PCH, skip the recovery when the ME firmware is valid. */
    13404 		if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
    13405 			break;
   13406 
   13407 		if (wm_phy_resetisblocked(sc) == true) {
   13408 			printf("XXX reset is blocked(3)\n");
   13409 			break;
   13410 		}
   13411 
   13412 		wm_toggle_lanphypc_pch_lpt(sc);
   13413 
   13414 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13415 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13416 				break;
   13417 
   13418 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13419 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13420 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13421 
   13422 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13423 				break;
   13424 			rv = -1;
   13425 		}
   13426 		break;
   13427 	default:
   13428 		break;
   13429 	}
   13430 
   13431 	/* Release semaphore */
   13432 	sc->phy.release(sc);
   13433 
   13434 	if (rv == 0) {
   13435 		if (wm_phy_resetisblocked(sc)) {
   13436 			printf("XXX reset is blocked(4)\n");
   13437 			goto out;
   13438 		}
   13439 		wm_reset_phy(sc);
   13440 		if (wm_phy_resetisblocked(sc))
   13441 			printf("XXX reset is blocked(4)\n");
   13442 	}
   13443 
   13444 out:
   13445 	/*
   13446 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13447 	 */
   13448 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13449 		delay(10*1000);
   13450 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13451 	}
   13452 }
   13453 
   13454 static void
   13455 wm_init_manageability(struct wm_softc *sc)
   13456 {
   13457 
   13458 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13459 		device_xname(sc->sc_dev), __func__));
   13460 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13461 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13462 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13463 
   13464 		/* Disable hardware interception of ARP */
   13465 		manc &= ~MANC_ARP_EN;
   13466 
   13467 		/* Enable receiving management packets to the host */
   13468 		if (sc->sc_type >= WM_T_82571) {
   13469 			manc |= MANC_EN_MNG2HOST;
    13470 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13471 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13472 		}
   13473 
   13474 		CSR_WRITE(sc, WMREG_MANC, manc);
   13475 	}
   13476 }
   13477 
   13478 static void
   13479 wm_release_manageability(struct wm_softc *sc)
   13480 {
   13481 
   13482 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13483 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13484 
   13485 		manc |= MANC_ARP_EN;
   13486 		if (sc->sc_type >= WM_T_82571)
   13487 			manc &= ~MANC_EN_MNG2HOST;
   13488 
   13489 		CSR_WRITE(sc, WMREG_MANC, manc);
   13490 	}
   13491 }
   13492 
   13493 static void
   13494 wm_get_wakeup(struct wm_softc *sc)
   13495 {
   13496 
   13497 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13498 	switch (sc->sc_type) {
   13499 	case WM_T_82573:
   13500 	case WM_T_82583:
   13501 		sc->sc_flags |= WM_F_HAS_AMT;
   13502 		/* FALLTHROUGH */
   13503 	case WM_T_80003:
   13504 	case WM_T_82575:
   13505 	case WM_T_82576:
   13506 	case WM_T_82580:
   13507 	case WM_T_I350:
   13508 	case WM_T_I354:
   13509 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13510 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13511 		/* FALLTHROUGH */
   13512 	case WM_T_82541:
   13513 	case WM_T_82541_2:
   13514 	case WM_T_82547:
   13515 	case WM_T_82547_2:
   13516 	case WM_T_82571:
   13517 	case WM_T_82572:
   13518 	case WM_T_82574:
   13519 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13520 		break;
   13521 	case WM_T_ICH8:
   13522 	case WM_T_ICH9:
   13523 	case WM_T_ICH10:
   13524 	case WM_T_PCH:
   13525 	case WM_T_PCH2:
   13526 	case WM_T_PCH_LPT:
   13527 	case WM_T_PCH_SPT:
   13528 		sc->sc_flags |= WM_F_HAS_AMT;
   13529 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13530 		break;
   13531 	default:
   13532 		break;
   13533 	}
   13534 
   13535 	/* 1: HAS_MANAGE */
   13536 	if (wm_enable_mng_pass_thru(sc) != 0)
   13537 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13538 
    13539 	/*
    13540 	 * Note that the WOL flag is set after the EEPROM settings have
    13541 	 * been reset.
    13542 	 */
   13543 }
   13544 
    13545 /*
    13546  * Unconfigure Ultra Low Power (ULP) mode.
    13547  * Only for PCH_LPT and newer, excluding some I217/I218 devices (see below).
    13548  */
   13549 static void
   13550 wm_ulp_disable(struct wm_softc *sc)
   13551 {
   13552 	uint32_t reg;
   13553 	int i = 0;
   13554 
   13555 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13556 		device_xname(sc->sc_dev), __func__));
    13557 	/* Exclude old devices and certain I217/I218 variants */
   13558 	if ((sc->sc_type < WM_T_PCH_LPT)
   13559 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13560 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13561 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13562 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13563 		return;
   13564 
   13565 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13566 		/* Request ME un-configure ULP mode in the PHY */
   13567 		reg = CSR_READ(sc, WMREG_H2ME);
   13568 		reg &= ~H2ME_ULP;
   13569 		reg |= H2ME_ENFORCE_SETTINGS;
   13570 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13571 
   13572 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13573 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13574 			if (i++ == 30) {
    13575 				printf("%s: %s timed out\n",
          				    device_xname(sc->sc_dev), __func__);
   13576 				return;
   13577 			}
   13578 			delay(10 * 1000);
   13579 		}
   13580 		reg = CSR_READ(sc, WMREG_H2ME);
   13581 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13582 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13583 
   13584 		return;
   13585 	}
   13586 
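          	/* No ME firmware; take the PHY out of ULP mode by hand. */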
   13587 	/* Acquire semaphore */
   13588 	sc->phy.acquire(sc);
   13589 
   13590 	/* Toggle LANPHYPC */
   13591 	wm_toggle_lanphypc_pch_lpt(sc);
   13592 
   13593 	/* Unforce SMBus mode in PHY */
   13594 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13595 	if (reg == 0x0000 || reg == 0xffff) {
   13596 		uint32_t reg2;
   13597 
   13598 		printf("%s: Force SMBus first.\n", __func__);
   13599 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13600 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13601 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13602 		delay(50 * 1000);
   13603 
   13604 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13605 	}
   13606 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13607 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13608 
   13609 	/* Unforce SMBus mode in MAC */
   13610 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13611 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13612 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13613 
   13614 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13615 	reg |= HV_PM_CTRL_K1_ENA;
   13616 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13617 
   13618 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13619 	reg &= ~(I218_ULP_CONFIG1_IND
   13620 	    | I218_ULP_CONFIG1_STICKY_ULP
   13621 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13622 	    | I218_ULP_CONFIG1_WOL_HOST
   13623 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13624 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13625 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13626 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13627 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13628 	reg |= I218_ULP_CONFIG1_START;
   13629 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13630 
   13631 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13632 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13633 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13634 
   13635 	/* Release semaphore */
   13636 	sc->phy.release(sc);
   13637 	wm_gmii_reset(sc);
   13638 	delay(50 * 1000);
   13639 }
   13640 
   13641 /* WOL in the newer chipset interfaces (pchlan) */
   13642 static void
   13643 wm_enable_phy_wakeup(struct wm_softc *sc)
   13644 {
   13645 #if 0
   13646 	uint16_t preg;
   13647 
   13648 	/* Copy MAC RARs to PHY RARs */
   13649 
   13650 	/* Copy MAC MTA to PHY MTA */
   13651 
   13652 	/* Configure PHY Rx Control register */
   13653 
   13654 	/* Enable PHY wakeup in MAC register */
   13655 
   13656 	/* Configure and enable PHY wakeup in PHY registers */
   13657 
   13658 	/* Activate PHY wakeup */
   13659 
   13660 	/* XXX */
   13661 #endif
   13662 }
   13663 
   13664 /* Power down workaround on D3 */
   13665 static void
   13666 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13667 {
   13668 	uint32_t reg;
   13669 	int i;
   13670 
   13671 	for (i = 0; i < 2; i++) {
   13672 		/* Disable link */
   13673 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13674 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13675 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13676 
   13677 		/*
   13678 		 * Call gig speed drop workaround on Gig disable before
   13679 		 * accessing any PHY registers
   13680 		 */
   13681 		if (sc->sc_type == WM_T_ICH8)
   13682 			wm_gig_downshift_workaround_ich8lan(sc);
   13683 
   13684 		/* Write VR power-down enable */
   13685 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13686 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13687 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13688 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13689 
   13690 		/* Read it back and test */
   13691 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13692 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13693 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13694 			break;
   13695 
   13696 		/* Issue PHY reset and repeat at most one more time */
   13697 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13698 	}
   13699 }
   13700 
   13701 static void
   13702 wm_enable_wakeup(struct wm_softc *sc)
   13703 {
   13704 	uint32_t reg, pmreg;
   13705 	pcireg_t pmode;
   13706 
   13707 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13708 		device_xname(sc->sc_dev), __func__));
   13709 
   13710 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13711 		&pmreg, NULL) == 0)
   13712 		return;
   13713 
   13714 	/* Advertise the wakeup capability */
   13715 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13716 	    | CTRL_SWDPIN(3));
   13717 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13718 
   13719 	/* ICH workaround */
   13720 	switch (sc->sc_type) {
   13721 	case WM_T_ICH8:
   13722 	case WM_T_ICH9:
   13723 	case WM_T_ICH10:
   13724 	case WM_T_PCH:
   13725 	case WM_T_PCH2:
   13726 	case WM_T_PCH_LPT:
   13727 	case WM_T_PCH_SPT:
   13728 		/* Disable gig during WOL */
   13729 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13730 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13731 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13732 		if (sc->sc_type == WM_T_PCH)
   13733 			wm_gmii_reset(sc);
   13734 
   13735 		/* Power down workaround */
   13736 		if (sc->sc_phytype == WMPHY_82577) {
   13737 			struct mii_softc *child;
   13738 
   13739 			/* Assume that the PHY is copper */
   13740 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13741 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13742 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13743 				    (768 << 5) | 25, 0x0444); /* magic num */
   13744 		}
   13745 		break;
   13746 	default:
   13747 		break;
   13748 	}
   13749 
   13750 	/* Keep the laser running on fiber adapters */
   13751 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13752 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13753 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13754 		reg |= CTRL_EXT_SWDPIN(3);
   13755 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13756 	}
   13757 
   13758 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13759 #if 0	/* for multicast packets */
   13760 	reg |= WUFC_MC;
   13761 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13762 #endif
   13763 
   13764 	if (sc->sc_type >= WM_T_PCH)
   13765 		wm_enable_phy_wakeup(sc);
   13766 	else {
   13767 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13768 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13769 	}
   13770 
   13771 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13772 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13773 		|| (sc->sc_type == WM_T_PCH2))
   13774 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13775 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13776 
   13777 	/* Request PME */
   13778 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13779 #if 0
   13780 	/* Disable WOL */
   13781 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13782 #else
   13783 	/* For WOL */
   13784 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13785 #endif
   13786 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13787 }
   13788 
    13789 /* LPLU (Low Power Link Up) */
   13790 
   13791 static void
   13792 wm_lplu_d0_disable(struct wm_softc *sc)
   13793 {
   13794 	struct mii_data *mii = &sc->sc_mii;
   13795 	uint32_t reg;
   13796 
   13797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13798 		device_xname(sc->sc_dev), __func__));
   13799 
   13800 	if (sc->sc_phytype == WMPHY_IFE)
   13801 		return;
   13802 
   13803 	switch (sc->sc_type) {
   13804 	case WM_T_82571:
   13805 	case WM_T_82572:
   13806 	case WM_T_82573:
   13807 	case WM_T_82575:
   13808 	case WM_T_82576:
   13809 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13810 		reg &= ~PMR_D0_LPLU;
   13811 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13812 		break;
   13813 	case WM_T_82580:
   13814 	case WM_T_I350:
   13815 	case WM_T_I210:
   13816 	case WM_T_I211:
   13817 		reg = CSR_READ(sc, WMREG_PHPM);
   13818 		reg &= ~PHPM_D0A_LPLU;
   13819 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13820 		break;
   13821 	case WM_T_82574:
   13822 	case WM_T_82583:
   13823 	case WM_T_ICH8:
   13824 	case WM_T_ICH9:
   13825 	case WM_T_ICH10:
   13826 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13827 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13828 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13829 		CSR_WRITE_FLUSH(sc);
   13830 		break;
   13831 	case WM_T_PCH:
   13832 	case WM_T_PCH2:
   13833 	case WM_T_PCH_LPT:
   13834 	case WM_T_PCH_SPT:
   13835 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13836 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13837 		if (wm_phy_resetisblocked(sc) == false)
   13838 			reg |= HV_OEM_BITS_ANEGNOW;
   13839 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13840 		break;
   13841 	default:
   13842 		break;
   13843 	}
   13844 }
   13845 
    13846 /* EEE (Energy Efficient Ethernet) */
   13847 
   13848 static void
   13849 wm_set_eee_i350(struct wm_softc *sc)
   13850 {
   13851 	uint32_t ipcnfg, eeer;
   13852 
   13853 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13854 	eeer = CSR_READ(sc, WMREG_EEER);
   13855 
   13856 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13857 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13858 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13859 		    | EEER_LPI_FC);
   13860 	} else {
   13861 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13862 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13863 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13864 		    | EEER_LPI_FC);
   13865 	}
   13866 
   13867 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13868 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13869 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13870 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13871 }
   13872 
   13873 /*
   13874  * Workarounds (mainly PHY related).
   13875  * Basically, PHY's workarounds are in the PHY drivers.
   13876  */
   13877 
   13878 /* Work-around for 82566 Kumeran PCS lock loss */
   13879 static void
   13880 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13881 {
   13882 	struct mii_data *mii = &sc->sc_mii;
   13883 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13884 	int i;
   13885 	int reg;
   13886 
   13887 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13888 		device_xname(sc->sc_dev), __func__));
   13889 
   13890 	/* If the link is not up, do nothing */
   13891 	if ((status & STATUS_LU) == 0)
   13892 		return;
   13893 
   13894 	/* Nothing to do if the link is other than 1Gbps */
   13895 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13896 		return;
   13897 
   13899 	for (i = 0; i < 10; i++) {
   13900 		/* read twice */
   13901 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13902 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13903 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13904 			goto out;	/* GOOD! */
   13905 
   13906 		/* Reset the PHY */
   13907 		wm_reset_phy(sc);
   13908 		delay(5*1000);
   13909 	}
   13910 
   13911 	/* Disable GigE link negotiation */
   13912 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13913 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13914 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13915 
   13916 	/*
   13917 	 * Call gig speed drop workaround on Gig disable before accessing
   13918 	 * any PHY registers.
   13919 	 */
   13920 	wm_gig_downshift_workaround_ich8lan(sc);
   13921 
   13922 out:
   13923 	return;
   13924 }
   13925 
   13926 /* WOL from S5 stops working */
   13927 static void
   13928 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13929 {
   13930 	uint16_t kmreg;
   13931 
   13932 	/* Only for igp3 */
   13933 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13934 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13935 			return;
   13936 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13937 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13938 			return;
   13939 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13940 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13941 	}
   13942 }
   13943 
   13944 /*
   13945  * Workaround for pch's PHYs
   13946  * XXX should be moved to new PHY driver?
   13947  */
   13948 static void
   13949 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13950 {
   13951 
   13952 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13953 		device_xname(sc->sc_dev), __func__));
   13954 	KASSERT(sc->sc_type == WM_T_PCH);
   13955 
   13956 	if (sc->sc_phytype == WMPHY_82577)
   13957 		wm_set_mdio_slow_mode_hv(sc);
   13958 
   13959 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13960 
   13961 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13962 
   13963 	/* 82578 */
   13964 	if (sc->sc_phytype == WMPHY_82578) {
   13965 		struct mii_softc *child;
   13966 
   13967 		/*
   13968 		 * Return registers to default by doing a soft reset then
   13969 		 * writing 0x3140 to the control register
   13970 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13971 		 */
   13972 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13973 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13974 			PHY_RESET(child);
   13975 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13976 			    0x3140);
   13977 		}
   13978 	}
   13979 
   13980 	/* Select page 0 */
   13981 	sc->phy.acquire(sc);
   13982 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13983 	sc->phy.release(sc);
   13984 
   13985 	/*
   13986 	 * Configure the K1 Si workaround during phy reset assuming there is
   13987 	 * link so that it disables K1 if link is in 1Gbps.
   13988 	 */
   13989 	wm_k1_gig_workaround_hv(sc, 1);
   13990 }
   13991 
   13992 static void
   13993 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13994 {
   13995 
   13996 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13997 		device_xname(sc->sc_dev), __func__));
   13998 	KASSERT(sc->sc_type == WM_T_PCH2);
   13999 
   14000 	wm_set_mdio_slow_mode_hv(sc);
   14001 }
   14002 
   14003 static int
   14004 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14005 {
   14006 	int k1_enable = sc->sc_nvm_k1_enabled;
   14007 
   14008 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14009 		device_xname(sc->sc_dev), __func__));
   14010 
   14011 	if (sc->phy.acquire(sc) != 0)
   14012 		return -1;
   14013 
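          	/*
          	 * K1 (a power state of the MAC-PHY interconnect) must stay off
          	 * while the link is up; the NVM default (sc_nvm_k1_enabled) is
          	 * restored only on link down.
          	 */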
   14014 	if (link) {
   14015 		k1_enable = 0;
   14016 
   14017 		/* Link stall fix for link up */
   14018 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   14019 	} else {
   14020 		/* Link stall fix for link down */
   14021 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   14022 	}
   14023 
   14024 	wm_configure_k1_ich8lan(sc, k1_enable);
   14025 	sc->phy.release(sc);
   14026 
   14027 	return 0;
   14028 }
   14029 
   14030 static void
   14031 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14032 {
   14033 	uint32_t reg;
   14034 
   14035 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14036 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14037 	    reg | HV_KMRN_MDIO_SLOW);
   14038 }
   14039 
   14040 static void
   14041 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14042 {
   14043 	uint32_t ctrl, ctrl_ext, tmp;
   14044 	uint16_t kmreg;
   14045 	int rv;
   14046 
   14047 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14048 	if (rv != 0)
   14049 		return;
   14050 
   14051 	if (k1_enable)
   14052 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14053 	else
   14054 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14055 
   14056 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14057 	if (rv != 0)
   14058 		return;
   14059 
   14060 	delay(20);
   14061 
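          	/*
          	 * Bounce the MAC through a forced-speed state (CTRL_FRCSPD with
          	 * CTRL_EXT_SPD_BYPS) so the new K1 setting takes effect, then
          	 * restore the original CTRL/CTRL_EXT values.
          	 */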
   14062 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14063 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14064 
   14065 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14066 	tmp |= CTRL_FRCSPD;
   14067 
   14068 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14069 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14070 	CSR_WRITE_FLUSH(sc);
   14071 	delay(20);
   14072 
   14073 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14074 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14075 	CSR_WRITE_FLUSH(sc);
   14076 	delay(20);
   14079 }
   14080 
    14081 /* Special case: the 82575 needs manual init ... */
   14082 static void
   14083 wm_reset_init_script_82575(struct wm_softc *sc)
   14084 {
    14085 	/*
    14086 	 * Remark: this is untested code; we have no board without an EEPROM.
    14087 	 * The same setup as mentioned in the FreeBSD driver for the i82575.
    14088 	 */
   14089 
   14090 	/* SerDes configuration via SERDESCTRL */
   14091 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14092 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14093 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14094 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14095 
   14096 	/* CCM configuration via CCMCTL register */
   14097 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14098 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14099 
   14100 	/* PCIe lanes configuration */
   14101 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14102 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14103 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14104 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14105 
   14106 	/* PCIe PLL Configuration */
   14107 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14108 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14109 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14110 }
   14111 
   14112 static void
   14113 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14114 {
   14115 	uint32_t reg;
   14116 	uint16_t nvmword;
   14117 	int rv;
   14118 
   14119 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14120 		return;
   14121 
   14122 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14123 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14124 	if (rv != 0) {
   14125 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14126 		    __func__);
   14127 		return;
   14128 	}
   14129 
   14130 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14131 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14132 		reg |= MDICNFG_DEST;
   14133 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14134 		reg |= MDICNFG_COM_MDIO;
   14135 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14136 }
   14137 
   14138 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14139 
   14140 static bool
   14141 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14142 {
   14143 	int i;
   14144 	uint32_t reg;
   14145 	uint16_t id1, id2;
   14146 
   14147 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14148 		device_xname(sc->sc_dev), __func__));
   14149 	id1 = id2 = 0xffff;
   14150 	for (i = 0; i < 2; i++) {
   14151 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14152 		if (MII_INVALIDID(id1))
   14153 			continue;
   14154 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14155 		if (MII_INVALIDID(id2))
   14156 			continue;
   14157 		break;
   14158 	}
    14159 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    14160 		goto out;
   14162 
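          	/*
          	 * The PHY ID reads failed; on PCH/PCH2, retry once in slow
          	 * MDIO mode.
          	 */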
   14163 	if (sc->sc_type < WM_T_PCH_LPT) {
   14164 		sc->phy.release(sc);
   14165 		wm_set_mdio_slow_mode_hv(sc);
   14166 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14167 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14168 		sc->phy.acquire(sc);
   14169 	}
   14170 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14171 		printf("XXX return with false\n");
   14172 		return false;
   14173 	}
   14174 out:
   14175 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14176 		/* Only unforce SMBus if ME is not active */
   14177 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14178 			/* Unforce SMBus mode in PHY */
   14179 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14180 			    CV_SMB_CTRL);
   14181 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14182 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14183 			    CV_SMB_CTRL, reg);
   14184 
   14185 			/* Unforce SMBus mode in MAC */
   14186 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14187 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14188 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14189 		}
   14190 	}
   14191 	return true;
   14192 }
   14193 
   14194 static void
   14195 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14196 {
   14197 	uint32_t reg;
   14198 	int i;
   14199 
   14200 	/* Set PHY Config Counter to 50msec */
   14201 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14202 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14203 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14204 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14205 
   14206 	/* Toggle LANPHYPC */
   14207 	reg = CSR_READ(sc, WMREG_CTRL);
   14208 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14209 	reg &= ~CTRL_LANPHYPC_VALUE;
   14210 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14211 	CSR_WRITE_FLUSH(sc);
   14212 	delay(1000);
   14213 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14214 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14215 	CSR_WRITE_FLUSH(sc);
   14216 
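          	/*
          	 * Pre-LPT parts wait a fixed 50ms; LPT and newer poll
          	 * CTRL_EXT_LPCD for up to 20 * 5ms and then settle for 30ms.
          	 */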
   14217 	if (sc->sc_type < WM_T_PCH_LPT)
   14218 		delay(50 * 1000);
   14219 	else {
   14220 		i = 20;
   14221 
   14222 		do {
   14223 			delay(5 * 1000);
   14224 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14225 		    && i--);
   14226 
   14227 		delay(30 * 1000);
   14228 	}
   14229 }
   14230 
   14231 static int
   14232 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14233 {
   14234 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14235 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14236 	uint32_t rxa;
   14237 	uint16_t scale = 0, lat_enc = 0;
   14238 	int32_t obff_hwm = 0;
   14239 	int64_t lat_ns, value;
   14240 
   14241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14242 		device_xname(sc->sc_dev), __func__));
   14243 
   14244 	if (link) {
   14245 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14246 		uint32_t status;
   14247 		uint16_t speed;
   14248 		pcireg_t preg;
   14249 
   14250 		status = CSR_READ(sc, WMREG_STATUS);
   14251 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14252 		case STATUS_SPEED_10:
   14253 			speed = 10;
   14254 			break;
   14255 		case STATUS_SPEED_100:
   14256 			speed = 100;
   14257 			break;
   14258 		case STATUS_SPEED_1000:
   14259 			speed = 1000;
   14260 			break;
   14261 		default:
   14262 			device_printf(sc->sc_dev, "Unknown speed "
   14263 			    "(status = %08x)\n", status);
   14264 			return -1;
   14265 		}
   14266 
   14267 		/* Rx Packet Buffer Allocation size (KB) */
   14268 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14269 
   14270 		/*
   14271 		 * Determine the maximum latency tolerated by the device.
   14272 		 *
   14273 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14274 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14275 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14276 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14277 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14278 		 */
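          		/*
          		 * lat_ns below is the time (in ns) to drain the Rx
          		 * buffer, less two max-sized frames, at the current link
          		 * speed: bytes * 8 = bits, and bits * 1000 / speed(Mb/s)
          		 * = ns.
          		 */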
   14279 		lat_ns = ((int64_t)rxa * 1024 -
   14280 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14281 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14282 		if (lat_ns < 0)
   14283 			lat_ns = 0;
   14284 		else
   14285 			lat_ns /= speed;
   14286 		value = lat_ns;
   14287 
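          		/* Each LTR scale step is a factor of 2^5 = 32. */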
   14288 		while (value > LTRV_VALUE) {
    14289 			scale++;
   14290 			value = howmany(value, __BIT(5));
   14291 		}
   14292 		if (scale > LTRV_SCALE_MAX) {
   14293 			printf("%s: Invalid LTR latency scale %d\n",
   14294 			    device_xname(sc->sc_dev), scale);
   14295 			return -1;
   14296 		}
   14297 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14298 
   14299 		/* Determine the maximum latency tolerated by the platform */
   14300 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14301 		    WM_PCI_LTR_CAP_LPT);
   14302 		max_snoop = preg & 0xffff;
   14303 		max_nosnoop = preg >> 16;
   14304 
   14305 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14306 
   14307 		if (lat_enc > max_ltr_enc) {
   14308 			lat_enc = max_ltr_enc;
   14309 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14310 			    * PCI_LTR_SCALETONS(
   14311 				    __SHIFTOUT(lat_enc,
   14312 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14313 		}
   14314 
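          		/*
          		 * Convert the chosen latency back into the amount of Rx
          		 * buffer (in KB, approximating 1024 by 1000) consumed at
          		 * line rate, and subtract it from the allocation to get
          		 * the OBFF high water mark.
          		 */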
   14315 		if (lat_ns) {
   14316 			lat_ns *= speed * 1000;
   14317 			lat_ns /= 8;
   14318 			lat_ns /= 1000000000;
   14319 			obff_hwm = (int32_t)(rxa - lat_ns);
   14320 		}
   14321 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14322 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    14323 			    "(rxa = %d, lat_ns = %d)\n",
    14324 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14325 			return -1;
   14326 		}
   14327 	}
   14328 	/* Snoop and No-Snoop latencies the same */
   14329 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14330 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14331 
   14332 	/* Set OBFF high water mark */
   14333 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14334 	reg |= obff_hwm;
   14335 	CSR_WRITE(sc, WMREG_SVT, reg);
   14336 
   14337 	/* Enable OBFF */
   14338 	reg = CSR_READ(sc, WMREG_SVCR);
   14339 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14340 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14341 
   14342 	return 0;
   14343 }
   14344 
   14345 /*
   14346  * I210 Errata 25 and I211 Errata 10
   14347  * Slow System Clock.
   14348  */
   14349 static void
   14350 wm_pll_workaround_i210(struct wm_softc *sc)
   14351 {
   14352 	uint32_t mdicnfg, wuc;
   14353 	uint32_t reg;
   14354 	pcireg_t pcireg;
   14355 	uint32_t pmreg;
   14356 	uint16_t nvmword, tmp_nvmword;
   14357 	int phyval;
   14358 	bool wa_done = false;
   14359 	int i;
   14360 
   14361 	/* Save WUC and MDICNFG registers */
   14362 	wuc = CSR_READ(sc, WMREG_WUC);
   14363 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14364 
   14365 	reg = mdicnfg & ~MDICNFG_DEST;
   14366 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14367 
   14368 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14369 		nvmword = INVM_DEFAULT_AL;
   14370 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14371 
   14372 	/* Get Power Management cap offset */
   14373 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14374 		&pmreg, NULL) == 0)
   14375 		return;
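          	/*
          	 * Check whether the PHY PLL came up; if it reads back as
          	 * unconfigured, reset the PHY and bounce the chip through
          	 * D3hot with a PLL workaround value in EEARBC, then retry.
          	 */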
   14376 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14377 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14378 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14379 
   14380 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14381 			break; /* OK */
   14382 		}
   14383 
   14384 		wa_done = true;
   14385 		/* Directly reset the internal PHY */
   14386 		reg = CSR_READ(sc, WMREG_CTRL);
   14387 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14388 
   14389 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14390 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14391 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14392 
   14393 		CSR_WRITE(sc, WMREG_WUC, 0);
   14394 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14395 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14396 
   14397 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14398 		    pmreg + PCI_PMCSR);
   14399 		pcireg |= PCI_PMCSR_STATE_D3;
   14400 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14401 		    pmreg + PCI_PMCSR, pcireg);
   14402 		delay(1000);
   14403 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14404 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14405 		    pmreg + PCI_PMCSR, pcireg);
   14406 
   14407 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14408 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14409 
   14410 		/* Restore WUC register */
   14411 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14412 	}
   14413 
   14414 	/* Restore MDICNFG setting */
   14415 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14416 	if (wa_done)
   14417 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14418 }
   14419 
   14420 static void
   14421 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14422 {
   14423 	uint32_t reg;
   14424 
   14425 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14426 		device_xname(sc->sc_dev), __func__));
   14427 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14428 
   14429 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14430 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14431 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14432 
   14433 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14434 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14435 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14436 }
   14437