/*	$NetBSD: if_wm.c,v 1.530 2017/07/25 06:00:17 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.530 2017/07/25 06:00:17 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
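
#if 0
/*
 * Usage sketch (illustrative only, not part of the driver): DPRINTF()
 * takes the printf() arguments as a single parenthesized argument, so
 * calls use double parentheses.  "sc" here is an assumed wm_softc
 * pointer; the flag argument selects which wm_debug bits enable it.
 */
static void
wm_dprintf_example(struct wm_softc *sc)
{

	DPRINTF(WM_DEBUG_LINK,
	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
}
#endif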

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses: one per queue
 * plus one for link status changes.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
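
#if 0
/*
 * Illustrative sketch (not part of the driver): the descriptor and job
 * counts are powers of two, so ring indices wrap with a cheap mask
 * instead of a modulo.  With WM_NTXDESC(txq) == 4096, index 4095 wraps
 * back to 0.  The Rx ring uses the same trick via WM_NEXTRX() and
 * WM_PREVRX() below.
 */
static inline int
wm_nexttx_example(struct wm_txqueue *txq, int x)
{

	return (x + 1) & WM_NTXDESC_MASK(txq);	/* same as WM_NEXTTX() */
}
#endif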

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  A normal-sized packet uses one Rx
 * buffer; a full-sized jumbo packet consumes 5 Rx buffers.  We
 * allocate 256 receive descriptors, each with a 2k buffer (MCLBYTES),
 * which gives us room for about 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
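
#if 0
/*
 * Illustrative sketch (not part of the driver): what the event counter
 * macros expand to.  Macro arguments are not substituted inside string
 * literals, so sizeof("qname##XX##evname") is simply a fixed 18-byte
 * name buffer.  WM_Q_EVCNT_DEFINE(txq, txdw) declares:
 */
char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
struct evcnt txq_ev_txdw;
/*
 * ...and WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
 * snprintf()s the name "txq00txdw" into that buffer before passing it
 * to evcnt_attach_dynamic().
 */
#endif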

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of one tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of one rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
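
#if 0
/*
 * Illustrative sketch (not part of the driver): the op vectors let the
 * chip-specific acquire/release/read routines be selected once at
 * attach time and then called generically, roughly the pattern the
 * wm_nvm_read() wrapper below is expected to follow:
 */
static int
wm_nvmop_example(struct wm_softc *sc, int word, uint16_t *datap)
{
	int rv;

	if ((rv = sc->nvm.acquire(sc)) != 0)
		return rv;
	rv = sc->nvm.read(sc, word, 1, datap);	/* read one NVM word */
	sc->nvm.release(sc);

	return rv;
}
#endif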

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
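
#if 0
/*
 * Usage sketch (illustrative, not part of the driver): softc state is
 * modified with the core lock held.  Note the macros degrade to no-ops
 * while sc_core_lock is still NULL, i.e. early in attach.  "new_flags"
 * is an assumed example value.
 */
static void
wm_core_lock_example(struct wm_softc *sc, int new_flags)
{

	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	sc->sc_if_flags = new_flags;	/* ...modify softc state... */
	WM_CORE_UNLOCK(sc);
}
#endif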

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
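
#if 0
/*
 * Illustrative sketch (not part of the driver): rxq_tailp always points
 * at the m_next slot of the last mbuf in the chain (or at rxq_head when
 * the chain is empty), so appending each buffer of a multi-buffer jumbo
 * frame is O(1) with no list walk:
 */
static void
wm_rxchain_example(struct wm_rxqueue *rxq, struct mbuf *m1, struct mbuf *m2)
{

	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, rxq_len = 0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2 */
}
#endif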

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
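
#if 0
/*
 * Usage sketch (illustrative, not part of the driver): PCI writes are
 * posted, so CSR_WRITE_FLUSH() reads WMREG_STATUS (and discards the
 * value) to force preceding register writes out to the chip:
 */
static void
wm_csr_flush_example(struct wm_softc *sc)
{

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);		/* write is now complete */
}
#endif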

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
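
#if 0
/*
 * Illustrative sketch (not part of the driver): descriptor DMA
 * addresses are programmed into the chip as two 32-bit halves; on a
 * platform with a 32-bit bus_addr_t the high half is constant 0.
 */
static void
wm_cdtxaddr_example(struct wm_txqueue *txq)
{
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* low 32 bits of ring base */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* high 32 bits, or 0 */

	/* lo and hi would go into the Tx descriptor base registers. */
	(void)lo;
	(void)hi;
}
#endif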

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
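         	/* Sentinel entry; wm_lookup() stops at the NULL wmp_name. */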
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
    1502  * Register read/write functions,
    1503  * other than CSR_{READ|WRITE}().
   1504  */
   1505 
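         /*
          * A note on the indirect I/O access used by the two functions
          * below (a description of the mechanism as implemented here):
          * the target register offset is written through the address
          * window at I/O-space offset 0, and the value is then read or
          * written through the data window at offset 4.
          */
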
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
   1523 
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
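         	/* Poll for the ready bit, waiting 5us between reads. */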
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
   1546 
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
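         	/*
         	 * For example, on a 256-descriptor ring, start = 252 with
         	 * num = 8 syncs descriptors 252..255 here and descriptors
         	 * 0..3 in the second bus_dmamap_sync() call below.
         	 */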
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
    1599 	 *
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
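         	/*
         	 * Illustration: with sc_align_tweak == 2, the 14-byte
         	 * Ethernet header ends at buffer offset 16, so the payload
         	 * that follows it starts on a 4-byte boundary.
         	 */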
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
    1644 /* Look up a device in the table of supported devices */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
    1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
    1770 	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
    1812 			/*
    1813 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1814 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1815 			 * that's no problem, because newer chips don't
    1816 			 * have this bug.
    1817 			 *
    1818 			 * The i8254x apparently doesn't respond when the
    1819 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1820 			 * been configured.
    1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1857 	counts[PCI_INTR_TYPE_MSI] = 1;
   1858 	counts[PCI_INTR_TYPE_INTX] = 1;
   1859 	/* overridden by disable flags */
   1860 	if (wm_disable_msi != 0) {
   1861 		counts[PCI_INTR_TYPE_MSI] = 0;
   1862 		if (wm_disable_msix != 0) {
   1863 			max_type = PCI_INTR_TYPE_INTX;
   1864 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1865 		}
   1866 	} else if (wm_disable_msix != 0) {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
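         	/*
         	 * The allocation below falls back MSI-X -> MSI -> INTx:
         	 * if setup of one interrupt type fails, its vectors are
         	 * released, max_type is lowered and we retry from
         	 * alloc_retry.
         	 */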
   1871 alloc_retry:
   1872 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1873 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1874 		return;
   1875 	}
   1876 
   1877 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1878 		error = wm_setup_msix(sc);
   1879 		if (error) {
   1880 			pci_intr_release(pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSIX]);
   1882 
   1883 			/* Setup for MSI: Disable MSI-X */
   1884 			max_type = PCI_INTR_TYPE_MSI;
   1885 			counts[PCI_INTR_TYPE_MSI] = 1;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
    1889 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_MSI]);
   1895 
   1896 			/* The next try is for INTx: Disable MSI */
   1897 			max_type = PCI_INTR_TYPE_INTX;
   1898 			counts[PCI_INTR_TYPE_INTX] = 1;
   1899 			goto alloc_retry;
   1900 		}
   1901 	} else {
   1902 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1903 		error = wm_setup_legacy(sc);
   1904 		if (error) {
   1905 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1906 			    counts[PCI_INTR_TYPE_INTX]);
   1907 			return;
   1908 		}
   1909 	}
   1910 
   1911 	/*
   1912 	 * Check the function ID (unit number of the chip).
   1913 	 */
   1914 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1915 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1916 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1917 	    || (sc->sc_type == WM_T_82580)
   1918 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1919 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1920 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1921 	else
   1922 		sc->sc_funcid = 0;
   1923 
   1924 	/*
   1925 	 * Determine a few things about the bus we're connected to.
   1926 	 */
   1927 	if (sc->sc_type < WM_T_82543) {
   1928 		/* We don't really know the bus characteristics here. */
   1929 		sc->sc_bus_speed = 33;
   1930 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1931 		/*
   1932 		 * CSA (Communication Streaming Architecture) is about as fast
    1933 		 * as a 32-bit 66MHz PCI bus.
   1934 		 */
   1935 		sc->sc_flags |= WM_F_CSA;
   1936 		sc->sc_bus_speed = 66;
   1937 		aprint_verbose_dev(sc->sc_dev,
   1938 		    "Communication Streaming Architecture\n");
   1939 		if (sc->sc_type == WM_T_82547) {
   1940 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1941 			callout_setfunc(&sc->sc_txfifo_ch,
   1942 					wm_82547_txfifo_stall, sc);
   1943 			aprint_verbose_dev(sc->sc_dev,
   1944 			    "using 82547 Tx FIFO stall work-around\n");
   1945 		}
   1946 	} else if (sc->sc_type >= WM_T_82571) {
   1947 		sc->sc_flags |= WM_F_PCIE;
   1948 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1949 		    && (sc->sc_type != WM_T_ICH10)
   1950 		    && (sc->sc_type != WM_T_PCH)
   1951 		    && (sc->sc_type != WM_T_PCH2)
   1952 		    && (sc->sc_type != WM_T_PCH_LPT)
   1953 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1954 			/* ICH* and PCH* have no PCIe capability registers */
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1957 				NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIe capability\n");
   1960 		}
   1961 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1962 	} else {
   1963 		reg = CSR_READ(sc, WMREG_STATUS);
   1964 		if (reg & STATUS_BUS64)
   1965 			sc->sc_flags |= WM_F_BUS64;
   1966 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1967 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1968 
   1969 			sc->sc_flags |= WM_F_PCIX;
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1972 				aprint_error_dev(sc->sc_dev,
   1973 				    "unable to find PCIX capability\n");
   1974 			else if (sc->sc_type != WM_T_82545_3 &&
   1975 				 sc->sc_type != WM_T_82546_3) {
   1976 				/*
   1977 				 * Work around a problem caused by the BIOS
   1978 				 * setting the max memory read byte count
   1979 				 * incorrectly.
   1980 				 */
   1981 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1982 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1983 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1984 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1985 
   1986 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1987 				    PCIX_CMD_BYTECNT_SHIFT;
   1988 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1989 				    PCIX_STATUS_MAXB_SHIFT;
   1990 				if (bytecnt > maxb) {
   1991 					aprint_verbose_dev(sc->sc_dev,
   1992 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1993 					    512 << bytecnt, 512 << maxb);
   1994 					pcix_cmd = (pcix_cmd &
   1995 					    ~PCIX_CMD_BYTECNT_MASK) |
   1996 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1997 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1998 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1999 					    pcix_cmd);
   2000 				}
   2001 			}
   2002 		}
   2003 		/*
   2004 		 * The quad port adapter is special; it has a PCIX-PCIX
   2005 		 * bridge on the board, and can run the secondary bus at
   2006 		 * a higher speed.
   2007 		 */
   2008 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2009 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2010 								      : 66;
   2011 		} else if (sc->sc_flags & WM_F_PCIX) {
   2012 			switch (reg & STATUS_PCIXSPD_MASK) {
   2013 			case STATUS_PCIXSPD_50_66:
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			case STATUS_PCIXSPD_66_100:
   2017 				sc->sc_bus_speed = 100;
   2018 				break;
   2019 			case STATUS_PCIXSPD_100_133:
   2020 				sc->sc_bus_speed = 133;
   2021 				break;
   2022 			default:
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2025 				    reg & STATUS_PCIXSPD_MASK);
   2026 				sc->sc_bus_speed = 66;
   2027 				break;
   2028 			}
   2029 		} else
   2030 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2031 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2032 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2033 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2034 	}
   2035 
   2036 	/* clear interesting stat counters */
   2037 	CSR_READ(sc, WMREG_COLC);
   2038 	CSR_READ(sc, WMREG_RXERRC);
   2039 
   2040 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2041 	    || (sc->sc_type >= WM_T_ICH8))
   2042 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2043 	if (sc->sc_type >= WM_T_ICH8)
   2044 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2045 
    2046 	/* Set up per-chip NVM access methods and PHY/NVM semaphores */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82542_2_0:
   2049 	case WM_T_82542_2_1:
   2050 	case WM_T_82543:
   2051 	case WM_T_82544:
   2052 		/* Microwire */
   2053 		sc->nvm.read = wm_nvm_read_uwire;
   2054 		sc->sc_nvm_wordsize = 64;
   2055 		sc->sc_nvm_addrbits = 6;
   2056 		break;
   2057 	case WM_T_82540:
   2058 	case WM_T_82545:
   2059 	case WM_T_82545_3:
   2060 	case WM_T_82546:
   2061 	case WM_T_82546_3:
   2062 		/* Microwire */
   2063 		sc->nvm.read = wm_nvm_read_uwire;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_SIZE) {
   2066 			sc->sc_nvm_wordsize = 256;
   2067 			sc->sc_nvm_addrbits = 8;
   2068 		} else {
   2069 			sc->sc_nvm_wordsize = 64;
   2070 			sc->sc_nvm_addrbits = 6;
   2071 		}
   2072 		sc->sc_flags |= WM_F_LOCK_EECD;
   2073 		sc->nvm.acquire = wm_get_eecd;
   2074 		sc->nvm.release = wm_put_eecd;
   2075 		break;
   2076 	case WM_T_82541:
   2077 	case WM_T_82541_2:
   2078 	case WM_T_82547:
   2079 	case WM_T_82547_2:
   2080 		reg = CSR_READ(sc, WMREG_EECD);
   2081 		if (reg & EECD_EE_TYPE) {
   2082 			/* SPI */
   2083 			sc->nvm.read = wm_nvm_read_spi;
   2084 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2085 			wm_nvm_set_addrbits_size_eecd(sc);
   2086 		} else {
   2087 			/* Microwire */
   2088 			sc->nvm.read = wm_nvm_read_uwire;
   2089 			if ((reg & EECD_EE_ABITS) != 0) {
   2090 				sc->sc_nvm_wordsize = 256;
   2091 				sc->sc_nvm_addrbits = 8;
   2092 			} else {
   2093 				sc->sc_nvm_wordsize = 64;
   2094 				sc->sc_nvm_addrbits = 6;
   2095 			}
   2096 		}
   2097 		sc->sc_flags |= WM_F_LOCK_EECD;
   2098 		sc->nvm.acquire = wm_get_eecd;
   2099 		sc->nvm.release = wm_put_eecd;
   2100 		break;
   2101 	case WM_T_82571:
   2102 	case WM_T_82572:
   2103 		/* SPI */
   2104 		sc->nvm.read = wm_nvm_read_eerd;
    2105 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2106 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2107 		wm_nvm_set_addrbits_size_eecd(sc);
   2108 		sc->phy.acquire = wm_get_swsm_semaphore;
   2109 		sc->phy.release = wm_put_swsm_semaphore;
   2110 		sc->nvm.acquire = wm_get_nvm_82571;
   2111 		sc->nvm.release = wm_put_nvm_82571;
   2112 		break;
   2113 	case WM_T_82573:
   2114 	case WM_T_82574:
   2115 	case WM_T_82583:
   2116 		sc->nvm.read = wm_nvm_read_eerd;
    2117 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2118 		if (sc->sc_type == WM_T_82573) {
   2119 			sc->phy.acquire = wm_get_swsm_semaphore;
   2120 			sc->phy.release = wm_put_swsm_semaphore;
   2121 			sc->nvm.acquire = wm_get_nvm_82571;
   2122 			sc->nvm.release = wm_put_nvm_82571;
   2123 		} else {
   2124 			/* Both PHY and NVM use the same semaphore. */
   2125 			sc->phy.acquire = sc->nvm.acquire
   2126 			    = wm_get_swfwhw_semaphore;
   2127 			sc->phy.release = sc->nvm.release
   2128 			    = wm_put_swfwhw_semaphore;
   2129 		}
   2130 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2131 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2132 			sc->sc_nvm_wordsize = 2048;
   2133 		} else {
   2134 			/* SPI */
   2135 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2136 			wm_nvm_set_addrbits_size_eecd(sc);
   2137 		}
   2138 		break;
   2139 	case WM_T_82575:
   2140 	case WM_T_82576:
   2141 	case WM_T_82580:
   2142 	case WM_T_I350:
   2143 	case WM_T_I354:
   2144 	case WM_T_80003:
   2145 		/* SPI */
   2146 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2147 		wm_nvm_set_addrbits_size_eecd(sc);
    2148 		if ((sc->sc_type == WM_T_80003)
   2149 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2150 			sc->nvm.read = wm_nvm_read_eerd;
   2151 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2152 		} else {
   2153 			sc->nvm.read = wm_nvm_read_spi;
   2154 			sc->sc_flags |= WM_F_LOCK_EECD;
   2155 		}
   2156 		sc->phy.acquire = wm_get_phy_82575;
   2157 		sc->phy.release = wm_put_phy_82575;
   2158 		sc->nvm.acquire = wm_get_nvm_80003;
   2159 		sc->nvm.release = wm_put_nvm_80003;
   2160 		break;
   2161 	case WM_T_ICH8:
   2162 	case WM_T_ICH9:
   2163 	case WM_T_ICH10:
   2164 	case WM_T_PCH:
   2165 	case WM_T_PCH2:
   2166 	case WM_T_PCH_LPT:
   2167 		sc->nvm.read = wm_nvm_read_ich8;
   2168 		/* FLASH */
   2169 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2170 		sc->sc_nvm_wordsize = 2048;
    2171 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   2172 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2173 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2174 			aprint_error_dev(sc->sc_dev,
   2175 			    "can't map FLASH registers\n");
   2176 			goto out;
   2177 		}
   2178 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
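         		/*
         		 * GFPREG describes the NVM region of the flash as
         		 * base and limit sector numbers; the lines below
         		 * convert that span into a per-bank size in 16-bit
         		 * words, assuming two banks.
         		 */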
   2179 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2180 		    ICH_FLASH_SECTOR_SIZE;
   2181 		sc->sc_ich8_flash_bank_size =
   2182 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2183 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2184 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2185 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2186 		sc->sc_flashreg_offset = 0;
   2187 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2188 		sc->phy.release = wm_put_swflag_ich8lan;
   2189 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2190 		sc->nvm.release = wm_put_nvm_ich8lan;
   2191 		break;
   2192 	case WM_T_PCH_SPT:
   2193 		sc->nvm.read = wm_nvm_read_spt;
   2194 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2195 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2196 		sc->sc_flasht = sc->sc_st;
   2197 		sc->sc_flashh = sc->sc_sh;
   2198 		sc->sc_ich8_flash_base = 0;
   2199 		sc->sc_nvm_wordsize =
   2200 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2201 			* NVM_SIZE_MULTIPLIER;
    2202 		/* That's a size in bytes; we want words */
   2203 		sc->sc_nvm_wordsize /= 2;
   2204 		/* assume 2 banks */
   2205 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2206 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2207 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2208 		sc->phy.release = wm_put_swflag_ich8lan;
   2209 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2210 		sc->nvm.release = wm_put_nvm_ich8lan;
   2211 		break;
   2212 	case WM_T_I210:
   2213 	case WM_T_I211:
   2214 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2215 			sc->nvm.read = wm_nvm_read_eerd;
   2216 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2217 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2218 			wm_nvm_set_addrbits_size_eecd(sc);
   2219 		} else {
   2220 			sc->nvm.read = wm_nvm_read_invm;
   2221 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2222 			sc->sc_nvm_wordsize = INVM_SIZE;
   2223 		}
   2224 		sc->phy.acquire = wm_get_phy_82575;
   2225 		sc->phy.release = wm_put_phy_82575;
   2226 		sc->nvm.acquire = wm_get_nvm_80003;
   2227 		sc->nvm.release = wm_put_nvm_80003;
   2228 		break;
   2229 	default:
   2230 		break;
   2231 	}
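
         	/*
         	 * At this point sc->nvm.read and the NVM/PHY
         	 * acquire/release hooks point at the chip-appropriate
         	 * routines, or at the wm_{get,put}_null defaults assigned
         	 * earlier.
         	 */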
   2232 
   2233 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2234 	switch (sc->sc_type) {
   2235 	case WM_T_82571:
   2236 	case WM_T_82572:
   2237 		reg = CSR_READ(sc, WMREG_SWSM2);
   2238 		if ((reg & SWSM2_LOCK) == 0) {
   2239 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2240 			force_clear_smbi = true;
   2241 		} else
   2242 			force_clear_smbi = false;
   2243 		break;
   2244 	case WM_T_82573:
   2245 	case WM_T_82574:
   2246 	case WM_T_82583:
   2247 		force_clear_smbi = true;
   2248 		break;
   2249 	default:
   2250 		force_clear_smbi = false;
   2251 		break;
   2252 	}
   2253 	if (force_clear_smbi) {
   2254 		reg = CSR_READ(sc, WMREG_SWSM);
   2255 		if ((reg & SWSM_SMBI) != 0)
   2256 			aprint_error_dev(sc->sc_dev,
   2257 			    "Please update the Bootagent\n");
   2258 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2259 	}
   2260 
   2261 	/*
    2262 	 * Defer printing the EEPROM type until after verifying the checksum.
   2263 	 * This allows the EEPROM type to be printed correctly in the case
   2264 	 * that no EEPROM is attached.
   2265 	 */
   2266 	/*
   2267 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2268 	 * this for later, so we can fail future reads from the EEPROM.
   2269 	 */
   2270 	if (wm_nvm_validate_checksum(sc)) {
   2271 		/*
    2272 		 * Read it again because some PCI-e parts fail the
    2273 		 * first check due to the link being in a sleep state.
   2274 		 */
   2275 		if (wm_nvm_validate_checksum(sc))
   2276 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2277 	}
   2278 
   2279 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2280 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2281 	else {
   2282 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2283 		    sc->sc_nvm_wordsize);
   2284 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2285 			aprint_verbose("iNVM");
   2286 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2287 			aprint_verbose("FLASH(HW)");
   2288 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2289 			aprint_verbose("FLASH");
   2290 		else {
   2291 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2292 				eetype = "SPI";
   2293 			else
   2294 				eetype = "MicroWire";
   2295 			aprint_verbose("(%d address bits) %s EEPROM",
   2296 			    sc->sc_nvm_addrbits, eetype);
   2297 		}
   2298 	}
   2299 	wm_nvm_version(sc);
   2300 	aprint_verbose("\n");
   2301 
   2302 	/*
    2303 	 * XXX This is the first call to wm_gmii_setup_phytype; the result
    2304 	 * might be incorrect.
   2305 	 */
   2306 	wm_gmii_setup_phytype(sc, 0, 0);
   2307 
   2308 	/* Reset the chip to a known state. */
   2309 	wm_reset(sc);
   2310 
   2311 	/* Check for I21[01] PLL workaround */
   2312 	if (sc->sc_type == WM_T_I210)
   2313 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2314 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
    2315 		/* NVM image releases 3.25 and later include the workaround */
   2316 		if ((sc->sc_nvm_ver_major < 3)
   2317 		    || ((sc->sc_nvm_ver_major == 3)
   2318 			&& (sc->sc_nvm_ver_minor < 25))) {
   2319 			aprint_verbose_dev(sc->sc_dev,
   2320 			    "ROM image version %d.%d is older than 3.25\n",
   2321 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2322 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2323 		}
   2324 	}
   2325 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2326 		wm_pll_workaround_i210(sc);
   2327 
   2328 	wm_get_wakeup(sc);
   2329 
   2330 	/* Non-AMT based hardware can now take control from firmware */
   2331 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2332 		wm_get_hw_control(sc);
   2333 
   2334 	/*
    2335 	 * Read the Ethernet address from the EEPROM, unless it was
    2336 	 * found first in the device properties.
   2337 	 */
   2338 	ea = prop_dictionary_get(dict, "mac-address");
   2339 	if (ea != NULL) {
   2340 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2341 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2342 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2343 	} else {
   2344 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2345 			aprint_error_dev(sc->sc_dev,
   2346 			    "unable to read Ethernet address\n");
   2347 			goto out;
   2348 		}
   2349 	}
   2350 
   2351 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2352 	    ether_sprintf(enaddr));
   2353 
   2354 	/*
   2355 	 * Read the config info from the EEPROM, and set up various
   2356 	 * bits in the control registers based on their contents.
   2357 	 */
   2358 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2359 	if (pn != NULL) {
   2360 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2361 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2362 	} else {
   2363 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2364 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2365 			goto out;
   2366 		}
   2367 	}
   2368 
   2369 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2370 	if (pn != NULL) {
   2371 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2372 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2373 	} else {
   2374 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2375 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2376 			goto out;
   2377 		}
   2378 	}
   2379 
   2380 	/* check for WM_F_WOL */
   2381 	switch (sc->sc_type) {
   2382 	case WM_T_82542_2_0:
   2383 	case WM_T_82542_2_1:
   2384 	case WM_T_82543:
   2385 		/* dummy? */
   2386 		eeprom_data = 0;
   2387 		apme_mask = NVM_CFG3_APME;
   2388 		break;
   2389 	case WM_T_82544:
   2390 		apme_mask = NVM_CFG2_82544_APM_EN;
   2391 		eeprom_data = cfg2;
   2392 		break;
   2393 	case WM_T_82546:
   2394 	case WM_T_82546_3:
   2395 	case WM_T_82571:
   2396 	case WM_T_82572:
   2397 	case WM_T_82573:
   2398 	case WM_T_82574:
   2399 	case WM_T_82583:
   2400 	case WM_T_80003:
   2401 	default:
   2402 		apme_mask = NVM_CFG3_APME;
   2403 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2404 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2405 		break;
   2406 	case WM_T_82575:
   2407 	case WM_T_82576:
   2408 	case WM_T_82580:
   2409 	case WM_T_I350:
   2410 	case WM_T_I354: /* XXX ok? */
   2411 	case WM_T_ICH8:
   2412 	case WM_T_ICH9:
   2413 	case WM_T_ICH10:
   2414 	case WM_T_PCH:
   2415 	case WM_T_PCH2:
   2416 	case WM_T_PCH_LPT:
   2417 	case WM_T_PCH_SPT:
   2418 		/* XXX The funcid should be checked on some devices */
   2419 		apme_mask = WUC_APME;
   2420 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2421 		break;
   2422 	}
   2423 
    2424 	/* Check the WM_F_WOL flag now that the EEPROM settings are known */
   2425 	if ((eeprom_data & apme_mask) != 0)
   2426 		sc->sc_flags |= WM_F_WOL;
   2427 
   2428 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2429 		/* Check NVM for autonegotiation */
   2430 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2431 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2432 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2433 		}
   2434 	}
   2435 
   2436 	/*
    2437 	 * XXX Some multi-port cards need special handling to
    2438 	 * disable a particular port.
   2439 	 */
   2440 
   2441 	if (sc->sc_type >= WM_T_82544) {
   2442 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2443 		if (pn != NULL) {
   2444 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2445 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2446 		} else {
   2447 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2448 				aprint_error_dev(sc->sc_dev,
   2449 				    "unable to read SWDPIN\n");
   2450 				goto out;
   2451 			}
   2452 		}
   2453 	}
   2454 
   2455 	if (cfg1 & NVM_CFG1_ILOS)
   2456 		sc->sc_ctrl |= CTRL_ILOS;
   2457 
   2458 	/*
   2459 	 * XXX
    2460 	 * This code isn't correct because pins 2 and 3 are located in
    2461 	 * different positions on newer chips. Check all the datasheets.
    2462 	 *
    2463 	 * Until this problem is resolved, only handle chips up to the 82580.
   2464 	 */
   2465 	if (sc->sc_type <= WM_T_82580) {
   2466 		if (sc->sc_type >= WM_T_82544) {
   2467 			sc->sc_ctrl |=
   2468 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2469 			    CTRL_SWDPIO_SHIFT;
   2470 			sc->sc_ctrl |=
   2471 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2472 			    CTRL_SWDPINS_SHIFT;
   2473 		} else {
   2474 			sc->sc_ctrl |=
   2475 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2476 			    CTRL_SWDPIO_SHIFT;
   2477 		}
   2478 	}
   2479 
   2480 	/* XXX For other than 82580? */
   2481 	if (sc->sc_type == WM_T_82580) {
   2482 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2483 		if (nvmword & __BIT(13))
   2484 			sc->sc_ctrl |= CTRL_ILOS;
   2485 	}
   2486 
   2487 #if 0
   2488 	if (sc->sc_type >= WM_T_82544) {
   2489 		if (cfg1 & NVM_CFG1_IPS0)
   2490 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2491 		if (cfg1 & NVM_CFG1_IPS1)
   2492 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2493 		sc->sc_ctrl_ext |=
   2494 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2495 		    CTRL_EXT_SWDPIO_SHIFT;
   2496 		sc->sc_ctrl_ext |=
   2497 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2498 		    CTRL_EXT_SWDPINS_SHIFT;
   2499 	} else {
   2500 		sc->sc_ctrl_ext |=
   2501 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2502 		    CTRL_EXT_SWDPIO_SHIFT;
   2503 	}
   2504 #endif
   2505 
   2506 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2507 #if 0
   2508 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2509 #endif
   2510 
   2511 	if (sc->sc_type == WM_T_PCH) {
   2512 		uint16_t val;
   2513 
   2514 		/* Save the NVM K1 bit setting */
   2515 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2516 
   2517 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2518 			sc->sc_nvm_k1_enabled = 1;
   2519 		else
   2520 			sc->sc_nvm_k1_enabled = 0;
   2521 	}
   2522 
    2523 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2524 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2525 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2526 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2527 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2528 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2529 		/* Copper only */
   2530 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2531 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2532 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2533 	    || (sc->sc_type == WM_T_I211)) {
   2534 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2535 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2536 		switch (link_mode) {
   2537 		case CTRL_EXT_LINK_MODE_1000KX:
   2538 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2539 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2540 			break;
   2541 		case CTRL_EXT_LINK_MODE_SGMII:
   2542 			if (wm_sgmii_uses_mdio(sc)) {
   2543 				aprint_verbose_dev(sc->sc_dev,
   2544 				    "SGMII(MDIO)\n");
   2545 				sc->sc_flags |= WM_F_SGMII;
   2546 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2547 				break;
   2548 			}
   2549 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2550 			/*FALLTHROUGH*/
   2551 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2552 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2553 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2554 				if (link_mode
   2555 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2556 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2557 					sc->sc_flags |= WM_F_SGMII;
   2558 				} else {
   2559 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2560 					aprint_verbose_dev(sc->sc_dev,
   2561 					    "SERDES\n");
   2562 				}
   2563 				break;
   2564 			}
   2565 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2566 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2567 
   2568 			/* Change current link mode setting */
   2569 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2570 			switch (sc->sc_mediatype) {
   2571 			case WM_MEDIATYPE_COPPER:
   2572 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2573 				break;
   2574 			case WM_MEDIATYPE_SERDES:
   2575 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2576 				break;
   2577 			default:
   2578 				break;
   2579 			}
   2580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2581 			break;
   2582 		case CTRL_EXT_LINK_MODE_GMII:
   2583 		default:
   2584 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2585 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2586 			break;
   2587 		}
   2588 
    2590 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2591 			reg |= CTRL_EXT_I2C_ENA;
    2592 		else
    2593 			reg &= ~CTRL_EXT_I2C_ENA;
   2594 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2595 	} else if (sc->sc_type < WM_T_82543 ||
   2596 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2597 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2598 			aprint_error_dev(sc->sc_dev,
   2599 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2600 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2601 		}
   2602 	} else {
   2603 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2604 			aprint_error_dev(sc->sc_dev,
   2605 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2606 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2607 		}
   2608 	}
   2609 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2610 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2611 
   2612 	/* Set device properties (macflags) */
   2613 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2614 
   2615 	/* Initialize the media structures accordingly. */
   2616 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2617 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2618 	else
   2619 		wm_tbi_mediainit(sc); /* All others */
   2620 
   2621 	ifp = &sc->sc_ethercom.ec_if;
   2622 	xname = device_xname(sc->sc_dev);
   2623 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2624 	ifp->if_softc = sc;
   2625 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2626 #ifdef WM_MPSAFE
   2627 	ifp->if_extflags = IFEF_START_MPSAFE;
   2628 #endif
   2629 	ifp->if_ioctl = wm_ioctl;
   2630 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2631 		ifp->if_start = wm_nq_start;
   2632 		/*
   2633 		 * When the number of CPUs is one and the controller can use
    2634 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2635 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2636 		 * the other for link status changes.
   2637 		 * In this situation, wm_nq_transmit() is disadvantageous
   2638 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2639 		 */
   2640 		if (wm_is_using_multiqueue(sc))
   2641 			ifp->if_transmit = wm_nq_transmit;
   2642 	} else {
   2643 		ifp->if_start = wm_start;
   2644 		/*
    2645 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2646 		 */
   2647 		if (wm_is_using_multiqueue(sc))
   2648 			ifp->if_transmit = wm_transmit;
   2649 	}
   2650 	ifp->if_watchdog = wm_watchdog;
   2651 	ifp->if_init = wm_init;
   2652 	ifp->if_stop = wm_stop;
   2653 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2654 	IFQ_SET_READY(&ifp->if_snd);
   2655 
    2656 	/* Check for jumbo frame support */
   2657 	switch (sc->sc_type) {
   2658 	case WM_T_82573:
   2659 		/* XXX limited to 9234 if ASPM is disabled */
   2660 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2661 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2662 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2663 		break;
   2664 	case WM_T_82571:
   2665 	case WM_T_82572:
   2666 	case WM_T_82574:
   2667 	case WM_T_82575:
   2668 	case WM_T_82576:
   2669 	case WM_T_82580:
   2670 	case WM_T_I350:
    2671 	case WM_T_I354: /* XXX ok? */
   2672 	case WM_T_I210:
   2673 	case WM_T_I211:
   2674 	case WM_T_80003:
   2675 	case WM_T_ICH9:
   2676 	case WM_T_ICH10:
   2677 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2678 	case WM_T_PCH_LPT:
   2679 	case WM_T_PCH_SPT:
   2680 		/* XXX limited to 9234 */
   2681 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2682 		break;
   2683 	case WM_T_PCH:
   2684 		/* XXX limited to 4096 */
   2685 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2686 		break;
   2687 	case WM_T_82542_2_0:
   2688 	case WM_T_82542_2_1:
   2689 	case WM_T_82583:
   2690 	case WM_T_ICH8:
   2691 		/* No support for jumbo frame */
   2692 		break;
   2693 	default:
   2694 		/* ETHER_MAX_LEN_JUMBO */
   2695 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2696 		break;
   2697 	}
   2698 
    2699 	/* If we're an i82543 or greater, we can support VLANs. */
   2700 	if (sc->sc_type >= WM_T_82543)
   2701 		sc->sc_ethercom.ec_capabilities |=
   2702 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2703 
   2704 	/*
    2705 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2706 	 * on i82543 and later.
   2707 	 */
   2708 	if (sc->sc_type >= WM_T_82543) {
   2709 		ifp->if_capabilities |=
   2710 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2711 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2712 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2713 		    IFCAP_CSUM_TCPv6_Tx |
   2714 		    IFCAP_CSUM_UDPv6_Tx;
   2715 	}
   2716 
   2717 	/*
   2718 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2719 	 *
   2720 	 *	82541GI (8086:1076) ... no
   2721 	 *	82572EI (8086:10b9) ... yes
   2722 	 */
   2723 	if (sc->sc_type >= WM_T_82571) {
   2724 		ifp->if_capabilities |=
   2725 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2726 	}
   2727 
   2728 	/*
    2729 	 * If we're an i82544 or greater (except the i82547), we can do
   2730 	 * TCP segmentation offload.
   2731 	 */
   2732 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2733 		ifp->if_capabilities |= IFCAP_TSOv4;
   2734 	}
   2735 
   2736 	if (sc->sc_type >= WM_T_82571) {
   2737 		ifp->if_capabilities |= IFCAP_TSOv6;
   2738 	}
   2739 
   2740 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2741 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2742 
   2743 #ifdef WM_MPSAFE
   2744 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2745 #else
   2746 	sc->sc_core_lock = NULL;
   2747 #endif
   2748 
   2749 	/* Attach the interface. */
   2750 	if_initialize(ifp);
   2751 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2752 	ether_ifattach(ifp, enaddr);
   2753 	if_register(ifp);
   2754 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2755 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2756 			  RND_FLAG_DEFAULT);
   2757 
   2758 #ifdef WM_EVENT_COUNTERS
   2759 	/* Attach event counters. */
   2760 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2761 	    NULL, xname, "linkintr");
   2762 
   2763 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2764 	    NULL, xname, "tx_xoff");
   2765 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2766 	    NULL, xname, "tx_xon");
   2767 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2768 	    NULL, xname, "rx_xoff");
   2769 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2770 	    NULL, xname, "rx_xon");
   2771 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2772 	    NULL, xname, "rx_macctl");
   2773 #endif /* WM_EVENT_COUNTERS */
   2774 
   2775 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2776 		pmf_class_network_register(self, ifp);
   2777 	else
   2778 		aprint_error_dev(self, "couldn't establish power handler\n");
   2779 
   2780 	sc->sc_flags |= WM_F_ATTACHED;
   2781  out:
   2782 	return;
   2783 }
   2784 
   2785 /* The detach function (ca_detach) */
   2786 static int
   2787 wm_detach(device_t self, int flags __unused)
   2788 {
   2789 	struct wm_softc *sc = device_private(self);
   2790 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2791 	int i;
   2792 
   2793 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2794 		return 0;
   2795 
   2796 	/* Stop the interface. Callouts are stopped inside wm_stop(). */
   2797 	wm_stop(ifp, 1);
   2798 
   2799 	pmf_device_deregister(self);
   2800 
   2801 #ifdef WM_EVENT_COUNTERS
   2802 	evcnt_detach(&sc->sc_ev_linkintr);
   2803 
   2804 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2805 	evcnt_detach(&sc->sc_ev_tx_xon);
   2806 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2807 	evcnt_detach(&sc->sc_ev_rx_xon);
   2808 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2809 #endif /* WM_EVENT_COUNTERS */
   2810 
   2811 	/* Tell the firmware about the release */
   2812 	WM_CORE_LOCK(sc);
   2813 	wm_release_manageability(sc);
   2814 	wm_release_hw_control(sc);
   2815 	wm_enable_wakeup(sc);
   2816 	WM_CORE_UNLOCK(sc);
   2817 
   2818 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2819 
   2820 	/* Delete all remaining media. */
   2821 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2822 
   2823 	ether_ifdetach(ifp);
   2824 	if_detach(ifp);
   2825 	if_percpuq_destroy(sc->sc_ipq);
   2826 
   2827 	/* Unload RX dmamaps and free mbufs */
   2828 	for (i = 0; i < sc->sc_nqueues; i++) {
   2829 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2830 		mutex_enter(rxq->rxq_lock);
   2831 		wm_rxdrain(rxq);
   2832 		mutex_exit(rxq->rxq_lock);
   2833 	}
   2834 	/* All queue locks must be dropped by this point */
   2835 
   2836 	/* Disestablish the interrupt handler */
   2837 	for (i = 0; i < sc->sc_nintrs; i++) {
   2838 		if (sc->sc_ihs[i] != NULL) {
   2839 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2840 			sc->sc_ihs[i] = NULL;
   2841 		}
   2842 	}
   2843 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2844 
   2845 	wm_free_txrx_queues(sc);
   2846 
   2847 	/* Unmap the registers */
   2848 	if (sc->sc_ss) {
   2849 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2850 		sc->sc_ss = 0;
   2851 	}
   2852 	if (sc->sc_ios) {
   2853 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2854 		sc->sc_ios = 0;
   2855 	}
   2856 	if (sc->sc_flashs) {
   2857 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2858 		sc->sc_flashs = 0;
   2859 	}
   2860 
   2861 	if (sc->sc_core_lock)
   2862 		mutex_obj_free(sc->sc_core_lock);
   2863 	if (sc->sc_ich_phymtx)
   2864 		mutex_obj_free(sc->sc_ich_phymtx);
   2865 	if (sc->sc_ich_nvmmtx)
   2866 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2867 
   2868 	return 0;
   2869 }
   2870 
   2871 static bool
   2872 wm_suspend(device_t self, const pmf_qual_t *qual)
   2873 {
   2874 	struct wm_softc *sc = device_private(self);
   2875 
   2876 	wm_release_manageability(sc);
   2877 	wm_release_hw_control(sc);
   2878 	wm_enable_wakeup(sc);
   2879 
   2880 	return true;
   2881 }
   2882 
   2883 static bool
   2884 wm_resume(device_t self, const pmf_qual_t *qual)
   2885 {
   2886 	struct wm_softc *sc = device_private(self);
   2887 
   2888 	wm_init_manageability(sc);
   2889 
   2890 	return true;
   2891 }
   2892 
   2893 /*
   2894  * wm_watchdog:		[ifnet interface function]
   2895  *
   2896  *	Watchdog timer handler.
   2897  */
   2898 static void
   2899 wm_watchdog(struct ifnet *ifp)
   2900 {
   2901 	int qid;
   2902 	struct wm_softc *sc = ifp->if_softc;
   2903 
   2904 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2905 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2906 
   2907 		wm_watchdog_txq(ifp, txq);
   2908 	}
   2909 
   2910 	/* Reset the interface. */
   2911 	(void) wm_init(ifp);
   2912 
   2913 	/*
   2914 	 * Some upper-layer processing paths (e.g. ALTQ, or single-CPU
   2915 	 * systems) still call ifp->if_start() directly.
   2916 	 */
   2917 	/* Try to get more packets going. */
   2918 	ifp->if_start(ifp);
   2919 }
   2920 
   2921 static void
   2922 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2923 {
   2924 	struct wm_softc *sc = ifp->if_softc;
   2925 
   2926 	/*
   2927 	 * Since we're using delayed interrupts, sweep up
   2928 	 * before we report an error.
   2929 	 */
   2930 	mutex_enter(txq->txq_lock);
   2931 	wm_txeof(sc, txq);
   2932 	mutex_exit(txq->txq_lock);
   2933 
   2934 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2935 #ifdef WM_DEBUG
   2936 		int i, j;
   2937 		struct wm_txsoft *txs;
   2938 #endif
   2939 		log(LOG_ERR,
   2940 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2941 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2942 		    txq->txq_next);
   2943 		ifp->if_oerrors++;
   2944 #ifdef WM_DEBUG
   2945 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2946 		    i = WM_NEXTTXS(txq, i)) {
   2947 		    txs = &txq->txq_soft[i];
   2948 		    printf("txs %d tx %d -> %d\n",
   2949 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2950 		    for (j = txs->txs_firstdesc; ;
   2951 			j = WM_NEXTTX(txq, j)) {
   2952 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2953 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2954 			printf("\t %#08x%08x\n",
   2955 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2956 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2957 			if (j == txs->txs_lastdesc)
   2958 				break;
   2959 			}
   2960 		}
   2961 #endif
   2962 	}
   2963 }
   2964 
   2965 /*
   2966  * wm_tick:
   2967  *
   2968  *	One second timer, used to check link status, sweep up
   2969  *	completed transmit jobs, etc.
   2970  */
   2971 static void
   2972 wm_tick(void *arg)
   2973 {
   2974 	struct wm_softc *sc = arg;
   2975 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2976 #ifndef WM_MPSAFE
   2977 	int s = splnet();
   2978 #endif
   2979 
   2980 	WM_CORE_LOCK(sc);
   2981 
   2982 	if (sc->sc_core_stopping)
   2983 		goto out;
   2984 
   2985 	if (sc->sc_type >= WM_T_82542_2_1) {
   2986 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2987 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2988 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2989 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2990 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2991 	}
   2992 
   2993 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2994 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2995 	    + CSR_READ(sc, WMREG_CRCERRS)
   2996 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2997 	    + CSR_READ(sc, WMREG_SYMERRC)
   2998 	    + CSR_READ(sc, WMREG_RXERRC)
   2999 	    + CSR_READ(sc, WMREG_SEC)
   3000 	    + CSR_READ(sc, WMREG_CEXTERR)
   3001 	    + CSR_READ(sc, WMREG_RLEC);
   3002 	/*
   3003 	 * WMREG_RNBC is incremented when no receive buffers are
   3004 	 * available in host memory. It is not a count of dropped
   3005 	 * packets: the controller can still receive packets in that
   3006 	 * state as long as there is space in the PHY's FIFO.
   3007 	 *
   3008 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of
   3009 	 * if_iqdrops.
   3010 	 */
   3011 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
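	/*
	 * A minimal sketch of counting RNBC with its own event counter
	 * (the sc_ev_rnbc field is hypothetical, not part of this
	 * driver):
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, xname, "rnbc");
	 *	...
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */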
   3012 
   3013 	if (sc->sc_flags & WM_F_HAS_MII)
   3014 		mii_tick(&sc->sc_mii);
   3015 	else if ((sc->sc_type >= WM_T_82575)
   3016 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3017 		wm_serdes_tick(sc);
   3018 	else
   3019 		wm_tbi_tick(sc);
   3020 
   3021 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3022 out:
   3023 	WM_CORE_UNLOCK(sc);
   3024 #ifndef WM_MPSAFE
   3025 	splx(s);
   3026 #endif
   3027 }
   3028 
   3029 static int
   3030 wm_ifflags_cb(struct ethercom *ec)
   3031 {
   3032 	struct ifnet *ifp = &ec->ec_if;
   3033 	struct wm_softc *sc = ifp->if_softc;
   3034 	int rc = 0;
   3035 
   3036 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3037 		device_xname(sc->sc_dev), __func__));
   3038 
   3039 	WM_CORE_LOCK(sc);
   3040 
   3041 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3042 	sc->sc_if_flags = ifp->if_flags;
   3043 
   3044 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3045 		rc = ENETRESET;
   3046 		goto out;
   3047 	}
   3048 
   3049 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3050 		wm_set_filter(sc);
   3051 
   3052 	wm_set_vlan(sc);
   3053 
   3054 out:
   3055 	WM_CORE_UNLOCK(sc);
   3056 
   3057 	return rc;
   3058 }
   3059 
   3060 /*
   3061  * wm_ioctl:		[ifnet interface function]
   3062  *
   3063  *	Handle control requests from the operator.
   3064  */
   3065 static int
   3066 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3067 {
   3068 	struct wm_softc *sc = ifp->if_softc;
   3069 	struct ifreq *ifr = (struct ifreq *) data;
   3070 	struct ifaddr *ifa = (struct ifaddr *)data;
   3071 	struct sockaddr_dl *sdl;
   3072 	int s, error;
   3073 
   3074 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3075 		device_xname(sc->sc_dev), __func__));
   3076 
   3077 #ifndef WM_MPSAFE
   3078 	s = splnet();
   3079 #endif
   3080 	switch (cmd) {
   3081 	case SIOCSIFMEDIA:
   3082 	case SIOCGIFMEDIA:
   3083 		WM_CORE_LOCK(sc);
   3084 		/* Flow control requires full-duplex mode. */
   3085 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3086 		    (ifr->ifr_media & IFM_FDX) == 0)
   3087 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3088 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3089 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3090 				/* We can do both TXPAUSE and RXPAUSE. */
   3091 				ifr->ifr_media |=
   3092 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3093 			}
   3094 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3095 		}
   3096 		WM_CORE_UNLOCK(sc);
   3097 #ifdef WM_MPSAFE
   3098 		s = splnet();
   3099 #endif
   3100 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3101 #ifdef WM_MPSAFE
   3102 		splx(s);
   3103 #endif
   3104 		break;
   3105 	case SIOCINITIFADDR:
   3106 		WM_CORE_LOCK(sc);
   3107 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3108 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3109 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3110 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3111 			/* unicast address is first multicast entry */
   3112 			wm_set_filter(sc);
   3113 			error = 0;
   3114 			WM_CORE_UNLOCK(sc);
   3115 			break;
   3116 		}
   3117 		WM_CORE_UNLOCK(sc);
   3118 		/*FALLTHROUGH*/
   3119 	default:
   3120 #ifdef WM_MPSAFE
   3121 		s = splnet();
   3122 #endif
   3123 		/* ether_ioctl() may call wm_start(), so run it unlocked */
   3124 		error = ether_ioctl(ifp, cmd, data);
   3125 #ifdef WM_MPSAFE
   3126 		splx(s);
   3127 #endif
   3128 		if (error != ENETRESET)
   3129 			break;
   3130 
   3131 		error = 0;
   3132 
   3133 		if (cmd == SIOCSIFCAP) {
   3134 			error = (*ifp->if_init)(ifp);
   3135 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3136 			;
   3137 		else if (ifp->if_flags & IFF_RUNNING) {
   3138 			/*
   3139 			 * Multicast list has changed; set the hardware filter
   3140 			 * accordingly.
   3141 			 */
   3142 			WM_CORE_LOCK(sc);
   3143 			wm_set_filter(sc);
   3144 			WM_CORE_UNLOCK(sc);
   3145 		}
   3146 		break;
   3147 	}
   3148 
   3149 #ifndef WM_MPSAFE
   3150 	splx(s);
   3151 #endif
   3152 	return error;
   3153 }
   3154 
   3155 /* MAC address related */
   3156 
   3157 /*
   3158  * Get the offset of the MAC address and return it.
   3159  * If an error occurs, use offset 0.
   3160  */
   3161 static uint16_t
   3162 wm_check_alt_mac_addr(struct wm_softc *sc)
   3163 {
   3164 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3165 	uint16_t offset = NVM_OFF_MACADDR;
   3166 
   3167 	/* Try to read alternative MAC address pointer */
   3168 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3169 		return 0;
   3170 
   3171 	/* Check whether the pointer is valid. */
   3172 	if ((offset == 0x0000) || (offset == 0xffff))
   3173 		return 0;
   3174 
   3175 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3176 	/*
   3177 	 * Check whether the alternative MAC address is valid.
   3178 	 * Some cards have a non-0xffff pointer but do not actually
   3179 	 * use an alternative MAC address.
   3180 	 *
   3181 	 * Also check that the broadcast bit (low bit) is clear.
   3182 	 */
   3183 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3184 		if (((myea[0] & 0xff) & 0x01) == 0)
   3185 			return offset; /* Found */
   3186 
   3187 	/* Not found */
   3188 	return 0;
   3189 }
   3190 
   3191 static int
   3192 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3193 {
   3194 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3195 	uint16_t offset = NVM_OFF_MACADDR;
   3196 	int do_invert = 0;
   3197 
   3198 	switch (sc->sc_type) {
   3199 	case WM_T_82580:
   3200 	case WM_T_I350:
   3201 	case WM_T_I354:
   3202 		/* EEPROM Top Level Partitioning */
   3203 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3204 		break;
   3205 	case WM_T_82571:
   3206 	case WM_T_82575:
   3207 	case WM_T_82576:
   3208 	case WM_T_80003:
   3209 	case WM_T_I210:
   3210 	case WM_T_I211:
   3211 		offset = wm_check_alt_mac_addr(sc);
   3212 		if (offset == 0)
   3213 			if ((sc->sc_funcid & 0x01) == 1)
   3214 				do_invert = 1;
   3215 		break;
   3216 	default:
   3217 		if ((sc->sc_funcid & 0x01) == 1)
   3218 			do_invert = 1;
   3219 		break;
   3220 	}
   3221 
   3222 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3223 		goto bad;
   3224 
   3225 	enaddr[0] = myea[0] & 0xff;
   3226 	enaddr[1] = myea[0] >> 8;
   3227 	enaddr[2] = myea[1] & 0xff;
   3228 	enaddr[3] = myea[1] >> 8;
   3229 	enaddr[4] = myea[2] & 0xff;
   3230 	enaddr[5] = myea[2] >> 8;
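	/*
	 * The NVM words hold the address in little-endian byte order;
	 * e.g. myea[0] == 0x1100 yields enaddr[0] = 0x00, enaddr[1] = 0x11.
	 */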
   3231 
   3232 	/*
   3233 	 * Toggle the LSB of the MAC address on the second port
   3234 	 * of some dual port cards.
   3235 	 */
   3236 	if (do_invert != 0)
   3237 		enaddr[5] ^= 1;
   3238 
   3239 	return 0;
   3240 
   3241  bad:
   3242 	return -1;
   3243 }
   3244 
   3245 /*
   3246  * wm_set_ral:
   3247  *
   3248  *	Set an entry in the receive address list.
   3249  */
   3250 static void
   3251 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3252 {
   3253 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3254 	uint32_t wlock_mac;
   3255 	int rv;
   3256 
   3257 	if (enaddr != NULL) {
   3258 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3259 		    (enaddr[3] << 24);
   3260 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3261 		ral_hi |= RAL_AV;
   3262 	} else {
   3263 		ral_lo = 0;
   3264 		ral_hi = 0;
   3265 	}
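	/*
	 * For example, 00:11:22:33:44:55 is packed above as
	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
	 */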
   3266 
   3267 	switch (sc->sc_type) {
   3268 	case WM_T_82542_2_0:
   3269 	case WM_T_82542_2_1:
   3270 	case WM_T_82543:
   3271 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3272 		CSR_WRITE_FLUSH(sc);
   3273 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3274 		CSR_WRITE_FLUSH(sc);
   3275 		break;
   3276 	case WM_T_PCH2:
   3277 	case WM_T_PCH_LPT:
   3278 	case WM_T_PCH_SPT:
   3279 		if (idx == 0) {
   3280 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3281 			CSR_WRITE_FLUSH(sc);
   3282 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3283 			CSR_WRITE_FLUSH(sc);
   3284 			return;
   3285 		}
   3286 		if (sc->sc_type != WM_T_PCH2) {
   3287 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3288 			    FWSM_WLOCK_MAC);
   3289 			addrl = WMREG_SHRAL(idx - 1);
   3290 			addrh = WMREG_SHRAH(idx - 1);
   3291 		} else {
   3292 			wlock_mac = 0;
   3293 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3294 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3295 		}
   3296 
   3297 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3298 			rv = wm_get_swflag_ich8lan(sc);
   3299 			if (rv != 0)
   3300 				return;
   3301 			CSR_WRITE(sc, addrl, ral_lo);
   3302 			CSR_WRITE_FLUSH(sc);
   3303 			CSR_WRITE(sc, addrh, ral_hi);
   3304 			CSR_WRITE_FLUSH(sc);
   3305 			wm_put_swflag_ich8lan(sc);
   3306 		}
   3307 
   3308 		break;
   3309 	default:
   3310 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3311 		CSR_WRITE_FLUSH(sc);
   3312 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3313 		CSR_WRITE_FLUSH(sc);
   3314 		break;
   3315 	}
   3316 }
   3317 
   3318 /*
   3319  * wm_mchash:
   3320  *
   3321  *	Compute the hash of the multicast address for the 4096-bit
   3322  *	multicast filter.
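 *
 *	For example, with sc_mchash_type 0 (shifts 4/4), the address
 *	01:00:5e:00:00:01 hashes to (0x00 >> 4) | (0x01 << 4) = 0x010;
 *	wm_set_filter() then sets MTA word 0, bit 16.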
   3323  */
   3324 static uint32_t
   3325 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3326 {
   3327 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3328 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3329 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3330 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3331 	uint32_t hash;
   3332 
   3333 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3334 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3335 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3336 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3337 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3338 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3339 		return (hash & 0x3ff);
   3340 	}
   3341 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3342 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3343 
   3344 	return (hash & 0xfff);
   3345 }
   3346 
   3347 /*
   3348  * wm_set_filter:
   3349  *
   3350  *	Set up the receive filter.
   3351  */
   3352 static void
   3353 wm_set_filter(struct wm_softc *sc)
   3354 {
   3355 	struct ethercom *ec = &sc->sc_ethercom;
   3356 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3357 	struct ether_multi *enm;
   3358 	struct ether_multistep step;
   3359 	bus_addr_t mta_reg;
   3360 	uint32_t hash, reg, bit;
   3361 	int i, size, ralmax;
   3362 
   3363 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3364 		device_xname(sc->sc_dev), __func__));
   3365 
   3366 	if (sc->sc_type >= WM_T_82544)
   3367 		mta_reg = WMREG_CORDOVA_MTA;
   3368 	else
   3369 		mta_reg = WMREG_MTA;
   3370 
   3371 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3372 
   3373 	if (ifp->if_flags & IFF_BROADCAST)
   3374 		sc->sc_rctl |= RCTL_BAM;
   3375 	if (ifp->if_flags & IFF_PROMISC) {
   3376 		sc->sc_rctl |= RCTL_UPE;
   3377 		goto allmulti;
   3378 	}
   3379 
   3380 	/*
   3381 	 * Set the station address in the first RAL slot, and
   3382 	 * clear the remaining slots.
   3383 	 */
   3384 	if (sc->sc_type == WM_T_ICH8)
   3385 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3386 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3387 	    || (sc->sc_type == WM_T_PCH))
   3388 		size = WM_RAL_TABSIZE_ICH8;
   3389 	else if (sc->sc_type == WM_T_PCH2)
   3390 		size = WM_RAL_TABSIZE_PCH2;
   3391 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
   3392 		size = WM_RAL_TABSIZE_PCH_LPT;
   3393 	else if (sc->sc_type == WM_T_82575)
   3394 		size = WM_RAL_TABSIZE_82575;
   3395 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3396 		size = WM_RAL_TABSIZE_82576;
   3397 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3398 		size = WM_RAL_TABSIZE_I350;
   3399 	else
   3400 		size = WM_RAL_TABSIZE;
   3401 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3402 
   3403 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3404 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3405 		switch (i) {
   3406 		case 0:
   3407 			/* We can use all entries */
   3408 			ralmax = size;
   3409 			break;
   3410 		case 1:
   3411 			/* Only RAR[0] */
   3412 			ralmax = 1;
   3413 			break;
   3414 		default:
   3415 			/* available SHRA + RAR[0] */
   3416 			ralmax = i + 1;
   3417 		}
   3418 	} else
   3419 		ralmax = size;
   3420 	for (i = 1; i < size; i++) {
   3421 		if (i < ralmax)
   3422 			wm_set_ral(sc, NULL, i);
   3423 	}
   3424 
   3425 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3426 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3427 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3428 	    || (sc->sc_type == WM_T_PCH_SPT))
   3429 		size = WM_ICH8_MC_TABSIZE;
   3430 	else
   3431 		size = WM_MC_TABSIZE;
   3432 	/* Clear out the multicast table. */
   3433 	for (i = 0; i < size; i++) {
   3434 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3435 		CSR_WRITE_FLUSH(sc);
   3436 	}
   3437 
   3438 	ETHER_LOCK(ec);
   3439 	ETHER_FIRST_MULTI(step, ec, enm);
   3440 	while (enm != NULL) {
   3441 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3442 			ETHER_UNLOCK(ec);
   3443 			/*
   3444 			 * We must listen to a range of multicast addresses.
   3445 			 * For now, just accept all multicasts, rather than
   3446 			 * trying to set only those filter bits needed to match
   3447 			 * the range.  (At this time, the only use of address
   3448 			 * ranges is for IP multicast routing, for which the
   3449 			 * range is big enough to require all bits set.)
   3450 			 */
   3451 			goto allmulti;
   3452 		}
   3453 
   3454 		hash = wm_mchash(sc, enm->enm_addrlo);
   3455 
   3456 		reg = (hash >> 5);
   3457 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3458 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3459 		    || (sc->sc_type == WM_T_PCH2)
   3460 		    || (sc->sc_type == WM_T_PCH_LPT)
   3461 		    || (sc->sc_type == WM_T_PCH_SPT))
   3462 			reg &= 0x1f;
   3463 		else
   3464 			reg &= 0x7f;
   3465 		bit = hash & 0x1f;
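		/* Upper hash bits select the MTA word, low 5 bits the bit. */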
   3466 
   3467 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3468 		hash |= 1U << bit;
   3469 
   3470 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3471 			/*
   3472 			 * 82544 Errata 9: Certain registers cannot be written
   3473 			 * with particular alignments in PCI-X bus operation
   3474 			 * (FCAH, MTA and VFTA).
   3475 			 */
   3476 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3477 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3478 			CSR_WRITE_FLUSH(sc);
   3479 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3480 			CSR_WRITE_FLUSH(sc);
   3481 		} else {
   3482 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3483 			CSR_WRITE_FLUSH(sc);
   3484 		}
   3485 
   3486 		ETHER_NEXT_MULTI(step, enm);
   3487 	}
   3488 	ETHER_UNLOCK(ec);
   3489 
   3490 	ifp->if_flags &= ~IFF_ALLMULTI;
   3491 	goto setit;
   3492 
   3493  allmulti:
   3494 	ifp->if_flags |= IFF_ALLMULTI;
   3495 	sc->sc_rctl |= RCTL_MPE;
   3496 
   3497  setit:
   3498 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3499 }
   3500 
   3501 /* Reset and init related */
   3502 
   3503 static void
   3504 wm_set_vlan(struct wm_softc *sc)
   3505 {
   3506 
   3507 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3508 		device_xname(sc->sc_dev), __func__));
   3509 
   3510 	/* Deal with VLAN enables. */
   3511 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3512 		sc->sc_ctrl |= CTRL_VME;
   3513 	else
   3514 		sc->sc_ctrl &= ~CTRL_VME;
   3515 
   3516 	/* Write the control registers. */
   3517 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3518 }
   3519 
   3520 static void
   3521 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3522 {
   3523 	uint32_t gcr;
   3524 	pcireg_t ctrl2;
   3525 
   3526 	gcr = CSR_READ(sc, WMREG_GCR);
   3527 
   3528 	/* Only take action if timeout value is defaulted to 0 */
   3529 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3530 		goto out;
   3531 
   3532 	if ((gcr & GCR_CAP_VER2) == 0) {
   3533 		gcr |= GCR_CMPL_TMOUT_10MS;
   3534 		goto out;
   3535 	}
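	/*
	 * Capability version 2 devices expose the completion timeout in
	 * the standard PCIe Device Control 2 register instead of GCR.
	 */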
   3536 
   3537 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3538 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3539 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3540 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3541 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3542 
   3543 out:
   3544 	/* Disable completion timeout resend */
   3545 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3546 
   3547 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3548 }
   3549 
   3550 void
   3551 wm_get_auto_rd_done(struct wm_softc *sc)
   3552 {
   3553 	int i;
   3554 
   3555 	/* wait for eeprom to reload */
   3556 	switch (sc->sc_type) {
   3557 	case WM_T_82571:
   3558 	case WM_T_82572:
   3559 	case WM_T_82573:
   3560 	case WM_T_82574:
   3561 	case WM_T_82583:
   3562 	case WM_T_82575:
   3563 	case WM_T_82576:
   3564 	case WM_T_82580:
   3565 	case WM_T_I350:
   3566 	case WM_T_I354:
   3567 	case WM_T_I210:
   3568 	case WM_T_I211:
   3569 	case WM_T_80003:
   3570 	case WM_T_ICH8:
   3571 	case WM_T_ICH9:
   3572 		for (i = 0; i < 10; i++) {
   3573 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3574 				break;
   3575 			delay(1000);
   3576 		}
   3577 		if (i == 10) {
   3578 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3579 			    "complete\n", device_xname(sc->sc_dev));
   3580 		}
   3581 		break;
   3582 	default:
   3583 		break;
   3584 	}
   3585 }
   3586 
   3587 void
   3588 wm_lan_init_done(struct wm_softc *sc)
   3589 {
   3590 	uint32_t reg = 0;
   3591 	int i;
   3592 
   3593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3594 		device_xname(sc->sc_dev), __func__));
   3595 
   3596 	/* Wait for eeprom to reload */
   3597 	switch (sc->sc_type) {
   3598 	case WM_T_ICH10:
   3599 	case WM_T_PCH:
   3600 	case WM_T_PCH2:
   3601 	case WM_T_PCH_LPT:
   3602 	case WM_T_PCH_SPT:
   3603 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3604 			reg = CSR_READ(sc, WMREG_STATUS);
   3605 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3606 				break;
   3607 			delay(100);
   3608 		}
   3609 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3610 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3611 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3612 		}
   3613 		break;
   3614 	default:
   3615 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3616 		    __func__);
   3617 		break;
   3618 	}
   3619 
   3620 	reg &= ~STATUS_LAN_INIT_DONE;
   3621 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3622 }
   3623 
   3624 void
   3625 wm_get_cfg_done(struct wm_softc *sc)
   3626 {
   3627 	int mask;
   3628 	uint32_t reg;
   3629 	int i;
   3630 
   3631 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3632 		device_xname(sc->sc_dev), __func__));
   3633 
   3634 	/* Wait for eeprom to reload */
   3635 	switch (sc->sc_type) {
   3636 	case WM_T_82542_2_0:
   3637 	case WM_T_82542_2_1:
   3638 		/* null */
   3639 		break;
   3640 	case WM_T_82543:
   3641 	case WM_T_82544:
   3642 	case WM_T_82540:
   3643 	case WM_T_82545:
   3644 	case WM_T_82545_3:
   3645 	case WM_T_82546:
   3646 	case WM_T_82546_3:
   3647 	case WM_T_82541:
   3648 	case WM_T_82541_2:
   3649 	case WM_T_82547:
   3650 	case WM_T_82547_2:
   3651 	case WM_T_82573:
   3652 	case WM_T_82574:
   3653 	case WM_T_82583:
   3654 		/* generic */
   3655 		delay(10*1000);
   3656 		break;
   3657 	case WM_T_80003:
   3658 	case WM_T_82571:
   3659 	case WM_T_82572:
   3660 	case WM_T_82575:
   3661 	case WM_T_82576:
   3662 	case WM_T_82580:
   3663 	case WM_T_I350:
   3664 	case WM_T_I354:
   3665 	case WM_T_I210:
   3666 	case WM_T_I211:
   3667 		if (sc->sc_type == WM_T_82571) {
   3668 			/* Only 82571 shares port 0 */
   3669 			mask = EEMNGCTL_CFGDONE_0;
   3670 		} else
   3671 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3672 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3673 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3674 				break;
   3675 			delay(1000);
   3676 		}
   3677 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3678 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3679 				device_xname(sc->sc_dev), __func__));
   3680 		}
   3681 		break;
   3682 	case WM_T_ICH8:
   3683 	case WM_T_ICH9:
   3684 	case WM_T_ICH10:
   3685 	case WM_T_PCH:
   3686 	case WM_T_PCH2:
   3687 	case WM_T_PCH_LPT:
   3688 	case WM_T_PCH_SPT:
   3689 		delay(10*1000);
   3690 		if (sc->sc_type >= WM_T_ICH10)
   3691 			wm_lan_init_done(sc);
   3692 		else
   3693 			wm_get_auto_rd_done(sc);
   3694 
   3695 		reg = CSR_READ(sc, WMREG_STATUS);
   3696 		if ((reg & STATUS_PHYRA) != 0)
   3697 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3698 		break;
   3699 	default:
   3700 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3701 		    __func__);
   3702 		break;
   3703 	}
   3704 }
   3705 
   3706 void
   3707 wm_phy_post_reset(struct wm_softc *sc)
   3708 {
   3709 	uint32_t reg;
   3710 
   3711 	/* This function is only for ICH8 and newer. */
   3712 	if (sc->sc_type < WM_T_ICH8)
   3713 		return;
   3714 
   3715 	if (wm_phy_resetisblocked(sc)) {
   3716 		/* XXX */
   3717 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3718 		return;
   3719 	}
   3720 
   3721 	/* Allow time for h/w to get to quiescent state after reset */
   3722 	delay(10*1000);
   3723 
   3724 	/* Perform any necessary post-reset workarounds */
   3725 	if (sc->sc_type == WM_T_PCH)
   3726 		wm_hv_phy_workaround_ich8lan(sc);
   3727 	if (sc->sc_type == WM_T_PCH2)
   3728 		wm_lv_phy_workaround_ich8lan(sc);
   3729 
   3730 	/* Clear the host wakeup bit after lcd reset */
   3731 	if (sc->sc_type >= WM_T_PCH) {
   3732 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3733 		    BM_PORT_GEN_CFG);
   3734 		reg &= ~BM_WUC_HOST_WU_BIT;
   3735 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3736 		    BM_PORT_GEN_CFG, reg);
   3737 	}
   3738 
   3739 	/* Configure the LCD with the extended configuration region in NVM */
   3740 	wm_init_lcd_from_nvm(sc);
   3741 
   3742 	/* Configure the LCD with the OEM bits in NVM */
   3743 }
   3744 
   3745 /* Only for PCH and newer */
   3746 static void
   3747 wm_write_smbus_addr(struct wm_softc *sc)
   3748 {
   3749 	uint32_t strap, freq;
   3750 	uint32_t phy_data;
   3751 
   3752 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3753 		device_xname(sc->sc_dev), __func__));
   3754 
   3755 	strap = CSR_READ(sc, WMREG_STRAP);
   3756 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3757 
   3758 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3759 
   3760 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3761 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3762 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3763 
   3764 	if (sc->sc_phytype == WMPHY_I217) {
   3765 		/* Restore SMBus frequency */
   3766 		if (freq--) {
   3767 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3768 			    | HV_SMB_ADDR_FREQ_HIGH);
   3769 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3770 			    HV_SMB_ADDR_FREQ_LOW);
   3771 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3772 			    HV_SMB_ADDR_FREQ_HIGH);
   3773 		} else {
   3774 			DPRINTF(WM_DEBUG_INIT,
   3775 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3776 				device_xname(sc->sc_dev), __func__));
   3777 		}
   3778 	}
   3779 
   3780 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3781 }
   3782 
   3783 void
   3784 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3785 {
   3786 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3787 	uint16_t phy_page = 0;
   3788 
   3789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3790 		device_xname(sc->sc_dev), __func__));
   3791 
   3792 	switch (sc->sc_type) {
   3793 	case WM_T_ICH8:
   3794 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3795 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3796 			return;
   3797 
   3798 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3799 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3800 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3801 			break;
   3802 		}
   3803 		/* FALLTHROUGH */
   3804 	case WM_T_PCH:
   3805 	case WM_T_PCH2:
   3806 	case WM_T_PCH_LPT:
   3807 	case WM_T_PCH_SPT:
   3808 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3809 		break;
   3810 	default:
   3811 		return;
   3812 	}
   3813 
   3814 	sc->phy.acquire(sc);
   3815 
   3816 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3817 	if ((reg & sw_cfg_mask) == 0)
   3818 		goto release;
   3819 
   3820 	/*
   3821 	 * Make sure HW does not configure LCD from PHY extended configuration
   3822 	 * before SW configuration
   3823 	 */
   3824 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3825 	if ((sc->sc_type < WM_T_PCH2)
   3826 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3827 		goto release;
   3828 
   3829 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3830 		device_xname(sc->sc_dev), __func__));
   3831 	/* word_addr is in DWORD */
   3832 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3833 
   3834 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3835 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3836 
   3837 	if (((sc->sc_type == WM_T_PCH)
   3838 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3839 	    || (sc->sc_type > WM_T_PCH)) {
   3840 		/*
   3841 		 * HW configures the SMBus address and LEDs when the OEM and
   3842 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3843 		 * are cleared, SW will configure them instead.
   3844 		 */
   3845 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3846 			device_xname(sc->sc_dev), __func__));
   3847 		wm_write_smbus_addr(sc);
   3848 
   3849 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3850 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3851 	}
   3852 
   3853 	/* Configure LCD from extended configuration region. */
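	/* Each entry is a (data, address) word pair in the NVM. */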
   3854 	for (i = 0; i < cnf_size; i++) {
   3855 		uint16_t reg_data, reg_addr;
   3856 
   3857 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3858 			goto release;
   3859 
   3860 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3861 			goto release;
   3862 
   3863 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3864 			phy_page = reg_data;
   3865 
   3866 		reg_addr &= IGPHY_MAXREGADDR;
   3867 		reg_addr |= phy_page;
   3868 
   3869 		sc->phy.release(sc); /* XXX */
   3870 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3871 		sc->phy.acquire(sc); /* XXX */
   3872 	}
   3873 
   3874 release:
   3875 	sc->phy.release(sc);
   3876 	return;
   3877 }
   3878 
   3879 
   3880 /* Init hardware bits */
   3881 void
   3882 wm_initialize_hardware_bits(struct wm_softc *sc)
   3883 {
   3884 	uint32_t tarc0, tarc1, reg;
   3885 
   3886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3887 		device_xname(sc->sc_dev), __func__));
   3888 
   3889 	/* For 82571 variant, 80003 and ICHs */
   3890 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3891 	    || (sc->sc_type >= WM_T_80003)) {
   3892 
   3893 		/* Transmit Descriptor Control 0 */
   3894 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3895 		reg |= TXDCTL_COUNT_DESC;
   3896 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3897 
   3898 		/* Transmit Descriptor Control 1 */
   3899 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3900 		reg |= TXDCTL_COUNT_DESC;
   3901 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3902 
   3903 		/* TARC0 */
   3904 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3905 		switch (sc->sc_type) {
   3906 		case WM_T_82571:
   3907 		case WM_T_82572:
   3908 		case WM_T_82573:
   3909 		case WM_T_82574:
   3910 		case WM_T_82583:
   3911 		case WM_T_80003:
   3912 			/* Clear bits 30..27 */
   3913 			tarc0 &= ~__BITS(30, 27);
   3914 			break;
   3915 		default:
   3916 			break;
   3917 		}
   3918 
   3919 		switch (sc->sc_type) {
   3920 		case WM_T_82571:
   3921 		case WM_T_82572:
   3922 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3923 
   3924 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3925 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3926 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3927 			/* 8257[12] Errata No.7 */
   3928 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
   3929 
   3930 			/* TARC1 bit 28 */
   3931 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3932 				tarc1 &= ~__BIT(28);
   3933 			else
   3934 				tarc1 |= __BIT(28);
   3935 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3936 
   3937 			/*
   3938 			 * 8257[12] Errata No.13
   3939 			 * Disable Dynamic Clock Gating.
   3940 			 */
   3941 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3942 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3943 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3944 			break;
   3945 		case WM_T_82573:
   3946 		case WM_T_82574:
   3947 		case WM_T_82583:
   3948 			if ((sc->sc_type == WM_T_82574)
   3949 			    || (sc->sc_type == WM_T_82583))
   3950 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3951 
   3952 			/* Extended Device Control */
   3953 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3954 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3955 			reg |= __BIT(22);	/* Set bit 22 */
   3956 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3957 
   3958 			/* Device Control */
   3959 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3960 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3961 
   3962 			/* PCIe Control Register */
   3963 			/*
   3964 			 * 82573 Errata (unknown).
   3965 			 *
   3966 			 * 82574 Errata 25 and 82583 Errata 12
   3967 			 * "Dropped Rx Packets":
   3968 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3969 			 */
   3970 			reg = CSR_READ(sc, WMREG_GCR);
   3971 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3972 			CSR_WRITE(sc, WMREG_GCR, reg);
   3973 
   3974 			if ((sc->sc_type == WM_T_82574)
   3975 			    || (sc->sc_type == WM_T_82583)) {
   3976 				/*
   3977 				 * Document says this bit must be set for
   3978 				 * proper operation.
   3979 				 */
   3980 				reg = CSR_READ(sc, WMREG_GCR);
   3981 				reg |= __BIT(22);
   3982 				CSR_WRITE(sc, WMREG_GCR, reg);
   3983 
   3984 				/*
   3985 				 * Apply workaround for hardware errata
   3986 				 * Apply a workaround for a hardware erratum
   3987 				 * documented in the errata docs: error-prone
   3988 				 * or unreliable PCIe completions can occur,
   3989 				 * particularly with ASPM enabled. Without
   3990 				 * the fix, the issue can cause Tx
   3991 				 * timeouts.
   3992 				reg = CSR_READ(sc, WMREG_GCR2);
   3993 				reg |= __BIT(0);
   3994 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3995 			}
   3996 			break;
   3997 		case WM_T_80003:
   3998 			/* TARC0 */
   3999 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4000 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   4001 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
   4002 
   4003 			/* TARC1 bit 28 */
   4004 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4005 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4006 				tarc1 &= ~__BIT(28);
   4007 			else
   4008 				tarc1 |= __BIT(28);
   4009 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4010 			break;
   4011 		case WM_T_ICH8:
   4012 		case WM_T_ICH9:
   4013 		case WM_T_ICH10:
   4014 		case WM_T_PCH:
   4015 		case WM_T_PCH2:
   4016 		case WM_T_PCH_LPT:
   4017 		case WM_T_PCH_SPT:
   4018 			/* TARC0 */
   4019 			if ((sc->sc_type == WM_T_ICH8)
   4020 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   4021 				/* Set TARC0 bits 29 and 28 */
   4022 				tarc0 |= __BITS(29, 28);
   4023 			}
   4024 			/* Set TARC0 bits 23,24,26,27 */
   4025 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4026 
   4027 			/* CTRL_EXT */
   4028 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4029 			reg |= __BIT(22);	/* Set bit 22 */
   4030 			/*
   4031 			 * Enable PHY low-power state when MAC is at D3
   4032 			 * w/o WoL
   4033 			 */
   4034 			if (sc->sc_type >= WM_T_PCH)
   4035 				reg |= CTRL_EXT_PHYPDEN;
   4036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4037 
   4038 			/* TARC1 */
   4039 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4040 			/* bit 28 */
   4041 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4042 				tarc1 &= ~__BIT(28);
   4043 			else
   4044 				tarc1 |= __BIT(28);
   4045 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4046 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4047 
   4048 			/* Device Status */
   4049 			if (sc->sc_type == WM_T_ICH8) {
   4050 				reg = CSR_READ(sc, WMREG_STATUS);
   4051 				reg &= ~__BIT(31);
   4052 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4053 
   4054 			}
   4055 
   4056 			/* IOSFPC */
   4057 			if (sc->sc_type == WM_T_PCH_SPT) {
   4058 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4059 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4060 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4061 			}
   4062 			/*
   4063 			 * Work-around descriptor data corruption issue during
   4064 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4065 			 * capability.
   4066 			 */
   4067 			reg = CSR_READ(sc, WMREG_RFCTL);
   4068 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4069 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4070 			break;
   4071 		default:
   4072 			break;
   4073 		}
   4074 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4075 
   4076 		switch (sc->sc_type) {
   4077 		/*
   4078 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4079 		 * Avoid RSS Hash Value bug.
   4080 		 */
   4081 		case WM_T_82571:
   4082 		case WM_T_82572:
   4083 		case WM_T_82573:
   4084 		case WM_T_80003:
   4085 		case WM_T_ICH8:
   4086 			reg = CSR_READ(sc, WMREG_RFCTL);
   4087 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4088 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4089 			break;
   4090 		case WM_T_82574:
   4091 			/* Use extended Rx descriptors. */
   4092 			reg = CSR_READ(sc, WMREG_RFCTL);
   4093 			reg |= WMREG_RFCTL_EXSTEN;
   4094 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4095 			break;
   4096 		default:
   4097 			break;
   4098 		}
   4099 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4100 		/*
   4101 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4102 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4103 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4104 		 * Correctly by the Device"
   4105 		 *
   4106 		 * I354(C2000) Errata AVR53:
   4107 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4108 		 * Hang"
   4109 		 */
   4110 		reg = CSR_READ(sc, WMREG_RFCTL);
   4111 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4112 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4113 	}
   4114 }
   4115 
   4116 static uint32_t
   4117 wm_rxpbs_adjust_82580(uint32_t val)
   4118 {
   4119 	uint32_t rv = 0;
   4120 
   4121 	if (val < __arraycount(wm_82580_rxpbs_table))
   4122 		rv = wm_82580_rxpbs_table[val];
   4123 
   4124 	return rv;
   4125 }
   4126 
   4127 /*
   4128  * wm_reset_phy:
   4129  *
   4130  *	generic PHY reset function.
   4131  *	Same as e1000_phy_hw_reset_generic()
   4132  */
   4133 static void
   4134 wm_reset_phy(struct wm_softc *sc)
   4135 {
   4136 	uint32_t reg;
   4137 
   4138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4139 		device_xname(sc->sc_dev), __func__));
   4140 	if (wm_phy_resetisblocked(sc))
   4141 		return;
   4142 
   4143 	sc->phy.acquire(sc);
   4144 
   4145 	reg = CSR_READ(sc, WMREG_CTRL);
   4146 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4147 	CSR_WRITE_FLUSH(sc);
   4148 
   4149 	delay(sc->phy.reset_delay_us);
   4150 
   4151 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4152 	CSR_WRITE_FLUSH(sc);
   4153 
   4154 	delay(150);
   4155 
   4156 	sc->phy.release(sc);
   4157 
   4158 	wm_get_cfg_done(sc);
   4159 	wm_phy_post_reset(sc);
   4160 }
   4161 
   4162 static void
   4163 wm_flush_desc_rings(struct wm_softc *sc)
   4164 {
   4165 	pcireg_t preg;
   4166 	uint32_t reg;
   4167 	struct wm_txqueue *txq;
   4168 	wiseman_txdesc_t *txd;
   4169 	int nexttx;
   4170 	uint32_t rctl;
   4171 
   4172 	/* First, disable MULR fix in FEXTNVM11 */
   4173 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4174 	reg |= FEXTNVM11_DIS_MULRFIX;
   4175 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4176 
   4177 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4178 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4179 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4180 		return;
   4181 
   4182 	/* TX */
   4183 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4184 	    device_xname(sc->sc_dev), preg, reg);
   4185 	reg = CSR_READ(sc, WMREG_TCTL);
   4186 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4187 
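	/*
	 * Post a single dummy 512-byte descriptor and bump the tail
	 * pointer so the hardware can complete the requested TX flush.
	 */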
   4188 	txq = &sc->sc_queue[0].wmq_txq;
   4189 	nexttx = txq->txq_next;
   4190 	txd = &txq->txq_descs[nexttx];
   4191 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4192 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4193 	txd->wtx_fields.wtxu_status = 0;
   4194 	txd->wtx_fields.wtxu_options = 0;
   4195 	txd->wtx_fields.wtxu_vlan = 0;
   4196 
   4197 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4198 	    BUS_SPACE_BARRIER_WRITE);
   4199 
   4200 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4201 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4202 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4203 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4204 	delay(250);
   4205 
   4206 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4207 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4208 		return;
   4209 
   4210 	/* RX */
   4211 	printf("%s: Need RX flush (reg = %08x)\n",
   4212 	    device_xname(sc->sc_dev), preg);
   4213 	rctl = CSR_READ(sc, WMREG_RCTL);
   4214 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4215 	CSR_WRITE_FLUSH(sc);
   4216 	delay(150);
   4217 
   4218 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4219 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4220 	reg &= 0xffffc000;
   4221 	/*
   4222 	 * update thresholds: prefetch threshold to 31, host threshold
   4223 	 * to 1 and make sure the granularity is "descriptors" and not
   4224 	 * "cache lines"
   4225 	 */
   4226 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4227 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4228 
   4229 	/*
   4230 	 * momentarily enable the RX ring for the changes to take
   4231 	 * effect
   4232 	 */
   4233 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4234 	CSR_WRITE_FLUSH(sc);
   4235 	delay(150);
   4236 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4237 }
   4238 
   4239 /*
   4240  * wm_reset:
   4241  *
   4242  *	Reset the i82542 chip.
   4243  */
   4244 static void
   4245 wm_reset(struct wm_softc *sc)
   4246 {
   4247 	int phy_reset = 0;
   4248 	int i, error = 0;
   4249 	uint32_t reg;
   4250 
   4251 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4252 		device_xname(sc->sc_dev), __func__));
   4253 	KASSERT(sc->sc_type != 0);
   4254 
   4255 	/*
   4256 	 * Allocate on-chip memory according to the MTU size.
   4257 	 * The Packet Buffer Allocation register must be written
   4258 	 * before the chip is reset.
   4259 	 */
   4260 	switch (sc->sc_type) {
   4261 	case WM_T_82547:
   4262 	case WM_T_82547_2:
   4263 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4264 		    PBA_22K : PBA_30K;
   4265 		for (i = 0; i < sc->sc_nqueues; i++) {
   4266 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4267 			txq->txq_fifo_head = 0;
   4268 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4269 			txq->txq_fifo_size =
   4270 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4271 			txq->txq_fifo_stall = 0;
   4272 		}
   4273 		break;
   4274 	case WM_T_82571:
   4275 	case WM_T_82572:
   4276 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4277 	case WM_T_80003:
   4278 		sc->sc_pba = PBA_32K;
   4279 		break;
   4280 	case WM_T_82573:
   4281 		sc->sc_pba = PBA_12K;
   4282 		break;
   4283 	case WM_T_82574:
   4284 	case WM_T_82583:
   4285 		sc->sc_pba = PBA_20K;
   4286 		break;
   4287 	case WM_T_82576:
   4288 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4289 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4290 		break;
   4291 	case WM_T_82580:
   4292 	case WM_T_I350:
   4293 	case WM_T_I354:
   4294 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4295 		break;
   4296 	case WM_T_I210:
   4297 	case WM_T_I211:
   4298 		sc->sc_pba = PBA_34K;
   4299 		break;
   4300 	case WM_T_ICH8:
   4301 		/* Workaround for a bit corruption issue in FIFO memory */
   4302 		sc->sc_pba = PBA_8K;
   4303 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4304 		break;
   4305 	case WM_T_ICH9:
   4306 	case WM_T_ICH10:
   4307 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4308 		    PBA_14K : PBA_10K;
   4309 		break;
   4310 	case WM_T_PCH:
   4311 	case WM_T_PCH2:
   4312 	case WM_T_PCH_LPT:
   4313 	case WM_T_PCH_SPT:
   4314 		sc->sc_pba = PBA_26K;
   4315 		break;
   4316 	default:
   4317 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4318 		    PBA_40K : PBA_48K;
   4319 		break;
   4320 	}
   4321 	/*
   4322 	 * Only old or non-multiqueue devices have the PBA register
   4323 	 * XXX Need special handling for 82575.
   4324 	 */
   4325 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4326 	    || (sc->sc_type == WM_T_82575))
   4327 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4328 
   4329 	/* Prevent the PCI-E bus from sticking */
   4330 	if (sc->sc_flags & WM_F_PCIE) {
   4331 		int timeout = 800;
   4332 
   4333 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4334 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4335 
   4336 		while (timeout--) {
   4337 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4338 			    == 0)
   4339 				break;
   4340 			delay(100);
   4341 		}
   4342 		if (timeout == 0)
   4343 			device_printf(sc->sc_dev,
   4344 			    "failed to disable busmastering\n");
   4345 	}
   4346 
   4347 	/* Set the completion timeout for interface */
   4348 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4349 	    || (sc->sc_type == WM_T_82580)
   4350 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4351 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4352 		wm_set_pcie_completion_timeout(sc);
   4353 
   4354 	/* Clear interrupt */
   4355 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4356 	if (wm_is_using_msix(sc)) {
   4357 		if (sc->sc_type != WM_T_82574) {
   4358 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4359 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4360 		} else {
   4361 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4362 		}
   4363 	}
   4364 
   4365 	/* Stop the transmit and receive processes. */
   4366 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4367 	sc->sc_rctl &= ~RCTL_EN;
   4368 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4369 	CSR_WRITE_FLUSH(sc);
   4370 
   4371 	/* XXX set_tbi_sbp_82543() */
   4372 
   4373 	delay(10*1000);
   4374 
   4375 	/* Must acquire the MDIO ownership before MAC reset */
   4376 	switch (sc->sc_type) {
   4377 	case WM_T_82573:
   4378 	case WM_T_82574:
   4379 	case WM_T_82583:
   4380 		error = wm_get_hw_semaphore_82573(sc);
   4381 		break;
   4382 	default:
   4383 		break;
   4384 	}
   4385 
   4386 	/*
   4387 	 * 82541 Errata 29? & 82547 Errata 28?
   4388 	 * See also the description about PHY_RST bit in CTRL register
   4389 	 * in 8254x_GBe_SDM.pdf.
   4390 	 */
   4391 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4392 		CSR_WRITE(sc, WMREG_CTRL,
   4393 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4394 		CSR_WRITE_FLUSH(sc);
   4395 		delay(5000);
   4396 	}
   4397 
   4398 	switch (sc->sc_type) {
   4399 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4400 	case WM_T_82541:
   4401 	case WM_T_82541_2:
   4402 	case WM_T_82547:
   4403 	case WM_T_82547_2:
   4404 		/*
   4405 		 * On some chipsets, a reset through a memory-mapped write
   4406 		 * cycle can cause the chip to reset before completing the
   4407 		 * write cycle.  This causes a major headache that can be
   4408 		 * avoided by issuing the reset via indirect register writes
   4409 		 * through I/O space.
   4410 		 *
   4411 		 * So, if we successfully mapped the I/O BAR at attach time,
   4412 		 * use that.  Otherwise, try our luck with a memory-mapped
   4413 		 * reset.
   4414 		 */
   4415 		if (sc->sc_flags & WM_F_IOH_VALID)
   4416 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4417 		else
   4418 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4419 		break;
   4420 	case WM_T_82545_3:
   4421 	case WM_T_82546_3:
   4422 		/* Use the shadow control register on these chips. */
   4423 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4424 		break;
   4425 	case WM_T_80003:
   4426 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4427 		sc->phy.acquire(sc);
   4428 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4429 		sc->phy.release(sc);
   4430 		break;
   4431 	case WM_T_ICH8:
   4432 	case WM_T_ICH9:
   4433 	case WM_T_ICH10:
   4434 	case WM_T_PCH:
   4435 	case WM_T_PCH2:
   4436 	case WM_T_PCH_LPT:
   4437 	case WM_T_PCH_SPT:
   4438 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4439 		if (wm_phy_resetisblocked(sc) == false) {
   4440 			/*
   4441 			 * Gate automatic PHY configuration by hardware on
   4442 			 * non-managed 82579
   4443 			 */
   4444 			if ((sc->sc_type == WM_T_PCH2)
   4445 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4446 				== 0))
   4447 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4448 
   4449 			reg |= CTRL_PHY_RESET;
   4450 			phy_reset = 1;
   4451 		} else
   4452 			printf("XXX reset is blocked!!!\n");
   4453 		sc->phy.acquire(sc);
   4454 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4455 		/* Don't insert a completion barrier when reset */
   4456 		delay(20*1000);
   4457 		mutex_exit(sc->sc_ich_phymtx);
   4458 		break;
   4459 	case WM_T_82580:
   4460 	case WM_T_I350:
   4461 	case WM_T_I354:
   4462 	case WM_T_I210:
   4463 	case WM_T_I211:
   4464 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4465 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4466 			CSR_WRITE_FLUSH(sc);
   4467 		delay(5000);
   4468 		break;
   4469 	case WM_T_82542_2_0:
   4470 	case WM_T_82542_2_1:
   4471 	case WM_T_82543:
   4472 	case WM_T_82540:
   4473 	case WM_T_82545:
   4474 	case WM_T_82546:
   4475 	case WM_T_82571:
   4476 	case WM_T_82572:
   4477 	case WM_T_82573:
   4478 	case WM_T_82574:
   4479 	case WM_T_82575:
   4480 	case WM_T_82576:
   4481 	case WM_T_82583:
   4482 	default:
   4483 		/* Everything else can safely use the documented method. */
   4484 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4485 		break;
   4486 	}
   4487 
   4488 	/* Must release the MDIO ownership after MAC reset */
   4489 	switch (sc->sc_type) {
   4490 	case WM_T_82573:
   4491 	case WM_T_82574:
   4492 	case WM_T_82583:
   4493 		if (error == 0)
   4494 			wm_put_hw_semaphore_82573(sc);
   4495 		break;
   4496 	default:
   4497 		break;
   4498 	}
   4499 
   4500 	if (phy_reset != 0)
   4501 		wm_get_cfg_done(sc);
   4502 
   4503 	/* reload EEPROM */
   4504 	switch (sc->sc_type) {
   4505 	case WM_T_82542_2_0:
   4506 	case WM_T_82542_2_1:
   4507 	case WM_T_82543:
   4508 	case WM_T_82544:
   4509 		delay(10);
   4510 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4511 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4512 		CSR_WRITE_FLUSH(sc);
   4513 		delay(2000);
   4514 		break;
   4515 	case WM_T_82540:
   4516 	case WM_T_82545:
   4517 	case WM_T_82545_3:
   4518 	case WM_T_82546:
   4519 	case WM_T_82546_3:
   4520 		delay(5*1000);
   4521 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4522 		break;
   4523 	case WM_T_82541:
   4524 	case WM_T_82541_2:
   4525 	case WM_T_82547:
   4526 	case WM_T_82547_2:
   4527 		delay(20000);
   4528 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4529 		break;
   4530 	case WM_T_82571:
   4531 	case WM_T_82572:
   4532 	case WM_T_82573:
   4533 	case WM_T_82574:
   4534 	case WM_T_82583:
   4535 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4536 			delay(10);
   4537 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4538 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4539 			CSR_WRITE_FLUSH(sc);
   4540 		}
   4541 		/* check EECD_EE_AUTORD */
   4542 		wm_get_auto_rd_done(sc);
   4543 		/*
   4544 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4545 		 * is set.
   4546 		 */
   4547 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4548 		    || (sc->sc_type == WM_T_82583))
   4549 			delay(25*1000);
   4550 		break;
   4551 	case WM_T_82575:
   4552 	case WM_T_82576:
   4553 	case WM_T_82580:
   4554 	case WM_T_I350:
   4555 	case WM_T_I354:
   4556 	case WM_T_I210:
   4557 	case WM_T_I211:
   4558 	case WM_T_80003:
   4559 		/* check EECD_EE_AUTORD */
   4560 		wm_get_auto_rd_done(sc);
   4561 		break;
   4562 	case WM_T_ICH8:
   4563 	case WM_T_ICH9:
   4564 	case WM_T_ICH10:
   4565 	case WM_T_PCH:
   4566 	case WM_T_PCH2:
   4567 	case WM_T_PCH_LPT:
   4568 	case WM_T_PCH_SPT:
   4569 		break;
   4570 	default:
   4571 		panic("%s: unknown type\n", __func__);
   4572 	}
   4573 
   4574 	/* Check whether EEPROM is present or not */
   4575 	switch (sc->sc_type) {
   4576 	case WM_T_82575:
   4577 	case WM_T_82576:
   4578 	case WM_T_82580:
   4579 	case WM_T_I350:
   4580 	case WM_T_I354:
   4581 	case WM_T_ICH8:
   4582 	case WM_T_ICH9:
   4583 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4584 			/* Not found */
   4585 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4586 			if (sc->sc_type == WM_T_82575)
   4587 				wm_reset_init_script_82575(sc);
   4588 		}
   4589 		break;
   4590 	default:
   4591 		break;
   4592 	}
   4593 
   4594 	if (phy_reset != 0)
   4595 		wm_phy_post_reset(sc);
   4596 
   4597 	if ((sc->sc_type == WM_T_82580)
   4598 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4599 		/* clear global device reset status bit */
   4600 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4601 	}
   4602 
   4603 	/* Clear any pending interrupt events. */
   4604 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4605 	reg = CSR_READ(sc, WMREG_ICR);
   4606 	if (wm_is_using_msix(sc)) {
   4607 		if (sc->sc_type != WM_T_82574) {
   4608 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4609 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4610 		} else
   4611 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4612 	}
   4613 
   4614 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4615 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4616 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4617 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4618 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4619 		reg |= KABGTXD_BGSQLBIAS;
   4620 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4621 	}
   4622 
   4623 	/* reload sc_ctrl */
   4624 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4625 
   4626 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4627 		wm_set_eee_i350(sc);
   4628 
   4629 	/*
   4630 	 * For PCH, this write will make sure that any noise will be detected
   4631 	 * as a CRC error and be dropped rather than show up as a bad packet
   4632 	 * to the DMA engine
   4633 	 */
   4634 	if (sc->sc_type == WM_T_PCH)
   4635 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4636 
   4637 	if (sc->sc_type >= WM_T_82544)
   4638 		CSR_WRITE(sc, WMREG_WUC, 0);
   4639 
   4640 	wm_reset_mdicnfg_82580(sc);
   4641 
   4642 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4643 		wm_pll_workaround_i210(sc);
   4644 }
   4645 
   4646 /*
   4647  * wm_add_rxbuf:
   4648  *
    4649  *	Add a receive buffer to the indicated descriptor.
   4650  */
   4651 static int
   4652 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4653 {
   4654 	struct wm_softc *sc = rxq->rxq_sc;
   4655 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4656 	struct mbuf *m;
   4657 	int error;
   4658 
   4659 	KASSERT(mutex_owned(rxq->rxq_lock));
   4660 
   4661 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4662 	if (m == NULL)
   4663 		return ENOBUFS;
   4664 
   4665 	MCLGET(m, M_DONTWAIT);
   4666 	if ((m->m_flags & M_EXT) == 0) {
   4667 		m_freem(m);
   4668 		return ENOBUFS;
   4669 	}
   4670 
   4671 	if (rxs->rxs_mbuf != NULL)
   4672 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4673 
   4674 	rxs->rxs_mbuf = m;
   4675 
   4676 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4677 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4678 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4679 	if (error) {
   4680 		/* XXX XXX XXX */
   4681 		aprint_error_dev(sc->sc_dev,
   4682 		    "unable to load rx DMA map %d, error = %d\n",
   4683 		    idx, error);
   4684 		panic("wm_add_rxbuf");
   4685 	}
   4686 
   4687 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4688 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4689 
   4690 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4691 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4692 			wm_init_rxdesc(rxq, idx);
   4693 	} else
   4694 		wm_init_rxdesc(rxq, idx);
   4695 
   4696 	return 0;
   4697 }
   4698 
   4699 /*
   4700  * wm_rxdrain:
   4701  *
   4702  *	Drain the receive queue.
   4703  */
   4704 static void
   4705 wm_rxdrain(struct wm_rxqueue *rxq)
   4706 {
   4707 	struct wm_softc *sc = rxq->rxq_sc;
   4708 	struct wm_rxsoft *rxs;
   4709 	int i;
   4710 
   4711 	KASSERT(mutex_owned(rxq->rxq_lock));
   4712 
   4713 	for (i = 0; i < WM_NRXDESC; i++) {
   4714 		rxs = &rxq->rxq_soft[i];
   4715 		if (rxs->rxs_mbuf != NULL) {
   4716 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4717 			m_freem(rxs->rxs_mbuf);
   4718 			rxs->rxs_mbuf = NULL;
   4719 		}
   4720 	}
   4721 }
   4722 
   4723 
   4724 /*
   4725  * XXX copy from FreeBSD's sys/net/rss_config.c
   4726  */
   4727 /*
   4728  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4729  * effectiveness may be limited by algorithm choice and available entropy
   4730  * during the boot.
   4731  *
   4732  * XXXRW: And that we don't randomize it yet!
   4733  *
   4734  * This is the default Microsoft RSS specification key which is also
   4735  * the Chelsio T5 firmware default key.
   4736  */
   4737 #define RSS_KEYSIZE 40
   4738 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4739 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4740 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4741 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4742 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4743 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4744 };
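         /*
          * A note derived from the code below: wm_init_rss() consumes this
          * 40-byte key as RSSRK_NUM_REGS (40 / 4 = 10) 32-bit words; the
          * CTASSERT there keeps the two sizes in sync.
          */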
   4745 
   4746 /*
    4747  * Caller must pass an array of sizeof(wm_rss_key) bytes.
    4748  *
    4749  * XXX
    4750  * As if_ixgbe may also use this function, it should not be
    4751  * an if_wm specific function.
   4752  */
   4753 static void
   4754 wm_rss_getkey(uint8_t *key)
   4755 {
   4756 
   4757 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4758 }
   4759 
   4760 /*
   4761  * Setup registers for RSS.
   4762  *
    4763  * XXX VMDq is not yet supported
   4764  */
   4765 static void
   4766 wm_init_rss(struct wm_softc *sc)
   4767 {
   4768 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4769 	int i;
   4770 
   4771 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4772 
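         	/*
         	 * An illustrative sketch of the mapping programmed below: each
         	 * redirection table entry i selects queue (i % sc_nqueues), so
         	 * with four queues the entries cycle 0, 1, 2, 3, 0, 1, ...
         	 */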
   4773 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4774 		int qid, reta_ent;
   4775 
   4776 		qid  = i % sc->sc_nqueues;
    4777 		switch (sc->sc_type) {
   4778 		case WM_T_82574:
   4779 			reta_ent = __SHIFTIN(qid,
   4780 			    RETA_ENT_QINDEX_MASK_82574);
   4781 			break;
   4782 		case WM_T_82575:
   4783 			reta_ent = __SHIFTIN(qid,
   4784 			    RETA_ENT_QINDEX1_MASK_82575);
   4785 			break;
   4786 		default:
   4787 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4788 			break;
   4789 		}
   4790 
   4791 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4792 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4793 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4794 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4795 	}
   4796 
   4797 	wm_rss_getkey((uint8_t *)rss_key);
   4798 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4799 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4800 
   4801 	if (sc->sc_type == WM_T_82574)
   4802 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4803 	else
   4804 		mrqc = MRQC_ENABLE_RSS_MQ;
   4805 
   4806 	/*
   4807 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4808 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4809 	 */
   4810 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4811 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4812 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4813 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4814 
   4815 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4816 }
   4817 
   4818 /*
    4819  * Adjust the TX and RX queue numbers which the system actually uses.
    4820  *
    4821  * The numbers are affected by the parameters below.
    4822  *     - The number of hardware queues
   4823  *     - The number of MSI-X vectors (= "nvectors" argument)
   4824  *     - ncpu
   4825  */
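         /*
          * A worked example with illustrative numbers: an 82576 has 16
          * hardware queues; with 5 MSI-X vectors (one of which is reserved
          * for the link interrupt) on a 4-CPU system, the result is
          * min(16, 5 - 1, ncpu = 4) = 4 queues.
          */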
   4826 static void
   4827 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4828 {
   4829 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4830 
   4831 	if (nvectors < 2) {
   4832 		sc->sc_nqueues = 1;
   4833 		return;
   4834 	}
   4835 
    4836 	switch (sc->sc_type) {
   4837 	case WM_T_82572:
   4838 		hw_ntxqueues = 2;
   4839 		hw_nrxqueues = 2;
   4840 		break;
   4841 	case WM_T_82574:
   4842 		hw_ntxqueues = 2;
   4843 		hw_nrxqueues = 2;
   4844 		break;
   4845 	case WM_T_82575:
   4846 		hw_ntxqueues = 4;
   4847 		hw_nrxqueues = 4;
   4848 		break;
   4849 	case WM_T_82576:
   4850 		hw_ntxqueues = 16;
   4851 		hw_nrxqueues = 16;
   4852 		break;
   4853 	case WM_T_82580:
   4854 	case WM_T_I350:
   4855 	case WM_T_I354:
   4856 		hw_ntxqueues = 8;
   4857 		hw_nrxqueues = 8;
   4858 		break;
   4859 	case WM_T_I210:
   4860 		hw_ntxqueues = 4;
   4861 		hw_nrxqueues = 4;
   4862 		break;
   4863 	case WM_T_I211:
   4864 		hw_ntxqueues = 2;
   4865 		hw_nrxqueues = 2;
   4866 		break;
   4867 		/*
    4868 		 * As the Ethernet controllers below do not support MSI-X,
    4869 		 * this driver does not use multiqueue on them.
   4870 		 *     - WM_T_80003
   4871 		 *     - WM_T_ICH8
   4872 		 *     - WM_T_ICH9
   4873 		 *     - WM_T_ICH10
   4874 		 *     - WM_T_PCH
   4875 		 *     - WM_T_PCH2
   4876 		 *     - WM_T_PCH_LPT
   4877 		 */
   4878 	default:
   4879 		hw_ntxqueues = 1;
   4880 		hw_nrxqueues = 1;
   4881 		break;
   4882 	}
   4883 
   4884 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4885 
   4886 	/*
    4887 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4888 	 * limit the number of queues actually used.
   4889 	 */
   4890 	if (nvectors < hw_nqueues + 1) {
   4891 		sc->sc_nqueues = nvectors - 1;
   4892 	} else {
   4893 		sc->sc_nqueues = hw_nqueues;
   4894 	}
   4895 
   4896 	/*
    4897 	 * As more queues than CPUs cannot improve scaling, we limit
    4898 	 * the number of queues actually used.
   4899 	 */
   4900 	if (ncpu < sc->sc_nqueues)
   4901 		sc->sc_nqueues = ncpu;
   4902 }
   4903 
   4904 static inline bool
   4905 wm_is_using_msix(struct wm_softc *sc)
   4906 {
   4907 
   4908 	return (sc->sc_nintrs > 1);
   4909 }
   4910 
   4911 static inline bool
   4912 wm_is_using_multiqueue(struct wm_softc *sc)
   4913 {
   4914 
   4915 	return (sc->sc_nqueues > 1);
   4916 }
   4917 
   4918 static int
   4919 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4920 {
   4921 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4922 	wmq->wmq_id = qidx;
   4923 	wmq->wmq_intr_idx = intr_idx;
   4924 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4925 #ifdef WM_MPSAFE
   4926 	    | SOFTINT_MPSAFE
   4927 #endif
   4928 	    , wm_handle_queue, wmq);
   4929 	if (wmq->wmq_si != NULL)
   4930 		return 0;
   4931 
   4932 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4933 	    wmq->wmq_id);
   4934 
   4935 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4936 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4937 	return ENOMEM;
   4938 }
   4939 
   4940 /*
   4941  * Both single interrupt MSI and INTx can use this function.
   4942  */
   4943 static int
   4944 wm_setup_legacy(struct wm_softc *sc)
   4945 {
   4946 	pci_chipset_tag_t pc = sc->sc_pc;
   4947 	const char *intrstr = NULL;
   4948 	char intrbuf[PCI_INTRSTR_LEN];
   4949 	int error;
   4950 
   4951 	error = wm_alloc_txrx_queues(sc);
   4952 	if (error) {
   4953 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4954 		    error);
   4955 		return ENOMEM;
   4956 	}
   4957 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4958 	    sizeof(intrbuf));
   4959 #ifdef WM_MPSAFE
   4960 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4961 #endif
   4962 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4963 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4964 	if (sc->sc_ihs[0] == NULL) {
   4965 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4966 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4967 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4968 		return ENOMEM;
   4969 	}
   4970 
   4971 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4972 	sc->sc_nintrs = 1;
   4973 
   4974 	return wm_softint_establish(sc, 0, 0);
   4975 }
   4976 
   4977 static int
   4978 wm_setup_msix(struct wm_softc *sc)
   4979 {
   4980 	void *vih;
   4981 	kcpuset_t *affinity;
   4982 	int qidx, error, intr_idx, txrx_established;
   4983 	pci_chipset_tag_t pc = sc->sc_pc;
   4984 	const char *intrstr = NULL;
   4985 	char intrbuf[PCI_INTRSTR_LEN];
   4986 	char intr_xname[INTRDEVNAMEBUF];
   4987 
   4988 	if (sc->sc_nqueues < ncpu) {
   4989 		/*
   4990 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4991 		 * interrupts starts from CPU#1.
   4992 		 */
   4993 		sc->sc_affinity_offset = 1;
   4994 	} else {
   4995 		/*
    4996 		 * In this case, this device uses all CPUs, so we unify the
    4997 		 * affinity cpu_index with the MSI-X vector number for readability.
   4998 		 */
   4999 		sc->sc_affinity_offset = 0;
   5000 	}
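         	/*
         	 * Vector layout used below: vectors 0 .. sc_nqueues - 1 serve
         	 * the per-queue TX/RX handlers and the last vector serves the
         	 * link interrupt, so sc_nintrs ends up as sc_nqueues + 1.
         	 */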
   5001 
   5002 	error = wm_alloc_txrx_queues(sc);
   5003 	if (error) {
   5004 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5005 		    error);
   5006 		return ENOMEM;
   5007 	}
   5008 
   5009 	kcpuset_create(&affinity, false);
   5010 	intr_idx = 0;
   5011 
   5012 	/*
   5013 	 * TX and RX
   5014 	 */
   5015 	txrx_established = 0;
   5016 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5017 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5018 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5019 
   5020 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5021 		    sizeof(intrbuf));
   5022 #ifdef WM_MPSAFE
   5023 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5024 		    PCI_INTR_MPSAFE, true);
   5025 #endif
   5026 		memset(intr_xname, 0, sizeof(intr_xname));
   5027 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5028 		    device_xname(sc->sc_dev), qidx);
   5029 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5030 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5031 		if (vih == NULL) {
   5032 			aprint_error_dev(sc->sc_dev,
   5033 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5034 			    intrstr ? " at " : "",
   5035 			    intrstr ? intrstr : "");
   5036 
   5037 			goto fail;
   5038 		}
   5039 		kcpuset_zero(affinity);
   5040 		/* Round-robin affinity */
   5041 		kcpuset_set(affinity, affinity_to);
   5042 		error = interrupt_distribute(vih, affinity, NULL);
   5043 		if (error == 0) {
   5044 			aprint_normal_dev(sc->sc_dev,
   5045 			    "for TX and RX interrupting at %s affinity to %u\n",
   5046 			    intrstr, affinity_to);
   5047 		} else {
   5048 			aprint_normal_dev(sc->sc_dev,
   5049 			    "for TX and RX interrupting at %s\n", intrstr);
   5050 		}
   5051 		sc->sc_ihs[intr_idx] = vih;
   5052 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5053 			goto fail;
   5054 		txrx_established++;
   5055 		intr_idx++;
   5056 	}
   5057 
   5058 	/*
   5059 	 * LINK
   5060 	 */
   5061 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5062 	    sizeof(intrbuf));
   5063 #ifdef WM_MPSAFE
   5064 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5065 #endif
   5066 	memset(intr_xname, 0, sizeof(intr_xname));
   5067 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5068 	    device_xname(sc->sc_dev));
   5069 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5070 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5071 	if (vih == NULL) {
   5072 		aprint_error_dev(sc->sc_dev,
   5073 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5074 		    intrstr ? " at " : "",
   5075 		    intrstr ? intrstr : "");
   5076 
   5077 		goto fail;
   5078 	}
    5079 	/* keep the default affinity for the LINK interrupt */
   5080 	aprint_normal_dev(sc->sc_dev,
   5081 	    "for LINK interrupting at %s\n", intrstr);
   5082 	sc->sc_ihs[intr_idx] = vih;
   5083 	sc->sc_link_intr_idx = intr_idx;
   5084 
   5085 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5086 	kcpuset_destroy(affinity);
   5087 	return 0;
   5088 
   5089  fail:
   5090 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5091 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5092 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5093 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5094 	}
   5095 
   5096 	kcpuset_destroy(affinity);
   5097 	return ENOMEM;
   5098 }
   5099 
   5100 static void
   5101 wm_turnon(struct wm_softc *sc)
   5102 {
   5103 	int i;
   5104 
   5105 	KASSERT(WM_CORE_LOCKED(sc));
   5106 
   5107 	/*
   5108 	 * must unset stopping flags in ascending order.
   5109 	 */
    5110 	for (i = 0; i < sc->sc_nqueues; i++) {
   5111 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5112 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5113 
   5114 		mutex_enter(txq->txq_lock);
   5115 		txq->txq_stopping = false;
   5116 		mutex_exit(txq->txq_lock);
   5117 
   5118 		mutex_enter(rxq->rxq_lock);
   5119 		rxq->rxq_stopping = false;
   5120 		mutex_exit(rxq->rxq_lock);
   5121 	}
   5122 
   5123 	sc->sc_core_stopping = false;
   5124 }
   5125 
   5126 static void
   5127 wm_turnoff(struct wm_softc *sc)
   5128 {
   5129 	int i;
   5130 
   5131 	KASSERT(WM_CORE_LOCKED(sc));
   5132 
   5133 	sc->sc_core_stopping = true;
   5134 
   5135 	/*
   5136 	 * must set stopping flags in ascending order.
   5137 	 */
    5138 	for (i = 0; i < sc->sc_nqueues; i++) {
   5139 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5140 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5141 
   5142 		mutex_enter(rxq->rxq_lock);
   5143 		rxq->rxq_stopping = true;
   5144 		mutex_exit(rxq->rxq_lock);
   5145 
   5146 		mutex_enter(txq->txq_lock);
   5147 		txq->txq_stopping = true;
   5148 		mutex_exit(txq->txq_lock);
   5149 	}
   5150 }
   5151 
   5152 /*
    5153  * Write the interrupt interval value to the ITR or EITR register.
   5154  */
   5155 static void
   5156 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5157 {
   5158 
   5159 	if (!wmq->wmq_set_itr)
   5160 		return;
   5161 
   5162 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5163 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5164 
   5165 		/*
    5166 		 * The 82575 doesn't have the CNT_INGR field,
    5167 		 * so overwrite the counter field in software.
   5168 		 */
   5169 		if (sc->sc_type == WM_T_82575)
   5170 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5171 		else
   5172 			eitr |= EITR_CNT_INGR;
   5173 
   5174 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5175 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5176 		/*
    5177 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5178 		 * the multiqueue function with MSI-X.
   5179 		 */
   5180 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5181 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5182 	} else {
   5183 		KASSERT(wmq->wmq_id == 0);
   5184 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5185 	}
   5186 
   5187 	wmq->wmq_set_itr = false;
   5188 }
   5189 
   5190 /*
   5191  * TODO
    5192  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5193  * but it does not fit wm(4), so AIM stays disabled until we find an
    5194  * appropriate way to calculate the ITR value.
    5195  */
    5196 /*
    5197  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5198  * write; this function itself does not touch the ITR/EITR registers.
   5199  */
   5200 static void
   5201 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5202 {
   5203 #ifdef NOTYET
   5204 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5205 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5206 	uint32_t avg_size = 0;
   5207 	uint32_t new_itr;
   5208 
   5209 	if (rxq->rxq_packets)
   5210 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5211 	if (txq->txq_packets)
   5212 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5213 
   5214 	if (avg_size == 0) {
   5215 		new_itr = 450; /* restore default value */
   5216 		goto out;
   5217 	}
   5218 
   5219 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5220 	avg_size += 24;
   5221 
   5222 	/* Don't starve jumbo frames */
   5223 	avg_size = min(avg_size, 3000);
   5224 
   5225 	/* Give a little boost to mid-size frames */
   5226 	if ((avg_size > 300) && (avg_size < 1200))
   5227 		new_itr = avg_size / 3;
   5228 	else
   5229 		new_itr = avg_size / 2;
   5230 
   5231 out:
   5232 	/*
    5233 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5234 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5235 	 */
   5236 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5237 		new_itr *= 4;
   5238 
   5239 	if (new_itr != wmq->wmq_itr) {
   5240 		wmq->wmq_itr = new_itr;
   5241 		wmq->wmq_set_itr = true;
   5242 	} else
   5243 		wmq->wmq_set_itr = false;
   5244 
   5245 	rxq->rxq_packets = 0;
   5246 	rxq->rxq_bytes = 0;
   5247 	txq->txq_packets = 0;
   5248 	txq->txq_bytes = 0;
   5249 #endif
   5250 }
   5251 
   5252 /*
   5253  * wm_init:		[ifnet interface function]
   5254  *
   5255  *	Initialize the interface.
   5256  */
   5257 static int
   5258 wm_init(struct ifnet *ifp)
   5259 {
   5260 	struct wm_softc *sc = ifp->if_softc;
   5261 	int ret;
   5262 
   5263 	WM_CORE_LOCK(sc);
   5264 	ret = wm_init_locked(ifp);
   5265 	WM_CORE_UNLOCK(sc);
   5266 
   5267 	return ret;
   5268 }
   5269 
   5270 static int
   5271 wm_init_locked(struct ifnet *ifp)
   5272 {
   5273 	struct wm_softc *sc = ifp->if_softc;
   5274 	int i, j, trynum, error = 0;
   5275 	uint32_t reg;
   5276 
   5277 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5278 		device_xname(sc->sc_dev), __func__));
   5279 	KASSERT(WM_CORE_LOCKED(sc));
   5280 
   5281 	/*
    5282 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5283 	 * There is a small but measurable benefit to avoiding the adjustment
    5284 	 * of the descriptor so that the headers are aligned, for normal MTU,
    5285 	 * on such platforms.  One possibility is that the DMA itself is
   5286 	 * slightly more efficient if the front of the entire packet (instead
   5287 	 * of the front of the headers) is aligned.
   5288 	 *
   5289 	 * Note we must always set align_tweak to 0 if we are using
   5290 	 * jumbo frames.
   5291 	 */
   5292 #ifdef __NO_STRICT_ALIGNMENT
   5293 	sc->sc_align_tweak = 0;
   5294 #else
   5295 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5296 		sc->sc_align_tweak = 0;
   5297 	else
   5298 		sc->sc_align_tweak = 2;
   5299 #endif /* __NO_STRICT_ALIGNMENT */
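         	/*
         	 * An illustration (assumed, not from the documentation): with an
         	 * align_tweak of 2, the 14-byte Ethernet header ends on a 4-byte
         	 * boundary, so the IP header that follows it is 4-byte aligned.
         	 */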
   5300 
   5301 	/* Cancel any pending I/O. */
   5302 	wm_stop_locked(ifp, 0);
   5303 
   5304 	/* update statistics before reset */
   5305 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5306 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5307 
   5308 	/* PCH_SPT hardware workaround */
   5309 	if (sc->sc_type == WM_T_PCH_SPT)
   5310 		wm_flush_desc_rings(sc);
   5311 
   5312 	/* Reset the chip to a known state. */
   5313 	wm_reset(sc);
   5314 
   5315 	/*
   5316 	 * AMT based hardware can now take control from firmware
   5317 	 * Do this after reset.
   5318 	 */
   5319 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5320 		wm_get_hw_control(sc);
   5321 
   5322 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5323 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5324 		wm_legacy_irq_quirk_spt(sc);
   5325 
   5326 	/* Init hardware bits */
   5327 	wm_initialize_hardware_bits(sc);
   5328 
   5329 	/* Reset the PHY. */
   5330 	if (sc->sc_flags & WM_F_HAS_MII)
   5331 		wm_gmii_reset(sc);
   5332 
   5333 	/* Calculate (E)ITR value */
   5334 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5335 		/*
    5336 		 * For NEWQUEUE's EITR (except for the 82575).
    5337 		 * The 82575's EITR should be set to the same throttling
    5338 		 * value as the other old controllers' ITR because the
    5339 		 * interrupt/sec calculation is the same:
    5340 		 * 1,000,000,000 / (N * 256).
    5341 		 * The 82574's EITR should be set to the same value as ITR.
    5342 		 *
    5343 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5344 		 * in contrast to the ITR throttling value.
   5345 		 */
   5346 		sc->sc_itr_init = 450;
   5347 	} else if (sc->sc_type >= WM_T_82543) {
   5348 		/*
    5349 		 * Set up the interrupt throttling register (units of 256ns).
    5350 		 * Note that a footnote in Intel's documentation says this
    5351 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5352 		 * or 10Mbit mode.  Empirically, the same appears to be
    5353 		 * true for the 1024ns units of the other
   5354 		 * interrupt-related timer registers -- so, really, we ought
   5355 		 * to divide this value by 4 when the link speed is low.
   5356 		 *
   5357 		 * XXX implement this division at link speed change!
   5358 		 */
   5359 
   5360 		/*
   5361 		 * For N interrupts/sec, set this value to:
   5362 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5363 		 * absolute and packet timer values to this value
   5364 		 * divided by 4 to get "simple timer" behavior.
   5365 		 */
   5366 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5367 	}
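         	/*
         	 * Checking the arithmetic above: an ITR value of 1500 gives
         	 * 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, and the
         	 * NEWQUEUE EITR value of 450 gives 1,000,000 / 450 ~= 2222
         	 * interrupts/sec.
         	 */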
   5368 
   5369 	error = wm_init_txrx_queues(sc);
   5370 	if (error)
   5371 		goto out;
   5372 
   5373 	/*
   5374 	 * Clear out the VLAN table -- we don't use it (yet).
   5375 	 */
   5376 	CSR_WRITE(sc, WMREG_VET, 0);
   5377 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5378 		trynum = 10; /* Due to hw errata */
   5379 	else
   5380 		trynum = 1;
   5381 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5382 		for (j = 0; j < trynum; j++)
   5383 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5384 
   5385 	/*
   5386 	 * Set up flow-control parameters.
   5387 	 *
   5388 	 * XXX Values could probably stand some tuning.
   5389 	 */
   5390 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5391 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5392 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5393 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5394 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5395 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5396 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5397 	}
   5398 
   5399 	sc->sc_fcrtl = FCRTL_DFLT;
   5400 	if (sc->sc_type < WM_T_82543) {
   5401 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5402 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5403 	} else {
   5404 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5405 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5406 	}
   5407 
   5408 	if (sc->sc_type == WM_T_80003)
   5409 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5410 	else
   5411 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5412 
   5413 	/* Writes the control register. */
   5414 	wm_set_vlan(sc);
   5415 
   5416 	if (sc->sc_flags & WM_F_HAS_MII) {
   5417 		int val;
   5418 
   5419 		switch (sc->sc_type) {
   5420 		case WM_T_80003:
   5421 		case WM_T_ICH8:
   5422 		case WM_T_ICH9:
   5423 		case WM_T_ICH10:
   5424 		case WM_T_PCH:
   5425 		case WM_T_PCH2:
   5426 		case WM_T_PCH_LPT:
   5427 		case WM_T_PCH_SPT:
   5428 			/*
    5429 			 * Set the MAC to wait the maximum time between each
    5430 			 * iteration and increase the max iterations when
    5431 			 * polling the PHY; this fixes erroneous timeouts at
   5432 			 * 10Mbps.
   5433 			 */
   5434 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5435 			    0xFFFF);
   5436 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5437 			val |= 0x3F;
   5438 			wm_kmrn_writereg(sc,
   5439 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5440 			break;
   5441 		default:
   5442 			break;
   5443 		}
   5444 
   5445 		if (sc->sc_type == WM_T_80003) {
   5446 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5447 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5448 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5449 
   5450 			/* Bypass RX and TX FIFO's */
   5451 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5452 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5453 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5454 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5455 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5456 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5457 		}
   5458 	}
   5459 #if 0
   5460 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5461 #endif
   5462 
   5463 	/* Set up checksum offload parameters. */
   5464 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5465 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5466 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5467 		reg |= RXCSUM_IPOFL;
   5468 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5469 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5470 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5471 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5472 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5473 
   5474 	/* Set registers about MSI-X */
   5475 	if (wm_is_using_msix(sc)) {
   5476 		uint32_t ivar;
   5477 		struct wm_queue *wmq;
   5478 		int qid, qintr_idx;
   5479 
   5480 		if (sc->sc_type == WM_T_82575) {
   5481 			/* Interrupt control */
   5482 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5483 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5484 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5485 
   5486 			/* TX and RX */
   5487 			for (i = 0; i < sc->sc_nqueues; i++) {
   5488 				wmq = &sc->sc_queue[i];
   5489 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5490 				    EITR_TX_QUEUE(wmq->wmq_id)
   5491 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5492 			}
   5493 			/* Link status */
   5494 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5495 			    EITR_OTHER);
   5496 		} else if (sc->sc_type == WM_T_82574) {
   5497 			/* Interrupt control */
   5498 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5499 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5500 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5501 
   5502 			/*
    5503 			 * Work around an issue with spurious interrupts
    5504 			 * in MSI-X mode.
    5505 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5506 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5507 			 */
   5508 			reg = CSR_READ(sc, WMREG_RFCTL);
   5509 			reg |= WMREG_RFCTL_ACKDIS;
   5510 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5511 
   5512 			ivar = 0;
   5513 			/* TX and RX */
   5514 			for (i = 0; i < sc->sc_nqueues; i++) {
   5515 				wmq = &sc->sc_queue[i];
   5516 				qid = wmq->wmq_id;
   5517 				qintr_idx = wmq->wmq_intr_idx;
   5518 
   5519 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5520 				    IVAR_TX_MASK_Q_82574(qid));
   5521 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5522 				    IVAR_RX_MASK_Q_82574(qid));
   5523 			}
   5524 			/* Link status */
   5525 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5526 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5527 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5528 		} else {
   5529 			/* Interrupt control */
   5530 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5531 			    | GPIE_EIAME | GPIE_PBA);
   5532 
   5533 			switch (sc->sc_type) {
   5534 			case WM_T_82580:
   5535 			case WM_T_I350:
   5536 			case WM_T_I354:
   5537 			case WM_T_I210:
   5538 			case WM_T_I211:
   5539 				/* TX and RX */
   5540 				for (i = 0; i < sc->sc_nqueues; i++) {
   5541 					wmq = &sc->sc_queue[i];
   5542 					qid = wmq->wmq_id;
   5543 					qintr_idx = wmq->wmq_intr_idx;
   5544 
   5545 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5546 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5547 					ivar |= __SHIFTIN((qintr_idx
   5548 						| IVAR_VALID),
   5549 					    IVAR_TX_MASK_Q(qid));
   5550 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5551 					ivar |= __SHIFTIN((qintr_idx
   5552 						| IVAR_VALID),
   5553 					    IVAR_RX_MASK_Q(qid));
   5554 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5555 				}
   5556 				break;
   5557 			case WM_T_82576:
   5558 				/* TX and RX */
   5559 				for (i = 0; i < sc->sc_nqueues; i++) {
   5560 					wmq = &sc->sc_queue[i];
   5561 					qid = wmq->wmq_id;
   5562 					qintr_idx = wmq->wmq_intr_idx;
   5563 
   5564 					ivar = CSR_READ(sc,
   5565 					    WMREG_IVAR_Q_82576(qid));
   5566 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5567 					ivar |= __SHIFTIN((qintr_idx
   5568 						| IVAR_VALID),
   5569 					    IVAR_TX_MASK_Q_82576(qid));
   5570 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5571 					ivar |= __SHIFTIN((qintr_idx
   5572 						| IVAR_VALID),
   5573 					    IVAR_RX_MASK_Q_82576(qid));
   5574 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5575 					    ivar);
   5576 				}
   5577 				break;
   5578 			default:
   5579 				break;
   5580 			}
   5581 
   5582 			/* Link status */
   5583 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5584 			    IVAR_MISC_OTHER);
   5585 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5586 		}
   5587 
   5588 		if (wm_is_using_multiqueue(sc)) {
   5589 			wm_init_rss(sc);
   5590 
    5591 			/*
    5592 			 * NOTE: Receive full-packet checksum offload
    5593 			 * is mutually exclusive with multiqueue. However,
    5594 			 * this is not the same as the TCP/IP checksum
    5595 			 * offloads, which still work.
    5596 			 */
   5597 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5598 			reg |= RXCSUM_PCSD;
   5599 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5600 		}
   5601 	}
   5602 
   5603 	/* Set up the interrupt registers. */
   5604 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5605 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5606 	    ICR_RXO | ICR_RXT0;
   5607 	if (wm_is_using_msix(sc)) {
   5608 		uint32_t mask;
   5609 		struct wm_queue *wmq;
   5610 
   5611 		switch (sc->sc_type) {
   5612 		case WM_T_82574:
   5613 			mask = 0;
   5614 			for (i = 0; i < sc->sc_nqueues; i++) {
   5615 				wmq = &sc->sc_queue[i];
   5616 				mask |= ICR_TXQ(wmq->wmq_id);
   5617 				mask |= ICR_RXQ(wmq->wmq_id);
   5618 			}
   5619 			mask |= ICR_OTHER;
   5620 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5621 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5622 			break;
   5623 		default:
   5624 			if (sc->sc_type == WM_T_82575) {
   5625 				mask = 0;
   5626 				for (i = 0; i < sc->sc_nqueues; i++) {
   5627 					wmq = &sc->sc_queue[i];
   5628 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5629 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5630 				}
   5631 				mask |= EITR_OTHER;
   5632 			} else {
   5633 				mask = 0;
   5634 				for (i = 0; i < sc->sc_nqueues; i++) {
   5635 					wmq = &sc->sc_queue[i];
   5636 					mask |= 1 << wmq->wmq_intr_idx;
   5637 				}
   5638 				mask |= 1 << sc->sc_link_intr_idx;
   5639 			}
   5640 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5641 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5642 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5643 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5644 			break;
   5645 		}
   5646 	} else
   5647 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5648 
   5649 	/* Set up the inter-packet gap. */
   5650 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5651 
   5652 	if (sc->sc_type >= WM_T_82543) {
   5653 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5654 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5655 			wm_itrs_writereg(sc, wmq);
   5656 		}
   5657 		/*
    5658 		 * Link interrupts occur much less frequently than TX
    5659 		 * and RX interrupts, so we don't tune the
    5660 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5661 		 * FreeBSD's if_igb does.
   5662 		 */
   5663 	}
   5664 
   5665 	/* Set the VLAN ethernetype. */
   5666 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5667 
   5668 	/*
   5669 	 * Set up the transmit control register; we start out with
    5670 	 * a collision distance suitable for FDX, but update it when
   5671 	 * we resolve the media type.
   5672 	 */
   5673 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5674 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5675 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5676 	if (sc->sc_type >= WM_T_82571)
   5677 		sc->sc_tctl |= TCTL_MULR;
   5678 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5679 
   5680 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5681 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5682 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5683 	}
   5684 
   5685 	if (sc->sc_type == WM_T_80003) {
   5686 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5687 		reg &= ~TCTL_EXT_GCEX_MASK;
   5688 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5689 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5690 	}
   5691 
   5692 	/* Set the media. */
   5693 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5694 		goto out;
   5695 
   5696 	/* Configure for OS presence */
   5697 	wm_init_manageability(sc);
   5698 
   5699 	/*
   5700 	 * Set up the receive control register; we actually program
   5701 	 * the register when we set the receive filter.  Use multicast
   5702 	 * address offset type 0.
   5703 	 *
   5704 	 * Only the i82544 has the ability to strip the incoming
   5705 	 * CRC, so we don't enable that feature.
   5706 	 */
   5707 	sc->sc_mchash_type = 0;
   5708 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5709 	    | RCTL_MO(sc->sc_mchash_type);
   5710 
   5711 	/*
    5712 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5713 	 */
   5714 	if (sc->sc_type == WM_T_82574)
   5715 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5716 
   5717 	/*
   5718 	 * The I350 has a bug where it always strips the CRC whether
    5719 	 * asked to or not. So ask for stripped CRC here and cope in rxeof().
   5720 	 */
   5721 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5722 	    || (sc->sc_type == WM_T_I210))
   5723 		sc->sc_rctl |= RCTL_SECRC;
   5724 
   5725 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5726 	    && (ifp->if_mtu > ETHERMTU)) {
   5727 		sc->sc_rctl |= RCTL_LPE;
   5728 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5729 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5730 	}
   5731 
   5732 	if (MCLBYTES == 2048) {
   5733 		sc->sc_rctl |= RCTL_2k;
   5734 	} else {
   5735 		if (sc->sc_type >= WM_T_82543) {
   5736 			switch (MCLBYTES) {
   5737 			case 4096:
   5738 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5739 				break;
   5740 			case 8192:
   5741 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5742 				break;
   5743 			case 16384:
   5744 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5745 				break;
   5746 			default:
   5747 				panic("wm_init: MCLBYTES %d unsupported",
   5748 				    MCLBYTES);
   5749 				break;
   5750 			}
   5751 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5752 	}
   5753 
   5754 	/* Enable ECC */
   5755 	switch (sc->sc_type) {
   5756 	case WM_T_82571:
   5757 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5758 		reg |= PBA_ECC_CORR_EN;
   5759 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5760 		break;
   5761 	case WM_T_PCH_LPT:
   5762 	case WM_T_PCH_SPT:
   5763 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5764 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5765 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5766 
   5767 		sc->sc_ctrl |= CTRL_MEHE;
   5768 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5769 		break;
   5770 	default:
   5771 		break;
   5772 	}
   5773 
    5774 	/* On the 82575 and later, set RDT only if RX is enabled */
   5775 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5776 		int qidx;
   5777 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5778 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5779 			for (i = 0; i < WM_NRXDESC; i++) {
   5780 				mutex_enter(rxq->rxq_lock);
   5781 				wm_init_rxdesc(rxq, i);
   5782 				mutex_exit(rxq->rxq_lock);
   5784 			}
   5785 		}
   5786 	}
   5787 
   5788 	/* Set the receive filter. */
   5789 	wm_set_filter(sc);
   5790 
   5791 	wm_turnon(sc);
   5792 
   5793 	/* Start the one second link check clock. */
   5794 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5795 
   5796 	/* ...all done! */
   5797 	ifp->if_flags |= IFF_RUNNING;
   5798 	ifp->if_flags &= ~IFF_OACTIVE;
   5799 
   5800  out:
   5801 	sc->sc_if_flags = ifp->if_flags;
   5802 	if (error)
   5803 		log(LOG_ERR, "%s: interface not running\n",
   5804 		    device_xname(sc->sc_dev));
   5805 	return error;
   5806 }
   5807 
   5808 /*
   5809  * wm_stop:		[ifnet interface function]
   5810  *
   5811  *	Stop transmission on the interface.
   5812  */
   5813 static void
   5814 wm_stop(struct ifnet *ifp, int disable)
   5815 {
   5816 	struct wm_softc *sc = ifp->if_softc;
   5817 
   5818 	WM_CORE_LOCK(sc);
   5819 	wm_stop_locked(ifp, disable);
   5820 	WM_CORE_UNLOCK(sc);
   5821 }
   5822 
   5823 static void
   5824 wm_stop_locked(struct ifnet *ifp, int disable)
   5825 {
   5826 	struct wm_softc *sc = ifp->if_softc;
   5827 	struct wm_txsoft *txs;
   5828 	int i, qidx;
   5829 
   5830 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5831 		device_xname(sc->sc_dev), __func__));
   5832 	KASSERT(WM_CORE_LOCKED(sc));
   5833 
   5834 	wm_turnoff(sc);
   5835 
   5836 	/* Stop the one second clock. */
   5837 	callout_stop(&sc->sc_tick_ch);
   5838 
   5839 	/* Stop the 82547 Tx FIFO stall check timer. */
   5840 	if (sc->sc_type == WM_T_82547)
   5841 		callout_stop(&sc->sc_txfifo_ch);
   5842 
   5843 	if (sc->sc_flags & WM_F_HAS_MII) {
   5844 		/* Down the MII. */
   5845 		mii_down(&sc->sc_mii);
   5846 	} else {
   5847 #if 0
   5848 		/* Should we clear PHY's status properly? */
   5849 		wm_reset(sc);
   5850 #endif
   5851 	}
   5852 
   5853 	/* Stop the transmit and receive processes. */
   5854 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5855 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5856 	sc->sc_rctl &= ~RCTL_EN;
   5857 
   5858 	/*
   5859 	 * Clear the interrupt mask to ensure the device cannot assert its
   5860 	 * interrupt line.
   5861 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5862 	 * service any currently pending or shared interrupt.
   5863 	 */
   5864 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5865 	sc->sc_icr = 0;
   5866 	if (wm_is_using_msix(sc)) {
   5867 		if (sc->sc_type != WM_T_82574) {
   5868 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5869 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5870 		} else
   5871 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5872 	}
   5873 
   5874 	/* Release any queued transmit buffers. */
   5875 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5876 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5877 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5878 		mutex_enter(txq->txq_lock);
   5879 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5880 			txs = &txq->txq_soft[i];
   5881 			if (txs->txs_mbuf != NULL) {
   5882 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5883 				m_freem(txs->txs_mbuf);
   5884 				txs->txs_mbuf = NULL;
   5885 			}
   5886 		}
   5887 		mutex_exit(txq->txq_lock);
   5888 	}
   5889 
   5890 	/* Mark the interface as down and cancel the watchdog timer. */
   5891 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5892 	ifp->if_timer = 0;
   5893 
   5894 	if (disable) {
   5895 		for (i = 0; i < sc->sc_nqueues; i++) {
   5896 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5897 			mutex_enter(rxq->rxq_lock);
   5898 			wm_rxdrain(rxq);
   5899 			mutex_exit(rxq->rxq_lock);
   5900 		}
   5901 	}
   5902 
   5903 #if 0 /* notyet */
   5904 	if (sc->sc_type >= WM_T_82544)
   5905 		CSR_WRITE(sc, WMREG_WUC, 0);
   5906 #endif
   5907 }
   5908 
   5909 static void
   5910 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5911 {
   5912 	struct mbuf *m;
   5913 	int i;
   5914 
   5915 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5916 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5917 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5918 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5919 		    m->m_data, m->m_len, m->m_flags);
   5920 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5921 	    i, i == 1 ? "" : "s");
   5922 }
   5923 
   5924 /*
   5925  * wm_82547_txfifo_stall:
   5926  *
   5927  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5928  *	reset the FIFO pointers, and restart packet transmission.
   5929  */
   5930 static void
   5931 wm_82547_txfifo_stall(void *arg)
   5932 {
   5933 	struct wm_softc *sc = arg;
   5934 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5935 
   5936 	mutex_enter(txq->txq_lock);
   5937 
   5938 	if (txq->txq_stopping)
   5939 		goto out;
   5940 
   5941 	if (txq->txq_fifo_stall) {
   5942 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5943 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5944 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5945 			/*
   5946 			 * Packets have drained.  Stop transmitter, reset
   5947 			 * FIFO pointers, restart transmitter, and kick
   5948 			 * the packet queue.
   5949 			 */
   5950 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5951 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5952 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5953 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5954 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5955 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5956 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5957 			CSR_WRITE_FLUSH(sc);
   5958 
   5959 			txq->txq_fifo_head = 0;
   5960 			txq->txq_fifo_stall = 0;
   5961 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5962 		} else {
   5963 			/*
   5964 			 * Still waiting for packets to drain; try again in
   5965 			 * another tick.
   5966 			 */
   5967 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5968 		}
   5969 	}
   5970 
   5971 out:
   5972 	mutex_exit(txq->txq_lock);
   5973 }
   5974 
   5975 /*
   5976  * wm_82547_txfifo_bugchk:
   5977  *
   5978  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5979  *	prevent enqueueing a packet that would wrap around the end
    5980  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5981  *
   5982  *	We do this by checking the amount of space before the end
   5983  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5984  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5985  *	the internal FIFO pointers to the beginning, and restart
   5986  *	transmission on the interface.
   5987  */
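         /*
          * A worked example with illustrative numbers (half-duplex only, as
          * checked below): a 1514-byte frame rounds up to
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes, so
          * the check below stalls the FIFO once no more than
          * 1536 - WM_82547_PAD_LEN = 544 bytes remain before the FIFO end.
          */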
   5988 #define	WM_FIFO_HDR		0x10
   5989 #define	WM_82547_PAD_LEN	0x3e0
   5990 static int
   5991 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5992 {
   5993 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5994 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5995 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5996 
   5997 	/* Just return if already stalled. */
   5998 	if (txq->txq_fifo_stall)
   5999 		return 1;
   6000 
   6001 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6002 		/* Stall only occurs in half-duplex mode. */
   6003 		goto send_packet;
   6004 	}
   6005 
   6006 	if (len >= WM_82547_PAD_LEN + space) {
   6007 		txq->txq_fifo_stall = 1;
   6008 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6009 		return 1;
   6010 	}
   6011 
   6012  send_packet:
   6013 	txq->txq_fifo_head += len;
   6014 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6015 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6016 
   6017 	return 0;
   6018 }
   6019 
   6020 static int
   6021 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6022 {
   6023 	int error;
   6024 
   6025 	/*
   6026 	 * Allocate the control data structures, and create and load the
   6027 	 * DMA map for it.
   6028 	 *
   6029 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6030 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6031 	 * both sets within the same 4G segment.
   6032 	 */
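         	/*
         	 * The 4G constraint is enforced by the (bus_size_t)0x100000000ULL
         	 * boundary argument passed to bus_dmamem_alloc() below, which
         	 * prevents the allocation from crossing a 4GB boundary.
         	 */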
   6033 	if (sc->sc_type < WM_T_82544)
   6034 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6035 	else
   6036 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6037 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6038 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6039 	else
   6040 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6041 
   6042 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6043 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6044 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6045 		aprint_error_dev(sc->sc_dev,
   6046 		    "unable to allocate TX control data, error = %d\n",
   6047 		    error);
   6048 		goto fail_0;
   6049 	}
   6050 
   6051 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6052 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6053 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6054 		aprint_error_dev(sc->sc_dev,
   6055 		    "unable to map TX control data, error = %d\n", error);
   6056 		goto fail_1;
   6057 	}
   6058 
   6059 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6060 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6061 		aprint_error_dev(sc->sc_dev,
   6062 		    "unable to create TX control data DMA map, error = %d\n",
   6063 		    error);
   6064 		goto fail_2;
   6065 	}
   6066 
   6067 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6068 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6069 		aprint_error_dev(sc->sc_dev,
   6070 		    "unable to load TX control data DMA map, error = %d\n",
   6071 		    error);
   6072 		goto fail_3;
   6073 	}
   6074 
   6075 	return 0;
   6076 
   6077  fail_3:
   6078 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6079  fail_2:
   6080 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6081 	    WM_TXDESCS_SIZE(txq));
   6082  fail_1:
   6083 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6084  fail_0:
   6085 	return error;
   6086 }
   6087 
   6088 static void
   6089 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6090 {
   6091 
   6092 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6093 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6094 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6095 	    WM_TXDESCS_SIZE(txq));
   6096 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6097 }
   6098 
   6099 static int
   6100 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6101 {
   6102 	int error;
   6103 	size_t rxq_descs_size;
   6104 
   6105 	/*
   6106 	 * Allocate the control data structures, and create and load the
   6107 	 * DMA map for it.
   6108 	 *
   6109 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6110 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6111 	 * both sets within the same 4G segment.
   6112 	 */
   6113 	rxq->rxq_ndesc = WM_NRXDESC;
   6114 	if (sc->sc_type == WM_T_82574)
   6115 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6116 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6117 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6118 	else
   6119 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6120 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6121 
   6122 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6123 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6124 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6125 		aprint_error_dev(sc->sc_dev,
   6126 		    "unable to allocate RX control data, error = %d\n",
   6127 		    error);
   6128 		goto fail_0;
   6129 	}
   6130 
   6131 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6132 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6133 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6134 		aprint_error_dev(sc->sc_dev,
   6135 		    "unable to map RX control data, error = %d\n", error);
   6136 		goto fail_1;
   6137 	}
   6138 
   6139 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6140 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6141 		aprint_error_dev(sc->sc_dev,
   6142 		    "unable to create RX control data DMA map, error = %d\n",
   6143 		    error);
   6144 		goto fail_2;
   6145 	}
   6146 
   6147 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6148 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6149 		aprint_error_dev(sc->sc_dev,
   6150 		    "unable to load RX control data DMA map, error = %d\n",
   6151 		    error);
   6152 		goto fail_3;
   6153 	}
   6154 
   6155 	return 0;
   6156 
   6157  fail_3:
   6158 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6159  fail_2:
   6160 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6161 	    rxq_descs_size);
   6162  fail_1:
   6163 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6164  fail_0:
   6165 	return error;
   6166 }
   6167 
   6168 static void
   6169 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6170 {
   6171 
   6172 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6173 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6174 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6175 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6176 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6177 }
   6178 
   6179 
   6180 static int
   6181 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6182 {
   6183 	int i, error;
   6184 
   6185 	/* Create the transmit buffer DMA maps. */
   6186 	WM_TXQUEUELEN(txq) =
   6187 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6188 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6189 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6190 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6191 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6192 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6193 			aprint_error_dev(sc->sc_dev,
   6194 			    "unable to create Tx DMA map %d, error = %d\n",
   6195 			    i, error);
   6196 			goto fail;
   6197 		}
   6198 	}
   6199 
   6200 	return 0;
   6201 
   6202  fail:
   6203 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6204 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6205 			bus_dmamap_destroy(sc->sc_dmat,
   6206 			    txq->txq_soft[i].txs_dmamap);
   6207 	}
   6208 	return error;
   6209 }
   6210 
   6211 static void
   6212 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6213 {
   6214 	int i;
   6215 
   6216 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6217 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6218 			bus_dmamap_destroy(sc->sc_dmat,
   6219 			    txq->txq_soft[i].txs_dmamap);
   6220 	}
   6221 }
   6222 
   6223 static int
   6224 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6225 {
   6226 	int i, error;
   6227 
   6228 	/* Create the receive buffer DMA maps. */
   6229 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6230 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6231 			    MCLBYTES, 0, 0,
   6232 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6233 			aprint_error_dev(sc->sc_dev,
   6234 			    "unable to create Rx DMA map %d error = %d\n",
   6235 			    i, error);
   6236 			goto fail;
   6237 		}
   6238 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6239 	}
   6240 
   6241 	return 0;
   6242 
   6243  fail:
   6244 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6245 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6246 			bus_dmamap_destroy(sc->sc_dmat,
   6247 			    rxq->rxq_soft[i].rxs_dmamap);
   6248 	}
   6249 	return error;
   6250 }
   6251 
   6252 static void
   6253 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6254 {
   6255 	int i;
   6256 
   6257 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6258 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6259 			bus_dmamap_destroy(sc->sc_dmat,
   6260 			    rxq->rxq_soft[i].rxs_dmamap);
   6261 	}
   6262 }
   6263 
   6264 /*
    6265  * wm_alloc_txrx_queues:
    6266  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6267  */
   6268 static int
   6269 wm_alloc_txrx_queues(struct wm_softc *sc)
   6270 {
   6271 	int i, error, tx_done, rx_done;
   6272 
   6273 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6274 	    KM_SLEEP);
   6275 	if (sc->sc_queue == NULL) {
    6276 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6277 		error = ENOMEM;
   6278 		goto fail_0;
   6279 	}
   6280 
   6281 	/*
   6282 	 * For transmission
   6283 	 */
   6284 	error = 0;
   6285 	tx_done = 0;
   6286 	for (i = 0; i < sc->sc_nqueues; i++) {
   6287 #ifdef WM_EVENT_COUNTERS
   6288 		int j;
   6289 		const char *xname;
   6290 #endif
   6291 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6292 		txq->txq_sc = sc;
   6293 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6294 
   6295 		error = wm_alloc_tx_descs(sc, txq);
   6296 		if (error)
   6297 			break;
   6298 		error = wm_alloc_tx_buffer(sc, txq);
   6299 		if (error) {
   6300 			wm_free_tx_descs(sc, txq);
   6301 			break;
   6302 		}
   6303 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6304 		if (txq->txq_interq == NULL) {
   6305 			wm_free_tx_descs(sc, txq);
   6306 			wm_free_tx_buffer(sc, txq);
   6307 			error = ENOMEM;
   6308 			break;
   6309 		}
   6310 
   6311 #ifdef WM_EVENT_COUNTERS
   6312 		xname = device_xname(sc->sc_dev);
   6313 
   6314 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6315 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6316 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6317 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6318 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6319 
   6320 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6321 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6322 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6323 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6324 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6325 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6326 
   6327 		for (j = 0; j < WM_NTXSEGS; j++) {
   6328 			snprintf(txq->txq_txseg_evcnt_names[j],
   6329 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6330 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6331 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6332 		}
   6333 
   6334 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6335 
   6336 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6337 #endif /* WM_EVENT_COUNTERS */
   6338 
   6339 		tx_done++;
   6340 	}
   6341 	if (error)
   6342 		goto fail_1;
   6343 
   6344 	/*
    6345 	 * For receive
   6346 	 */
   6347 	error = 0;
   6348 	rx_done = 0;
   6349 	for (i = 0; i < sc->sc_nqueues; i++) {
   6350 #ifdef WM_EVENT_COUNTERS
   6351 		const char *xname;
   6352 #endif
   6353 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6354 		rxq->rxq_sc = sc;
   6355 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6356 
   6357 		error = wm_alloc_rx_descs(sc, rxq);
   6358 		if (error)
   6359 			break;
   6360 
   6361 		error = wm_alloc_rx_buffer(sc, rxq);
   6362 		if (error) {
   6363 			wm_free_rx_descs(sc, rxq);
   6364 			break;
   6365 		}
   6366 
   6367 #ifdef WM_EVENT_COUNTERS
   6368 		xname = device_xname(sc->sc_dev);
   6369 
   6370 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6371 
   6372 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6373 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6374 #endif /* WM_EVENT_COUNTERS */
   6375 
   6376 		rx_done++;
   6377 	}
   6378 	if (error)
   6379 		goto fail_2;
   6380 
   6381 	return 0;
   6382 
   6383  fail_2:
   6384 	for (i = 0; i < rx_done; i++) {
   6385 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6386 		wm_free_rx_buffer(sc, rxq);
   6387 		wm_free_rx_descs(sc, rxq);
   6388 		if (rxq->rxq_lock)
   6389 			mutex_obj_free(rxq->rxq_lock);
   6390 	}
   6391  fail_1:
   6392 	for (i = 0; i < tx_done; i++) {
   6393 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6394 		pcq_destroy(txq->txq_interq);
   6395 		wm_free_tx_buffer(sc, txq);
   6396 		wm_free_tx_descs(sc, txq);
   6397 		if (txq->txq_lock)
   6398 			mutex_obj_free(txq->txq_lock);
   6399 	}
   6400 
   6401 	kmem_free(sc->sc_queue,
   6402 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6403  fail_0:
   6404 	return error;
   6405 }
   6406 
   6407 /*
    6408  * wm_free_txrx_queues:
    6409  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6410  */
   6411 static void
   6412 wm_free_txrx_queues(struct wm_softc *sc)
   6413 {
   6414 	int i;
   6415 
   6416 	for (i = 0; i < sc->sc_nqueues; i++) {
   6417 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6418 
   6419 #ifdef WM_EVENT_COUNTERS
   6420 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6421 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6422 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6423 #endif /* WM_EVENT_COUNTERS */
   6424 
   6425 		wm_free_rx_buffer(sc, rxq);
   6426 		wm_free_rx_descs(sc, rxq);
   6427 		if (rxq->rxq_lock)
   6428 			mutex_obj_free(rxq->rxq_lock);
   6429 	}
   6430 
   6431 	for (i = 0; i < sc->sc_nqueues; i++) {
   6432 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6433 		struct mbuf *m;
   6434 #ifdef WM_EVENT_COUNTERS
   6435 		int j;
   6436 
   6437 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6438 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6439 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6440 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6441 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6442 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6443 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6444 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6445 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6446 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6447 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6448 
   6449 		for (j = 0; j < WM_NTXSEGS; j++)
   6450 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6451 
   6452 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6453 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6454 #endif /* WM_EVENT_COUNTERS */
   6455 
   6456 		/* drain txq_interq */
   6457 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6458 			m_freem(m);
   6459 		pcq_destroy(txq->txq_interq);
   6460 
   6461 		wm_free_tx_buffer(sc, txq);
   6462 		wm_free_tx_descs(sc, txq);
   6463 		if (txq->txq_lock)
   6464 			mutex_obj_free(txq->txq_lock);
   6465 	}
   6466 
   6467 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6468 }
   6469 
   6470 static void
   6471 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6472 {
   6473 
   6474 	KASSERT(mutex_owned(txq->txq_lock));
   6475 
   6476 	/* Initialize the transmit descriptor ring. */
   6477 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6478 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6479 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6480 	txq->txq_free = WM_NTXDESC(txq);
   6481 	txq->txq_next = 0;
   6482 }
   6483 
   6484 static void
   6485 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6486     struct wm_txqueue *txq)
   6487 {
   6488 
   6489 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6490 		device_xname(sc->sc_dev), __func__));
   6491 	KASSERT(mutex_owned(txq->txq_lock));
   6492 
   6493 	if (sc->sc_type < WM_T_82543) {
   6494 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6495 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6496 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6497 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6498 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6499 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6500 	} else {
   6501 		int qid = wmq->wmq_id;
   6502 
   6503 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6504 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6505 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6506 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6507 
   6508 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6509 			/*
   6510 			 * Don't write TDT before TCTL.EN is set.
    6511 			 * See the documentation.
   6512 			 */
   6513 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6514 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6515 			    | TXDCTL_WTHRESH(0));
   6516 		else {
   6517 			/* XXX should update with AIM? */
   6518 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6519 			if (sc->sc_type >= WM_T_82540) {
    6520 				/* should be the same */
   6521 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6522 			}
   6523 
   6524 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6525 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6526 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6527 		}
   6528 	}
   6529 }
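
/*
 * Illustrative sketch (not compiled): programming a descriptor ring base
 * amounts to splitting the 64-bit bus address across the BAH/BAL register
 * pair and writing the ring size in bytes to the LEN register, which is
 * what the WM_CDTXADDR_HI/LO writes above boil down to.  The helper name
 * below is hypothetical.
 */
#if 0
static void
example_program_tx_ring(struct wm_softc *sc, int qid, uint64_t busaddr,
    uint32_t ring_bytes)
{
	CSR_WRITE(sc, WMREG_TDBAH(qid), (uint32_t)(busaddr >> 32));
	CSR_WRITE(sc, WMREG_TDBAL(qid), (uint32_t)(busaddr & 0xffffffffU));
	CSR_WRITE(sc, WMREG_TDLEN(qid), ring_bytes);	/* size in bytes */
	CSR_WRITE(sc, WMREG_TDH(qid), 0);		/* head: next to fetch */
	CSR_WRITE(sc, WMREG_TDT(qid), 0);		/* tail: first free */
}
#endif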
   6530 
   6531 static void
   6532 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6533 {
   6534 	int i;
   6535 
   6536 	KASSERT(mutex_owned(txq->txq_lock));
   6537 
   6538 	/* Initialize the transmit job descriptors. */
   6539 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6540 		txq->txq_soft[i].txs_mbuf = NULL;
   6541 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6542 	txq->txq_snext = 0;
   6543 	txq->txq_sdirty = 0;
   6544 }
   6545 
   6546 static void
   6547 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6548     struct wm_txqueue *txq)
   6549 {
   6550 
   6551 	KASSERT(mutex_owned(txq->txq_lock));
   6552 
   6553 	/*
   6554 	 * Set up some register offsets that are different between
   6555 	 * the i82542 and the i82543 and later chips.
   6556 	 */
   6557 	if (sc->sc_type < WM_T_82543)
   6558 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6559 	else
   6560 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6561 
   6562 	wm_init_tx_descs(sc, txq);
   6563 	wm_init_tx_regs(sc, wmq, txq);
   6564 	wm_init_tx_buffer(sc, txq);
   6565 }
   6566 
   6567 static void
   6568 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6569     struct wm_rxqueue *rxq)
   6570 {
   6571 
   6572 	KASSERT(mutex_owned(rxq->rxq_lock));
   6573 
   6574 	/*
   6575 	 * Initialize the receive descriptor and receive job
   6576 	 * descriptor rings.
   6577 	 */
   6578 	if (sc->sc_type < WM_T_82543) {
   6579 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6580 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6581 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6582 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6583 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6584 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6585 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6586 
   6587 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6588 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6589 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6590 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6591 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6592 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6593 	} else {
   6594 		int qid = wmq->wmq_id;
   6595 
   6596 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6597 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6598 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6599 
   6600 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6601 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6602 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6603 
    6604 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6605 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6606 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6607 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6608 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6609 			    | RXDCTL_WTHRESH(1));
   6610 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6611 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6612 		} else {
   6613 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6614 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6615 			/* XXX should update with AIM? */
   6616 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
    6617 			/* MUST be the same */
   6618 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6619 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6620 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6621 		}
   6622 	}
   6623 }
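
/*
 * Illustrative sketch (not compiled): SRRCTL.BSIZEPKT expresses the RX
 * buffer size in units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, which is
 * why the code above first checks that MCLBYTES is a whole multiple of
 * that unit and then shifts it down.  Assuming a shift of 10 (1 KB
 * units), a 2048-byte mbuf cluster would encode as 2.
 */
#if 0
static uint32_t
example_srrctl_bsizepkt(uint32_t bufsize)
{
	/* Reject sizes the field cannot represent exactly. */
	KASSERT((bufsize & ((1U << SRRCTL_BSIZEPKT_SHIFT) - 1)) == 0);
	return bufsize >> SRRCTL_BSIZEPKT_SHIFT;
}
#endif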
   6624 
   6625 static int
   6626 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6627 {
   6628 	struct wm_rxsoft *rxs;
   6629 	int error, i;
   6630 
   6631 	KASSERT(mutex_owned(rxq->rxq_lock));
   6632 
   6633 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6634 		rxs = &rxq->rxq_soft[i];
   6635 		if (rxs->rxs_mbuf == NULL) {
   6636 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6637 				log(LOG_ERR, "%s: unable to allocate or map "
   6638 				    "rx buffer %d, error = %d\n",
   6639 				    device_xname(sc->sc_dev), i, error);
   6640 				/*
   6641 				 * XXX Should attempt to run with fewer receive
   6642 				 * XXX buffers instead of just failing.
   6643 				 */
   6644 				wm_rxdrain(rxq);
   6645 				return ENOMEM;
   6646 			}
   6647 		} else {
   6648 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6649 				wm_init_rxdesc(rxq, i);
   6650 			/*
    6651 			 * For 82575 and newer devices, the RX descriptors
    6652 			 * must be initialized after RCTL.EN is set in
    6653 			 * wm_set_filter().
   6654 			 */
   6655 		}
   6656 	}
   6657 	rxq->rxq_ptr = 0;
   6658 	rxq->rxq_discard = 0;
   6659 	WM_RXCHAIN_RESET(rxq);
   6660 
   6661 	return 0;
   6662 }
   6663 
   6664 static int
   6665 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6666     struct wm_rxqueue *rxq)
   6667 {
   6668 
   6669 	KASSERT(mutex_owned(rxq->rxq_lock));
   6670 
   6671 	/*
   6672 	 * Set up some register offsets that are different between
   6673 	 * the i82542 and the i82543 and later chips.
   6674 	 */
   6675 	if (sc->sc_type < WM_T_82543)
   6676 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6677 	else
   6678 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6679 
   6680 	wm_init_rx_regs(sc, wmq, rxq);
   6681 	return wm_init_rx_buffer(sc, rxq);
   6682 }
   6683 
   6684 /*
    6685  * wm_init_txrx_queues:
    6686  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6687  */
   6688 static int
   6689 wm_init_txrx_queues(struct wm_softc *sc)
   6690 {
   6691 	int i, error = 0;
   6692 
   6693 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6694 		device_xname(sc->sc_dev), __func__));
   6695 
   6696 	for (i = 0; i < sc->sc_nqueues; i++) {
   6697 		struct wm_queue *wmq = &sc->sc_queue[i];
   6698 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6699 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6700 
   6701 		/*
   6702 		 * TODO
    6703 		 * Currently, we use a constant value instead of AIM.
    6704 		 * Furthermore, the interrupt interval for multiqueue,
    6705 		 * which uses polling mode, is less than the default value.
   6706 		 * More tuning and AIM are required.
   6707 		 */
   6708 		if (wm_is_using_multiqueue(sc))
   6709 			wmq->wmq_itr = 50;
   6710 		else
   6711 			wmq->wmq_itr = sc->sc_itr_init;
   6712 		wmq->wmq_set_itr = true;
   6713 
   6714 		mutex_enter(txq->txq_lock);
   6715 		wm_init_tx_queue(sc, wmq, txq);
   6716 		mutex_exit(txq->txq_lock);
   6717 
   6718 		mutex_enter(rxq->rxq_lock);
   6719 		error = wm_init_rx_queue(sc, wmq, rxq);
   6720 		mutex_exit(rxq->rxq_lock);
   6721 		if (error)
   6722 			break;
   6723 	}
   6724 
   6725 	return error;
   6726 }
   6727 
   6728 /*
   6729  * wm_tx_offload:
   6730  *
   6731  *	Set up TCP/IP checksumming parameters for the
   6732  *	specified packet.
   6733  */
   6734 static int
   6735 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6736     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6737 {
   6738 	struct mbuf *m0 = txs->txs_mbuf;
   6739 	struct livengood_tcpip_ctxdesc *t;
   6740 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6741 	uint32_t ipcse;
   6742 	struct ether_header *eh;
   6743 	int offset, iphl;
   6744 	uint8_t fields;
   6745 
   6746 	/*
   6747 	 * XXX It would be nice if the mbuf pkthdr had offset
   6748 	 * fields for the protocol headers.
   6749 	 */
   6750 
   6751 	eh = mtod(m0, struct ether_header *);
   6752 	switch (htons(eh->ether_type)) {
   6753 	case ETHERTYPE_IP:
   6754 	case ETHERTYPE_IPV6:
   6755 		offset = ETHER_HDR_LEN;
   6756 		break;
   6757 
   6758 	case ETHERTYPE_VLAN:
   6759 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6760 		break;
   6761 
   6762 	default:
   6763 		/*
   6764 		 * Don't support this protocol or encapsulation.
   6765 		 */
   6766 		*fieldsp = 0;
   6767 		*cmdp = 0;
   6768 		return 0;
   6769 	}
   6770 
   6771 	if ((m0->m_pkthdr.csum_flags &
   6772 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6773 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6774 	} else {
   6775 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6776 	}
   6777 	ipcse = offset + iphl - 1;
   6778 
   6779 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6780 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6781 	seg = 0;
   6782 	fields = 0;
   6783 
   6784 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6785 		int hlen = offset + iphl;
   6786 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6787 
   6788 		if (__predict_false(m0->m_len <
   6789 				    (hlen + sizeof(struct tcphdr)))) {
   6790 			/*
   6791 			 * TCP/IP headers are not in the first mbuf; we need
   6792 			 * to do this the slow and painful way.  Let's just
   6793 			 * hope this doesn't happen very often.
   6794 			 */
   6795 			struct tcphdr th;
   6796 
   6797 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6798 
   6799 			m_copydata(m0, hlen, sizeof(th), &th);
   6800 			if (v4) {
   6801 				struct ip ip;
   6802 
   6803 				m_copydata(m0, offset, sizeof(ip), &ip);
   6804 				ip.ip_len = 0;
   6805 				m_copyback(m0,
   6806 				    offset + offsetof(struct ip, ip_len),
   6807 				    sizeof(ip.ip_len), &ip.ip_len);
   6808 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6809 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6810 			} else {
   6811 				struct ip6_hdr ip6;
   6812 
   6813 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6814 				ip6.ip6_plen = 0;
   6815 				m_copyback(m0,
   6816 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6817 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6818 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6819 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6820 			}
   6821 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6822 			    sizeof(th.th_sum), &th.th_sum);
   6823 
   6824 			hlen += th.th_off << 2;
   6825 		} else {
   6826 			/*
   6827 			 * TCP/IP headers are in the first mbuf; we can do
   6828 			 * this the easy way.
   6829 			 */
   6830 			struct tcphdr *th;
   6831 
   6832 			if (v4) {
   6833 				struct ip *ip =
   6834 				    (void *)(mtod(m0, char *) + offset);
   6835 				th = (void *)(mtod(m0, char *) + hlen);
   6836 
   6837 				ip->ip_len = 0;
   6838 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6839 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6840 			} else {
   6841 				struct ip6_hdr *ip6 =
   6842 				    (void *)(mtod(m0, char *) + offset);
   6843 				th = (void *)(mtod(m0, char *) + hlen);
   6844 
   6845 				ip6->ip6_plen = 0;
   6846 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6847 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6848 			}
   6849 			hlen += th->th_off << 2;
   6850 		}
   6851 
   6852 		if (v4) {
   6853 			WM_Q_EVCNT_INCR(txq, txtso);
   6854 			cmdlen |= WTX_TCPIP_CMD_IP;
   6855 		} else {
   6856 			WM_Q_EVCNT_INCR(txq, txtso6);
   6857 			ipcse = 0;
   6858 		}
   6859 		cmd |= WTX_TCPIP_CMD_TSE;
   6860 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6861 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6862 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6863 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6864 	}
   6865 
   6866 	/*
   6867 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6868 	 * offload feature, if we load the context descriptor, we
   6869 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6870 	 */
   6871 
   6872 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6873 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6874 	    WTX_TCPIP_IPCSE(ipcse);
   6875 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6876 		WM_Q_EVCNT_INCR(txq, txipsum);
   6877 		fields |= WTX_IXSM;
   6878 	}
   6879 
   6880 	offset += iphl;
   6881 
   6882 	if (m0->m_pkthdr.csum_flags &
   6883 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6884 		WM_Q_EVCNT_INCR(txq, txtusum);
   6885 		fields |= WTX_TXSM;
   6886 		tucs = WTX_TCPIP_TUCSS(offset) |
   6887 		    WTX_TCPIP_TUCSO(offset +
   6888 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6889 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6890 	} else if ((m0->m_pkthdr.csum_flags &
   6891 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6892 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6893 		fields |= WTX_TXSM;
   6894 		tucs = WTX_TCPIP_TUCSS(offset) |
   6895 		    WTX_TCPIP_TUCSO(offset +
   6896 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6897 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6898 	} else {
   6899 		/* Just initialize it to a valid TCP context. */
   6900 		tucs = WTX_TCPIP_TUCSS(offset) |
   6901 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6902 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6903 	}
   6904 
   6905 	/*
    6906 	 * We don't have to write a context descriptor for every packet,
    6907 	 * except on the 82574: there, a context descriptor must be
    6908 	 * written for every packet when two descriptor queues are in use.
    6909 	 * Writing a context descriptor for every packet adds overhead,
    6910 	 * but it does not cause problems.
   6911 	 */
   6912 	/* Fill in the context descriptor. */
   6913 	t = (struct livengood_tcpip_ctxdesc *)
   6914 	    &txq->txq_descs[txq->txq_next];
   6915 	t->tcpip_ipcs = htole32(ipcs);
   6916 	t->tcpip_tucs = htole32(tucs);
   6917 	t->tcpip_cmdlen = htole32(cmdlen);
   6918 	t->tcpip_seg = htole32(seg);
   6919 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6920 
   6921 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6922 	txs->txs_ndesc++;
   6923 
   6924 	*cmdp = cmd;
   6925 	*fieldsp = fields;
   6926 
   6927 	return 0;
   6928 }
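
/*
 * Worked example (illustrative): for an untagged IPv4 TCP segment with
 * no IP or TCP options, the TSO header length computed above works out
 * to hlen = ETHER_HDR_LEN (14) + iphl (20) + (th_off << 2) (20) = 54,
 * so the payload length encoded in the context descriptor is
 * m0->m_pkthdr.len - 54, and HDRLEN/MSS are packed into the "seg" word.
 * The helper name below is hypothetical.
 */
#if 0
static int
example_tso_header_len(int l2len, int iphl, uint8_t th_off)
{
	/* L2 header + IP header + TCP header (th_off is in 32-bit words) */
	return l2len + iphl + (th_off << 2);
}
#endif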
   6929 
   6930 static inline int
   6931 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6932 {
   6933 	struct wm_softc *sc = ifp->if_softc;
   6934 	u_int cpuid = cpu_index(curcpu());
   6935 
   6936 	/*
    6937 	 * Currently, a simple distribution strategy.
    6938 	 * TODO:
    6939 	 * Distribute by flowid (RSS hash value).
    6940 	 */
    6941 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6942 }
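
/*
 * Worked example (illustrative): with ncpu = 8, sc_affinity_offset = 2
 * and sc_nqueues = 4, CPU 2 maps to queue 0, CPU 3 to queue 1, and so
 * on, wrapping modulo the queue count.  Adding ncpu before subtracting
 * the offset keeps the dividend non-negative for any offset.
 */
#if 0
static u_int
example_cpu_to_queue(u_int cpuid, u_int ncpu, u_int offset, u_int nqueues)
{
	return (cpuid + ncpu - offset) % nqueues;
}
#endif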
   6943 
   6944 /*
   6945  * wm_start:		[ifnet interface function]
   6946  *
   6947  *	Start packet transmission on the interface.
   6948  */
   6949 static void
   6950 wm_start(struct ifnet *ifp)
   6951 {
   6952 	struct wm_softc *sc = ifp->if_softc;
   6953 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6954 
   6955 #ifdef WM_MPSAFE
   6956 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6957 #endif
   6958 	/*
   6959 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6960 	 */
   6961 
   6962 	mutex_enter(txq->txq_lock);
   6963 	if (!txq->txq_stopping)
   6964 		wm_start_locked(ifp);
   6965 	mutex_exit(txq->txq_lock);
   6966 }
   6967 
   6968 static void
   6969 wm_start_locked(struct ifnet *ifp)
   6970 {
   6971 	struct wm_softc *sc = ifp->if_softc;
   6972 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6973 
   6974 	wm_send_common_locked(ifp, txq, false);
   6975 }
   6976 
   6977 static int
   6978 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6979 {
   6980 	int qid;
   6981 	struct wm_softc *sc = ifp->if_softc;
   6982 	struct wm_txqueue *txq;
   6983 
   6984 	qid = wm_select_txqueue(ifp, m);
   6985 	txq = &sc->sc_queue[qid].wmq_txq;
   6986 
   6987 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6988 		m_freem(m);
   6989 		WM_Q_EVCNT_INCR(txq, txdrop);
   6990 		return ENOBUFS;
   6991 	}
   6992 
   6993 	/*
   6994 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6995 	 */
   6996 	ifp->if_obytes += m->m_pkthdr.len;
   6997 	if (m->m_flags & M_MCAST)
   6998 		ifp->if_omcasts++;
   6999 
   7000 	if (mutex_tryenter(txq->txq_lock)) {
   7001 		if (!txq->txq_stopping)
   7002 			wm_transmit_locked(ifp, txq);
   7003 		mutex_exit(txq->txq_lock);
   7004 	}
   7005 
   7006 	return 0;
   7007 }
   7008 
   7009 static void
   7010 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7011 {
   7012 
   7013 	wm_send_common_locked(ifp, txq, true);
   7014 }
   7015 
   7016 static void
   7017 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7018     bool is_transmit)
   7019 {
   7020 	struct wm_softc *sc = ifp->if_softc;
   7021 	struct mbuf *m0;
   7022 	struct m_tag *mtag;
   7023 	struct wm_txsoft *txs;
   7024 	bus_dmamap_t dmamap;
   7025 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7026 	bus_addr_t curaddr;
   7027 	bus_size_t seglen, curlen;
   7028 	uint32_t cksumcmd;
   7029 	uint8_t cksumfields;
   7030 
   7031 	KASSERT(mutex_owned(txq->txq_lock));
   7032 
   7033 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7034 		return;
   7035 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7036 		return;
   7037 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7038 		return;
   7039 
   7040 	/* Remember the previous number of free descriptors. */
   7041 	ofree = txq->txq_free;
   7042 
   7043 	/*
   7044 	 * Loop through the send queue, setting up transmit descriptors
   7045 	 * until we drain the queue, or use up all available transmit
   7046 	 * descriptors.
   7047 	 */
   7048 	for (;;) {
   7049 		m0 = NULL;
   7050 
   7051 		/* Get a work queue entry. */
   7052 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7053 			wm_txeof(sc, txq);
   7054 			if (txq->txq_sfree == 0) {
   7055 				DPRINTF(WM_DEBUG_TX,
   7056 				    ("%s: TX: no free job descriptors\n",
   7057 					device_xname(sc->sc_dev)));
   7058 				WM_Q_EVCNT_INCR(txq, txsstall);
   7059 				break;
   7060 			}
   7061 		}
   7062 
   7063 		/* Grab a packet off the queue. */
   7064 		if (is_transmit)
   7065 			m0 = pcq_get(txq->txq_interq);
   7066 		else
   7067 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7068 		if (m0 == NULL)
   7069 			break;
   7070 
   7071 		DPRINTF(WM_DEBUG_TX,
   7072 		    ("%s: TX: have packet to transmit: %p\n",
   7073 		    device_xname(sc->sc_dev), m0));
   7074 
   7075 		txs = &txq->txq_soft[txq->txq_snext];
   7076 		dmamap = txs->txs_dmamap;
   7077 
   7078 		use_tso = (m0->m_pkthdr.csum_flags &
   7079 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7080 
   7081 		/*
   7082 		 * So says the Linux driver:
   7083 		 * The controller does a simple calculation to make sure
   7084 		 * there is enough room in the FIFO before initiating the
   7085 		 * DMA for each buffer.  The calc is:
   7086 		 *	4 = ceil(buffer len / MSS)
   7087 		 * To make sure we don't overrun the FIFO, adjust the max
   7088 		 * buffer len if the MSS drops.
   7089 		 */
   7090 		dmamap->dm_maxsegsz =
   7091 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7092 		    ? m0->m_pkthdr.segsz << 2
   7093 		    : WTX_MAX_LEN;
   7094 
   7095 		/*
   7096 		 * Load the DMA map.  If this fails, the packet either
   7097 		 * didn't fit in the allotted number of segments, or we
   7098 		 * were short on resources.  For the too-many-segments
   7099 		 * case, we simply report an error and drop the packet,
   7100 		 * since we can't sanely copy a jumbo packet to a single
   7101 		 * buffer.
   7102 		 */
   7103 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7104 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7105 		if (error) {
   7106 			if (error == EFBIG) {
   7107 				WM_Q_EVCNT_INCR(txq, txdrop);
   7108 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7109 				    "DMA segments, dropping...\n",
   7110 				    device_xname(sc->sc_dev));
   7111 				wm_dump_mbuf_chain(sc, m0);
   7112 				m_freem(m0);
   7113 				continue;
   7114 			}
    7115 			/* Short on resources, just stop for now. */
   7116 			DPRINTF(WM_DEBUG_TX,
   7117 			    ("%s: TX: dmamap load failed: %d\n",
   7118 			    device_xname(sc->sc_dev), error));
   7119 			break;
   7120 		}
   7121 
   7122 		segs_needed = dmamap->dm_nsegs;
   7123 		if (use_tso) {
   7124 			/* For sentinel descriptor; see below. */
   7125 			segs_needed++;
   7126 		}
   7127 
   7128 		/*
   7129 		 * Ensure we have enough descriptors free to describe
   7130 		 * the packet.  Note, we always reserve one descriptor
   7131 		 * at the end of the ring due to the semantics of the
   7132 		 * TDT register, plus one more in the event we need
   7133 		 * to load offload context.
   7134 		 */
   7135 		if (segs_needed > txq->txq_free - 2) {
   7136 			/*
   7137 			 * Not enough free descriptors to transmit this
   7138 			 * packet.  We haven't committed anything yet,
   7139 			 * so just unload the DMA map, put the packet
    7140 			 * back on the queue, and punt.  Notify the upper
   7141 			 * layer that there are no more slots left.
   7142 			 */
   7143 			DPRINTF(WM_DEBUG_TX,
   7144 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7145 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7146 			    segs_needed, txq->txq_free - 1));
   7147 			if (!is_transmit)
   7148 				ifp->if_flags |= IFF_OACTIVE;
   7149 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7150 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7151 			WM_Q_EVCNT_INCR(txq, txdstall);
   7152 			break;
   7153 		}
   7154 
   7155 		/*
   7156 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7157 		 * once we know we can transmit the packet, since we
   7158 		 * do some internal FIFO space accounting here.
   7159 		 */
   7160 		if (sc->sc_type == WM_T_82547 &&
   7161 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7162 			DPRINTF(WM_DEBUG_TX,
   7163 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7164 			    device_xname(sc->sc_dev)));
   7165 			if (!is_transmit)
   7166 				ifp->if_flags |= IFF_OACTIVE;
   7167 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7168 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7169 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7170 			break;
   7171 		}
   7172 
   7173 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7174 
   7175 		DPRINTF(WM_DEBUG_TX,
   7176 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7177 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7178 
   7179 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7180 
   7181 		/*
   7182 		 * Store a pointer to the packet so that we can free it
   7183 		 * later.
   7184 		 *
   7185 		 * Initially, we consider the number of descriptors the
   7186 		 * packet uses the number of DMA segments.  This may be
   7187 		 * incremented by 1 if we do checksum offload (a descriptor
   7188 		 * is used to set the checksum context).
   7189 		 */
   7190 		txs->txs_mbuf = m0;
   7191 		txs->txs_firstdesc = txq->txq_next;
   7192 		txs->txs_ndesc = segs_needed;
   7193 
   7194 		/* Set up offload parameters for this packet. */
   7195 		if (m0->m_pkthdr.csum_flags &
   7196 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7197 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7198 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7199 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7200 					  &cksumfields) != 0) {
   7201 				/* Error message already displayed. */
   7202 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7203 				continue;
   7204 			}
   7205 		} else {
   7206 			cksumcmd = 0;
   7207 			cksumfields = 0;
   7208 		}
   7209 
   7210 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7211 
   7212 		/* Sync the DMA map. */
   7213 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7214 		    BUS_DMASYNC_PREWRITE);
   7215 
   7216 		/* Initialize the transmit descriptor. */
   7217 		for (nexttx = txq->txq_next, seg = 0;
   7218 		     seg < dmamap->dm_nsegs; seg++) {
   7219 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7220 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7221 			     seglen != 0;
   7222 			     curaddr += curlen, seglen -= curlen,
   7223 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7224 				curlen = seglen;
   7225 
   7226 				/*
   7227 				 * So says the Linux driver:
   7228 				 * Work around for premature descriptor
   7229 				 * write-backs in TSO mode.  Append a
   7230 				 * 4-byte sentinel descriptor.
   7231 				 */
   7232 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7233 				    curlen > 8)
   7234 					curlen -= 4;
   7235 
   7236 				wm_set_dma_addr(
   7237 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7238 				txq->txq_descs[nexttx].wtx_cmdlen
   7239 				    = htole32(cksumcmd | curlen);
   7240 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7241 				    = 0;
   7242 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7243 				    = cksumfields;
    7244 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7245 				lasttx = nexttx;
   7246 
   7247 				DPRINTF(WM_DEBUG_TX,
   7248 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7249 				     "len %#04zx\n",
   7250 				    device_xname(sc->sc_dev), nexttx,
   7251 				    (uint64_t)curaddr, curlen));
   7252 			}
   7253 		}
   7254 
   7255 		KASSERT(lasttx != -1);
   7256 
   7257 		/*
   7258 		 * Set up the command byte on the last descriptor of
   7259 		 * the packet.  If we're in the interrupt delay window,
   7260 		 * delay the interrupt.
   7261 		 */
   7262 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7263 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7264 
   7265 		/*
   7266 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7267 		 * up the descriptor to encapsulate the packet for us.
   7268 		 *
   7269 		 * This is only valid on the last descriptor of the packet.
   7270 		 */
   7271 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7272 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7273 			    htole32(WTX_CMD_VLE);
   7274 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7275 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7276 		}
   7277 
   7278 		txs->txs_lastdesc = lasttx;
   7279 
   7280 		DPRINTF(WM_DEBUG_TX,
   7281 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7282 		    device_xname(sc->sc_dev),
   7283 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7284 
   7285 		/* Sync the descriptors we're using. */
   7286 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7287 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7288 
   7289 		/* Give the packet to the chip. */
   7290 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7291 
   7292 		DPRINTF(WM_DEBUG_TX,
   7293 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7294 
   7295 		DPRINTF(WM_DEBUG_TX,
   7296 		    ("%s: TX: finished transmitting packet, job %d\n",
   7297 		    device_xname(sc->sc_dev), txq->txq_snext));
   7298 
   7299 		/* Advance the tx pointer. */
   7300 		txq->txq_free -= txs->txs_ndesc;
   7301 		txq->txq_next = nexttx;
   7302 
   7303 		txq->txq_sfree--;
   7304 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7305 
   7306 		/* Pass the packet to any BPF listeners. */
   7307 		bpf_mtap(ifp, m0);
   7308 	}
   7309 
   7310 	if (m0 != NULL) {
   7311 		if (!is_transmit)
   7312 			ifp->if_flags |= IFF_OACTIVE;
   7313 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7314 		WM_Q_EVCNT_INCR(txq, txdrop);
   7315 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7316 			__func__));
   7317 		m_freem(m0);
   7318 	}
   7319 
   7320 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7321 		/* No more slots; notify upper layer. */
   7322 		if (!is_transmit)
   7323 			ifp->if_flags |= IFF_OACTIVE;
   7324 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7325 	}
   7326 
   7327 	if (txq->txq_free != ofree) {
   7328 		/* Set a watchdog timer in case the chip flakes out. */
   7329 		ifp->if_timer = 5;
   7330 	}
   7331 }
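
/*
 * Illustrative sketch (not compiled): the "txq_free - 2" test above
 * reserves one descriptor so that head == tail unambiguously means an
 * empty ring (the TDT semantics referenced in the comment), plus one
 * more for a possible offload context descriptor.
 */
#if 0
static bool
example_tx_has_room(int segs_needed, int txq_free)
{
	/* 1 slot for the ring-full sentinel + 1 for a context descriptor */
	return segs_needed <= txq_free - 2;
}
#endif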
   7332 
   7333 /*
   7334  * wm_nq_tx_offload:
   7335  *
   7336  *	Set up TCP/IP checksumming parameters for the
   7337  *	specified packet, for NEWQUEUE devices
   7338  */
   7339 static int
   7340 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7341     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7342 {
   7343 	struct mbuf *m0 = txs->txs_mbuf;
   7344 	struct m_tag *mtag;
   7345 	uint32_t vl_len, mssidx, cmdc;
   7346 	struct ether_header *eh;
   7347 	int offset, iphl;
   7348 
   7349 	/*
   7350 	 * XXX It would be nice if the mbuf pkthdr had offset
   7351 	 * fields for the protocol headers.
   7352 	 */
   7353 	*cmdlenp = 0;
   7354 	*fieldsp = 0;
   7355 
   7356 	eh = mtod(m0, struct ether_header *);
   7357 	switch (htons(eh->ether_type)) {
   7358 	case ETHERTYPE_IP:
   7359 	case ETHERTYPE_IPV6:
   7360 		offset = ETHER_HDR_LEN;
   7361 		break;
   7362 
   7363 	case ETHERTYPE_VLAN:
   7364 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7365 		break;
   7366 
   7367 	default:
   7368 		/* Don't support this protocol or encapsulation. */
   7369 		*do_csum = false;
   7370 		return 0;
   7371 	}
   7372 	*do_csum = true;
   7373 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7374 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7375 
   7376 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7377 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7378 
   7379 	if ((m0->m_pkthdr.csum_flags &
   7380 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7381 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7382 	} else {
   7383 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7384 	}
   7385 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7386 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7387 
   7388 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7389 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7390 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7391 		*cmdlenp |= NQTX_CMD_VLE;
   7392 	}
   7393 
   7394 	mssidx = 0;
   7395 
   7396 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7397 		int hlen = offset + iphl;
   7398 		int tcp_hlen;
   7399 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7400 
   7401 		if (__predict_false(m0->m_len <
   7402 				    (hlen + sizeof(struct tcphdr)))) {
   7403 			/*
   7404 			 * TCP/IP headers are not in the first mbuf; we need
   7405 			 * to do this the slow and painful way.  Let's just
   7406 			 * hope this doesn't happen very often.
   7407 			 */
   7408 			struct tcphdr th;
   7409 
   7410 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7411 
   7412 			m_copydata(m0, hlen, sizeof(th), &th);
   7413 			if (v4) {
   7414 				struct ip ip;
   7415 
   7416 				m_copydata(m0, offset, sizeof(ip), &ip);
   7417 				ip.ip_len = 0;
   7418 				m_copyback(m0,
   7419 				    offset + offsetof(struct ip, ip_len),
   7420 				    sizeof(ip.ip_len), &ip.ip_len);
   7421 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7422 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7423 			} else {
   7424 				struct ip6_hdr ip6;
   7425 
   7426 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7427 				ip6.ip6_plen = 0;
   7428 				m_copyback(m0,
   7429 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7430 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7431 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7432 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7433 			}
   7434 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7435 			    sizeof(th.th_sum), &th.th_sum);
   7436 
   7437 			tcp_hlen = th.th_off << 2;
   7438 		} else {
   7439 			/*
   7440 			 * TCP/IP headers are in the first mbuf; we can do
   7441 			 * this the easy way.
   7442 			 */
   7443 			struct tcphdr *th;
   7444 
   7445 			if (v4) {
   7446 				struct ip *ip =
   7447 				    (void *)(mtod(m0, char *) + offset);
   7448 				th = (void *)(mtod(m0, char *) + hlen);
   7449 
   7450 				ip->ip_len = 0;
   7451 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7452 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7453 			} else {
   7454 				struct ip6_hdr *ip6 =
   7455 				    (void *)(mtod(m0, char *) + offset);
   7456 				th = (void *)(mtod(m0, char *) + hlen);
   7457 
   7458 				ip6->ip6_plen = 0;
   7459 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7460 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7461 			}
   7462 			tcp_hlen = th->th_off << 2;
   7463 		}
   7464 		hlen += tcp_hlen;
   7465 		*cmdlenp |= NQTX_CMD_TSE;
   7466 
   7467 		if (v4) {
   7468 			WM_Q_EVCNT_INCR(txq, txtso);
   7469 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7470 		} else {
   7471 			WM_Q_EVCNT_INCR(txq, txtso6);
   7472 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7473 		}
   7474 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7475 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7476 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7477 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7478 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7479 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7480 	} else {
   7481 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7482 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7483 	}
   7484 
   7485 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7486 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7487 		cmdc |= NQTXC_CMD_IP4;
   7488 	}
   7489 
   7490 	if (m0->m_pkthdr.csum_flags &
   7491 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7492 		WM_Q_EVCNT_INCR(txq, txtusum);
   7493 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7494 			cmdc |= NQTXC_CMD_TCP;
   7495 		} else {
   7496 			cmdc |= NQTXC_CMD_UDP;
   7497 		}
   7498 		cmdc |= NQTXC_CMD_IP4;
   7499 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7500 	}
   7501 	if (m0->m_pkthdr.csum_flags &
   7502 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7503 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7504 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7505 			cmdc |= NQTXC_CMD_TCP;
   7506 		} else {
   7507 			cmdc |= NQTXC_CMD_UDP;
   7508 		}
   7509 		cmdc |= NQTXC_CMD_IP6;
   7510 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7511 	}
   7512 
   7513 	/*
    7514 	 * We don't have to write a context descriptor for every packet on
    7515 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
    7516 	 * I354, I210 and I211; for these, writing one context descriptor
    7517 	 * per Tx queue is enough.
    7518 	 * Writing a context descriptor for every packet adds overhead,
    7519 	 * but it does not cause problems.
   7520 	 */
   7521 	/* Fill in the context descriptor. */
   7522 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7523 	    htole32(vl_len);
   7524 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7525 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7526 	    htole32(cmdc);
   7527 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7528 	    htole32(mssidx);
   7529 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7530 	DPRINTF(WM_DEBUG_TX,
   7531 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7532 	    txq->txq_next, 0, vl_len));
   7533 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7534 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7535 	txs->txs_ndesc++;
   7536 	return 0;
   7537 }
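
/*
 * Illustrative sketch (not compiled): the advanced context descriptor's
 * VL_LEN word assembled above is three bit-fields packed with the
 * NQTXC_VLLEN_* shifts: the L2 header length (MACLEN), the IP header
 * length (IPLEN) and the VLAN tag.  The helper name is hypothetical.
 */
#if 0
static uint32_t
example_pack_vl_len(uint32_t maclen, uint32_t iplen, uint32_t vlan)
{
	return (maclen << NQTXC_VLLEN_MACLEN_SHIFT) |
	    (iplen << NQTXC_VLLEN_IPLEN_SHIFT) |
	    ((vlan & NQTXC_VLLEN_VLAN_MASK) << NQTXC_VLLEN_VLAN_SHIFT);
}
#endif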
   7538 
   7539 /*
   7540  * wm_nq_start:		[ifnet interface function]
   7541  *
   7542  *	Start packet transmission on the interface for NEWQUEUE devices
   7543  */
   7544 static void
   7545 wm_nq_start(struct ifnet *ifp)
   7546 {
   7547 	struct wm_softc *sc = ifp->if_softc;
   7548 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7549 
   7550 #ifdef WM_MPSAFE
   7551 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7552 #endif
   7553 	/*
   7554 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7555 	 */
   7556 
   7557 	mutex_enter(txq->txq_lock);
   7558 	if (!txq->txq_stopping)
   7559 		wm_nq_start_locked(ifp);
   7560 	mutex_exit(txq->txq_lock);
   7561 }
   7562 
   7563 static void
   7564 wm_nq_start_locked(struct ifnet *ifp)
   7565 {
   7566 	struct wm_softc *sc = ifp->if_softc;
   7567 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7568 
   7569 	wm_nq_send_common_locked(ifp, txq, false);
   7570 }
   7571 
   7572 static int
   7573 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7574 {
   7575 	int qid;
   7576 	struct wm_softc *sc = ifp->if_softc;
   7577 	struct wm_txqueue *txq;
   7578 
   7579 	qid = wm_select_txqueue(ifp, m);
   7580 	txq = &sc->sc_queue[qid].wmq_txq;
   7581 
   7582 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7583 		m_freem(m);
   7584 		WM_Q_EVCNT_INCR(txq, txdrop);
   7585 		return ENOBUFS;
   7586 	}
   7587 
   7588 	/*
   7589 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7590 	 */
   7591 	ifp->if_obytes += m->m_pkthdr.len;
   7592 	if (m->m_flags & M_MCAST)
   7593 		ifp->if_omcasts++;
   7594 
   7595 	/*
    7596 	 * There are two situations in which this mutex_tryenter() can
    7597 	 * fail at run time:
    7598 	 *     (1) contention with the interrupt handler
    7599 	 *         (wm_txrxintr_msix())
    7600 	 *     (2) contention with the deferred if_start softint
    7601 	 *         (wm_handle_queue())
    7602 	 * In either case, the last packet enqueued to txq->txq_interq is
    7603 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   7604 	 */
   7605 	if (mutex_tryenter(txq->txq_lock)) {
   7606 		if (!txq->txq_stopping)
   7607 			wm_nq_transmit_locked(ifp, txq);
   7608 		mutex_exit(txq->txq_lock);
   7609 	}
   7610 
   7611 	return 0;
   7612 }
   7613 
   7614 static void
   7615 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7616 {
   7617 
   7618 	wm_nq_send_common_locked(ifp, txq, true);
   7619 }
   7620 
   7621 static void
   7622 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7623     bool is_transmit)
   7624 {
   7625 	struct wm_softc *sc = ifp->if_softc;
   7626 	struct mbuf *m0;
   7627 	struct m_tag *mtag;
   7628 	struct wm_txsoft *txs;
   7629 	bus_dmamap_t dmamap;
   7630 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7631 	bool do_csum, sent;
   7632 
   7633 	KASSERT(mutex_owned(txq->txq_lock));
   7634 
   7635 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7636 		return;
   7637 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7638 		return;
   7639 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7640 		return;
   7641 
   7642 	sent = false;
   7643 
   7644 	/*
   7645 	 * Loop through the send queue, setting up transmit descriptors
   7646 	 * until we drain the queue, or use up all available transmit
   7647 	 * descriptors.
   7648 	 */
   7649 	for (;;) {
   7650 		m0 = NULL;
   7651 
   7652 		/* Get a work queue entry. */
   7653 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7654 			wm_txeof(sc, txq);
   7655 			if (txq->txq_sfree == 0) {
   7656 				DPRINTF(WM_DEBUG_TX,
   7657 				    ("%s: TX: no free job descriptors\n",
   7658 					device_xname(sc->sc_dev)));
   7659 				WM_Q_EVCNT_INCR(txq, txsstall);
   7660 				break;
   7661 			}
   7662 		}
   7663 
   7664 		/* Grab a packet off the queue. */
   7665 		if (is_transmit)
   7666 			m0 = pcq_get(txq->txq_interq);
   7667 		else
   7668 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7669 		if (m0 == NULL)
   7670 			break;
   7671 
   7672 		DPRINTF(WM_DEBUG_TX,
   7673 		    ("%s: TX: have packet to transmit: %p\n",
   7674 		    device_xname(sc->sc_dev), m0));
   7675 
   7676 		txs = &txq->txq_soft[txq->txq_snext];
   7677 		dmamap = txs->txs_dmamap;
   7678 
   7679 		/*
   7680 		 * Load the DMA map.  If this fails, the packet either
   7681 		 * didn't fit in the allotted number of segments, or we
   7682 		 * were short on resources.  For the too-many-segments
   7683 		 * case, we simply report an error and drop the packet,
   7684 		 * since we can't sanely copy a jumbo packet to a single
   7685 		 * buffer.
   7686 		 */
   7687 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7688 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7689 		if (error) {
   7690 			if (error == EFBIG) {
   7691 				WM_Q_EVCNT_INCR(txq, txdrop);
   7692 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7693 				    "DMA segments, dropping...\n",
   7694 				    device_xname(sc->sc_dev));
   7695 				wm_dump_mbuf_chain(sc, m0);
   7696 				m_freem(m0);
   7697 				continue;
   7698 			}
   7699 			/* Short on resources, just stop for now. */
   7700 			DPRINTF(WM_DEBUG_TX,
   7701 			    ("%s: TX: dmamap load failed: %d\n",
   7702 			    device_xname(sc->sc_dev), error));
   7703 			break;
   7704 		}
   7705 
   7706 		segs_needed = dmamap->dm_nsegs;
   7707 
   7708 		/*
   7709 		 * Ensure we have enough descriptors free to describe
   7710 		 * the packet.  Note, we always reserve one descriptor
   7711 		 * at the end of the ring due to the semantics of the
   7712 		 * TDT register, plus one more in the event we need
   7713 		 * to load offload context.
   7714 		 */
   7715 		if (segs_needed > txq->txq_free - 2) {
   7716 			/*
   7717 			 * Not enough free descriptors to transmit this
   7718 			 * packet.  We haven't committed anything yet,
   7719 			 * so just unload the DMA map, put the packet
    7720 			 * back on the queue, and punt.  Notify the upper
   7721 			 * layer that there are no more slots left.
   7722 			 */
   7723 			DPRINTF(WM_DEBUG_TX,
   7724 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7725 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7726 			    segs_needed, txq->txq_free - 1));
   7727 			if (!is_transmit)
   7728 				ifp->if_flags |= IFF_OACTIVE;
   7729 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7730 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7731 			WM_Q_EVCNT_INCR(txq, txdstall);
   7732 			break;
   7733 		}
   7734 
   7735 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7736 
   7737 		DPRINTF(WM_DEBUG_TX,
   7738 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7739 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7740 
   7741 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7742 
   7743 		/*
   7744 		 * Store a pointer to the packet so that we can free it
   7745 		 * later.
   7746 		 *
   7747 		 * Initially, we consider the number of descriptors the
   7748 		 * packet uses the number of DMA segments.  This may be
   7749 		 * incremented by 1 if we do checksum offload (a descriptor
   7750 		 * is used to set the checksum context).
   7751 		 */
   7752 		txs->txs_mbuf = m0;
   7753 		txs->txs_firstdesc = txq->txq_next;
   7754 		txs->txs_ndesc = segs_needed;
   7755 
   7756 		/* Set up offload parameters for this packet. */
   7757 		uint32_t cmdlen, fields, dcmdlen;
   7758 		if (m0->m_pkthdr.csum_flags &
   7759 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7760 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7761 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7762 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7763 			    &do_csum) != 0) {
   7764 				/* Error message already displayed. */
   7765 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7766 				continue;
   7767 			}
   7768 		} else {
   7769 			do_csum = false;
   7770 			cmdlen = 0;
   7771 			fields = 0;
   7772 		}
   7773 
   7774 		/* Sync the DMA map. */
   7775 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7776 		    BUS_DMASYNC_PREWRITE);
   7777 
   7778 		/* Initialize the first transmit descriptor. */
   7779 		nexttx = txq->txq_next;
   7780 		if (!do_csum) {
    7781 			/* Set up a legacy descriptor. */
   7782 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7783 			    dmamap->dm_segs[0].ds_addr);
   7784 			txq->txq_descs[nexttx].wtx_cmdlen =
   7785 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7786 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7787 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7788 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7789 			    NULL) {
   7790 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7791 				    htole32(WTX_CMD_VLE);
   7792 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7793 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7794 			} else {
    7795 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7796 			}
   7797 			dcmdlen = 0;
   7798 		} else {
    7799 			/* Set up an advanced data descriptor. */
   7800 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7801 			    htole64(dmamap->dm_segs[0].ds_addr);
   7802 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7803 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7804 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7805 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7806 			    htole32(fields);
   7807 			DPRINTF(WM_DEBUG_TX,
   7808 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7809 			    device_xname(sc->sc_dev), nexttx,
   7810 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7811 			DPRINTF(WM_DEBUG_TX,
   7812 			    ("\t 0x%08x%08x\n", fields,
   7813 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7814 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7815 		}
   7816 
   7817 		lasttx = nexttx;
   7818 		nexttx = WM_NEXTTX(txq, nexttx);
   7819 		/*
    7820 		 * Fill in the next descriptors.  The legacy and advanced
    7821 		 * formats are the same here.
   7822 		 */
   7823 		for (seg = 1; seg < dmamap->dm_nsegs;
   7824 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7825 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7826 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7827 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7828 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7829 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7830 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7831 			lasttx = nexttx;
   7832 
   7833 			DPRINTF(WM_DEBUG_TX,
   7834 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7835 			     "len %#04zx\n",
   7836 			    device_xname(sc->sc_dev), nexttx,
   7837 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7838 			    dmamap->dm_segs[seg].ds_len));
   7839 		}
   7840 
   7841 		KASSERT(lasttx != -1);
   7842 
   7843 		/*
   7844 		 * Set up the command byte on the last descriptor of
   7845 		 * the packet.  If we're in the interrupt delay window,
   7846 		 * delay the interrupt.
   7847 		 */
   7848 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7849 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7850 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7851 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7852 
   7853 		txs->txs_lastdesc = lasttx;
   7854 
   7855 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7856 		    device_xname(sc->sc_dev),
   7857 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7858 
   7859 		/* Sync the descriptors we're using. */
   7860 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7861 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7862 
   7863 		/* Give the packet to the chip. */
   7864 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7865 		sent = true;
   7866 
   7867 		DPRINTF(WM_DEBUG_TX,
   7868 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7869 
   7870 		DPRINTF(WM_DEBUG_TX,
   7871 		    ("%s: TX: finished transmitting packet, job %d\n",
   7872 		    device_xname(sc->sc_dev), txq->txq_snext));
   7873 
   7874 		/* Advance the tx pointer. */
   7875 		txq->txq_free -= txs->txs_ndesc;
   7876 		txq->txq_next = nexttx;
   7877 
   7878 		txq->txq_sfree--;
   7879 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7880 
   7881 		/* Pass the packet to any BPF listeners. */
   7882 		bpf_mtap(ifp, m0);
   7883 	}
   7884 
   7885 	if (m0 != NULL) {
   7886 		if (!is_transmit)
   7887 			ifp->if_flags |= IFF_OACTIVE;
   7888 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7889 		WM_Q_EVCNT_INCR(txq, txdrop);
   7890 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7891 			__func__));
   7892 		m_freem(m0);
   7893 	}
   7894 
   7895 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7896 		/* No more slots; notify upper layer. */
   7897 		if (!is_transmit)
   7898 			ifp->if_flags |= IFF_OACTIVE;
   7899 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7900 	}
   7901 
   7902 	if (sent) {
   7903 		/* Set a watchdog timer in case the chip flakes out. */
   7904 		ifp->if_timer = 5;
   7905 	}
   7906 }
   7907 
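/*
 * wm_deferred_start_locked:
 *
 *	Helper; restart transmission deferred from the interrupt path.
 *	Called with txq_lock held.
 */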
   7908 static void
   7909 wm_deferred_start_locked(struct wm_txqueue *txq)
   7910 {
   7911 	struct wm_softc *sc = txq->txq_sc;
   7912 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7913 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7914 	int qid = wmq->wmq_id;
   7915 
   7916 	KASSERT(mutex_owned(txq->txq_lock));
   7917 
   7918 	if (txq->txq_stopping) {
   7919 		mutex_exit(txq->txq_lock);
   7920 		return;
   7921 	}
   7922 
   7923 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7924 		/* XXX needed for ALTQ or single-CPU systems */
   7925 		if (qid == 0)
   7926 			wm_nq_start_locked(ifp);
   7927 		wm_nq_transmit_locked(ifp, txq);
   7928 	} else {
    7929 		/* XXX needed for ALTQ or single-CPU systems */
   7930 		if (qid == 0)
   7931 			wm_start_locked(ifp);
   7932 		wm_transmit_locked(ifp, txq);
   7933 	}
   7934 }
   7935 
   7936 /* Interrupt */
   7937 
   7938 /*
   7939  * wm_txeof:
   7940  *
   7941  *	Helper; handle transmit interrupts.
   7942  */
   7943 static int
   7944 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7945 {
   7946 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7947 	struct wm_txsoft *txs;
   7948 	bool processed = false;
   7949 	int count = 0;
   7950 	int i;
   7951 	uint8_t status;
   7952 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7953 
   7954 	KASSERT(mutex_owned(txq->txq_lock));
   7955 
   7956 	if (txq->txq_stopping)
   7957 		return 0;
   7958 
   7959 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7960 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7961 	if (wmq->wmq_id == 0)
   7962 		ifp->if_flags &= ~IFF_OACTIVE;
   7963 
   7964 	/*
   7965 	 * Go through the Tx list and free mbufs for those
   7966 	 * frames which have been transmitted.
   7967 	 */
   7968 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7969 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7970 		txs = &txq->txq_soft[i];
   7971 
   7972 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7973 			device_xname(sc->sc_dev), i));
   7974 
   7975 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7976 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7977 
   7978 		status =
   7979 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7980 		if ((status & WTX_ST_DD) == 0) {
   7981 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7982 			    BUS_DMASYNC_PREREAD);
   7983 			break;
   7984 		}
   7985 
   7986 		processed = true;
   7987 		count++;
   7988 		DPRINTF(WM_DEBUG_TX,
   7989 		    ("%s: TX: job %d done: descs %d..%d\n",
   7990 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7991 		    txs->txs_lastdesc));
   7992 
   7993 		/*
   7994 		 * XXX We should probably be using the statistics
   7995 		 * XXX registers, but I don't know if they exist
   7996 		 * XXX on chips before the i82544.
   7997 		 */
   7998 
   7999 #ifdef WM_EVENT_COUNTERS
   8000 		if (status & WTX_ST_TU)
   8001 			WM_Q_EVCNT_INCR(txq, tu);
   8002 #endif /* WM_EVENT_COUNTERS */
   8003 
   8004 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8005 			ifp->if_oerrors++;
   8006 			if (status & WTX_ST_LC)
   8007 				log(LOG_WARNING, "%s: late collision\n",
   8008 				    device_xname(sc->sc_dev));
   8009 			else if (status & WTX_ST_EC) {
   8010 				ifp->if_collisions += 16;
   8011 				log(LOG_WARNING, "%s: excessive collisions\n",
   8012 				    device_xname(sc->sc_dev));
   8013 			}
   8014 		} else
   8015 			ifp->if_opackets++;
   8016 
   8017 		txq->txq_packets++;
   8018 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8019 
   8020 		txq->txq_free += txs->txs_ndesc;
   8021 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8022 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8023 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8024 		m_freem(txs->txs_mbuf);
   8025 		txs->txs_mbuf = NULL;
   8026 	}
   8027 
   8028 	/* Update the dirty transmit buffer pointer. */
   8029 	txq->txq_sdirty = i;
   8030 	DPRINTF(WM_DEBUG_TX,
   8031 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8032 
   8033 	if (count != 0)
   8034 		rnd_add_uint32(&sc->rnd_source, count);
   8035 
   8036 	/*
   8037 	 * If there are no more pending transmissions, cancel the watchdog
   8038 	 * timer.
   8039 	 */
   8040 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8041 		ifp->if_timer = 0;
   8042 
   8043 	return processed;
   8044 }
   8045 
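/*
 * The following inline functions abstract the three Rx descriptor
 * formats: the legacy format, the extended format used by the 82574,
 * and the advanced format used by the newer (WM_F_NEWQUEUE) chips.
 */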
   8046 static inline uint32_t
   8047 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8048 {
   8049 	struct wm_softc *sc = rxq->rxq_sc;
   8050 
   8051 	if (sc->sc_type == WM_T_82574)
   8052 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8053 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8054 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8055 	else
   8056 		return rxq->rxq_descs[idx].wrx_status;
   8057 }
   8058 
   8059 static inline uint32_t
   8060 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8061 {
   8062 	struct wm_softc *sc = rxq->rxq_sc;
   8063 
   8064 	if (sc->sc_type == WM_T_82574)
   8065 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8066 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8067 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8068 	else
   8069 		return rxq->rxq_descs[idx].wrx_errors;
   8070 }
   8071 
   8072 static inline uint16_t
   8073 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8074 {
   8075 	struct wm_softc *sc = rxq->rxq_sc;
   8076 
   8077 	if (sc->sc_type == WM_T_82574)
   8078 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8079 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8080 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8081 	else
   8082 		return rxq->rxq_descs[idx].wrx_special;
   8083 }
   8084 
   8085 static inline int
   8086 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8087 {
   8088 	struct wm_softc *sc = rxq->rxq_sc;
   8089 
   8090 	if (sc->sc_type == WM_T_82574)
   8091 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8092 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8093 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8094 	else
   8095 		return rxq->rxq_descs[idx].wrx_len;
   8096 }
   8097 
   8098 #ifdef WM_DEBUG
   8099 static inline uint32_t
   8100 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8101 {
   8102 	struct wm_softc *sc = rxq->rxq_sc;
   8103 
   8104 	if (sc->sc_type == WM_T_82574)
   8105 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8106 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8107 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8108 	else
   8109 		return 0;
   8110 }
   8111 
   8112 static inline uint8_t
   8113 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8114 {
   8115 	struct wm_softc *sc = rxq->rxq_sc;
   8116 
   8117 	if (sc->sc_type == WM_T_82574)
   8118 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8119 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8120 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8121 	else
   8122 		return 0;
   8123 }
   8124 #endif /* WM_DEBUG */
   8125 
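/*
 * wm_rxdesc_is_set_status:
 *
 *	Test an Rx status bit, using the bit mask that matches the
 *	descriptor format in use.
 */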
   8126 static inline bool
   8127 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8128     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8129 {
   8130 
   8131 	if (sc->sc_type == WM_T_82574)
   8132 		return (status & ext_bit) != 0;
   8133 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8134 		return (status & nq_bit) != 0;
   8135 	else
   8136 		return (status & legacy_bit) != 0;
   8137 }
   8138 
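/*
 * wm_rxdesc_is_set_error:
 *
 *	Test an Rx error bit, using the bit mask that matches the
 *	descriptor format in use.
 */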
   8139 static inline bool
   8140 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8141     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8142 {
   8143 
   8144 	if (sc->sc_type == WM_T_82574)
   8145 		return (error & ext_bit) != 0;
   8146 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8147 		return (error & nq_bit) != 0;
   8148 	else
   8149 		return (error & legacy_bit) != 0;
   8150 }
   8151 
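/*
 * wm_rxdesc_is_eop:
 *
 *	Return true if the descriptor's end-of-packet (EOP) bit is set.
 */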
   8152 static inline bool
   8153 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8154 {
   8155 
   8156 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8157 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8158 		return true;
   8159 	else
   8160 		return false;
   8161 }
   8162 
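/*
 * wm_rxdesc_has_errors:
 *
 *	Return true, and log the cause, if the descriptor reports a
 *	receive error.
 */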
   8163 static inline bool
   8164 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8165 {
   8166 	struct wm_softc *sc = rxq->rxq_sc;
   8167 
    8168 	/* XXX missing error bits for newqueue? */
   8169 	if (wm_rxdesc_is_set_error(sc, errors,
   8170 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8171 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8172 		NQRXC_ERROR_RXE)) {
   8173 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8174 			log(LOG_WARNING, "%s: symbol error\n",
   8175 			    device_xname(sc->sc_dev));
   8176 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8177 			log(LOG_WARNING, "%s: receive sequence error\n",
   8178 			    device_xname(sc->sc_dev));
   8179 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8180 			log(LOG_WARNING, "%s: CRC error\n",
   8181 			    device_xname(sc->sc_dev));
   8182 		return true;
   8183 	}
   8184 
   8185 	return false;
   8186 }
   8187 
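/*
 * wm_rxdesc_dd:
 *
 *	Return true if the descriptor's descriptor-done (DD) bit is
 *	set.  If it isn't, re-sync the descriptor for the next poll.
 */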
   8188 static inline bool
   8189 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8190 {
   8191 	struct wm_softc *sc = rxq->rxq_sc;
   8192 
   8193 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8194 		NQRXC_STATUS_DD)) {
   8195 		/* We have processed all of the receive descriptors. */
   8196 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8197 		return false;
   8198 	}
   8199 
   8200 	return true;
   8201 }
   8202 
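/*
 * wm_rxdesc_input_vlantag:
 *
 *	Attach the VLAN tag from the descriptor to the mbuf.  Return
 *	false if the mbuf is dropped while the tag is being attached.
 */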
   8203 static inline bool
   8204 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8205     struct mbuf *m)
   8206 {
   8207 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8208 
   8209 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8210 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8211 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8212 	}
   8213 
   8214 	return true;
   8215 }
   8216 
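/*
 * wm_rxdesc_ensure_checksum:
 *
 *	Set the mbuf's checksum flags from the descriptor's status and
 *	error bits, unless the chip indicates that it skipped
 *	checksumming (IXSM).
 */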
   8217 static inline void
   8218 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8219     uint32_t errors, struct mbuf *m)
   8220 {
   8221 	struct wm_softc *sc = rxq->rxq_sc;
   8222 
   8223 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8224 		if (wm_rxdesc_is_set_status(sc, status,
   8225 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8226 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8227 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8228 			if (wm_rxdesc_is_set_error(sc, errors,
   8229 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8230 				m->m_pkthdr.csum_flags |=
   8231 					M_CSUM_IPv4_BAD;
   8232 		}
   8233 		if (wm_rxdesc_is_set_status(sc, status,
   8234 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8235 			/*
   8236 			 * Note: we don't know if this was TCP or UDP,
   8237 			 * so we just set both bits, and expect the
   8238 			 * upper layers to deal.
   8239 			 */
   8240 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8241 			m->m_pkthdr.csum_flags |=
   8242 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8243 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8244 			if (wm_rxdesc_is_set_error(sc, errors,
   8245 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8246 				m->m_pkthdr.csum_flags |=
   8247 					M_CSUM_TCP_UDP_BAD;
   8248 		}
   8249 	}
   8250 }
   8251 
   8252 /*
   8253  * wm_rxeof:
   8254  *
   8255  *	Helper; handle receive interrupts.
   8256  */
   8257 static void
   8258 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8259 {
   8260 	struct wm_softc *sc = rxq->rxq_sc;
   8261 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8262 	struct wm_rxsoft *rxs;
   8263 	struct mbuf *m;
   8264 	int i, len;
   8265 	int count = 0;
   8266 	uint32_t status, errors;
   8267 	uint16_t vlantag;
   8268 
   8269 	KASSERT(mutex_owned(rxq->rxq_lock));
   8270 
   8271 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8272 		if (limit-- == 0) {
   8273 			rxq->rxq_ptr = i;
   8274 			break;
   8275 		}
   8276 
   8277 		rxs = &rxq->rxq_soft[i];
   8278 
   8279 		DPRINTF(WM_DEBUG_RX,
   8280 		    ("%s: RX: checking descriptor %d\n",
   8281 		    device_xname(sc->sc_dev), i));
    8282 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8283 
   8284 		status = wm_rxdesc_get_status(rxq, i);
   8285 		errors = wm_rxdesc_get_errors(rxq, i);
   8286 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8287 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8288 #ifdef WM_DEBUG
   8289 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8290 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8291 #endif
   8292 
   8293 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8294 			/*
    8295 			 * Update the receive pointer while holding rxq_lock,
    8296 			 * consistent with the incremented counter.
    8297 			 */
   8298 			rxq->rxq_ptr = i;
   8299 			break;
   8300 		}
   8301 
   8302 		count++;
   8303 		if (__predict_false(rxq->rxq_discard)) {
   8304 			DPRINTF(WM_DEBUG_RX,
   8305 			    ("%s: RX: discarding contents of descriptor %d\n",
   8306 			    device_xname(sc->sc_dev), i));
   8307 			wm_init_rxdesc(rxq, i);
   8308 			if (wm_rxdesc_is_eop(rxq, status)) {
   8309 				/* Reset our state. */
   8310 				DPRINTF(WM_DEBUG_RX,
   8311 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8312 				    device_xname(sc->sc_dev)));
   8313 				rxq->rxq_discard = 0;
   8314 			}
   8315 			continue;
   8316 		}
   8317 
   8318 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8319 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8320 
   8321 		m = rxs->rxs_mbuf;
   8322 
   8323 		/*
   8324 		 * Add a new receive buffer to the ring, unless of
   8325 		 * course the length is zero. Treat the latter as a
   8326 		 * failed mapping.
   8327 		 */
   8328 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8329 			/*
   8330 			 * Failed, throw away what we've done so
   8331 			 * far, and discard the rest of the packet.
   8332 			 */
   8333 			ifp->if_ierrors++;
   8334 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8335 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8336 			wm_init_rxdesc(rxq, i);
   8337 			if (!wm_rxdesc_is_eop(rxq, status))
   8338 				rxq->rxq_discard = 1;
   8339 			if (rxq->rxq_head != NULL)
   8340 				m_freem(rxq->rxq_head);
   8341 			WM_RXCHAIN_RESET(rxq);
   8342 			DPRINTF(WM_DEBUG_RX,
   8343 			    ("%s: RX: Rx buffer allocation failed, "
   8344 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8345 			    rxq->rxq_discard ? " (discard)" : ""));
   8346 			continue;
   8347 		}
   8348 
   8349 		m->m_len = len;
   8350 		rxq->rxq_len += len;
   8351 		DPRINTF(WM_DEBUG_RX,
   8352 		    ("%s: RX: buffer at %p len %d\n",
   8353 		    device_xname(sc->sc_dev), m->m_data, len));
   8354 
   8355 		/* If this is not the end of the packet, keep looking. */
   8356 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8357 			WM_RXCHAIN_LINK(rxq, m);
   8358 			DPRINTF(WM_DEBUG_RX,
   8359 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8360 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8361 			continue;
   8362 		}
   8363 
    8364 		/*
    8365 		 * Okay, we have the entire packet now.  The chip is
    8366 		 * configured to include the FCS except on I350, I354 and
    8367 		 * I21[01] (not all chips can be configured to strip it),
    8368 		 * so we need to trim it here.  We may need to adjust the
    8369 		 * length of the previous mbuf in the chain if the current
    8370 		 * mbuf is too short to cover the whole FCS.
    8371 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8372 		 * register is always set on I350, so we don't trim there.
    8373 		 */
   8374 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8375 		    && (sc->sc_type != WM_T_I210)
   8376 		    && (sc->sc_type != WM_T_I211)) {
   8377 			if (m->m_len < ETHER_CRC_LEN) {
   8378 				rxq->rxq_tail->m_len
   8379 				    -= (ETHER_CRC_LEN - m->m_len);
   8380 				m->m_len = 0;
   8381 			} else
   8382 				m->m_len -= ETHER_CRC_LEN;
   8383 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8384 		} else
   8385 			len = rxq->rxq_len;
   8386 
   8387 		WM_RXCHAIN_LINK(rxq, m);
   8388 
   8389 		*rxq->rxq_tailp = NULL;
   8390 		m = rxq->rxq_head;
   8391 
   8392 		WM_RXCHAIN_RESET(rxq);
   8393 
   8394 		DPRINTF(WM_DEBUG_RX,
   8395 		    ("%s: RX: have entire packet, len -> %d\n",
   8396 		    device_xname(sc->sc_dev), len));
   8397 
   8398 		/* If an error occurred, update stats and drop the packet. */
   8399 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8400 			m_freem(m);
   8401 			continue;
   8402 		}
   8403 
   8404 		/* No errors.  Receive the packet. */
   8405 		m_set_rcvif(m, ifp);
   8406 		m->m_pkthdr.len = len;
    8407 		/*
    8408 		 * TODO: the rsshash and rsstype should be saved to this
    8409 		 * mbuf.
    8410 		 */
   8411 		DPRINTF(WM_DEBUG_RX,
   8412 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8413 			device_xname(sc->sc_dev), rsstype, rsshash));
   8414 
   8415 		/*
   8416 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8417 		 * for us.  Associate the tag with the packet.
   8418 		 */
   8419 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8420 			continue;
   8421 
   8422 		/* Set up checksum info for this packet. */
   8423 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8424 		/*
    8425 		 * Update the receive pointer while holding rxq_lock,
    8426 		 * consistent with the incremented counter.
    8427 		 */
   8428 		rxq->rxq_ptr = i;
   8429 		rxq->rxq_packets++;
   8430 		rxq->rxq_bytes += len;
   8431 		mutex_exit(rxq->rxq_lock);
   8432 
   8433 		/* Pass it on. */
   8434 		if_percpuq_enqueue(sc->sc_ipq, m);
   8435 
   8436 		mutex_enter(rxq->rxq_lock);
   8437 
   8438 		if (rxq->rxq_stopping)
   8439 			break;
   8440 	}
   8441 
   8442 	if (count != 0)
   8443 		rnd_add_uint32(&sc->rnd_source, count);
   8444 
   8445 	DPRINTF(WM_DEBUG_RX,
   8446 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8447 }
   8448 
   8449 /*
   8450  * wm_linkintr_gmii:
   8451  *
   8452  *	Helper; handle link interrupts for GMII.
   8453  */
   8454 static void
   8455 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8456 {
   8457 
   8458 	KASSERT(WM_CORE_LOCKED(sc));
   8459 
   8460 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8461 		__func__));
   8462 
   8463 	if (icr & ICR_LSC) {
   8464 		uint32_t reg;
   8465 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8466 
   8467 		if ((status & STATUS_LU) != 0) {
   8468 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8469 				device_xname(sc->sc_dev),
   8470 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8471 		} else {
   8472 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8473 				device_xname(sc->sc_dev)));
   8474 		}
   8475 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8476 			wm_gig_downshift_workaround_ich8lan(sc);
   8477 
   8478 		if ((sc->sc_type == WM_T_ICH8)
   8479 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8480 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8481 		}
   8482 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8483 			device_xname(sc->sc_dev)));
   8484 		mii_pollstat(&sc->sc_mii);
   8485 		if (sc->sc_type == WM_T_82543) {
   8486 			int miistatus, active;
   8487 
   8488 			/*
   8489 			 * With 82543, we need to force speed and
   8490 			 * duplex on the MAC equal to what the PHY
   8491 			 * speed and duplex configuration is.
   8492 			 */
   8493 			miistatus = sc->sc_mii.mii_media_status;
   8494 
   8495 			if (miistatus & IFM_ACTIVE) {
   8496 				active = sc->sc_mii.mii_media_active;
   8497 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8498 				switch (IFM_SUBTYPE(active)) {
   8499 				case IFM_10_T:
   8500 					sc->sc_ctrl |= CTRL_SPEED_10;
   8501 					break;
   8502 				case IFM_100_TX:
   8503 					sc->sc_ctrl |= CTRL_SPEED_100;
   8504 					break;
   8505 				case IFM_1000_T:
   8506 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8507 					break;
   8508 				default:
   8509 					/*
    8510 					 * Fiber?
    8511 					 * Should not enter here.
   8512 					 */
   8513 					printf("unknown media (%x)\n", active);
   8514 					break;
   8515 				}
   8516 				if (active & IFM_FDX)
   8517 					sc->sc_ctrl |= CTRL_FD;
   8518 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8519 			}
   8520 		} else if (sc->sc_type == WM_T_PCH) {
   8521 			wm_k1_gig_workaround_hv(sc,
   8522 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8523 		}
   8524 
   8525 		if ((sc->sc_phytype == WMPHY_82578)
   8526 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8527 			== IFM_1000_T)) {
   8528 
   8529 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8530 				delay(200*1000); /* XXX too big */
   8531 
   8532 				/* Link stall fix for link up */
   8533 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8534 				    HV_MUX_DATA_CTRL,
   8535 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8536 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8537 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8538 				    HV_MUX_DATA_CTRL,
   8539 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8540 			}
   8541 		}
   8542 		/*
   8543 		 * I217 Packet Loss issue:
   8544 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8545 		 * on power up.
   8546 		 * Set the Beacon Duration for I217 to 8 usec
   8547 		 */
   8548 		if ((sc->sc_type == WM_T_PCH_LPT)
   8549 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8550 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8551 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8552 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8553 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8554 		}
   8555 
   8556 		/* XXX Work-around I218 hang issue */
   8557 		/* e1000_k1_workaround_lpt_lp() */
   8558 
   8559 		if ((sc->sc_type == WM_T_PCH_LPT)
   8560 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8561 			/*
   8562 			 * Set platform power management values for Latency
   8563 			 * Tolerance Reporting (LTR)
   8564 			 */
   8565 			wm_platform_pm_pch_lpt(sc,
   8566 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8567 				    != 0));
   8568 		}
   8569 
   8570 		/* FEXTNVM6 K1-off workaround */
   8571 		if (sc->sc_type == WM_T_PCH_SPT) {
   8572 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8573 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8574 			    & FEXTNVM6_K1_OFF_ENABLE)
   8575 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8576 			else
   8577 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8578 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8579 		}
   8580 	} else if (icr & ICR_RXSEQ) {
    8581 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8582 			device_xname(sc->sc_dev)));
   8583 	}
   8584 }
   8585 
   8586 /*
   8587  * wm_linkintr_tbi:
   8588  *
   8589  *	Helper; handle link interrupts for TBI mode.
   8590  */
   8591 static void
   8592 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8593 {
   8594 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8595 	uint32_t status;
   8596 
   8597 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8598 		__func__));
   8599 
   8600 	status = CSR_READ(sc, WMREG_STATUS);
   8601 	if (icr & ICR_LSC) {
   8602 		if (status & STATUS_LU) {
   8603 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8604 			    device_xname(sc->sc_dev),
   8605 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8606 			/*
   8607 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8608 			 * so we should update sc->sc_ctrl
   8609 			 */
   8610 
   8611 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8612 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8613 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8614 			if (status & STATUS_FD)
   8615 				sc->sc_tctl |=
   8616 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8617 			else
   8618 				sc->sc_tctl |=
   8619 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8620 			if (sc->sc_ctrl & CTRL_TFCE)
   8621 				sc->sc_fcrtl |= FCRTL_XONE;
   8622 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8623 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8624 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8625 				      sc->sc_fcrtl);
   8626 			sc->sc_tbi_linkup = 1;
   8627 			if_link_state_change(ifp, LINK_STATE_UP);
   8628 		} else {
   8629 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8630 			    device_xname(sc->sc_dev)));
   8631 			sc->sc_tbi_linkup = 0;
   8632 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8633 		}
   8634 		/* Update LED */
   8635 		wm_tbi_serdes_set_linkled(sc);
   8636 	} else if (icr & ICR_RXSEQ) {
   8637 		DPRINTF(WM_DEBUG_LINK,
   8638 		    ("%s: LINK: Receive sequence error\n",
   8639 		    device_xname(sc->sc_dev)));
   8640 	}
   8641 }
   8642 
   8643 /*
   8644  * wm_linkintr_serdes:
   8645  *
    8646  *	Helper; handle link interrupts for SERDES mode.
   8647  */
   8648 static void
   8649 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8650 {
   8651 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8652 	struct mii_data *mii = &sc->sc_mii;
   8653 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8654 	uint32_t pcs_adv, pcs_lpab, reg;
   8655 
   8656 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8657 		__func__));
   8658 
   8659 	if (icr & ICR_LSC) {
   8660 		/* Check PCS */
   8661 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8662 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8663 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8664 				device_xname(sc->sc_dev)));
   8665 			mii->mii_media_status |= IFM_ACTIVE;
   8666 			sc->sc_tbi_linkup = 1;
   8667 			if_link_state_change(ifp, LINK_STATE_UP);
   8668 		} else {
   8669 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8670 				device_xname(sc->sc_dev)));
   8671 			mii->mii_media_status |= IFM_NONE;
   8672 			sc->sc_tbi_linkup = 0;
   8673 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8674 			wm_tbi_serdes_set_linkled(sc);
   8675 			return;
   8676 		}
   8677 		mii->mii_media_active |= IFM_1000_SX;
   8678 		if ((reg & PCS_LSTS_FDX) != 0)
   8679 			mii->mii_media_active |= IFM_FDX;
   8680 		else
   8681 			mii->mii_media_active |= IFM_HDX;
   8682 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8683 			/* Check flow */
   8684 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8685 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8686 				DPRINTF(WM_DEBUG_LINK,
   8687 				    ("XXX LINKOK but not ACOMP\n"));
   8688 				return;
   8689 			}
   8690 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8691 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8692 			DPRINTF(WM_DEBUG_LINK,
   8693 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
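			/*
			 * Resolve flow control as in IEEE 802.3 Annex 28B:
			 * symmetric pause when both sides advertise SYM;
			 * otherwise asymmetric TX or RX pause depending on
			 * each side's SYM/ASYM combination.
			 */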
   8694 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8695 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8696 				mii->mii_media_active |= IFM_FLOW
   8697 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8698 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8699 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8700 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8701 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8702 				mii->mii_media_active |= IFM_FLOW
   8703 				    | IFM_ETH_TXPAUSE;
   8704 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8705 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8706 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8707 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8708 				mii->mii_media_active |= IFM_FLOW
   8709 				    | IFM_ETH_RXPAUSE;
   8710 		}
   8711 		/* Update LED */
   8712 		wm_tbi_serdes_set_linkled(sc);
   8713 	} else {
   8714 		DPRINTF(WM_DEBUG_LINK,
   8715 		    ("%s: LINK: Receive sequence error\n",
   8716 		    device_xname(sc->sc_dev)));
   8717 	}
   8718 }
   8719 
   8720 /*
   8721  * wm_linkintr:
   8722  *
   8723  *	Helper; handle link interrupts.
   8724  */
   8725 static void
   8726 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8727 {
   8728 
   8729 	KASSERT(WM_CORE_LOCKED(sc));
   8730 
   8731 	if (sc->sc_flags & WM_F_HAS_MII)
   8732 		wm_linkintr_gmii(sc, icr);
   8733 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8734 	    && (sc->sc_type >= WM_T_82575))
   8735 		wm_linkintr_serdes(sc, icr);
   8736 	else
   8737 		wm_linkintr_tbi(sc, icr);
   8738 }
   8739 
   8740 /*
   8741  * wm_intr_legacy:
   8742  *
   8743  *	Interrupt service routine for INTx and MSI.
   8744  */
   8745 static int
   8746 wm_intr_legacy(void *arg)
   8747 {
   8748 	struct wm_softc *sc = arg;
   8749 	struct wm_queue *wmq = &sc->sc_queue[0];
   8750 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8751 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8752 	uint32_t icr, rndval = 0;
   8753 	int handled = 0;
   8754 
   8755 	while (1 /* CONSTCOND */) {
   8756 		icr = CSR_READ(sc, WMREG_ICR);
   8757 		if ((icr & sc->sc_icr) == 0)
   8758 			break;
   8759 		if (handled == 0) {
   8760 			DPRINTF(WM_DEBUG_TX,
    8761 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8762 		}
   8763 		if (rndval == 0)
   8764 			rndval = icr;
   8765 
   8766 		mutex_enter(rxq->rxq_lock);
   8767 
   8768 		if (rxq->rxq_stopping) {
   8769 			mutex_exit(rxq->rxq_lock);
   8770 			break;
   8771 		}
   8772 
   8773 		handled = 1;
   8774 
   8775 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8776 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8777 			DPRINTF(WM_DEBUG_RX,
   8778 			    ("%s: RX: got Rx intr 0x%08x\n",
   8779 			    device_xname(sc->sc_dev),
   8780 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8781 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8782 		}
   8783 #endif
   8784 		wm_rxeof(rxq, UINT_MAX);
   8785 
   8786 		mutex_exit(rxq->rxq_lock);
   8787 		mutex_enter(txq->txq_lock);
   8788 
   8789 		if (txq->txq_stopping) {
   8790 			mutex_exit(txq->txq_lock);
   8791 			break;
   8792 		}
   8793 
   8794 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8795 		if (icr & ICR_TXDW) {
   8796 			DPRINTF(WM_DEBUG_TX,
   8797 			    ("%s: TX: got TXDW interrupt\n",
   8798 			    device_xname(sc->sc_dev)));
   8799 			WM_Q_EVCNT_INCR(txq, txdw);
   8800 		}
   8801 #endif
   8802 		wm_txeof(sc, txq);
   8803 
   8804 		mutex_exit(txq->txq_lock);
   8805 		WM_CORE_LOCK(sc);
   8806 
   8807 		if (sc->sc_core_stopping) {
   8808 			WM_CORE_UNLOCK(sc);
   8809 			break;
   8810 		}
   8811 
   8812 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8813 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8814 			wm_linkintr(sc, icr);
   8815 		}
   8816 
   8817 		WM_CORE_UNLOCK(sc);
   8818 
   8819 		if (icr & ICR_RXO) {
   8820 #if defined(WM_DEBUG)
   8821 			log(LOG_WARNING, "%s: Receive overrun\n",
   8822 			    device_xname(sc->sc_dev));
   8823 #endif /* defined(WM_DEBUG) */
   8824 		}
   8825 	}
   8826 
   8827 	rnd_add_uint32(&sc->rnd_source, rndval);
   8828 
   8829 	if (handled) {
   8830 		/* Try to get more packets going. */
   8831 		softint_schedule(wmq->wmq_si);
   8832 	}
   8833 
   8834 	return handled;
   8835 }
   8836 
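/*
 * wm_txrxintr_disable:
 *
 *	Mask the Tx/Rx interrupts of the given queue.  The mask
 *	register and its bit layout differ among the 82574, the 82575
 *	and the newer multiqueue chips.
 */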
   8837 static inline void
   8838 wm_txrxintr_disable(struct wm_queue *wmq)
   8839 {
   8840 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8841 
   8842 	if (sc->sc_type == WM_T_82574)
   8843 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8844 	else if (sc->sc_type == WM_T_82575)
   8845 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8846 	else
   8847 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8848 }
   8849 
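/*
 * wm_txrxintr_enable:
 *
 *	Recalculate the interrupt throttling rate and unmask the
 *	Tx/Rx interrupts of the given queue.
 */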
   8850 static inline void
   8851 wm_txrxintr_enable(struct wm_queue *wmq)
   8852 {
   8853 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8854 
   8855 	wm_itrs_calculate(sc, wmq);
   8856 
   8857 	if (sc->sc_type == WM_T_82574)
   8858 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8859 	else if (sc->sc_type == WM_T_82575)
   8860 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8861 	else
   8862 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8863 }
   8864 
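/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for the Tx/Rx interrupts of a queue
 *	pair for MSI-X.
 */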
   8865 static int
   8866 wm_txrxintr_msix(void *arg)
   8867 {
   8868 	struct wm_queue *wmq = arg;
   8869 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8870 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8871 	struct wm_softc *sc = txq->txq_sc;
   8872 	u_int limit = sc->sc_rx_intr_process_limit;
   8873 
   8874 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8875 
   8876 	DPRINTF(WM_DEBUG_TX,
   8877 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8878 
   8879 	wm_txrxintr_disable(wmq);
   8880 
   8881 	mutex_enter(txq->txq_lock);
   8882 
   8883 	if (txq->txq_stopping) {
   8884 		mutex_exit(txq->txq_lock);
   8885 		return 0;
   8886 	}
   8887 
   8888 	WM_Q_EVCNT_INCR(txq, txdw);
   8889 	wm_txeof(sc, txq);
   8890 	/* wm_deferred start() is done in wm_handle_queue(). */
   8891 	mutex_exit(txq->txq_lock);
   8892 
   8893 	DPRINTF(WM_DEBUG_RX,
   8894 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8895 	mutex_enter(rxq->rxq_lock);
   8896 
   8897 	if (rxq->rxq_stopping) {
   8898 		mutex_exit(rxq->rxq_lock);
   8899 		return 0;
   8900 	}
   8901 
   8902 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8903 	wm_rxeof(rxq, limit);
   8904 	mutex_exit(rxq->rxq_lock);
   8905 
   8906 	wm_itrs_writereg(sc, wmq);
   8907 
   8908 	softint_schedule(wmq->wmq_si);
   8909 
   8910 	return 1;
   8911 }
   8912 
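/*
 * wm_handle_queue:
 *
 *	Softint handler; process Tx completions and received packets,
 *	restart deferred transmission, then re-enable the queue's
 *	interrupts.
 */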
   8913 static void
   8914 wm_handle_queue(void *arg)
   8915 {
   8916 	struct wm_queue *wmq = arg;
   8917 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8918 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8919 	struct wm_softc *sc = txq->txq_sc;
   8920 	u_int limit = sc->sc_rx_process_limit;
   8921 
   8922 	mutex_enter(txq->txq_lock);
   8923 	if (txq->txq_stopping) {
   8924 		mutex_exit(txq->txq_lock);
   8925 		return;
   8926 	}
   8927 	wm_txeof(sc, txq);
   8928 	wm_deferred_start_locked(txq);
   8929 	mutex_exit(txq->txq_lock);
   8930 
   8931 	mutex_enter(rxq->rxq_lock);
   8932 	if (rxq->rxq_stopping) {
   8933 		mutex_exit(rxq->rxq_lock);
   8934 		return;
   8935 	}
   8936 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8937 	wm_rxeof(rxq, limit);
   8938 	mutex_exit(rxq->rxq_lock);
   8939 
   8940 	wm_txrxintr_enable(wmq);
   8941 }
   8942 
   8943 /*
   8944  * wm_linkintr_msix:
   8945  *
   8946  *	Interrupt service routine for link status change for MSI-X.
   8947  */
   8948 static int
   8949 wm_linkintr_msix(void *arg)
   8950 {
   8951 	struct wm_softc *sc = arg;
   8952 	uint32_t reg;
   8953 
   8954 	DPRINTF(WM_DEBUG_LINK,
   8955 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8956 
   8957 	reg = CSR_READ(sc, WMREG_ICR);
   8958 	WM_CORE_LOCK(sc);
   8959 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8960 		goto out;
   8961 
   8962 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8963 	wm_linkintr(sc, ICR_LSC);
   8964 
   8965 out:
   8966 	WM_CORE_UNLOCK(sc);
   8967 
   8968 	if (sc->sc_type == WM_T_82574)
   8969 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8970 	else if (sc->sc_type == WM_T_82575)
   8971 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8972 	else
   8973 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8974 
   8975 	return 1;
   8976 }
   8977 
   8978 /*
   8979  * Media related.
   8980  * GMII, SGMII, TBI (and SERDES)
   8981  */
   8982 
   8983 /* Common */
   8984 
   8985 /*
   8986  * wm_tbi_serdes_set_linkled:
   8987  *
   8988  *	Update the link LED on TBI and SERDES devices.
   8989  */
   8990 static void
   8991 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8992 {
   8993 
   8994 	if (sc->sc_tbi_linkup)
   8995 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8996 	else
   8997 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8998 
   8999 	/* 82540 or newer devices are active low */
   9000 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9001 
   9002 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9003 }
   9004 
   9005 /* GMII related */
   9006 
   9007 /*
   9008  * wm_gmii_reset:
   9009  *
   9010  *	Reset the PHY.
   9011  */
   9012 static void
   9013 wm_gmii_reset(struct wm_softc *sc)
   9014 {
   9015 	uint32_t reg;
   9016 	int rv;
   9017 
   9018 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9019 		device_xname(sc->sc_dev), __func__));
   9020 
   9021 	rv = sc->phy.acquire(sc);
   9022 	if (rv != 0) {
   9023 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9024 		    __func__);
   9025 		return;
   9026 	}
   9027 
   9028 	switch (sc->sc_type) {
   9029 	case WM_T_82542_2_0:
   9030 	case WM_T_82542_2_1:
   9031 		/* null */
   9032 		break;
   9033 	case WM_T_82543:
   9034 		/*
   9035 		 * With 82543, we need to force speed and duplex on the MAC
   9036 		 * equal to what the PHY speed and duplex configuration is.
   9037 		 * In addition, we need to perform a hardware reset on the PHY
   9038 		 * to take it out of reset.
   9039 		 */
   9040 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9041 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9042 
   9043 		/* The PHY reset pin is active-low. */
   9044 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9045 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9046 		    CTRL_EXT_SWDPIN(4));
   9047 		reg |= CTRL_EXT_SWDPIO(4);
   9048 
   9049 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9050 		CSR_WRITE_FLUSH(sc);
   9051 		delay(10*1000);
   9052 
   9053 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9054 		CSR_WRITE_FLUSH(sc);
   9055 		delay(150);
   9056 #if 0
   9057 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9058 #endif
   9059 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9060 		break;
   9061 	case WM_T_82544:	/* reset 10000us */
   9062 	case WM_T_82540:
   9063 	case WM_T_82545:
   9064 	case WM_T_82545_3:
   9065 	case WM_T_82546:
   9066 	case WM_T_82546_3:
   9067 	case WM_T_82541:
   9068 	case WM_T_82541_2:
   9069 	case WM_T_82547:
   9070 	case WM_T_82547_2:
   9071 	case WM_T_82571:	/* reset 100us */
   9072 	case WM_T_82572:
   9073 	case WM_T_82573:
   9074 	case WM_T_82574:
   9075 	case WM_T_82575:
   9076 	case WM_T_82576:
   9077 	case WM_T_82580:
   9078 	case WM_T_I350:
   9079 	case WM_T_I354:
   9080 	case WM_T_I210:
   9081 	case WM_T_I211:
   9082 	case WM_T_82583:
   9083 	case WM_T_80003:
   9084 		/* generic reset */
   9085 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9086 		CSR_WRITE_FLUSH(sc);
   9087 		delay(20000);
   9088 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9089 		CSR_WRITE_FLUSH(sc);
   9090 		delay(20000);
   9091 
   9092 		if ((sc->sc_type == WM_T_82541)
   9093 		    || (sc->sc_type == WM_T_82541_2)
   9094 		    || (sc->sc_type == WM_T_82547)
   9095 		    || (sc->sc_type == WM_T_82547_2)) {
    9096 			/* Workarounds for IGP are done in igp_reset(). */
   9097 			/* XXX add code to set LED after phy reset */
   9098 		}
   9099 		break;
   9100 	case WM_T_ICH8:
   9101 	case WM_T_ICH9:
   9102 	case WM_T_ICH10:
   9103 	case WM_T_PCH:
   9104 	case WM_T_PCH2:
   9105 	case WM_T_PCH_LPT:
   9106 	case WM_T_PCH_SPT:
   9107 		/* generic reset */
   9108 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9109 		CSR_WRITE_FLUSH(sc);
   9110 		delay(100);
   9111 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9112 		CSR_WRITE_FLUSH(sc);
   9113 		delay(150);
   9114 		break;
   9115 	default:
   9116 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9117 		    __func__);
   9118 		break;
   9119 	}
   9120 
   9121 	sc->phy.release(sc);
   9122 
   9123 	/* get_cfg_done */
   9124 	wm_get_cfg_done(sc);
   9125 
   9126 	/* extra setup */
   9127 	switch (sc->sc_type) {
   9128 	case WM_T_82542_2_0:
   9129 	case WM_T_82542_2_1:
   9130 	case WM_T_82543:
   9131 	case WM_T_82544:
   9132 	case WM_T_82540:
   9133 	case WM_T_82545:
   9134 	case WM_T_82545_3:
   9135 	case WM_T_82546:
   9136 	case WM_T_82546_3:
   9137 	case WM_T_82541_2:
   9138 	case WM_T_82547_2:
   9139 	case WM_T_82571:
   9140 	case WM_T_82572:
   9141 	case WM_T_82573:
   9142 	case WM_T_82574:
   9143 	case WM_T_82583:
   9144 	case WM_T_82575:
   9145 	case WM_T_82576:
   9146 	case WM_T_82580:
   9147 	case WM_T_I350:
   9148 	case WM_T_I354:
   9149 	case WM_T_I210:
   9150 	case WM_T_I211:
   9151 	case WM_T_80003:
   9152 		/* null */
   9153 		break;
   9154 	case WM_T_82541:
   9155 	case WM_T_82547:
    9156 		/* XXX Actively configure LED after PHY reset */
   9157 		break;
   9158 	case WM_T_ICH8:
   9159 	case WM_T_ICH9:
   9160 	case WM_T_ICH10:
   9161 	case WM_T_PCH:
   9162 	case WM_T_PCH2:
   9163 	case WM_T_PCH_LPT:
   9164 	case WM_T_PCH_SPT:
   9165 		wm_phy_post_reset(sc);
   9166 		break;
   9167 	default:
   9168 		panic("%s: unknown type\n", __func__);
   9169 		break;
   9170 	}
   9171 }
   9172 
   9173 /*
   9174  * Setup sc_phytype and mii_{read|write}reg.
   9175  *
    9176  *  To identify the PHY type, the correct read/write functions must
    9177  * be selected.  To select the correct read/write functions, the PCI
    9178  * ID or MAC type is required, without accessing PHY registers.
    9179  *
    9180  *  On the first call of this function, the PHY ID is not yet known,
    9181  * so check the PCI ID or MAC type.  The list of PCI IDs may not be
    9182  * perfect, so the result might be incorrect.
    9183  *
    9184  *  On the second call, the PHY OUI and model are used to identify
    9185  * the PHY type.  It might not be perfect because of missing compare
    9186  * entries, but it is still better than the first call.
    9187  *
    9188  *  If the newly detected result differs from the previous
    9189  * assumption, a diagnostic message is printed.
   9190  */
   9191 static void
   9192 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9193     uint16_t phy_model)
   9194 {
   9195 	device_t dev = sc->sc_dev;
   9196 	struct mii_data *mii = &sc->sc_mii;
   9197 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9198 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9199 	mii_readreg_t new_readreg;
   9200 	mii_writereg_t new_writereg;
   9201 
   9202 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9203 		device_xname(sc->sc_dev), __func__));
   9204 
   9205 	if (mii->mii_readreg == NULL) {
   9206 		/*
   9207 		 *  This is the first call of this function. For ICH and PCH
   9208 		 * variants, it's difficult to determine the PHY access method
   9209 		 * by sc_type, so use the PCI product ID for some devices.
   9210 		 */
   9211 
   9212 		switch (sc->sc_pcidevid) {
   9213 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9214 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9215 			/* 82577 */
   9216 			new_phytype = WMPHY_82577;
   9217 			break;
   9218 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9219 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9220 			/* 82578 */
   9221 			new_phytype = WMPHY_82578;
   9222 			break;
   9223 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9224 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9225 			/* 82579 */
   9226 			new_phytype = WMPHY_82579;
   9227 			break;
   9228 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9229 		case PCI_PRODUCT_INTEL_82801I_BM:
   9230 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9231 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9232 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9233 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9234 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9235 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9236 			/* ICH8, 9, 10 with 82567 */
   9237 			new_phytype = WMPHY_BM;
   9238 			break;
   9239 		default:
   9240 			break;
   9241 		}
   9242 	} else {
   9243 		/* It's not the first call. Use PHY OUI and model */
   9244 		switch (phy_oui) {
   9245 		case MII_OUI_ATHEROS: /* XXX ??? */
   9246 			switch (phy_model) {
   9247 			case 0x0004: /* XXX */
   9248 				new_phytype = WMPHY_82578;
   9249 				break;
   9250 			default:
   9251 				break;
   9252 			}
   9253 			break;
   9254 		case MII_OUI_xxMARVELL:
   9255 			switch (phy_model) {
   9256 			case MII_MODEL_xxMARVELL_I210:
   9257 				new_phytype = WMPHY_I210;
   9258 				break;
   9259 			case MII_MODEL_xxMARVELL_E1011:
   9260 			case MII_MODEL_xxMARVELL_E1000_3:
   9261 			case MII_MODEL_xxMARVELL_E1000_5:
   9262 			case MII_MODEL_xxMARVELL_E1112:
   9263 				new_phytype = WMPHY_M88;
   9264 				break;
   9265 			case MII_MODEL_xxMARVELL_E1149:
   9266 				new_phytype = WMPHY_BM;
   9267 				break;
   9268 			case MII_MODEL_xxMARVELL_E1111:
   9269 			case MII_MODEL_xxMARVELL_I347:
   9270 			case MII_MODEL_xxMARVELL_E1512:
   9271 			case MII_MODEL_xxMARVELL_E1340M:
   9272 			case MII_MODEL_xxMARVELL_E1543:
   9273 				new_phytype = WMPHY_M88;
   9274 				break;
   9275 			case MII_MODEL_xxMARVELL_I82563:
   9276 				new_phytype = WMPHY_GG82563;
   9277 				break;
   9278 			default:
   9279 				break;
   9280 			}
   9281 			break;
   9282 		case MII_OUI_INTEL:
   9283 			switch (phy_model) {
   9284 			case MII_MODEL_INTEL_I82577:
   9285 				new_phytype = WMPHY_82577;
   9286 				break;
   9287 			case MII_MODEL_INTEL_I82579:
   9288 				new_phytype = WMPHY_82579;
   9289 				break;
   9290 			case MII_MODEL_INTEL_I217:
   9291 				new_phytype = WMPHY_I217;
   9292 				break;
   9293 			case MII_MODEL_INTEL_I82580:
   9294 			case MII_MODEL_INTEL_I350:
   9295 				new_phytype = WMPHY_82580;
   9296 				break;
   9297 			default:
   9298 				break;
   9299 			}
   9300 			break;
   9301 		case MII_OUI_yyINTEL:
   9302 			switch (phy_model) {
   9303 			case MII_MODEL_yyINTEL_I82562G:
   9304 			case MII_MODEL_yyINTEL_I82562EM:
   9305 			case MII_MODEL_yyINTEL_I82562ET:
   9306 				new_phytype = WMPHY_IFE;
   9307 				break;
   9308 			case MII_MODEL_yyINTEL_IGP01E1000:
   9309 				new_phytype = WMPHY_IGP;
   9310 				break;
   9311 			case MII_MODEL_yyINTEL_I82566:
   9312 				new_phytype = WMPHY_IGP_3;
   9313 				break;
   9314 			default:
   9315 				break;
   9316 			}
   9317 			break;
   9318 		default:
   9319 			break;
   9320 		}
   9321 		if (new_phytype == WMPHY_UNKNOWN)
   9322 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9323 			    __func__);
   9324 
   9325 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9326 		    && (sc->sc_phytype != new_phytype)) {
    9327 			aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    9328 			    " was incorrect. PHY type from PHY ID = %u\n",
   9329 			    sc->sc_phytype, new_phytype);
   9330 		}
   9331 	}
   9332 
   9333 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9334 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9335 		/* SGMII */
   9336 		new_readreg = wm_sgmii_readreg;
   9337 		new_writereg = wm_sgmii_writereg;
   9338 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9339 		/* BM2 (phyaddr == 1) */
   9340 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9341 		    && (new_phytype != WMPHY_BM)
   9342 		    && (new_phytype != WMPHY_UNKNOWN))
   9343 			doubt_phytype = new_phytype;
   9344 		new_phytype = WMPHY_BM;
   9345 		new_readreg = wm_gmii_bm_readreg;
   9346 		new_writereg = wm_gmii_bm_writereg;
   9347 	} else if (sc->sc_type >= WM_T_PCH) {
   9348 		/* All PCH* use _hv_ */
   9349 		new_readreg = wm_gmii_hv_readreg;
   9350 		new_writereg = wm_gmii_hv_writereg;
   9351 	} else if (sc->sc_type >= WM_T_ICH8) {
   9352 		/* non-82567 ICH8, 9 and 10 */
   9353 		new_readreg = wm_gmii_i82544_readreg;
   9354 		new_writereg = wm_gmii_i82544_writereg;
   9355 	} else if (sc->sc_type >= WM_T_80003) {
   9356 		/* 80003 */
   9357 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9358 		    && (new_phytype != WMPHY_GG82563)
   9359 		    && (new_phytype != WMPHY_UNKNOWN))
   9360 			doubt_phytype = new_phytype;
   9361 		new_phytype = WMPHY_GG82563;
   9362 		new_readreg = wm_gmii_i80003_readreg;
   9363 		new_writereg = wm_gmii_i80003_writereg;
   9364 	} else if (sc->sc_type >= WM_T_I210) {
   9365 		/* I210 and I211 */
   9366 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9367 		    && (new_phytype != WMPHY_I210)
   9368 		    && (new_phytype != WMPHY_UNKNOWN))
   9369 			doubt_phytype = new_phytype;
   9370 		new_phytype = WMPHY_I210;
   9371 		new_readreg = wm_gmii_gs40g_readreg;
   9372 		new_writereg = wm_gmii_gs40g_writereg;
   9373 	} else if (sc->sc_type >= WM_T_82580) {
   9374 		/* 82580, I350 and I354 */
   9375 		new_readreg = wm_gmii_82580_readreg;
   9376 		new_writereg = wm_gmii_82580_writereg;
   9377 	} else if (sc->sc_type >= WM_T_82544) {
    9378 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9379 		new_readreg = wm_gmii_i82544_readreg;
   9380 		new_writereg = wm_gmii_i82544_writereg;
   9381 	} else {
   9382 		new_readreg = wm_gmii_i82543_readreg;
   9383 		new_writereg = wm_gmii_i82543_writereg;
   9384 	}
   9385 
   9386 	if (new_phytype == WMPHY_BM) {
   9387 		/* All BM use _bm_ */
   9388 		new_readreg = wm_gmii_bm_readreg;
   9389 		new_writereg = wm_gmii_bm_writereg;
   9390 	}
   9391 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9392 		/* All PCH* use _hv_ */
   9393 		new_readreg = wm_gmii_hv_readreg;
   9394 		new_writereg = wm_gmii_hv_writereg;
   9395 	}
   9396 
   9397 	/* Diag output */
   9398 	if (doubt_phytype != WMPHY_UNKNOWN)
   9399 		aprint_error_dev(dev, "Assumed new PHY type was "
   9400 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9401 		    new_phytype);
   9402 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9403 	    && (sc->sc_phytype != new_phytype))
    9404 		aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    9405 		    " was incorrect. New PHY type = %u\n",
   9406 		    sc->sc_phytype, new_phytype);
   9407 
   9408 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9409 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9410 
   9411 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9412 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9413 		    "function was incorrect.\n");
   9414 
   9415 	/* Update now */
   9416 	sc->sc_phytype = new_phytype;
   9417 	mii->mii_readreg = new_readreg;
   9418 	mii->mii_writereg = new_writereg;
   9419 }
   9420 
   9421 /*
   9422  * wm_get_phy_id_82575:
   9423  *
    9424  * Return the PHY ID, or -1 if it could not be determined.
   9425  */
   9426 static int
   9427 wm_get_phy_id_82575(struct wm_softc *sc)
   9428 {
   9429 	uint32_t reg;
   9430 	int phyid = -1;
   9431 
   9432 	/* XXX */
   9433 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9434 		return -1;
   9435 
   9436 	if (wm_sgmii_uses_mdio(sc)) {
   9437 		switch (sc->sc_type) {
   9438 		case WM_T_82575:
   9439 		case WM_T_82576:
   9440 			reg = CSR_READ(sc, WMREG_MDIC);
   9441 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9442 			break;
   9443 		case WM_T_82580:
   9444 		case WM_T_I350:
   9445 		case WM_T_I354:
   9446 		case WM_T_I210:
   9447 		case WM_T_I211:
   9448 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9449 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9450 			break;
   9451 		default:
   9452 			return -1;
   9453 		}
   9454 	}
   9455 
   9456 	return phyid;
   9457 }
   9458 
   9459 
   9460 /*
   9461  * wm_gmii_mediainit:
   9462  *
   9463  *	Initialize media for use on 1000BASE-T devices.
   9464  */
   9465 static void
   9466 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9467 {
   9468 	device_t dev = sc->sc_dev;
   9469 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9470 	struct mii_data *mii = &sc->sc_mii;
   9471 	uint32_t reg;
   9472 
   9473 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9474 		device_xname(sc->sc_dev), __func__));
   9475 
   9476 	/* We have GMII. */
   9477 	sc->sc_flags |= WM_F_HAS_MII;
   9478 
   9479 	if (sc->sc_type == WM_T_80003)
   9480 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9481 	else
   9482 		sc->sc_tipg = TIPG_1000T_DFLT;
   9483 
   9484 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9485 	if ((sc->sc_type == WM_T_82580)
   9486 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9487 	    || (sc->sc_type == WM_T_I211)) {
   9488 		reg = CSR_READ(sc, WMREG_PHPM);
   9489 		reg &= ~PHPM_GO_LINK_D;
   9490 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9491 	}
   9492 
   9493 	/*
   9494 	 * Let the chip set speed/duplex on its own based on
   9495 	 * signals from the PHY.
   9496 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9497 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9498 	 */
   9499 	sc->sc_ctrl |= CTRL_SLU;
   9500 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9501 
   9502 	/* Initialize our media structures and probe the GMII. */
   9503 	mii->mii_ifp = ifp;
   9504 
   9505 	mii->mii_statchg = wm_gmii_statchg;
   9506 
   9507 	/* get PHY control from SMBus to PCIe */
   9508 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9509 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9510 		wm_smbustopci(sc);
   9511 
   9512 	wm_gmii_reset(sc);
   9513 
   9514 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9515 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9516 	    wm_gmii_mediastatus);
   9517 
   9518 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9519 	    || (sc->sc_type == WM_T_82580)
   9520 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9521 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9522 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9523 			/* Attach only one port */
   9524 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9525 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9526 		} else {
   9527 			int i, id;
   9528 			uint32_t ctrl_ext;
   9529 
   9530 			id = wm_get_phy_id_82575(sc);
   9531 			if (id != -1) {
   9532 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9533 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9534 			}
   9535 			if ((id == -1)
   9536 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9537 				/* Power on the SGMII PHY if it is disabled */
   9538 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9539 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9540 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9541 				CSR_WRITE_FLUSH(sc);
   9542 				delay(300*1000); /* XXX too long */
   9543 
    9544 				/* Try PHY addresses from 1 to 7 */
   9545 				for (i = 1; i < 8; i++)
   9546 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9547 					    0xffffffff, i, MII_OFFSET_ANY,
   9548 					    MIIF_DOPAUSE);
   9549 
   9550 				/* restore previous sfp cage power state */
   9551 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9552 			}
   9553 		}
   9554 	} else {
   9555 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9556 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9557 	}
   9558 
   9559 	/*
   9560 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9561 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9562 	 */
   9563 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9564 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9565 		wm_set_mdio_slow_mode_hv(sc);
   9566 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9567 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9568 	}
   9569 
   9570 	/*
   9571 	 * (For ICH8 variants)
   9572 	 * If PHY detection failed, use BM's r/w function and retry.
   9573 	 */
   9574 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9575 		/* if failed, retry with *_bm_* */
   9576 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9577 		    "(type = %d) might be incorrect. Use BM functions and retry.\n",
   9578 		    sc->sc_phytype);
   9579 		sc->sc_phytype = WMPHY_BM;
   9580 		mii->mii_readreg = wm_gmii_bm_readreg;
   9581 		mii->mii_writereg = wm_gmii_bm_writereg;
   9582 
   9583 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9584 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9585 	}
   9586 
   9587 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9588 		/* No PHY was found */
   9589 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9590 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9591 		sc->sc_phytype = WMPHY_NONE;
   9592 	} else {
   9593 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9594 
   9595 		/*
   9596 		 * A PHY was found. Check the PHY type again with a second
   9597 		 * call of wm_gmii_setup_phytype().
   9598 		 */
   9599 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9600 		    child->mii_mpd_model);
   9601 
   9602 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9603 	}
   9604 }
   9605 
   9606 /*
   9607  * wm_gmii_mediachange:	[ifmedia interface function]
   9608  *
   9609  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9610  */
   9611 static int
   9612 wm_gmii_mediachange(struct ifnet *ifp)
   9613 {
   9614 	struct wm_softc *sc = ifp->if_softc;
   9615 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9616 	int rc;
   9617 
   9618 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9619 		device_xname(sc->sc_dev), __func__));
   9620 	if ((ifp->if_flags & IFF_UP) == 0)
   9621 		return 0;
   9622 
   9623 	/* Disable D0 LPLU. */
   9624 	wm_lplu_d0_disable(sc);
   9625 
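        	/*
        	 * With IFM_AUTO (or on post-82543 parts, which take speed and
        	 * duplex from the PHY), let the MAC follow the PHY; otherwise
        	 * force the user-selected speed and duplex below.
        	 */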
   9626 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9627 	sc->sc_ctrl |= CTRL_SLU;
   9628 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9629 	    || (sc->sc_type > WM_T_82543)) {
   9630 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9631 	} else {
   9632 		sc->sc_ctrl &= ~CTRL_ASDE;
   9633 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9634 		if (ife->ifm_media & IFM_FDX)
   9635 			sc->sc_ctrl |= CTRL_FD;
   9636 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9637 		case IFM_10_T:
   9638 			sc->sc_ctrl |= CTRL_SPEED_10;
   9639 			break;
   9640 		case IFM_100_TX:
   9641 			sc->sc_ctrl |= CTRL_SPEED_100;
   9642 			break;
   9643 		case IFM_1000_T:
   9644 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9645 			break;
   9646 		default:
   9647 			panic("wm_gmii_mediachange: bad media 0x%x",
   9648 			    ife->ifm_media);
   9649 		}
   9650 	}
   9651 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9652 	CSR_WRITE_FLUSH(sc);
   9653 	if (sc->sc_type <= WM_T_82543)
   9654 		wm_gmii_reset(sc);
   9655 
   9656 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9657 		return 0;
   9658 	return rc;
   9659 }
   9660 
   9661 /*
   9662  * wm_gmii_mediastatus:	[ifmedia interface function]
   9663  *
   9664  *	Get the current interface media status on a 1000BASE-T device.
   9665  */
   9666 static void
   9667 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9668 {
   9669 	struct wm_softc *sc = ifp->if_softc;
   9670 
   9671 	ether_mediastatus(ifp, ifmr);
   9672 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9673 	    | sc->sc_flowflags;
   9674 }
   9675 
   9676 #define	MDI_IO		CTRL_SWDPIN(2)
   9677 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9678 #define	MDI_CLK		CTRL_SWDPIN(3)
   9679 
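        /*
         * The 82543 has no MDIC register; MII management frames are
         * bit-banged through the software-definable pins: MDI_IO carries
         * the data, MDI_CLK supplies the clock and MDI_DIR sets the
         * host -> PHY direction.
         */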
   9680 static void
   9681 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9682 {
   9683 	uint32_t i, v;
   9684 
   9685 	v = CSR_READ(sc, WMREG_CTRL);
   9686 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9687 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9688 
   9689 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9690 		if (data & i)
   9691 			v |= MDI_IO;
   9692 		else
   9693 			v &= ~MDI_IO;
   9694 		CSR_WRITE(sc, WMREG_CTRL, v);
   9695 		CSR_WRITE_FLUSH(sc);
   9696 		delay(10);
   9697 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9698 		CSR_WRITE_FLUSH(sc);
   9699 		delay(10);
   9700 		CSR_WRITE(sc, WMREG_CTRL, v);
   9701 		CSR_WRITE_FLUSH(sc);
   9702 		delay(10);
   9703 	}
   9704 }
   9705 
   9706 static uint32_t
   9707 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9708 {
   9709 	uint32_t v, i, data = 0;
   9710 
   9711 	v = CSR_READ(sc, WMREG_CTRL);
   9712 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9713 	v |= CTRL_SWDPIO(3);
   9714 
   9715 	CSR_WRITE(sc, WMREG_CTRL, v);
   9716 	CSR_WRITE_FLUSH(sc);
   9717 	delay(10);
   9718 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9719 	CSR_WRITE_FLUSH(sc);
   9720 	delay(10);
   9721 	CSR_WRITE(sc, WMREG_CTRL, v);
   9722 	CSR_WRITE_FLUSH(sc);
   9723 	delay(10);
   9724 
   9725 	for (i = 0; i < 16; i++) {
   9726 		data <<= 1;
   9727 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9728 		CSR_WRITE_FLUSH(sc);
   9729 		delay(10);
   9730 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9731 			data |= 1;
   9732 		CSR_WRITE(sc, WMREG_CTRL, v);
   9733 		CSR_WRITE_FLUSH(sc);
   9734 		delay(10);
   9735 	}
   9736 
   9737 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9738 	CSR_WRITE_FLUSH(sc);
   9739 	delay(10);
   9740 	CSR_WRITE(sc, WMREG_CTRL, v);
   9741 	CSR_WRITE_FLUSH(sc);
   9742 	delay(10);
   9743 
   9744 	return data;
   9745 }
   9746 
   9747 #undef MDI_IO
   9748 #undef MDI_DIR
   9749 #undef MDI_CLK
   9750 
   9751 /*
   9752  * wm_gmii_i82543_readreg:	[mii interface function]
   9753  *
   9754  *	Read a PHY register on the GMII (i82543 version).
   9755  */
   9756 static int
   9757 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9758 {
   9759 	struct wm_softc *sc = device_private(dev);
   9760 	int rv;
   9761 
   9762 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9763 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9764 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9765 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9766 
   9767 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9768 	    device_xname(dev), phy, reg, rv));
   9769 
   9770 	return rv;
   9771 }
   9772 
   9773 /*
   9774  * wm_gmii_i82543_writereg:	[mii interface function]
   9775  *
   9776  *	Write a PHY register on the GMII (i82543 version).
   9777  */
   9778 static void
   9779 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9780 {
   9781 	struct wm_softc *sc = device_private(dev);
   9782 
   9783 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9784 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9785 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9786 	    (MII_COMMAND_START << 30), 32);
   9787 }
   9788 
   9789 /*
   9790  * wm_gmii_mdic_readreg:	[mii interface function]
   9791  *
   9792  *	Read a PHY register on the GMII.
   9793  */
   9794 static int
   9795 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9796 {
   9797 	struct wm_softc *sc = device_private(dev);
   9798 	uint32_t mdic = 0;
   9799 	int i, rv;
   9800 
   9801 	if (reg > MII_ADDRMASK) {
   9802 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9803 		    __func__, sc->sc_phytype, reg);
   9804 		reg &= MII_ADDRMASK;
   9805 	}
   9806 
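        	/*
        	 * Start the read, then poll MDIC until the hardware reports
        	 * completion by setting the READY bit.
        	 */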
   9807 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9808 	    MDIC_REGADD(reg));
   9809 
   9810 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9811 		mdic = CSR_READ(sc, WMREG_MDIC);
   9812 		if (mdic & MDIC_READY)
   9813 			break;
   9814 		delay(50);
   9815 	}
   9816 
   9817 	if ((mdic & MDIC_READY) == 0) {
   9818 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9819 		    device_xname(dev), phy, reg);
   9820 		rv = 0;
   9821 	} else if (mdic & MDIC_E) {
   9822 #if 0 /* This is normal if no PHY is present. */
   9823 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9824 		    device_xname(dev), phy, reg);
   9825 #endif
   9826 		rv = 0;
   9827 	} else {
   9828 		rv = MDIC_DATA(mdic);
   9829 		if (rv == 0xffff)
   9830 			rv = 0;
   9831 	}
   9832 
   9833 	return rv;
   9834 }
   9835 
   9836 /*
   9837  * wm_gmii_mdic_writereg:	[mii interface function]
   9838  *
   9839  *	Write a PHY register on the GMII.
   9840  */
   9841 static void
   9842 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9843 {
   9844 	struct wm_softc *sc = device_private(dev);
   9845 	uint32_t mdic = 0;
   9846 	int i;
   9847 
   9848 	if (reg > MII_ADDRMASK) {
   9849 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9850 		    __func__, sc->sc_phytype, reg);
   9851 		reg &= MII_ADDRMASK;
   9852 	}
   9853 
   9854 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9855 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9856 
   9857 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9858 		mdic = CSR_READ(sc, WMREG_MDIC);
   9859 		if (mdic & MDIC_READY)
   9860 			break;
   9861 		delay(50);
   9862 	}
   9863 
   9864 	if ((mdic & MDIC_READY) == 0)
   9865 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9866 		    device_xname(dev), phy, reg);
   9867 	else if (mdic & MDIC_E)
   9868 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9869 		    device_xname(dev), phy, reg);
   9870 }
   9871 
   9872 /*
   9873  * wm_gmii_i82544_readreg:	[mii interface function]
   9874  *
   9875  *	Read a PHY register on the GMII.
   9876  */
   9877 static int
   9878 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9879 {
   9880 	struct wm_softc *sc = device_private(dev);
   9881 	int rv;
   9882 
   9883 	if (sc->phy.acquire(sc)) {
   9884 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9885 		return 0;
   9886 	}
   9887 
   9888 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9889 		switch (sc->sc_phytype) {
   9890 		case WMPHY_IGP:
   9891 		case WMPHY_IGP_2:
   9892 		case WMPHY_IGP_3:
   9893 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9894 			break;
   9895 		default:
   9896 #ifdef WM_DEBUG
   9897 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9898 			    __func__, sc->sc_phytype, reg);
   9899 #endif
   9900 			break;
   9901 		}
   9902 	}
   9903 
   9904 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9905 	sc->phy.release(sc);
   9906 
   9907 	return rv;
   9908 }
   9909 
   9910 /*
   9911  * wm_gmii_i82544_writereg:	[mii interface function]
   9912  *
   9913  *	Write a PHY register on the GMII.
   9914  */
   9915 static void
   9916 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9917 {
   9918 	struct wm_softc *sc = device_private(dev);
   9919 
   9920 	if (sc->phy.acquire(sc)) {
   9921 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9922 		return;
   9923 	}
   9924 
   9925 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9926 		switch (sc->sc_phytype) {
   9927 		case WMPHY_IGP:
   9928 		case WMPHY_IGP_2:
   9929 		case WMPHY_IGP_3:
   9930 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9931 			break;
   9932 		default:
   9933 #ifdef WM_DEBUG
   9934 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9935 			    __func__, sc->sc_phytype, reg);
   9936 #endif
   9937 			break;
   9938 		}
   9939 	}
   9940 
   9941 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9942 	sc->phy.release(sc);
   9943 }
   9944 
   9945 /*
   9946  * wm_gmii_i80003_readreg:	[mii interface function]
   9947  *
   9948  *	Read a PHY register on the Kumeran bus (80003).
   9949  * This could be handled by the PHY layer if we didn't have to lock the
   9950  * resource ...
   9951  */
   9952 static int
   9953 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9954 {
   9955 	struct wm_softc *sc = device_private(dev);
   9956 	int rv;
   9957 
   9958 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   9959 		return 0;
   9960 
   9961 	if (sc->phy.acquire(sc)) {
   9962 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9963 		return 0;
   9964 	}
   9965 
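        	/*
        	 * Select the page first. The GG82563 has two page-select
        	 * registers; which one applies depends on the register offset
        	 * being accessed.
        	 */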
   9966 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9967 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   9968 		    reg >> GG82563_PAGE_SHIFT);
   9969 	} else {
   9970 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9971 		    reg >> GG82563_PAGE_SHIFT);
   9972 	}
   9973 	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   9974 	delay(200);
   9975 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9976 	delay(200);
   9977 	sc->phy.release(sc);
   9978 
   9979 	return rv;
   9980 }
   9981 
   9982 /*
   9983  * wm_gmii_i80003_writereg:	[mii interface function]
   9984  *
   9985  *	Write a PHY register on the Kumeran bus (80003).
   9986  * This could be handled by the PHY layer if we didn't have to lock the
   9987  * resource ...
   9988  */
   9989 static void
   9990 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   9991 {
   9992 	struct wm_softc *sc = device_private(dev);
   9993 
   9994 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   9995 		return;
   9996 
   9997 	if (sc->phy.acquire(sc)) {
   9998 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9999 		return;
   10000 	}
   10001 
   10002 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   10003 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT,
   10004 		    reg >> GG82563_PAGE_SHIFT);
   10005 	} else {
   10006 		wm_gmii_mdic_writereg(dev, phy, GG82563_PHY_PAGE_SELECT_ALT,
   10007 		    reg >> GG82563_PAGE_SHIFT);
   10008 	}
  10009 	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   10010 	delay(200);
   10011 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10012 	delay(200);
   10013 
   10014 	sc->phy.release(sc);
   10015 }
   10016 
   10017 /*
   10018  * wm_gmii_bm_readreg:	[mii interface function]
   10019  *
  10020  *	Read a PHY register on the BM PHY.
  10021  * This could be handled by the PHY layer if we didn't have to lock the
  10022  * resource ...
   10023  */
   10024 static int
   10025 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10026 {
   10027 	struct wm_softc *sc = device_private(dev);
   10028 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10029 	uint16_t val;
   10030 	int rv;
   10031 
   10032 	if (sc->phy.acquire(sc)) {
   10033 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10034 		return 0;
   10035 	}
   10036 
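        	/*
        	 * Except on the 82574 and 82583, registers on pages >= 768,
        	 * page-0 register 25 and register 31 are reached through PHY
        	 * address 1.
        	 */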
   10037 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10038 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10039 		    || (reg == 31)) ? 1 : phy;
   10040 	/* Page 800 works differently than the rest so it has its own func */
   10041 	if (page == BM_WUC_PAGE) {
   10042 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10043 		rv = val;
   10044 		goto release;
   10045 	}
   10046 
   10047 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10048 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10049 		    && (sc->sc_type != WM_T_82583))
   10050 			wm_gmii_mdic_writereg(dev, phy,
   10051 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10052 		else
   10053 			wm_gmii_mdic_writereg(dev, phy,
   10054 			    BME1000_PHY_PAGE_SELECT, page);
   10055 	}
   10056 
   10057 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10058 
   10059 release:
   10060 	sc->phy.release(sc);
   10061 	return rv;
   10062 }
   10063 
   10064 /*
   10065  * wm_gmii_bm_writereg:	[mii interface function]
   10066  *
  10067  *	Write a PHY register on the BM PHY.
  10068  * This could be handled by the PHY layer if we didn't have to lock the
  10069  * resource ...
   10070  */
   10071 static void
   10072 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10073 {
   10074 	struct wm_softc *sc = device_private(dev);
   10075 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10076 
   10077 	if (sc->phy.acquire(sc)) {
   10078 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10079 		return;
   10080 	}
   10081 
   10082 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10083 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10084 		    || (reg == 31)) ? 1 : phy;
   10085 	/* Page 800 works differently than the rest so it has its own func */
   10086 	if (page == BM_WUC_PAGE) {
   10087 		uint16_t tmp;
   10088 
   10089 		tmp = val;
   10090 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10091 		goto release;
   10092 	}
   10093 
   10094 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10095 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10096 		    && (sc->sc_type != WM_T_82583))
   10097 			wm_gmii_mdic_writereg(dev, phy,
   10098 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10099 		else
   10100 			wm_gmii_mdic_writereg(dev, phy,
   10101 			    BME1000_PHY_PAGE_SELECT, page);
   10102 	}
   10103 
   10104 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10105 
   10106 release:
   10107 	sc->phy.release(sc);
   10108 }
   10109 
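        /*
         * wm_access_phy_wakeup_reg_bm:
         *
         *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
         *	page 800. The caller must already hold the PHY semaphore.
         */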
   10110 static void
  10111 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10112 {
   10113 	struct wm_softc *sc = device_private(dev);
   10114 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10115 	uint16_t wuce, reg;
   10116 
   10117 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10118 		device_xname(dev), __func__));
   10119 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10120 	if (sc->sc_type == WM_T_PCH) {
  10121 		/* XXX The e1000 driver does nothing here... why? */
   10122 	}
   10123 
   10124 	/*
  10125 	 * 1) Enable access to the PHY wakeup registers first.
   10126 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10127 	 */
   10128 
   10129 	/* Set page 769 */
   10130 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10131 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10132 
   10133 	/* Read WUCE and save it */
   10134 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10135 
   10136 	reg = wuce | BM_WUC_ENABLE_BIT;
   10137 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10138 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10139 
   10140 	/* Select page 800 */
   10141 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10142 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10143 
   10144 	/*
   10145 	 * 2) Access PHY wakeup register.
   10146 	 * See e1000_access_phy_wakeup_reg_bm.
   10147 	 */
   10148 
  10149 	/* Write the register offset within page 800 */
   10150 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10151 
   10152 	if (rd)
   10153 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10154 	else
   10155 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10156 
   10157 	/*
  10158 	 * 3) Disable access to the PHY wakeup registers.
   10159 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10160 	 */
   10161 	/* Set page 769 */
   10162 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10163 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10164 
   10165 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10166 }
   10167 
   10168 /*
   10169  * wm_gmii_hv_readreg:	[mii interface function]
   10170  *
  10171  *	Read a PHY register on the HV PHY (PCH and newer).
  10172  * This could be handled by the PHY layer if we didn't have to lock the
  10173  * resource ...
   10174  */
   10175 static int
   10176 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10177 {
   10178 	struct wm_softc *sc = device_private(dev);
   10179 	int rv;
   10180 
   10181 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10182 		device_xname(dev), __func__));
   10183 	if (sc->phy.acquire(sc)) {
   10184 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10185 		return 0;
   10186 	}
   10187 
   10188 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10189 	sc->phy.release(sc);
   10190 	return rv;
   10191 }
   10192 
   10193 static int
   10194 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10195 {
   10196 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10197 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10198 	uint16_t val;
   10199 	int rv;
   10200 
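        	/* Registers on pages >= 768 are reached through PHY address 1. */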
   10201 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10202 
   10203 	/* Page 800 works differently than the rest so it has its own func */
   10204 	if (page == BM_WUC_PAGE) {
   10205 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10206 		return val;
   10207 	}
   10208 
   10209 	/*
  10210 	 * Pages below 768 work differently from the rest; they would need
  10211 	 * their own handler, which is not implemented, so just bail out.
   10212 	 */
   10213 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10214 		printf("gmii_hv_readreg!!!\n");
   10215 		return 0;
   10216 	}
   10217 
   10218 	/*
   10219 	 * XXX I21[789] documents say that the SMBus Address register is at
   10220 	 * PHY address 01, Page 0 (not 768), Register 26.
   10221 	 */
   10222 	if (page == HV_INTC_FC_PAGE_START)
   10223 		page = 0;
   10224 
   10225 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10226 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10227 		    page << BME1000_PAGE_SHIFT);
   10228 	}
   10229 
   10230 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10231 	return rv;
   10232 }
   10233 
   10234 /*
   10235  * wm_gmii_hv_writereg:	[mii interface function]
   10236  *
  10237  *	Write a PHY register on the HV PHY (PCH and newer).
  10238  * This could be handled by the PHY layer if we didn't have to lock the
  10239  * resource ...
   10240  */
   10241 static void
   10242 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10243 {
   10244 	struct wm_softc *sc = device_private(dev);
   10245 
   10246 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10247 		device_xname(dev), __func__));
   10248 
   10249 	if (sc->phy.acquire(sc)) {
   10250 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10251 		return;
   10252 	}
   10253 
   10254 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10255 	sc->phy.release(sc);
   10256 }
   10257 
   10258 static void
   10259 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10260 {
   10261 	struct wm_softc *sc = device_private(dev);
   10262 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10263 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10264 
   10265 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10266 
   10267 	/* Page 800 works differently than the rest so it has its own func */
   10268 	if (page == BM_WUC_PAGE) {
   10269 		uint16_t tmp;
   10270 
   10271 		tmp = val;
   10272 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10273 		return;
   10274 	}
   10275 
   10276 	/*
  10277 	 * Pages below 768 work differently from the rest; they would need
  10278 	 * their own handler, which is not implemented, so just bail out.
   10279 	 */
   10280 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10281 		printf("gmii_hv_writereg!!!\n");
   10282 		return;
   10283 	}
   10284 
   10285 	{
   10286 		/*
   10287 		 * XXX I21[789] documents say that the SMBus Address register
   10288 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10289 		 */
   10290 		if (page == HV_INTC_FC_PAGE_START)
   10291 			page = 0;
   10292 
   10293 		/*
   10294 		 * XXX Workaround MDIO accesses being disabled after entering
   10295 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10296 		 * register is set)
   10297 		 */
   10298 		if (sc->sc_phytype == WMPHY_82578) {
   10299 			struct mii_softc *child;
   10300 
   10301 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10302 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10303 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10304 			    && ((val & (1 << 11)) != 0)) {
   10305 				printf("XXX need workaround\n");
   10306 			}
   10307 		}
   10308 
   10309 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10310 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10311 			    page << BME1000_PAGE_SHIFT);
   10312 		}
   10313 	}
   10314 
   10315 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10316 }
   10317 
   10318 /*
   10319  * wm_gmii_82580_readreg:	[mii interface function]
   10320  *
   10321  *	Read a PHY register on the 82580 and I350.
   10322  * This could be handled by the PHY layer if we didn't have to lock the
  10323  * resource ...
   10324  */
   10325 static int
   10326 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10327 {
   10328 	struct wm_softc *sc = device_private(dev);
   10329 	int rv;
   10330 
   10331 	if (sc->phy.acquire(sc) != 0) {
   10332 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10333 		return 0;
   10334 	}
   10335 
   10336 #ifdef DIAGNOSTIC
   10337 	if (reg > MII_ADDRMASK) {
   10338 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10339 		    __func__, sc->sc_phytype, reg);
   10340 		reg &= MII_ADDRMASK;
   10341 	}
   10342 #endif
   10343 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10344 
   10345 	sc->phy.release(sc);
   10346 	return rv;
   10347 }
   10348 
   10349 /*
   10350  * wm_gmii_82580_writereg:	[mii interface function]
   10351  *
   10352  *	Write a PHY register on the 82580 and I350.
   10353  * This could be handled by the PHY layer if we didn't have to lock the
  10354  * resource ...
   10355  */
   10356 static void
   10357 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10358 {
   10359 	struct wm_softc *sc = device_private(dev);
   10360 
   10361 	if (sc->phy.acquire(sc) != 0) {
   10362 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10363 		return;
   10364 	}
   10365 
   10366 #ifdef DIAGNOSTIC
   10367 	if (reg > MII_ADDRMASK) {
   10368 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10369 		    __func__, sc->sc_phytype, reg);
   10370 		reg &= MII_ADDRMASK;
   10371 	}
   10372 #endif
   10373 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10374 
   10375 	sc->phy.release(sc);
   10376 }
   10377 
   10378 /*
   10379  * wm_gmii_gs40g_readreg:	[mii interface function]
   10380  *
  10381  *	Read a PHY register on the I210 and I211.
  10382  * This could be handled by the PHY layer if we didn't have to lock the
  10383  * resource ...
   10384  */
   10385 static int
   10386 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10387 {
   10388 	struct wm_softc *sc = device_private(dev);
   10389 	int page, offset;
   10390 	int rv;
   10391 
   10392 	/* Acquire semaphore */
   10393 	if (sc->phy.acquire(sc)) {
   10394 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10395 		return 0;
   10396 	}
   10397 
   10398 	/* Page select */
   10399 	page = reg >> GS40G_PAGE_SHIFT;
   10400 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10401 
   10402 	/* Read reg */
   10403 	offset = reg & GS40G_OFFSET_MASK;
   10404 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10405 
   10406 	sc->phy.release(sc);
   10407 	return rv;
   10408 }
   10409 
   10410 /*
   10411  * wm_gmii_gs40g_writereg:	[mii interface function]
   10412  *
   10413  *	Write a PHY register on the I210 and I211.
   10414  * This could be handled by the PHY layer if we didn't have to lock the
  10415  * resource ...
   10416  */
   10417 static void
   10418 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10419 {
   10420 	struct wm_softc *sc = device_private(dev);
   10421 	int page, offset;
   10422 
   10423 	/* Acquire semaphore */
   10424 	if (sc->phy.acquire(sc)) {
   10425 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10426 		return;
   10427 	}
   10428 
   10429 	/* Page select */
   10430 	page = reg >> GS40G_PAGE_SHIFT;
   10431 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10432 
   10433 	/* Write reg */
   10434 	offset = reg & GS40G_OFFSET_MASK;
   10435 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10436 
   10437 	/* Release semaphore */
   10438 	sc->phy.release(sc);
   10439 }
   10440 
   10441 /*
   10442  * wm_gmii_statchg:	[mii interface function]
   10443  *
   10444  *	Callback from MII layer when media changes.
   10445  */
   10446 static void
   10447 wm_gmii_statchg(struct ifnet *ifp)
   10448 {
   10449 	struct wm_softc *sc = ifp->if_softc;
   10450 	struct mii_data *mii = &sc->sc_mii;
   10451 
   10452 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10453 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10454 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10455 
   10456 	/*
   10457 	 * Get flow control negotiation result.
   10458 	 */
   10459 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10460 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10461 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10462 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10463 	}
   10464 
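        	/*
        	 * Map the negotiated flow-control flags onto the MAC: TX pause
        	 * sets TFCE (and enables XON), RX pause sets RFCE.
        	 */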
   10465 	if (sc->sc_flowflags & IFM_FLOW) {
   10466 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10467 			sc->sc_ctrl |= CTRL_TFCE;
   10468 			sc->sc_fcrtl |= FCRTL_XONE;
   10469 		}
   10470 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10471 			sc->sc_ctrl |= CTRL_RFCE;
   10472 	}
   10473 
   10474 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10475 		DPRINTF(WM_DEBUG_LINK,
   10476 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10477 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10478 	} else {
   10479 		DPRINTF(WM_DEBUG_LINK,
   10480 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10481 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10482 	}
   10483 
   10484 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10485 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10486 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10487 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10488 	if (sc->sc_type == WM_T_80003) {
   10489 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10490 		case IFM_1000_T:
   10491 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10492 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10493 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10494 			break;
   10495 		default:
   10496 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10497 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10498 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10499 			break;
   10500 		}
   10501 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10502 	}
   10503 }
   10504 
  10505 /* Kumeran related (80003, ICH* and PCH*) */
   10506 
   10507 /*
   10508  * wm_kmrn_readreg:
   10509  *
  10510  *	Read a Kumeran register.
   10511  */
   10512 static int
   10513 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10514 {
   10515 	int rv;
   10516 
   10517 	if (sc->sc_type == WM_T_80003)
   10518 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10519 	else
   10520 		rv = sc->phy.acquire(sc);
   10521 	if (rv != 0) {
   10522 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10523 		    __func__);
   10524 		return 0;
   10525 	}
   10526 
   10527 	rv = wm_kmrn_readreg_locked(sc, reg);
   10528 
   10529 	if (sc->sc_type == WM_T_80003)
   10530 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10531 	else
   10532 		sc->phy.release(sc);
   10533 
   10534 	return rv;
   10535 }
   10536 
   10537 static int
   10538 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10539 {
   10540 	int rv;
   10541 
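        	/*
        	 * Writing the register offset with the REN bit set starts a
        	 * read; the result appears in the data field of KUMCTRLSTA
        	 * shortly after, hence the short delay before reading it back.
        	 */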
   10542 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10543 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10544 	    KUMCTRLSTA_REN);
   10545 	CSR_WRITE_FLUSH(sc);
   10546 	delay(2);
   10547 
   10548 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10549 
   10550 	return rv;
   10551 }
   10552 
   10553 /*
   10554  * wm_kmrn_writereg:
   10555  *
  10556  *	Write a Kumeran register.
   10557  */
   10558 static void
   10559 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10560 {
   10561 	int rv;
   10562 
   10563 	if (sc->sc_type == WM_T_80003)
   10564 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10565 	else
   10566 		rv = sc->phy.acquire(sc);
   10567 	if (rv != 0) {
   10568 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10569 		    __func__);
   10570 		return;
   10571 	}
   10572 
   10573 	wm_kmrn_writereg_locked(sc, reg, val);
   10574 
   10575 	if (sc->sc_type == WM_T_80003)
   10576 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10577 	else
   10578 		sc->phy.release(sc);
   10579 }
   10580 
   10581 static void
   10582 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10583 {
   10584 
   10585 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10586 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10587 	    (val & KUMCTRLSTA_MASK));
   10588 }
   10589 
   10590 /* SGMII related */
   10591 
   10592 /*
   10593  * wm_sgmii_uses_mdio
   10594  *
   10595  * Check whether the transaction is to the internal PHY or the external
   10596  * MDIO interface. Return true if it's MDIO.
   10597  */
   10598 static bool
   10599 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10600 {
   10601 	uint32_t reg;
   10602 	bool ismdio = false;
   10603 
   10604 	switch (sc->sc_type) {
   10605 	case WM_T_82575:
   10606 	case WM_T_82576:
   10607 		reg = CSR_READ(sc, WMREG_MDIC);
   10608 		ismdio = ((reg & MDIC_DEST) != 0);
   10609 		break;
   10610 	case WM_T_82580:
   10611 	case WM_T_I350:
   10612 	case WM_T_I354:
   10613 	case WM_T_I210:
   10614 	case WM_T_I211:
   10615 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10616 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10617 		break;
   10618 	default:
   10619 		break;
   10620 	}
   10621 
   10622 	return ismdio;
   10623 }
   10624 
   10625 /*
   10626  * wm_sgmii_readreg:	[mii interface function]
   10627  *
   10628  *	Read a PHY register on the SGMII
   10629  * This could be handled by the PHY layer if we didn't have to lock the
  10630  * resource ...
   10631  */
   10632 static int
   10633 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10634 {
   10635 	struct wm_softc *sc = device_private(dev);
   10636 	uint32_t i2ccmd;
   10637 	int i, rv;
   10638 
   10639 	if (sc->phy.acquire(sc)) {
   10640 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10641 		return 0;
   10642 	}
   10643 
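        	/*
        	 * SGMII PHY registers are accessed through the I2CCMD register:
        	 * encode the PHY and register addresses with a read opcode,
        	 * then poll for the READY bit.
        	 */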
   10644 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10645 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10646 	    | I2CCMD_OPCODE_READ;
   10647 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10648 
   10649 	/* Poll the ready bit */
   10650 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10651 		delay(50);
   10652 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10653 		if (i2ccmd & I2CCMD_READY)
   10654 			break;
   10655 	}
   10656 	if ((i2ccmd & I2CCMD_READY) == 0)
   10657 		device_printf(dev, "I2CCMD Read did not complete\n");
   10658 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10659 		device_printf(dev, "I2CCMD Error bit set\n");
   10660 
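        	/* Swap the data bytes for the I2C interface */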
   10661 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10662 
   10663 	sc->phy.release(sc);
   10664 	return rv;
   10665 }
   10666 
   10667 /*
   10668  * wm_sgmii_writereg:	[mii interface function]
   10669  *
   10670  *	Write a PHY register on the SGMII.
   10671  * This could be handled by the PHY layer if we didn't have to lock the
  10672  * resource ...
   10673  */
   10674 static void
   10675 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10676 {
   10677 	struct wm_softc *sc = device_private(dev);
   10678 	uint32_t i2ccmd;
   10679 	int i;
   10680 	int val_swapped;
   10681 
   10682 	if (sc->phy.acquire(sc) != 0) {
   10683 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10684 		return;
   10685 	}
   10686 	/* Swap the data bytes for the I2C interface */
   10687 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10688 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10689 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10690 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10691 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10692 
   10693 	/* Poll the ready bit */
   10694 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10695 		delay(50);
   10696 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10697 		if (i2ccmd & I2CCMD_READY)
   10698 			break;
   10699 	}
   10700 	if ((i2ccmd & I2CCMD_READY) == 0)
   10701 		device_printf(dev, "I2CCMD Write did not complete\n");
   10702 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10703 		device_printf(dev, "I2CCMD Error bit set\n");
   10704 
   10705 	sc->phy.release(sc);
   10706 }
   10707 
   10708 /* TBI related */
   10709 
   10710 /*
   10711  * wm_tbi_mediainit:
   10712  *
   10713  *	Initialize media for use on 1000BASE-X devices.
   10714  */
   10715 static void
   10716 wm_tbi_mediainit(struct wm_softc *sc)
   10717 {
   10718 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10719 	const char *sep = "";
   10720 
   10721 	if (sc->sc_type < WM_T_82543)
   10722 		sc->sc_tipg = TIPG_WM_DFLT;
   10723 	else
   10724 		sc->sc_tipg = TIPG_LG_DFLT;
   10725 
   10726 	sc->sc_tbi_serdes_anegticks = 5;
   10727 
   10728 	/* Initialize our media structures */
   10729 	sc->sc_mii.mii_ifp = ifp;
   10730 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10731 
   10732 	if ((sc->sc_type >= WM_T_82575)
   10733 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10734 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10735 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10736 	else
   10737 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10738 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10739 
   10740 	/*
   10741 	 * SWD Pins:
   10742 	 *
   10743 	 *	0 = Link LED (output)
   10744 	 *	1 = Loss Of Signal (input)
   10745 	 */
   10746 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10747 
   10748 	/* XXX Perhaps this is only for TBI */
   10749 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10750 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10751 
   10752 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10753 		sc->sc_ctrl &= ~CTRL_LRST;
   10754 
   10755 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10756 
   10757 #define	ADD(ss, mm, dd)							\
   10758 do {									\
   10759 	aprint_normal("%s%s", sep, ss);					\
   10760 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10761 	sep = ", ";							\
   10762 } while (/*CONSTCOND*/0)
   10763 
   10764 	aprint_normal_dev(sc->sc_dev, "");
   10765 
   10766 	if (sc->sc_type == WM_T_I354) {
   10767 		uint32_t status;
   10768 
   10769 		status = CSR_READ(sc, WMREG_STATUS);
   10770 		if (((status & STATUS_2P5_SKU) != 0)
   10771 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10772 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10773 		} else
   10774 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10775 	} else if (sc->sc_type == WM_T_82545) {
   10776 		/* Only 82545 is LX (XXX except SFP) */
   10777 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10778 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10779 	} else {
   10780 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10781 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10782 	}
   10783 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10784 	aprint_normal("\n");
   10785 
   10786 #undef ADD
   10787 
   10788 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10789 }
   10790 
   10791 /*
   10792  * wm_tbi_mediachange:	[ifmedia interface function]
   10793  *
   10794  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10795  */
   10796 static int
   10797 wm_tbi_mediachange(struct ifnet *ifp)
   10798 {
   10799 	struct wm_softc *sc = ifp->if_softc;
   10800 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10801 	uint32_t status;
   10802 	int i;
   10803 
   10804 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10805 		/* XXX need some work for >= 82571 and < 82575 */
   10806 		if (sc->sc_type < WM_T_82575)
   10807 			return 0;
   10808 	}
   10809 
   10810 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10811 	    || (sc->sc_type >= WM_T_82575))
   10812 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10813 
   10814 	sc->sc_ctrl &= ~CTRL_LRST;
   10815 	sc->sc_txcw = TXCW_ANE;
   10816 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10817 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10818 	else if (ife->ifm_media & IFM_FDX)
   10819 		sc->sc_txcw |= TXCW_FD;
   10820 	else
   10821 		sc->sc_txcw |= TXCW_HD;
   10822 
   10823 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10824 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10825 
   10826 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10827 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10828 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10829 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10830 	CSR_WRITE_FLUSH(sc);
   10831 	delay(1000);
   10832 
   10833 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10834 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10835 
   10836 	/*
  10837 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
  10838 	 * optics detect a signal; on older chips a clear bit means signal.
   10839 	 */
   10840 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10841 		/* Have signal; wait for the link to come up. */
   10842 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10843 			delay(10000);
   10844 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10845 				break;
   10846 		}
   10847 
   10848 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10849 			    device_xname(sc->sc_dev),i));
   10850 
   10851 		status = CSR_READ(sc, WMREG_STATUS);
   10852 		DPRINTF(WM_DEBUG_LINK,
   10853 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10854 			device_xname(sc->sc_dev),status, STATUS_LU));
   10855 		if (status & STATUS_LU) {
   10856 			/* Link is up. */
   10857 			DPRINTF(WM_DEBUG_LINK,
   10858 			    ("%s: LINK: set media -> link up %s\n",
   10859 			    device_xname(sc->sc_dev),
   10860 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10861 
   10862 			/*
  10863 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
  10864 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   10865 			 */
   10866 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10867 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10868 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10869 			if (status & STATUS_FD)
   10870 				sc->sc_tctl |=
   10871 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10872 			else
   10873 				sc->sc_tctl |=
   10874 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10875 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10876 				sc->sc_fcrtl |= FCRTL_XONE;
   10877 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10878 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10879 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10880 				      sc->sc_fcrtl);
   10881 			sc->sc_tbi_linkup = 1;
   10882 		} else {
   10883 			if (i == WM_LINKUP_TIMEOUT)
   10884 				wm_check_for_link(sc);
   10885 			/* Link is down. */
   10886 			DPRINTF(WM_DEBUG_LINK,
   10887 			    ("%s: LINK: set media -> link down\n",
   10888 			    device_xname(sc->sc_dev)));
   10889 			sc->sc_tbi_linkup = 0;
   10890 		}
   10891 	} else {
   10892 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10893 		    device_xname(sc->sc_dev)));
   10894 		sc->sc_tbi_linkup = 0;
   10895 	}
   10896 
   10897 	wm_tbi_serdes_set_linkled(sc);
   10898 
   10899 	return 0;
   10900 }
   10901 
   10902 /*
   10903  * wm_tbi_mediastatus:	[ifmedia interface function]
   10904  *
   10905  *	Get the current interface media status on a 1000BASE-X device.
   10906  */
   10907 static void
   10908 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10909 {
   10910 	struct wm_softc *sc = ifp->if_softc;
   10911 	uint32_t ctrl, status;
   10912 
   10913 	ifmr->ifm_status = IFM_AVALID;
   10914 	ifmr->ifm_active = IFM_ETHER;
   10915 
   10916 	status = CSR_READ(sc, WMREG_STATUS);
   10917 	if ((status & STATUS_LU) == 0) {
   10918 		ifmr->ifm_active |= IFM_NONE;
   10919 		return;
   10920 	}
   10921 
   10922 	ifmr->ifm_status |= IFM_ACTIVE;
   10923 	/* Only 82545 is LX */
   10924 	if (sc->sc_type == WM_T_82545)
   10925 		ifmr->ifm_active |= IFM_1000_LX;
   10926 	else
   10927 		ifmr->ifm_active |= IFM_1000_SX;
   10928 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10929 		ifmr->ifm_active |= IFM_FDX;
   10930 	else
   10931 		ifmr->ifm_active |= IFM_HDX;
   10932 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10933 	if (ctrl & CTRL_RFCE)
   10934 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10935 	if (ctrl & CTRL_TFCE)
   10936 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10937 }
   10938 
   10939 /* XXX TBI only */
   10940 static int
   10941 wm_check_for_link(struct wm_softc *sc)
   10942 {
   10943 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10944 	uint32_t rxcw;
   10945 	uint32_t ctrl;
   10946 	uint32_t status;
   10947 	uint32_t sig;
   10948 
   10949 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10950 		/* XXX need some work for >= 82571 */
   10951 		if (sc->sc_type >= WM_T_82571) {
   10952 			sc->sc_tbi_linkup = 1;
   10953 			return 0;
   10954 		}
   10955 	}
   10956 
   10957 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10958 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10959 	status = CSR_READ(sc, WMREG_STATUS);
   10960 
   10961 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10962 
   10963 	DPRINTF(WM_DEBUG_LINK,
   10964 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10965 		device_xname(sc->sc_dev), __func__,
   10966 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10967 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10968 
   10969 	/*
   10970 	 * SWDPIN   LU RXCW
   10971 	 *      0    0    0
   10972 	 *      0    0    1	(should not happen)
   10973 	 *      0    1    0	(should not happen)
   10974 	 *      0    1    1	(should not happen)
   10975 	 *      1    0    0	Disable autonego and force linkup
   10976 	 *      1    0    1	got /C/ but not linkup yet
   10977 	 *      1    1    0	(linkup)
   10978 	 *      1    1    1	If IFM_AUTO, back to autonego
   10979 	 *
   10980 	 */
   10981 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10982 	    && ((status & STATUS_LU) == 0)
   10983 	    && ((rxcw & RXCW_C) == 0)) {
  10984 		DPRINTF(WM_DEBUG_LINK, ("%s: force link-up and full-duplex\n",
   10985 			__func__));
   10986 		sc->sc_tbi_linkup = 0;
   10987 		/* Disable auto-negotiation in the TXCW register */
   10988 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10989 
   10990 		/*
   10991 		 * Force link-up and also force full-duplex.
   10992 		 *
  10993 		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
  10994 		 * so rebuild sc->sc_ctrl from the current CTRL value.
   10995 		 */
   10996 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10997 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10998 	} else if (((status & STATUS_LU) != 0)
   10999 	    && ((rxcw & RXCW_C) != 0)
   11000 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11001 		sc->sc_tbi_linkup = 1;
   11002 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11003 			__func__));
   11004 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11005 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11006 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11007 	    && ((rxcw & RXCW_C) != 0)) {
   11008 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11009 	} else {
   11010 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11011 			status));
   11012 	}
   11013 
   11014 	return 0;
   11015 }
   11016 
   11017 /*
   11018  * wm_tbi_tick:
   11019  *
   11020  *	Check the link on TBI devices.
   11021  *	This function acts as mii_tick().
   11022  */
   11023 static void
   11024 wm_tbi_tick(struct wm_softc *sc)
   11025 {
   11026 	struct mii_data *mii = &sc->sc_mii;
   11027 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11028 	uint32_t status;
   11029 
   11030 	KASSERT(WM_CORE_LOCKED(sc));
   11031 
   11032 	status = CSR_READ(sc, WMREG_STATUS);
   11033 
   11034 	/* XXX is this needed? */
   11035 	(void)CSR_READ(sc, WMREG_RXCW);
   11036 	(void)CSR_READ(sc, WMREG_CTRL);
   11037 
   11038 	/* set link status */
   11039 	if ((status & STATUS_LU) == 0) {
   11040 		DPRINTF(WM_DEBUG_LINK,
   11041 		    ("%s: LINK: checklink -> down\n",
   11042 			device_xname(sc->sc_dev)));
   11043 		sc->sc_tbi_linkup = 0;
   11044 	} else if (sc->sc_tbi_linkup == 0) {
   11045 		DPRINTF(WM_DEBUG_LINK,
   11046 		    ("%s: LINK: checklink -> up %s\n",
   11047 			device_xname(sc->sc_dev),
   11048 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11049 		sc->sc_tbi_linkup = 1;
   11050 		sc->sc_tbi_serdes_ticks = 0;
   11051 	}
   11052 
   11053 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11054 		goto setled;
   11055 
   11056 	if ((status & STATUS_LU) == 0) {
   11057 		sc->sc_tbi_linkup = 0;
   11058 		/* If the timer expired, retry autonegotiation */
   11059 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11060 		    && (++sc->sc_tbi_serdes_ticks
   11061 			>= sc->sc_tbi_serdes_anegticks)) {
   11062 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11063 			sc->sc_tbi_serdes_ticks = 0;
   11064 			/*
   11065 			 * Reset the link, and let autonegotiation do
   11066 			 * its thing
   11067 			 */
   11068 			sc->sc_ctrl |= CTRL_LRST;
   11069 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11070 			CSR_WRITE_FLUSH(sc);
   11071 			delay(1000);
   11072 			sc->sc_ctrl &= ~CTRL_LRST;
   11073 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11074 			CSR_WRITE_FLUSH(sc);
   11075 			delay(1000);
   11076 			CSR_WRITE(sc, WMREG_TXCW,
   11077 			    sc->sc_txcw & ~TXCW_ANE);
   11078 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11079 		}
   11080 	}
   11081 
   11082 setled:
   11083 	wm_tbi_serdes_set_linkled(sc);
   11084 }
   11085 
   11086 /* SERDES related */
   11087 static void
   11088 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11089 {
   11090 	uint32_t reg;
   11091 
   11092 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11093 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11094 		return;
   11095 
   11096 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11097 	reg |= PCS_CFG_PCS_EN;
   11098 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11099 
   11100 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11101 	reg &= ~CTRL_EXT_SWDPIN(3);
   11102 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11103 	CSR_WRITE_FLUSH(sc);
   11104 }
   11105 
   11106 static int
   11107 wm_serdes_mediachange(struct ifnet *ifp)
   11108 {
   11109 	struct wm_softc *sc = ifp->if_softc;
   11110 	bool pcs_autoneg = true; /* XXX */
   11111 	uint32_t ctrl_ext, pcs_lctl, reg;
   11112 
   11113 	/* XXX Currently, this function is not called on 8257[12] */
   11114 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11115 	    || (sc->sc_type >= WM_T_82575))
   11116 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11117 
   11118 	wm_serdes_power_up_link_82575(sc);
   11119 
   11120 	sc->sc_ctrl |= CTRL_SLU;
   11121 
   11122 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11123 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11124 
   11125 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11126 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11127 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11128 	case CTRL_EXT_LINK_MODE_SGMII:
   11129 		pcs_autoneg = true;
   11130 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11131 		break;
   11132 	case CTRL_EXT_LINK_MODE_1000KX:
   11133 		pcs_autoneg = false;
   11134 		/* FALLTHROUGH */
   11135 	default:
   11136 		if ((sc->sc_type == WM_T_82575)
   11137 		    || (sc->sc_type == WM_T_82576)) {
   11138 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11139 				pcs_autoneg = false;
   11140 		}
   11141 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11142 		    | CTRL_FRCFDX;
   11143 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11144 	}
   11145 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11146 
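        	/*
        	 * With PCS autonegotiation, (re)start autonegotiation and
        	 * advertise both symmetric and asymmetric pause; otherwise
        	 * force the link and flow-control settings chosen above.
        	 */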
   11147 	if (pcs_autoneg) {
   11148 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11149 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11150 
   11151 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11152 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11153 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11154 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11155 	} else
   11156 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11157 
   11158 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11159 
   11160 
   11161 	return 0;
   11162 }
   11163 
   11164 static void
   11165 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11166 {
   11167 	struct wm_softc *sc = ifp->if_softc;
   11168 	struct mii_data *mii = &sc->sc_mii;
   11169 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11170 	uint32_t pcs_adv, pcs_lpab, reg;
   11171 
   11172 	ifmr->ifm_status = IFM_AVALID;
   11173 	ifmr->ifm_active = IFM_ETHER;
   11174 
   11175 	/* Check PCS */
   11176 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11177 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11178 		ifmr->ifm_active |= IFM_NONE;
   11179 		sc->sc_tbi_linkup = 0;
   11180 		goto setled;
   11181 	}
   11182 
   11183 	sc->sc_tbi_linkup = 1;
   11184 	ifmr->ifm_status |= IFM_ACTIVE;
   11185 	if (sc->sc_type == WM_T_I354) {
   11186 		uint32_t status;
   11187 
   11188 		status = CSR_READ(sc, WMREG_STATUS);
   11189 		if (((status & STATUS_2P5_SKU) != 0)
   11190 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11191 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11192 		} else
   11193 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11194 	} else {
   11195 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11196 		case PCS_LSTS_SPEED_10:
   11197 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11198 			break;
   11199 		case PCS_LSTS_SPEED_100:
   11200 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11201 			break;
   11202 		case PCS_LSTS_SPEED_1000:
   11203 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11204 			break;
   11205 		default:
   11206 			device_printf(sc->sc_dev, "Unknown speed\n");
   11207 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11208 			break;
   11209 		}
   11210 	}
   11211 	if ((reg & PCS_LSTS_FDX) != 0)
   11212 		ifmr->ifm_active |= IFM_FDX;
   11213 	else
   11214 		ifmr->ifm_active |= IFM_HDX;
   11215 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11216 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11217 		/* Check flow */
   11218 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11219 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11220 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11221 			goto setled;
   11222 		}
   11223 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11224 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11225 		DPRINTF(WM_DEBUG_LINK,
   11226 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
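        		/*
        		 * Resolve the pause ability (cf. IEEE 802.3 Annex 28B):
        		 * symmetric pause on both sides enables both directions,
        		 * while the asymmetric combinations enable TX-only or
        		 * RX-only pause.
        		 */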
   11227 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11228 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11229 			mii->mii_media_active |= IFM_FLOW
   11230 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11231 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11232 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11233 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11234 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11235 			mii->mii_media_active |= IFM_FLOW
   11236 			    | IFM_ETH_TXPAUSE;
   11237 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11238 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11239 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11240 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11241 			mii->mii_media_active |= IFM_FLOW
   11242 			    | IFM_ETH_RXPAUSE;
   11243 		}
   11244 	}
   11245 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11246 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11247 setled:
   11248 	wm_tbi_serdes_set_linkled(sc);
   11249 }
   11250 
   11251 /*
   11252  * wm_serdes_tick:
   11253  *
   11254  *	Check the link on serdes devices.
   11255  */
   11256 static void
   11257 wm_serdes_tick(struct wm_softc *sc)
   11258 {
   11259 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11260 	struct mii_data *mii = &sc->sc_mii;
   11261 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11262 	uint32_t reg;
   11263 
   11264 	KASSERT(WM_CORE_LOCKED(sc));
   11265 
   11266 	mii->mii_media_status = IFM_AVALID;
   11267 	mii->mii_media_active = IFM_ETHER;
   11268 
   11269 	/* Check PCS */
   11270 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11271 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11272 		mii->mii_media_status |= IFM_ACTIVE;
   11273 		sc->sc_tbi_linkup = 1;
   11274 		sc->sc_tbi_serdes_ticks = 0;
   11275 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11276 		if ((reg & PCS_LSTS_FDX) != 0)
   11277 			mii->mii_media_active |= IFM_FDX;
   11278 		else
   11279 			mii->mii_media_active |= IFM_HDX;
   11280 	} else {
    11281 		mii->mii_media_active |= IFM_NONE;
   11282 		sc->sc_tbi_linkup = 0;
   11283 		/* If the timer expired, retry autonegotiation */
   11284 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11285 		    && (++sc->sc_tbi_serdes_ticks
   11286 			>= sc->sc_tbi_serdes_anegticks)) {
   11287 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11288 			sc->sc_tbi_serdes_ticks = 0;
   11289 			/* XXX */
   11290 			wm_serdes_mediachange(ifp);
   11291 		}
   11292 	}
   11293 
   11294 	wm_tbi_serdes_set_linkled(sc);
   11295 }
   11296 
   11297 /* SFP related */
   11298 
   11299 static int
   11300 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11301 {
   11302 	uint32_t i2ccmd;
   11303 	int i;
   11304 
   11305 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11306 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11307 
   11308 	/* Poll the ready bit */
   11309 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11310 		delay(50);
   11311 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11312 		if (i2ccmd & I2CCMD_READY)
   11313 			break;
   11314 	}
   11315 	if ((i2ccmd & I2CCMD_READY) == 0)
   11316 		return -1;
   11317 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11318 		return -1;
   11319 
   11320 	*data = i2ccmd & 0x00ff;
   11321 
   11322 	return 0;
   11323 }
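          
          /*
           * Note on the polling loop above: each poll costs 50us, so a
           * transaction is given up to I2CCMD_PHY_TIMEOUT * 50us to
           * complete.  A successful READ leaves the byte fetched from the
           * SFP module in the low 8 bits of WMREG_I2CCMD, which is what
           * the "*data = i2ccmd & 0x00ff" assignment extracts.
           */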
   11324 
   11325 static uint32_t
   11326 wm_sfp_get_media_type(struct wm_softc *sc)
   11327 {
   11328 	uint32_t ctrl_ext;
   11329 	uint8_t val = 0;
   11330 	int timeout = 3;
   11331 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11332 	int rv = -1;
   11333 
   11334 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11335 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11336 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11337 	CSR_WRITE_FLUSH(sc);
   11338 
   11339 	/* Read SFP module data */
   11340 	while (timeout) {
   11341 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11342 		if (rv == 0)
   11343 			break;
   11344 		delay(100*1000); /* XXX too big */
   11345 		timeout--;
   11346 	}
   11347 	if (rv != 0)
   11348 		goto out;
   11349 	switch (val) {
   11350 	case SFF_SFP_ID_SFF:
   11351 		aprint_normal_dev(sc->sc_dev,
   11352 		    "Module/Connector soldered to board\n");
   11353 		break;
   11354 	case SFF_SFP_ID_SFP:
   11355 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11356 		break;
   11357 	case SFF_SFP_ID_UNKNOWN:
   11358 		goto out;
   11359 	default:
   11360 		break;
   11361 	}
   11362 
   11363 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
    11364 	if (rv != 0)
    11365 		goto out;
   11367 
   11368 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11369 		mediatype = WM_MEDIATYPE_SERDES;
    11370 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11371 		sc->sc_flags |= WM_F_SGMII;
   11372 		mediatype = WM_MEDIATYPE_COPPER;
    11373 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11374 		sc->sc_flags |= WM_F_SGMII;
   11375 		mediatype = WM_MEDIATYPE_SERDES;
   11376 	}
   11377 
   11378 out:
   11379 	/* Restore I2C interface setting */
   11380 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11381 
   11382 	return mediatype;
   11383 }
   11384 
   11385 /*
   11386  * NVM related.
   11387  * Microwire, SPI (w/wo EERD) and Flash.
   11388  */
   11389 
    11390 /* Common to both SPI and Microwire */
   11391 
   11392 /*
   11393  * wm_eeprom_sendbits:
   11394  *
   11395  *	Send a series of bits to the EEPROM.
   11396  */
   11397 static void
   11398 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11399 {
   11400 	uint32_t reg;
   11401 	int x;
   11402 
   11403 	reg = CSR_READ(sc, WMREG_EECD);
   11404 
   11405 	for (x = nbits; x > 0; x--) {
   11406 		if (bits & (1U << (x - 1)))
   11407 			reg |= EECD_DI;
   11408 		else
   11409 			reg &= ~EECD_DI;
   11410 		CSR_WRITE(sc, WMREG_EECD, reg);
   11411 		CSR_WRITE_FLUSH(sc);
   11412 		delay(2);
   11413 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11414 		CSR_WRITE_FLUSH(sc);
   11415 		delay(2);
   11416 		CSR_WRITE(sc, WMREG_EECD, reg);
   11417 		CSR_WRITE_FLUSH(sc);
   11418 		delay(2);
   11419 	}
   11420 }
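          
          /*
           * Worked example (illustrative): wm_eeprom_sendbits(sc,
           * UWIRE_OPC_READ, 3) walks from bit 2 down to bit 0, so a read
           * opcode of binary 110 goes out as 1, 1, 0, MSB first, with one
           * SK pulse per bit.  Addresses and SPI opcodes are clocked out
           * the same way.
           */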
   11421 
   11422 /*
   11423  * wm_eeprom_recvbits:
   11424  *
   11425  *	Receive a series of bits from the EEPROM.
   11426  */
   11427 static void
   11428 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11429 {
   11430 	uint32_t reg, val;
   11431 	int x;
   11432 
   11433 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11434 
   11435 	val = 0;
   11436 	for (x = nbits; x > 0; x--) {
   11437 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11438 		CSR_WRITE_FLUSH(sc);
   11439 		delay(2);
   11440 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11441 			val |= (1U << (x - 1));
   11442 		CSR_WRITE(sc, WMREG_EECD, reg);
   11443 		CSR_WRITE_FLUSH(sc);
   11444 		delay(2);
   11445 	}
   11446 	*valp = val;
   11447 }
   11448 
   11449 /* Microwire */
   11450 
   11451 /*
   11452  * wm_nvm_read_uwire:
   11453  *
   11454  *	Read a word from the EEPROM using the MicroWire protocol.
   11455  */
   11456 static int
   11457 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11458 {
   11459 	uint32_t reg, val;
   11460 	int i;
   11461 
   11462 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11463 		device_xname(sc->sc_dev), __func__));
   11464 
   11465 	if (sc->nvm.acquire(sc) != 0)
   11466 		return -1;
   11467 
   11468 	for (i = 0; i < wordcnt; i++) {
   11469 		/* Clear SK and DI. */
   11470 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11471 		CSR_WRITE(sc, WMREG_EECD, reg);
   11472 
   11473 		/*
   11474 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11475 		 * and Xen.
   11476 		 *
   11477 		 * We use this workaround only for 82540 because qemu's
    11478 		 * e1000 acts as an 82540.
   11479 		 */
   11480 		if (sc->sc_type == WM_T_82540) {
   11481 			reg |= EECD_SK;
   11482 			CSR_WRITE(sc, WMREG_EECD, reg);
   11483 			reg &= ~EECD_SK;
   11484 			CSR_WRITE(sc, WMREG_EECD, reg);
   11485 			CSR_WRITE_FLUSH(sc);
   11486 			delay(2);
   11487 		}
   11488 		/* XXX: end of workaround */
   11489 
   11490 		/* Set CHIP SELECT. */
   11491 		reg |= EECD_CS;
   11492 		CSR_WRITE(sc, WMREG_EECD, reg);
   11493 		CSR_WRITE_FLUSH(sc);
   11494 		delay(2);
   11495 
   11496 		/* Shift in the READ command. */
   11497 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11498 
   11499 		/* Shift in address. */
   11500 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11501 
   11502 		/* Shift out the data. */
   11503 		wm_eeprom_recvbits(sc, &val, 16);
   11504 		data[i] = val & 0xffff;
   11505 
   11506 		/* Clear CHIP SELECT. */
   11507 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11508 		CSR_WRITE(sc, WMREG_EECD, reg);
   11509 		CSR_WRITE_FLUSH(sc);
   11510 		delay(2);
   11511 	}
   11512 
   11513 	sc->nvm.release(sc);
   11514 	return 0;
   11515 }
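          
          #if 0
          /*
           * Illustrative sketch, not compiled: reading the three Ethernet
           * address words via the Microwire path.  Real callers should go
           * through wm_nvm_read() so that the right backend and locking
           * are used.
           */
          static void
          wm_nvm_uwire_example(struct wm_softc *sc)
          {
          	uint16_t myea[3];
          
          	if (wm_nvm_read_uwire(sc, NVM_OFF_MACADDR, 3, myea) == 0)
          		printf("%s: %04x %04x %04x\n", device_xname(sc->sc_dev),
          		    myea[0], myea[1], myea[2]);
          }
          #endif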
   11516 
   11517 /* SPI */
   11518 
   11519 /*
   11520  * Set SPI and FLASH related information from the EECD register.
   11521  * For 82541 and 82547, the word size is taken from EEPROM.
   11522  */
   11523 static int
   11524 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11525 {
   11526 	int size;
   11527 	uint32_t reg;
   11528 	uint16_t data;
   11529 
   11530 	reg = CSR_READ(sc, WMREG_EECD);
   11531 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11532 
   11533 	/* Read the size of NVM from EECD by default */
   11534 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11535 	switch (sc->sc_type) {
   11536 	case WM_T_82541:
   11537 	case WM_T_82541_2:
   11538 	case WM_T_82547:
   11539 	case WM_T_82547_2:
   11540 		/* Set dummy value to access EEPROM */
   11541 		sc->sc_nvm_wordsize = 64;
   11542 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11543 		reg = data;
   11544 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11545 		if (size == 0)
    11546 			size = 6; /* 1 << 6 = 64 words */
   11547 		else
   11548 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11549 		break;
   11550 	case WM_T_80003:
   11551 	case WM_T_82571:
   11552 	case WM_T_82572:
   11553 	case WM_T_82573: /* SPI case */
   11554 	case WM_T_82574: /* SPI case */
   11555 	case WM_T_82583: /* SPI case */
   11556 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11557 		if (size > 14)
   11558 			size = 14;
   11559 		break;
   11560 	case WM_T_82575:
   11561 	case WM_T_82576:
   11562 	case WM_T_82580:
   11563 	case WM_T_I350:
   11564 	case WM_T_I354:
   11565 	case WM_T_I210:
   11566 	case WM_T_I211:
   11567 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11568 		if (size > 15)
   11569 			size = 15;
   11570 		break;
   11571 	default:
   11572 		aprint_error_dev(sc->sc_dev,
   11573 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    11574 		return -1;
   11576 	}
   11577 
   11578 	sc->sc_nvm_wordsize = 1 << size;
   11579 
   11580 	return 0;
   11581 }
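          
          /*
           * Worked example: with an EECD size field of 1 on e.g. an 82571,
           * size becomes 1 + NVM_WORD_SIZE_BASE_SHIFT = 7, so
           * sc_nvm_wordsize is 1 << 7 = 128 words.  The 82541/82547 path
           * instead reads the size from word NVM_OFF_EEPROM_SIZE of the
           * EEPROM itself.
           */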
   11582 
   11583 /*
   11584  * wm_nvm_ready_spi:
   11585  *
   11586  *	Wait for a SPI EEPROM to be ready for commands.
   11587  */
   11588 static int
   11589 wm_nvm_ready_spi(struct wm_softc *sc)
   11590 {
   11591 	uint32_t val;
   11592 	int usec;
   11593 
   11594 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11595 		device_xname(sc->sc_dev), __func__));
   11596 
   11597 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11598 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11599 		wm_eeprom_recvbits(sc, &val, 8);
   11600 		if ((val & SPI_SR_RDY) == 0)
   11601 			break;
   11602 	}
   11603 	if (usec >= SPI_MAX_RETRIES) {
   11604 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   11605 		return -1;
   11606 	}
   11607 	return 0;
   11608 }
   11609 
   11610 /*
   11611  * wm_nvm_read_spi:
   11612  *
    11613  *	Read a word from the EEPROM using the SPI protocol.
   11614  */
   11615 static int
   11616 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11617 {
   11618 	uint32_t reg, val;
   11619 	int i;
   11620 	uint8_t opc;
   11621 	int rv = 0;
   11622 
   11623 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11624 		device_xname(sc->sc_dev), __func__));
   11625 
   11626 	if (sc->nvm.acquire(sc) != 0)
   11627 		return -1;
   11628 
   11629 	/* Clear SK and CS. */
   11630 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11631 	CSR_WRITE(sc, WMREG_EECD, reg);
   11632 	CSR_WRITE_FLUSH(sc);
   11633 	delay(2);
   11634 
   11635 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11636 		goto out;
   11637 
   11638 	/* Toggle CS to flush commands. */
   11639 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11640 	CSR_WRITE_FLUSH(sc);
   11641 	delay(2);
   11642 	CSR_WRITE(sc, WMREG_EECD, reg);
   11643 	CSR_WRITE_FLUSH(sc);
   11644 	delay(2);
   11645 
   11646 	opc = SPI_OPC_READ;
   11647 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11648 		opc |= SPI_OPC_A8;
   11649 
   11650 	wm_eeprom_sendbits(sc, opc, 8);
   11651 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11652 
   11653 	for (i = 0; i < wordcnt; i++) {
   11654 		wm_eeprom_recvbits(sc, &val, 16);
   11655 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11656 	}
   11657 
   11658 	/* Raise CS and clear SK. */
   11659 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11660 	CSR_WRITE(sc, WMREG_EECD, reg);
   11661 	CSR_WRITE_FLUSH(sc);
   11662 	delay(2);
   11663 
   11664 out:
   11665 	sc->nvm.release(sc);
   11666 	return rv;
   11667 }
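          
          /*
           * SPI addressing example: the device is byte addressed, hence
           * the "word << 1" above.  With 8 bit addressing, word 128 starts
           * at byte 256, which no longer fits in 8 bits, so the A8 opcode
           * bit supplies the ninth address bit; that is what the
           * "word >= 128" test selects.
           */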
   11668 
    11669 /* Reading via the EERD register */
   11670 
   11671 static int
   11672 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11673 {
   11674 	uint32_t attempts = 100000;
   11675 	uint32_t i, reg = 0;
   11676 	int32_t done = -1;
   11677 
   11678 	for (i = 0; i < attempts; i++) {
   11679 		reg = CSR_READ(sc, rw);
   11680 
   11681 		if (reg & EERD_DONE) {
   11682 			done = 0;
   11683 			break;
   11684 		}
   11685 		delay(5);
   11686 	}
   11687 
   11688 	return done;
   11689 }
   11690 
   11691 static int
   11692 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11693     uint16_t *data)
   11694 {
   11695 	int i, eerd = 0;
   11696 	int rv = 0;
   11697 
   11698 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11699 		device_xname(sc->sc_dev), __func__));
   11700 
   11701 	if (sc->nvm.acquire(sc) != 0)
   11702 		return -1;
   11703 
   11704 	for (i = 0; i < wordcnt; i++) {
   11705 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11706 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11707 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11708 		if (rv != 0) {
   11709 			aprint_error_dev(sc->sc_dev, "EERD polling failed\n");
   11710 			break;
   11711 		}
   11712 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11713 	}
   11714 
   11715 	sc->nvm.release(sc);
   11716 	return rv;
   11717 }
   11718 
   11719 /* Flash */
   11720 
   11721 static int
   11722 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11723 {
   11724 	uint32_t eecd;
   11725 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11726 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11727 	uint8_t sig_byte = 0;
   11728 
   11729 	switch (sc->sc_type) {
   11730 	case WM_T_PCH_SPT:
   11731 		/*
   11732 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11733 		 * sector valid bits from the NVM.
   11734 		 */
   11735 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11736 		if ((*bank == 0) || (*bank == 1)) {
   11737 			aprint_error_dev(sc->sc_dev,
   11738 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11739 				*bank);
   11740 			return -1;
   11741 		} else {
   11742 			*bank = *bank - 2;
   11743 			return 0;
   11744 		}
   11745 	case WM_T_ICH8:
   11746 	case WM_T_ICH9:
   11747 		eecd = CSR_READ(sc, WMREG_EECD);
   11748 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11749 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11750 			return 0;
   11751 		}
   11752 		/* FALLTHROUGH */
   11753 	default:
   11754 		/* Default to 0 */
   11755 		*bank = 0;
   11756 
   11757 		/* Check bank 0 */
   11758 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11759 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11760 			*bank = 0;
   11761 			return 0;
   11762 		}
   11763 
   11764 		/* Check bank 1 */
   11765 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11766 		    &sig_byte);
   11767 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11768 			*bank = 1;
   11769 			return 0;
   11770 		}
   11771 	}
   11772 
   11773 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11774 		device_xname(sc->sc_dev)));
   11775 	return -1;
   11776 }
   11777 
   11778 /******************************************************************************
   11779  * This function does initial flash setup so that a new read/write/erase cycle
   11780  * can be started.
   11781  *
   11782  * sc - The pointer to the hw structure
   11783  ****************************************************************************/
   11784 static int32_t
   11785 wm_ich8_cycle_init(struct wm_softc *sc)
   11786 {
   11787 	uint16_t hsfsts;
   11788 	int32_t error = 1;
   11789 	int32_t i     = 0;
   11790 
   11791 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11792 
    11793 	/* Check the Flash Descriptor Valid bit in HW status */
   11794 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11795 		return error;
   11796 	}
   11797 
    11798 	/* Clear FCERR and DAEL in HW status by writing 1 to them */
   11800 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11801 
   11802 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11803 
    11804 	/*
    11805 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11806 	 * check against in order to start a new cycle, or the FDONE bit
    11807 	 * should read as 1 after a hardware reset so that it can be used
    11808 	 * to tell whether a cycle is in progress or has completed.  We
    11809 	 * should also have some software semaphore mechanism to guard
    11810 	 * FDONE or the cycle-in-progress bit so that accesses by two
    11811 	 * threads are serialized, or some way to keep two threads from
    11812 	 * starting a cycle at the same time.
    11813 	 */
   11814 
   11815 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11816 		/*
   11817 		 * There is no cycle running at present, so we can start a
   11818 		 * cycle
   11819 		 */
   11820 
   11821 		/* Begin by setting Flash Cycle Done. */
   11822 		hsfsts |= HSFSTS_DONE;
   11823 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11824 		error = 0;
   11825 	} else {
    11826 		/*
    11827 		 * Otherwise poll for some time so the current cycle has a
    11828 		 * chance to end before giving up.
    11829 		 */
   11830 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11831 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11832 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11833 				error = 0;
   11834 				break;
   11835 			}
   11836 			delay(1);
   11837 		}
   11838 		if (error == 0) {
    11839 			/*
    11840 			 * The previous cycle ended within the timeout; now
    11841 			 * set the Flash Cycle Done.
    11842 			 */
   11843 			hsfsts |= HSFSTS_DONE;
   11844 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11845 		}
   11846 	}
   11847 	return error;
   11848 }
   11849 
   11850 /******************************************************************************
   11851  * This function starts a flash cycle and waits for its completion
   11852  *
   11853  * sc - The pointer to the hw structure
   11854  ****************************************************************************/
   11855 static int32_t
   11856 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11857 {
   11858 	uint16_t hsflctl;
   11859 	uint16_t hsfsts;
   11860 	int32_t error = 1;
   11861 	uint32_t i = 0;
   11862 
   11863 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11864 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11865 	hsflctl |= HSFCTL_GO;
   11866 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11867 
   11868 	/* Wait till FDONE bit is set to 1 */
   11869 	do {
   11870 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11871 		if (hsfsts & HSFSTS_DONE)
   11872 			break;
   11873 		delay(1);
   11874 		i++;
   11875 	} while (i < timeout);
    11876 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11877 		error = 0;
   11878 
   11879 	return error;
   11880 }
   11881 
   11882 /******************************************************************************
   11883  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11884  *
   11885  * sc - The pointer to the hw structure
   11886  * index - The index of the byte or word to read.
   11887  * size - Size of data to read, 1=byte 2=word, 4=dword
   11888  * data - Pointer to the word to store the value read.
   11889  *****************************************************************************/
   11890 static int32_t
   11891 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11892     uint32_t size, uint32_t *data)
   11893 {
   11894 	uint16_t hsfsts;
   11895 	uint16_t hsflctl;
   11896 	uint32_t flash_linear_address;
   11897 	uint32_t flash_data = 0;
   11898 	int32_t error = 1;
   11899 	int32_t count = 0;
   11900 
    11901 	if (size < 1 || size > 4 || data == NULL ||
   11902 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11903 		return error;
   11904 
   11905 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11906 	    sc->sc_ich8_flash_base;
   11907 
   11908 	do {
   11909 		delay(1);
   11910 		/* Steps */
   11911 		error = wm_ich8_cycle_init(sc);
   11912 		if (error)
   11913 			break;
   11914 
   11915 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11916 		/* BCOUNT is size - 1: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes */
   11917 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11918 		    & HSFCTL_BCOUNT_MASK;
   11919 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11920 		if (sc->sc_type == WM_T_PCH_SPT) {
   11921 			/*
    11922 			 * In SPT, this register is in the LAN memory space,
    11923 			 * not flash, so only 32 bit access is supported.
   11924 			 */
   11925 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11926 			    (uint32_t)hsflctl);
   11927 		} else
   11928 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11929 
   11930 		/*
   11931 		 * Write the last 24 bits of index into Flash Linear address
   11932 		 * field in Flash Address
   11933 		 */
   11934 		/* TODO: TBD maybe check the index against the size of flash */
   11935 
   11936 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11937 
   11938 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11939 
    11940 		/*
    11941 		 * Check if FCERR is set to 1; if so, clear it and retry the
    11942 		 * whole sequence a few more times.  Otherwise read in the
    11943 		 * Flash Data0 register; the data comes back least significant
    11944 		 * byte first.
    11945 		 */
   11946 		if (error == 0) {
   11947 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11948 			if (size == 1)
   11949 				*data = (uint8_t)(flash_data & 0x000000FF);
   11950 			else if (size == 2)
   11951 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11952 			else if (size == 4)
   11953 				*data = (uint32_t)flash_data;
   11954 			break;
   11955 		} else {
   11956 			/*
   11957 			 * If we've gotten here, then things are probably
   11958 			 * completely hosed, but if the error condition is
   11959 			 * detected, it won't hurt to give it another try...
   11960 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11961 			 */
   11962 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11963 			if (hsfsts & HSFSTS_ERR) {
   11964 				/* Repeat for some time before giving up. */
   11965 				continue;
   11966 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11967 				break;
   11968 		}
   11969 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11970 
   11971 	return error;
   11972 }
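          
          /*
           * FDATA0 always yields a little-endian dword; the masking above
           * keeps only the requested width.  For example, a 2 byte read
           * that latches 0x1234abcd in FDATA0 returns *data = 0xabcd.
           */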
   11973 
   11974 /******************************************************************************
   11975  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11976  *
   11977  * sc - pointer to wm_hw structure
   11978  * index - The index of the byte to read.
   11979  * data - Pointer to a byte to store the value read.
   11980  *****************************************************************************/
   11981 static int32_t
   11982 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11983 {
   11984 	int32_t status;
   11985 	uint32_t word = 0;
   11986 
   11987 	status = wm_read_ich8_data(sc, index, 1, &word);
   11988 	if (status == 0)
   11989 		*data = (uint8_t)word;
   11990 	else
   11991 		*data = 0;
   11992 
   11993 	return status;
   11994 }
   11995 
   11996 /******************************************************************************
   11997  * Reads a word from the NVM using the ICH8 flash access registers.
   11998  *
   11999  * sc - pointer to wm_hw structure
   12000  * index - The starting byte index of the word to read.
   12001  * data - Pointer to a word to store the value read.
   12002  *****************************************************************************/
   12003 static int32_t
   12004 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12005 {
   12006 	int32_t status;
   12007 	uint32_t word = 0;
   12008 
   12009 	status = wm_read_ich8_data(sc, index, 2, &word);
   12010 	if (status == 0)
   12011 		*data = (uint16_t)word;
   12012 	else
   12013 		*data = 0;
   12014 
   12015 	return status;
   12016 }
   12017 
   12018 /******************************************************************************
   12019  * Reads a dword from the NVM using the ICH8 flash access registers.
   12020  *
   12021  * sc - pointer to wm_hw structure
   12022  * index - The starting byte index of the word to read.
   12023  * data - Pointer to a word to store the value read.
   12024  *****************************************************************************/
   12025 static int32_t
   12026 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12027 {
   12028 	int32_t status;
   12029 
   12030 	status = wm_read_ich8_data(sc, index, 4, data);
   12031 	return status;
   12032 }
   12033 
   12034 /******************************************************************************
   12035  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12036  * register.
   12037  *
   12038  * sc - Struct containing variables accessed by shared code
   12039  * offset - offset of word in the EEPROM to read
   12040  * data - word read from the EEPROM
   12041  * words - number of words to read
   12042  *****************************************************************************/
   12043 static int
   12044 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12045 {
   12046 	int32_t  rv = 0;
   12047 	uint32_t flash_bank = 0;
   12048 	uint32_t act_offset = 0;
   12049 	uint32_t bank_offset = 0;
   12050 	uint16_t word = 0;
   12051 	uint16_t i = 0;
   12052 
   12053 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12054 		device_xname(sc->sc_dev), __func__));
   12055 
   12056 	if (sc->nvm.acquire(sc) != 0)
   12057 		return -1;
   12058 
   12059 	/*
   12060 	 * We need to know which is the valid flash bank.  In the event
   12061 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12062 	 * managing flash_bank.  So it cannot be trusted and needs
   12063 	 * to be updated with each read.
   12064 	 */
   12065 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12066 	if (rv) {
   12067 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12068 			device_xname(sc->sc_dev)));
   12069 		flash_bank = 0;
   12070 	}
   12071 
   12072 	/*
   12073 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12074 	 * size
   12075 	 */
   12076 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12077 
   12078 	for (i = 0; i < words; i++) {
   12079 		/* The NVM part needs a byte offset, hence * 2 */
   12080 		act_offset = bank_offset + ((offset + i) * 2);
   12081 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12082 		if (rv) {
   12083 			aprint_error_dev(sc->sc_dev,
   12084 			    "%s: failed to read NVM\n", __func__);
   12085 			break;
   12086 		}
   12087 		data[i] = word;
   12088 	}
   12089 
   12090 	sc->nvm.release(sc);
   12091 	return rv;
   12092 }
   12093 
   12094 /******************************************************************************
   12095  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12096  * register.
   12097  *
   12098  * sc - Struct containing variables accessed by shared code
   12099  * offset - offset of word in the EEPROM to read
   12100  * data - word read from the EEPROM
   12101  * words - number of words to read
   12102  *****************************************************************************/
   12103 static int
   12104 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12105 {
   12106 	int32_t  rv = 0;
   12107 	uint32_t flash_bank = 0;
   12108 	uint32_t act_offset = 0;
   12109 	uint32_t bank_offset = 0;
   12110 	uint32_t dword = 0;
   12111 	uint16_t i = 0;
   12112 
   12113 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12114 		device_xname(sc->sc_dev), __func__));
   12115 
   12116 	if (sc->nvm.acquire(sc) != 0)
   12117 		return -1;
   12118 
   12119 	/*
   12120 	 * We need to know which is the valid flash bank.  In the event
   12121 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12122 	 * managing flash_bank.  So it cannot be trusted and needs
   12123 	 * to be updated with each read.
   12124 	 */
   12125 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12126 	if (rv) {
   12127 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12128 			device_xname(sc->sc_dev)));
   12129 		flash_bank = 0;
   12130 	}
   12131 
   12132 	/*
   12133 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12134 	 * size
   12135 	 */
   12136 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12137 
   12138 	for (i = 0; i < words; i++) {
   12139 		/* The NVM part needs a byte offset, hence * 2 */
   12140 		act_offset = bank_offset + ((offset + i) * 2);
   12141 		/* but we must read dword aligned, so mask ... */
   12142 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12143 		if (rv) {
   12144 			aprint_error_dev(sc->sc_dev,
   12145 			    "%s: failed to read NVM\n", __func__);
   12146 			break;
   12147 		}
   12148 		/* ... and pick out low or high word */
   12149 		if ((act_offset & 0x2) == 0)
   12150 			data[i] = (uint16_t)(dword & 0xFFFF);
   12151 		else
   12152 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12153 	}
   12154 
   12155 	sc->nvm.release(sc);
   12156 	return rv;
   12157 }
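          
          /*
           * Example of the alignment handling above: word offset 3 is byte
           * offset 6, the dword read is issued at byte offset 4, and since
           * (6 & 0x2) != 0 the high word of that dword is the one returned.
           */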
   12158 
   12159 /* iNVM */
   12160 
   12161 static int
   12162 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12163 {
    12164 	int32_t rv = -1;
   12165 	uint32_t invm_dword;
   12166 	uint16_t i;
   12167 	uint8_t record_type, word_address;
   12168 
   12169 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12170 		device_xname(sc->sc_dev), __func__));
   12171 
   12172 	for (i = 0; i < INVM_SIZE; i++) {
   12173 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12174 		/* Get record type */
   12175 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12176 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12177 			break;
   12178 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12179 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12180 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12181 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12182 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12183 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12184 			if (word_address == address) {
   12185 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12186 				rv = 0;
   12187 				break;
   12188 			}
   12189 		}
   12190 	}
   12191 
   12192 	return rv;
   12193 }
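          
          /*
           * The iNVM image is a flat array of INVM_SIZE dwords walked
           * above: CSR and RSA-key autoload records are skipped by bumping
           * the index past their data dwords, while a word autoload record
           * keeps the word address and its 16 bit payload in the same
           * dword, which the INVM_DWORD_TO_* accessors unpack.
           */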
   12194 
   12195 static int
   12196 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12197 {
   12198 	int rv = 0;
   12199 	int i;
   12200 
   12201 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12202 		device_xname(sc->sc_dev), __func__));
   12203 
   12204 	if (sc->nvm.acquire(sc) != 0)
   12205 		return -1;
   12206 
   12207 	for (i = 0; i < words; i++) {
   12208 		switch (offset + i) {
   12209 		case NVM_OFF_MACADDR:
   12210 		case NVM_OFF_MACADDR1:
   12211 		case NVM_OFF_MACADDR2:
   12212 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12213 			if (rv != 0) {
   12214 				data[i] = 0xffff;
   12215 				rv = -1;
   12216 			}
   12217 			break;
   12218 		case NVM_OFF_CFG2:
   12219 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12220 			if (rv != 0) {
   12221 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12222 				rv = 0;
   12223 			}
   12224 			break;
   12225 		case NVM_OFF_CFG4:
   12226 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12227 			if (rv != 0) {
   12228 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12229 				rv = 0;
   12230 			}
   12231 			break;
   12232 		case NVM_OFF_LED_1_CFG:
   12233 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12234 			if (rv != 0) {
   12235 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12236 				rv = 0;
   12237 			}
   12238 			break;
   12239 		case NVM_OFF_LED_0_2_CFG:
   12240 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12241 			if (rv != 0) {
   12242 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12243 				rv = 0;
   12244 			}
   12245 			break;
   12246 		case NVM_OFF_ID_LED_SETTINGS:
   12247 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12248 			if (rv != 0) {
   12249 				*data = ID_LED_RESERVED_FFFF;
   12250 				rv = 0;
   12251 			}
   12252 			break;
   12253 		default:
   12254 			DPRINTF(WM_DEBUG_NVM,
   12255 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12256 			*data = NVM_RESERVED_WORD;
   12257 			break;
   12258 		}
   12259 	}
   12260 
   12261 	sc->nvm.release(sc);
   12262 	return rv;
   12263 }
   12264 
    12265 /* Locking, NVM type detection, checksum validation, version and read */
   12266 
   12267 static int
   12268 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12269 {
   12270 	uint32_t eecd = 0;
   12271 
   12272 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12273 	    || sc->sc_type == WM_T_82583) {
   12274 		eecd = CSR_READ(sc, WMREG_EECD);
   12275 
   12276 		/* Isolate bits 15 & 16 */
   12277 		eecd = ((eecd >> 15) & 0x03);
   12278 
   12279 		/* If both bits are set, device is Flash type */
   12280 		if (eecd == 0x03)
   12281 			return 0;
   12282 	}
   12283 	return 1;
   12284 }
   12285 
   12286 static int
   12287 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12288 {
   12289 	uint32_t eec;
   12290 
   12291 	eec = CSR_READ(sc, WMREG_EEC);
   12292 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12293 		return 1;
   12294 
   12295 	return 0;
   12296 }
   12297 
   12298 /*
   12299  * wm_nvm_validate_checksum
   12300  *
   12301  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12302  */
   12303 static int
   12304 wm_nvm_validate_checksum(struct wm_softc *sc)
   12305 {
   12306 	uint16_t checksum;
   12307 	uint16_t eeprom_data;
   12308 #ifdef WM_DEBUG
   12309 	uint16_t csum_wordaddr, valid_checksum;
   12310 #endif
   12311 	int i;
   12312 
   12313 	checksum = 0;
   12314 
   12315 	/* Don't check for I211 */
   12316 	if (sc->sc_type == WM_T_I211)
   12317 		return 0;
   12318 
   12319 #ifdef WM_DEBUG
   12320 	if (sc->sc_type == WM_T_PCH_LPT) {
   12321 		csum_wordaddr = NVM_OFF_COMPAT;
   12322 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12323 	} else {
   12324 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12325 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12326 	}
   12327 
   12328 	/* Dump EEPROM image for debug */
   12329 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12330 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12331 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12332 		/* XXX PCH_SPT? */
   12333 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12334 		if ((eeprom_data & valid_checksum) == 0) {
   12335 			DPRINTF(WM_DEBUG_NVM,
   12336 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12337 				device_xname(sc->sc_dev), eeprom_data,
   12338 				    valid_checksum));
   12339 		}
   12340 	}
   12341 
   12342 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12343 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12344 		for (i = 0; i < NVM_SIZE; i++) {
   12345 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12346 				printf("XXXX ");
   12347 			else
   12348 				printf("%04hx ", eeprom_data);
   12349 			if (i % 8 == 7)
   12350 				printf("\n");
   12351 		}
   12352 	}
   12353 
   12354 #endif /* WM_DEBUG */
   12355 
   12356 	for (i = 0; i < NVM_SIZE; i++) {
   12357 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12358 			return 1;
   12359 		checksum += eeprom_data;
   12360 	}
   12361 
   12362 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12363 #ifdef WM_DEBUG
   12364 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12365 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12366 #endif
   12367 	}
   12368 
   12369 	return 0;
   12370 }
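          
          #if 0
          /*
           * Illustrative sketch, not compiled: the invariant checked above.
           * Summing all NVM_SIZE words, including the checksum word itself,
           * in 16 bit arithmetic must yield NVM_CHECKSUM on a consistent
           * image.
           */
          static int
          wm_nvm_checksum_example(struct wm_softc *sc)
          {
          	uint16_t sum = 0, word;
          	int j;
          
          	for (j = 0; j < NVM_SIZE; j++) {
          		if (wm_nvm_read(sc, j, 1, &word) != 0)
          			return -1;
          		sum += word;
          	}
          	return (sum == (uint16_t)NVM_CHECKSUM) ? 0 : -1;
          }
          #endif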
   12371 
   12372 static void
   12373 wm_nvm_version_invm(struct wm_softc *sc)
   12374 {
   12375 	uint32_t dword;
   12376 
   12377 	/*
    12378 	 * Linux's code to decode the version is very strange, so we don't
    12379 	 * follow that algorithm and just use word 61 as the document
    12380 	 * describes.  Perhaps it's not perfect, though...
   12381 	 *
   12382 	 * Example:
   12383 	 *
   12384 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12385 	 */
   12386 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12387 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12388 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12389 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12390 }
   12391 
   12392 static void
   12393 wm_nvm_version(struct wm_softc *sc)
   12394 {
   12395 	uint16_t major, minor, build, patch;
   12396 	uint16_t uid0, uid1;
   12397 	uint16_t nvm_data;
   12398 	uint16_t off;
   12399 	bool check_version = false;
   12400 	bool check_optionrom = false;
   12401 	bool have_build = false;
   12402 	bool have_uid = true;
   12403 
   12404 	/*
   12405 	 * Version format:
   12406 	 *
   12407 	 * XYYZ
   12408 	 * X0YZ
   12409 	 * X0YY
   12410 	 *
   12411 	 * Example:
   12412 	 *
   12413 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12414 	 *	82571	0x50a6	5.10.6?
   12415 	 *	82572	0x506a	5.6.10?
   12416 	 *	82572EI	0x5069	5.6.9?
   12417 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12418 	 *		0x2013	2.1.3?
    12419 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12420 	 */
   12421 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12422 	switch (sc->sc_type) {
   12423 	case WM_T_82571:
   12424 	case WM_T_82572:
   12425 	case WM_T_82574:
   12426 	case WM_T_82583:
   12427 		check_version = true;
   12428 		check_optionrom = true;
   12429 		have_build = true;
   12430 		break;
   12431 	case WM_T_82575:
   12432 	case WM_T_82576:
   12433 	case WM_T_82580:
   12434 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12435 			check_version = true;
   12436 		break;
   12437 	case WM_T_I211:
   12438 		wm_nvm_version_invm(sc);
   12439 		have_uid = false;
   12440 		goto printver;
   12441 	case WM_T_I210:
   12442 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12443 			wm_nvm_version_invm(sc);
   12444 			have_uid = false;
   12445 			goto printver;
   12446 		}
   12447 		/* FALLTHROUGH */
   12448 	case WM_T_I350:
   12449 	case WM_T_I354:
   12450 		check_version = true;
   12451 		check_optionrom = true;
   12452 		break;
   12453 	default:
   12454 		return;
   12455 	}
   12456 	if (check_version) {
   12457 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12458 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12459 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12460 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12461 			build = nvm_data & NVM_BUILD_MASK;
   12462 			have_build = true;
   12463 		} else
   12464 			minor = nvm_data & 0x00ff;
   12465 
    12466 		/* The minor field is BCD-encoded; convert it to decimal */
   12467 		minor = (minor / 16) * 10 + (minor % 16);
   12468 		sc->sc_nvm_ver_major = major;
   12469 		sc->sc_nvm_ver_minor = minor;
   12470 
   12471 printver:
   12472 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12473 		    sc->sc_nvm_ver_minor);
   12474 		if (have_build) {
   12475 			sc->sc_nvm_ver_build = build;
   12476 			aprint_verbose(".%d", build);
   12477 		}
   12478 	}
   12479 	if (check_optionrom) {
   12480 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12481 		/* Option ROM Version */
   12482 		if ((off != 0x0000) && (off != 0xffff)) {
   12483 			off += NVM_COMBO_VER_OFF;
   12484 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12485 			wm_nvm_read(sc, off, 1, &uid0);
   12486 			if ((uid0 != 0) && (uid0 != 0xffff)
   12487 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12488 				/* 16bits */
   12489 				major = uid0 >> 8;
   12490 				build = (uid0 << 8) | (uid1 >> 8);
   12491 				patch = uid1 & 0x00ff;
   12492 				aprint_verbose(", option ROM Version %d.%d.%d",
   12493 				    major, build, patch);
   12494 			}
   12495 		}
   12496 	}
   12497 
   12498 	if (have_uid) {
   12499 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12500 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12501 	}
   12502 }
   12503 
   12504 /*
   12505  * wm_nvm_read:
   12506  *
   12507  *	Read data from the serial EEPROM.
   12508  */
   12509 static int
   12510 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12511 {
   12512 	int rv;
   12513 
   12514 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12515 		device_xname(sc->sc_dev), __func__));
   12516 
   12517 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12518 		return -1;
   12519 
   12520 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12521 
   12522 	return rv;
   12523 }
   12524 
   12525 /*
   12526  * Hardware semaphores.
    12527  * Very complex...
   12528  */
   12529 
   12530 static int
   12531 wm_get_null(struct wm_softc *sc)
   12532 {
   12533 
   12534 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12535 		device_xname(sc->sc_dev), __func__));
   12536 	return 0;
   12537 }
   12538 
   12539 static void
   12540 wm_put_null(struct wm_softc *sc)
   12541 {
   12542 
   12543 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12544 		device_xname(sc->sc_dev), __func__));
   12545 	return;
   12546 }
   12547 
   12548 static int
   12549 wm_get_eecd(struct wm_softc *sc)
   12550 {
   12551 	uint32_t reg;
   12552 	int x;
   12553 
   12554 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12555 		device_xname(sc->sc_dev), __func__));
   12556 
   12557 	reg = CSR_READ(sc, WMREG_EECD);
   12558 
   12559 	/* Request EEPROM access. */
   12560 	reg |= EECD_EE_REQ;
   12561 	CSR_WRITE(sc, WMREG_EECD, reg);
   12562 
   12563 	/* ..and wait for it to be granted. */
   12564 	for (x = 0; x < 1000; x++) {
   12565 		reg = CSR_READ(sc, WMREG_EECD);
   12566 		if (reg & EECD_EE_GNT)
   12567 			break;
   12568 		delay(5);
   12569 	}
   12570 	if ((reg & EECD_EE_GNT) == 0) {
   12571 		aprint_error_dev(sc->sc_dev,
   12572 		    "could not acquire EEPROM GNT\n");
   12573 		reg &= ~EECD_EE_REQ;
   12574 		CSR_WRITE(sc, WMREG_EECD, reg);
   12575 		return -1;
   12576 	}
   12577 
   12578 	return 0;
   12579 }
   12580 
   12581 static void
   12582 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12583 {
   12584 
   12585 	*eecd |= EECD_SK;
   12586 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12587 	CSR_WRITE_FLUSH(sc);
   12588 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12589 		delay(1);
   12590 	else
   12591 		delay(50);
   12592 }
   12593 
   12594 static void
   12595 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12596 {
   12597 
   12598 	*eecd &= ~EECD_SK;
   12599 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12600 	CSR_WRITE_FLUSH(sc);
   12601 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12602 		delay(1);
   12603 	else
   12604 		delay(50);
   12605 }
   12606 
   12607 static void
   12608 wm_put_eecd(struct wm_softc *sc)
   12609 {
   12610 	uint32_t reg;
   12611 
   12612 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12613 		device_xname(sc->sc_dev), __func__));
   12614 
   12615 	/* Stop nvm */
   12616 	reg = CSR_READ(sc, WMREG_EECD);
   12617 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12618 		/* Pull CS high */
   12619 		reg |= EECD_CS;
   12620 		wm_nvm_eec_clock_lower(sc, &reg);
   12621 	} else {
   12622 		/* CS on Microwire is active-high */
   12623 		reg &= ~(EECD_CS | EECD_DI);
   12624 		CSR_WRITE(sc, WMREG_EECD, reg);
   12625 		wm_nvm_eec_clock_raise(sc, &reg);
   12626 		wm_nvm_eec_clock_lower(sc, &reg);
   12627 	}
   12628 
   12629 	reg = CSR_READ(sc, WMREG_EECD);
   12630 	reg &= ~EECD_EE_REQ;
   12631 	CSR_WRITE(sc, WMREG_EECD, reg);
   12632 
   12633 	return;
   12634 }
   12635 
   12636 /*
   12637  * Get hardware semaphore.
   12638  * Same as e1000_get_hw_semaphore_generic()
   12639  */
   12640 static int
   12641 wm_get_swsm_semaphore(struct wm_softc *sc)
   12642 {
   12643 	int32_t timeout;
   12644 	uint32_t swsm;
   12645 
   12646 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12647 		device_xname(sc->sc_dev), __func__));
   12648 	KASSERT(sc->sc_nvm_wordsize > 0);
   12649 
   12650 	/* Get the SW semaphore. */
   12651 	timeout = sc->sc_nvm_wordsize + 1;
   12652 	while (timeout) {
   12653 		swsm = CSR_READ(sc, WMREG_SWSM);
   12654 
   12655 		if ((swsm & SWSM_SMBI) == 0)
   12656 			break;
   12657 
   12658 		delay(50);
   12659 		timeout--;
   12660 	}
   12661 
   12662 	if (timeout == 0) {
   12663 		aprint_error_dev(sc->sc_dev,
   12664 		    "could not acquire SWSM SMBI\n");
   12665 		return 1;
   12666 	}
   12667 
   12668 	/* Get the FW semaphore. */
   12669 	timeout = sc->sc_nvm_wordsize + 1;
   12670 	while (timeout) {
   12671 		swsm = CSR_READ(sc, WMREG_SWSM);
   12672 		swsm |= SWSM_SWESMBI;
   12673 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12674 		/* If we managed to set the bit we got the semaphore. */
   12675 		swsm = CSR_READ(sc, WMREG_SWSM);
   12676 		if (swsm & SWSM_SWESMBI)
   12677 			break;
   12678 
   12679 		delay(50);
   12680 		timeout--;
   12681 	}
   12682 
   12683 	if (timeout == 0) {
   12684 		aprint_error_dev(sc->sc_dev,
   12685 		    "could not acquire SWSM SWESMBI\n");
   12686 		/* Release semaphores */
   12687 		wm_put_swsm_semaphore(sc);
   12688 		return 1;
   12689 	}
   12690 	return 0;
   12691 }
   12692 
   12693 /*
   12694  * Put hardware semaphore.
   12695  * Same as e1000_put_hw_semaphore_generic()
   12696  */
   12697 static void
   12698 wm_put_swsm_semaphore(struct wm_softc *sc)
   12699 {
   12700 	uint32_t swsm;
   12701 
   12702 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12703 		device_xname(sc->sc_dev), __func__));
   12704 
   12705 	swsm = CSR_READ(sc, WMREG_SWSM);
   12706 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12707 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12708 }
   12709 
   12710 /*
   12711  * Get SW/FW semaphore.
   12712  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12713  */
   12714 static int
   12715 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12716 {
   12717 	uint32_t swfw_sync;
   12718 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12719 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12720 	int timeout;
   12721 
   12722 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12723 		device_xname(sc->sc_dev), __func__));
   12724 
   12725 	if (sc->sc_type == WM_T_80003)
   12726 		timeout = 50;
   12727 	else
   12728 		timeout = 200;
   12729 
    12730 	for (; timeout > 0; timeout--) {
   12731 		if (wm_get_swsm_semaphore(sc)) {
   12732 			aprint_error_dev(sc->sc_dev,
   12733 			    "%s: failed to get semaphore\n",
   12734 			    __func__);
   12735 			return 1;
   12736 		}
   12737 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12738 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12739 			swfw_sync |= swmask;
   12740 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12741 			wm_put_swsm_semaphore(sc);
   12742 			return 0;
   12743 		}
   12744 		wm_put_swsm_semaphore(sc);
   12745 		delay(5000);
   12746 	}
   12747 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12748 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12749 	return 1;
   12750 }
   12751 
   12752 static void
   12753 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12754 {
   12755 	uint32_t swfw_sync;
   12756 
   12757 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12758 		device_xname(sc->sc_dev), __func__));
   12759 
   12760 	while (wm_get_swsm_semaphore(sc) != 0)
   12761 		continue;
   12762 
   12763 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12764 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12765 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12766 
   12767 	wm_put_swsm_semaphore(sc);
   12768 }
   12769 
   12770 static int
   12771 wm_get_nvm_80003(struct wm_softc *sc)
   12772 {
   12773 	int rv;
   12774 
   12775 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12776 		device_xname(sc->sc_dev), __func__));
   12777 
   12778 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12779 		aprint_error_dev(sc->sc_dev,
   12780 		    "%s: failed to get semaphore(SWFW)\n",
   12781 		    __func__);
   12782 		return rv;
   12783 	}
   12784 
   12785 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12786 	    && (rv = wm_get_eecd(sc)) != 0) {
   12787 		aprint_error_dev(sc->sc_dev,
   12788 		    "%s: failed to get semaphore(EECD)\n",
   12789 		    __func__);
   12790 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12791 		return rv;
   12792 	}
   12793 
   12794 	return 0;
   12795 }
   12796 
   12797 static void
   12798 wm_put_nvm_80003(struct wm_softc *sc)
   12799 {
   12800 
   12801 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12802 		device_xname(sc->sc_dev), __func__));
   12803 
   12804 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12805 		wm_put_eecd(sc);
   12806 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12807 }
   12808 
   12809 static int
   12810 wm_get_nvm_82571(struct wm_softc *sc)
   12811 {
   12812 	int rv;
   12813 
   12814 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12815 		device_xname(sc->sc_dev), __func__));
   12816 
   12817 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12818 		return rv;
   12819 
   12820 	switch (sc->sc_type) {
   12821 	case WM_T_82573:
   12822 		break;
   12823 	default:
   12824 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12825 			rv = wm_get_eecd(sc);
   12826 		break;
   12827 	}
   12828 
   12829 	if (rv != 0) {
   12830 		aprint_error_dev(sc->sc_dev,
   12831 		    "%s: failed to get semaphore\n",
   12832 		    __func__);
   12833 		wm_put_swsm_semaphore(sc);
   12834 	}
   12835 
   12836 	return rv;
   12837 }
   12838 
   12839 static void
   12840 wm_put_nvm_82571(struct wm_softc *sc)
   12841 {
   12842 
   12843 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12844 		device_xname(sc->sc_dev), __func__));
   12845 
   12846 	switch (sc->sc_type) {
   12847 	case WM_T_82573:
   12848 		break;
   12849 	default:
   12850 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12851 			wm_put_eecd(sc);
   12852 		break;
   12853 	}
   12854 
   12855 	wm_put_swsm_semaphore(sc);
   12856 }
   12857 
   12858 static int
   12859 wm_get_phy_82575(struct wm_softc *sc)
   12860 {
   12861 
   12862 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12863 		device_xname(sc->sc_dev), __func__));
   12864 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12865 }
   12866 
   12867 static void
   12868 wm_put_phy_82575(struct wm_softc *sc)
   12869 {
   12870 
   12871 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12872 		device_xname(sc->sc_dev), __func__));
   12873 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12874 }
   12875 
   12876 static int
   12877 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12878 {
   12879 	uint32_t ext_ctrl;
    12880 	int timeout;
   12881 
   12882 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12883 		device_xname(sc->sc_dev), __func__));
   12884 
   12885 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12886 	for (timeout = 0; timeout < 200; timeout++) {
   12887 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12888 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12889 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12890 
   12891 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12892 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12893 			return 0;
   12894 		delay(5000);
   12895 	}
   12896 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12897 	    device_xname(sc->sc_dev), ext_ctrl);
   12898 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12899 	return 1;
   12900 }
   12901 
   12902 static void
   12903 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12904 {
   12905 	uint32_t ext_ctrl;
   12906 
   12907 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12908 		device_xname(sc->sc_dev), __func__));
   12909 
   12910 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12911 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12912 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12913 
   12914 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12915 }
   12916 
   12917 static int
   12918 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12919 {
   12920 	uint32_t ext_ctrl;
   12921 	int timeout;
   12922 
   12923 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12924 		device_xname(sc->sc_dev), __func__));
   12925 	mutex_enter(sc->sc_ich_phymtx);
   12926 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12927 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12928 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12929 			break;
   12930 		delay(1000);
   12931 	}
   12932 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12933 		printf("%s: SW has already locked the resource\n",
   12934 		    device_xname(sc->sc_dev));
   12935 		goto out;
   12936 	}
   12937 
   12938 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12939 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12940 	for (timeout = 0; timeout < 1000; timeout++) {
   12941 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12942 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12943 			break;
   12944 		delay(1000);
   12945 	}
   12946 	if (timeout >= 1000) {
   12947 		printf("%s: failed to acquire semaphore\n",
   12948 		    device_xname(sc->sc_dev));
   12949 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12950 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12951 		goto out;
   12952 	}
   12953 	return 0;
   12954 
   12955 out:
   12956 	mutex_exit(sc->sc_ich_phymtx);
   12957 	return 1;
   12958 }
   12959 
   12960 static void
   12961 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12962 {
   12963 	uint32_t ext_ctrl;
   12964 
   12965 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12966 		device_xname(sc->sc_dev), __func__));
   12967 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12968 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12969 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12970 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12971 	} else {
   12972 		printf("%s: Semaphore unexpectedly released\n",
   12973 		    device_xname(sc->sc_dev));
   12974 	}
   12975 
   12976 	mutex_exit(sc->sc_ich_phymtx);
   12977 }
   12978 
   12979 static int
   12980 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12981 {
   12982 
   12983 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12984 		device_xname(sc->sc_dev), __func__));
   12985 	mutex_enter(sc->sc_ich_nvmmtx);
   12986 
   12987 	return 0;
   12988 }
   12989 
   12990 static void
   12991 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12992 {
   12993 
   12994 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12995 		device_xname(sc->sc_dev), __func__));
   12996 	mutex_exit(sc->sc_ich_nvmmtx);
   12997 }
   12998 
   12999 static int
   13000 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13001 {
   13002 	int i = 0;
   13003 	uint32_t reg;
   13004 
   13005 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13006 		device_xname(sc->sc_dev), __func__));
   13007 
   13008 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13009 	do {
   13010 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13011 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13012 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13013 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13014 			break;
   13015 		delay(2*1000);
   13016 		i++;
   13017 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13018 
   13019 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13020 		wm_put_hw_semaphore_82573(sc);
   13021 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13022 		    device_xname(sc->sc_dev));
   13023 		return -1;
   13024 	}
   13025 
   13026 	return 0;
   13027 }
   13028 
   13029 static void
   13030 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13031 {
   13032 	uint32_t reg;
   13033 
   13034 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13035 		device_xname(sc->sc_dev), __func__));
   13036 
   13037 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13038 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13039 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13040 }
   13041 
   13042 /*
   13043  * Management mode and power management related subroutines.
   13044  * BMC, AMT, suspend/resume and EEE.
   13045  */
   13046 
   13047 #ifdef WM_WOL
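          /*
           * wm_check_mng_mode:
           *
           * Return non-zero if the management firmware runs in a mode the
           * driver has to honor.  The check depends on the device family
           * (FWSM for most devices, the NVM CFG2 word for 82574/82583).
           */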
   13048 static int
   13049 wm_check_mng_mode(struct wm_softc *sc)
   13050 {
   13051 	int rv;
   13052 
   13053 	switch (sc->sc_type) {
   13054 	case WM_T_ICH8:
   13055 	case WM_T_ICH9:
   13056 	case WM_T_ICH10:
   13057 	case WM_T_PCH:
   13058 	case WM_T_PCH2:
   13059 	case WM_T_PCH_LPT:
   13060 	case WM_T_PCH_SPT:
   13061 		rv = wm_check_mng_mode_ich8lan(sc);
   13062 		break;
   13063 	case WM_T_82574:
   13064 	case WM_T_82583:
   13065 		rv = wm_check_mng_mode_82574(sc);
   13066 		break;
   13067 	case WM_T_82571:
   13068 	case WM_T_82572:
   13069 	case WM_T_82573:
   13070 	case WM_T_80003:
   13071 		rv = wm_check_mng_mode_generic(sc);
   13072 		break;
   13073 	default:
    13074 		/* nothing to do */
   13075 		rv = 0;
   13076 		break;
   13077 	}
   13078 
   13079 	return rv;
   13080 }
   13081 
   13082 static int
   13083 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13084 {
   13085 	uint32_t fwsm;
   13086 
   13087 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13088 
   13089 	if (((fwsm & FWSM_FW_VALID) != 0)
   13090 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13091 		return 1;
   13092 
   13093 	return 0;
   13094 }
   13095 
   13096 static int
   13097 wm_check_mng_mode_82574(struct wm_softc *sc)
   13098 {
   13099 	uint16_t data;
   13100 
   13101 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13102 
   13103 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13104 		return 1;
   13105 
   13106 	return 0;
   13107 }
   13108 
   13109 static int
   13110 wm_check_mng_mode_generic(struct wm_softc *sc)
   13111 {
   13112 	uint32_t fwsm;
   13113 
   13114 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13115 
   13116 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13117 		return 1;
   13118 
   13119 	return 0;
   13120 }
   13121 #endif /* WM_WOL */
   13122 
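          /*
           * wm_enable_mng_pass_thru:
           *
           * Return 1 if frames must be passed between the management
           * firmware and the host, i.e. ASF firmware is present, TCO
           * reception is enabled and the management mode allows
           * pass-through; return 0 otherwise.
           */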
   13123 static int
   13124 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13125 {
   13126 	uint32_t manc, fwsm, factps;
   13127 
   13128 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13129 		return 0;
   13130 
   13131 	manc = CSR_READ(sc, WMREG_MANC);
   13132 
   13133 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13134 		device_xname(sc->sc_dev), manc));
   13135 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13136 		return 0;
   13137 
   13138 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13139 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13140 		factps = CSR_READ(sc, WMREG_FACTPS);
   13141 		if (((factps & FACTPS_MNGCG) == 0)
   13142 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13143 			return 1;
    13144 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13145 		uint16_t data;
   13146 
   13147 		factps = CSR_READ(sc, WMREG_FACTPS);
   13148 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13149 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13150 			device_xname(sc->sc_dev), factps, data));
   13151 		if (((factps & FACTPS_MNGCG) == 0)
   13152 		    && ((data & NVM_CFG2_MNGM_MASK)
   13153 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13154 			return 1;
   13155 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13156 	    && ((manc & MANC_ASF_EN) == 0))
   13157 		return 1;
   13158 
   13159 	return 0;
   13160 }
   13161 
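          /*
           * wm_phy_resetisblocked:
           *
           * Return true if the firmware currently forbids a PHY reset.
           * ICH/PCH devices poll FWSM_RSPCIPHY for up to ~300ms; the
           * 8257x/80003 devices check MANC_BLK_PHY_RST_ON_IDE instead.
           */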
   13162 static bool
   13163 wm_phy_resetisblocked(struct wm_softc *sc)
   13164 {
   13165 	bool blocked = false;
   13166 	uint32_t reg;
   13167 	int i = 0;
   13168 
   13169 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13170 		device_xname(sc->sc_dev), __func__));
   13171 
   13172 	switch (sc->sc_type) {
   13173 	case WM_T_ICH8:
   13174 	case WM_T_ICH9:
   13175 	case WM_T_ICH10:
   13176 	case WM_T_PCH:
   13177 	case WM_T_PCH2:
   13178 	case WM_T_PCH_LPT:
   13179 	case WM_T_PCH_SPT:
   13180 		do {
   13181 			reg = CSR_READ(sc, WMREG_FWSM);
   13182 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13183 				blocked = true;
   13184 				delay(10*1000);
   13185 				continue;
   13186 			}
   13187 			blocked = false;
   13188 		} while (blocked && (i++ < 30));
   13189 		return blocked;
   13191 	case WM_T_82571:
   13192 	case WM_T_82572:
   13193 	case WM_T_82573:
   13194 	case WM_T_82574:
   13195 	case WM_T_82583:
   13196 	case WM_T_80003:
   13197 		reg = CSR_READ(sc, WMREG_MANC);
   13198 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13199 			return true;
   13200 		else
   13201 			return false;
   13203 	default:
   13204 		/* no problem */
   13205 		break;
   13206 	}
   13207 
   13208 	return false;
   13209 }
   13210 
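          /*
           * wm_get_hw_control:
           *
           * Tell the firmware that the driver has loaded by setting the
           * DRV_LOAD bit: in SWSM on 82573, in CTRL_EXT on other 82571
           * and newer devices.  wm_release_hw_control() clears it again.
           */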
   13211 static void
   13212 wm_get_hw_control(struct wm_softc *sc)
   13213 {
   13214 	uint32_t reg;
   13215 
   13216 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13217 		device_xname(sc->sc_dev), __func__));
   13218 
   13219 	if (sc->sc_type == WM_T_82573) {
   13220 		reg = CSR_READ(sc, WMREG_SWSM);
   13221 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13222 	} else if (sc->sc_type >= WM_T_82571) {
   13223 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13224 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13225 	}
   13226 }
   13227 
   13228 static void
   13229 wm_release_hw_control(struct wm_softc *sc)
   13230 {
   13231 	uint32_t reg;
   13232 
   13233 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13234 		device_xname(sc->sc_dev), __func__));
   13235 
   13236 	if (sc->sc_type == WM_T_82573) {
   13237 		reg = CSR_READ(sc, WMREG_SWSM);
   13238 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13239 	} else if (sc->sc_type >= WM_T_82571) {
   13240 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13241 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13242 	}
   13243 }
   13244 
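          /*
           * wm_gate_hw_phy_config_ich8lan:
           *
           * Gate or ungate the automatic PHY configuration performed by
           * the hardware.  Only PCH2 (82579) and newer devices have the
           * EXTCNFCTR_GATE_PHY_CFG bit.
           */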
   13245 static void
   13246 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13247 {
   13248 	uint32_t reg;
   13249 
   13250 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13251 		device_xname(sc->sc_dev), __func__));
   13252 
   13253 	if (sc->sc_type < WM_T_PCH2)
   13254 		return;
   13255 
   13256 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13257 
   13258 	if (gate)
   13259 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13260 	else
   13261 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13262 
   13263 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13264 }
   13265 
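          /*
           * wm_smbustopci:
           *
           * Switch the PHY from SMBus to PCIe-based register access.  ULP
           * mode is left first; then, depending on the device, LANPHYPC
           * is toggled and the FORCE_SMBUS bits are flipped until the PHY
           * responds; finally the PHY is reset unless that is blocked.
           */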
   13266 static void
   13267 wm_smbustopci(struct wm_softc *sc)
   13268 {
   13269 	uint32_t fwsm, reg;
   13270 	int rv = 0;
   13271 
   13272 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13273 		device_xname(sc->sc_dev), __func__));
   13274 
   13275 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13276 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13277 
   13278 	/* Disable ULP */
   13279 	wm_ulp_disable(sc);
   13280 
   13281 	/* Acquire PHY semaphore */
   13282 	sc->phy.acquire(sc);
   13283 
   13284 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13285 	switch (sc->sc_type) {
   13286 	case WM_T_PCH_LPT:
   13287 	case WM_T_PCH_SPT:
   13288 		if (wm_phy_is_accessible_pchlan(sc))
   13289 			break;
   13290 
   13291 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13292 		reg |= CTRL_EXT_FORCE_SMBUS;
   13293 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13294 #if 0
   13295 		/* XXX Isn't this required??? */
   13296 		CSR_WRITE_FLUSH(sc);
   13297 #endif
   13298 		delay(50 * 1000);
   13299 		/* FALLTHROUGH */
   13300 	case WM_T_PCH2:
   13301 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13302 			break;
   13303 		/* FALLTHROUGH */
   13304 	case WM_T_PCH:
   13305 		if (sc->sc_type == WM_T_PCH)
   13306 			if ((fwsm & FWSM_FW_VALID) != 0)
   13307 				break;
   13308 
   13309 		if (wm_phy_resetisblocked(sc) == true) {
   13310 			printf("XXX reset is blocked(3)\n");
   13311 			break;
   13312 		}
   13313 
   13314 		wm_toggle_lanphypc_pch_lpt(sc);
   13315 
   13316 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13317 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13318 				break;
   13319 
   13320 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13321 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13322 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13323 
   13324 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13325 				break;
   13326 			rv = -1;
   13327 		}
   13328 		break;
   13329 	default:
   13330 		break;
   13331 	}
   13332 
   13333 	/* Release semaphore */
   13334 	sc->phy.release(sc);
   13335 
   13336 	if (rv == 0) {
   13337 		if (wm_phy_resetisblocked(sc)) {
   13338 			printf("XXX reset is blocked(4)\n");
   13339 			goto out;
   13340 		}
   13341 		wm_reset_phy(sc);
   13342 		if (wm_phy_resetisblocked(sc))
    13343 			printf("XXX reset is blocked(5)\n");
   13344 	}
   13345 
   13346 out:
   13347 	/*
   13348 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13349 	 */
   13350 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13351 		delay(10*1000);
   13352 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13353 	}
   13354 }
   13355 
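          /*
           * wm_init_manageability:
           *
           * Set up packet routing for an active management firmware:
           * hardware interception of ARP is disabled and, on 82571 and
           * newer, packets for ports 623/624 are forwarded to the host.
           */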
   13356 static void
   13357 wm_init_manageability(struct wm_softc *sc)
   13358 {
   13359 
   13360 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13361 		device_xname(sc->sc_dev), __func__));
   13362 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13363 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13364 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13365 
   13366 		/* Disable hardware interception of ARP */
   13367 		manc &= ~MANC_ARP_EN;
   13368 
    13369 		/* Let the host receive management packets */
   13370 		if (sc->sc_type >= WM_T_82571) {
   13371 			manc |= MANC_EN_MNG2HOST;
    13372 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13373 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13374 		}
   13375 
   13376 		CSR_WRITE(sc, WMREG_MANC, manc);
   13377 	}
   13378 }
   13379 
   13380 static void
   13381 wm_release_manageability(struct wm_softc *sc)
   13382 {
   13383 
   13384 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13385 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13386 
   13387 		manc |= MANC_ARP_EN;
   13388 		if (sc->sc_type >= WM_T_82571)
   13389 			manc &= ~MANC_EN_MNG2HOST;
   13390 
   13391 		CSR_WRITE(sc, WMREG_MANC, manc);
   13392 	}
   13393 }
   13394 
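          /*
           * wm_get_wakeup:
           *
           * Collect the manageability related flags for this device
           * (HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES, HAS_MANAGE)
           * so the reset and wakeup paths can rely on them.
           */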
   13395 static void
   13396 wm_get_wakeup(struct wm_softc *sc)
   13397 {
   13398 
   13399 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13400 	switch (sc->sc_type) {
   13401 	case WM_T_82573:
   13402 	case WM_T_82583:
   13403 		sc->sc_flags |= WM_F_HAS_AMT;
   13404 		/* FALLTHROUGH */
   13405 	case WM_T_80003:
   13406 	case WM_T_82575:
   13407 	case WM_T_82576:
   13408 	case WM_T_82580:
   13409 	case WM_T_I350:
   13410 	case WM_T_I354:
   13411 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13412 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13413 		/* FALLTHROUGH */
   13414 	case WM_T_82541:
   13415 	case WM_T_82541_2:
   13416 	case WM_T_82547:
   13417 	case WM_T_82547_2:
   13418 	case WM_T_82571:
   13419 	case WM_T_82572:
   13420 	case WM_T_82574:
   13421 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13422 		break;
   13423 	case WM_T_ICH8:
   13424 	case WM_T_ICH9:
   13425 	case WM_T_ICH10:
   13426 	case WM_T_PCH:
   13427 	case WM_T_PCH2:
   13428 	case WM_T_PCH_LPT:
   13429 	case WM_T_PCH_SPT:
   13430 		sc->sc_flags |= WM_F_HAS_AMT;
   13431 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13432 		break;
   13433 	default:
   13434 		break;
   13435 	}
   13436 
   13437 	/* 1: HAS_MANAGE */
   13438 	if (wm_enable_mng_pass_thru(sc) != 0)
   13439 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13440 
   13441 	/*
    13442 	 * Note that the WOL flag is set after the EEPROM reset code
    13443 	 * has run.
   13444 	 */
   13445 }
   13446 
   13447 /*
   13448  * Unconfigure Ultra Low Power mode.
    13449  * Only for PCH_LPT and newer, except some I217/I218 variants (below).
   13450  */
   13451 static void
   13452 wm_ulp_disable(struct wm_softc *sc)
   13453 {
   13454 	uint32_t reg;
   13455 	int i = 0;
   13456 
   13457 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13458 		device_xname(sc->sc_dev), __func__));
   13459 	/* Exclude old devices */
   13460 	if ((sc->sc_type < WM_T_PCH_LPT)
   13461 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13462 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13463 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13464 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13465 		return;
   13466 
   13467 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13468 		/* Request ME un-configure ULP mode in the PHY */
   13469 		reg = CSR_READ(sc, WMREG_H2ME);
   13470 		reg &= ~H2ME_ULP;
   13471 		reg |= H2ME_ENFORCE_SETTINGS;
   13472 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13473 
   13474 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13475 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13476 			if (i++ == 30) {
    13477 				printf("%s: %s timed out\n",
          				    device_xname(sc->sc_dev), __func__);
   13478 				return;
   13479 			}
   13480 			delay(10 * 1000);
   13481 		}
   13482 		reg = CSR_READ(sc, WMREG_H2ME);
   13483 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13484 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13485 
   13486 		return;
   13487 	}
   13488 
   13489 	/* Acquire semaphore */
   13490 	sc->phy.acquire(sc);
   13491 
   13492 	/* Toggle LANPHYPC */
   13493 	wm_toggle_lanphypc_pch_lpt(sc);
   13494 
   13495 	/* Unforce SMBus mode in PHY */
   13496 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13497 	if (reg == 0x0000 || reg == 0xffff) {
   13498 		uint32_t reg2;
   13499 
   13500 		printf("%s: Force SMBus first.\n", __func__);
   13501 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13502 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13503 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13504 		delay(50 * 1000);
   13505 
   13506 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13507 	}
   13508 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13509 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13510 
   13511 	/* Unforce SMBus mode in MAC */
   13512 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13513 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13514 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13515 
   13516 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13517 	reg |= HV_PM_CTRL_K1_ENA;
   13518 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13519 
   13520 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13521 	reg &= ~(I218_ULP_CONFIG1_IND
   13522 	    | I218_ULP_CONFIG1_STICKY_ULP
   13523 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13524 	    | I218_ULP_CONFIG1_WOL_HOST
   13525 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13526 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13527 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13528 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13529 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13530 	reg |= I218_ULP_CONFIG1_START;
   13531 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13532 
   13533 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13534 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13535 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13536 
   13537 	/* Release semaphore */
   13538 	sc->phy.release(sc);
   13539 	wm_gmii_reset(sc);
   13540 	delay(50 * 1000);
   13541 }
   13542 
   13543 /* WOL in the newer chipset interfaces (pchlan) */
   13544 static void
   13545 wm_enable_phy_wakeup(struct wm_softc *sc)
   13546 {
   13547 #if 0
   13548 	uint16_t preg;
   13549 
   13550 	/* Copy MAC RARs to PHY RARs */
   13551 
   13552 	/* Copy MAC MTA to PHY MTA */
   13553 
   13554 	/* Configure PHY Rx Control register */
   13555 
   13556 	/* Enable PHY wakeup in MAC register */
   13557 
   13558 	/* Configure and enable PHY wakeup in PHY registers */
   13559 
   13560 	/* Activate PHY wakeup */
   13561 
   13562 	/* XXX */
   13563 #endif
   13564 }
   13565 
   13566 /* Power down workaround on D3 */
   13567 static void
   13568 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13569 {
   13570 	uint32_t reg;
   13571 	int i;
   13572 
   13573 	for (i = 0; i < 2; i++) {
   13574 		/* Disable link */
   13575 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13576 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13577 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13578 
   13579 		/*
   13580 		 * Call gig speed drop workaround on Gig disable before
   13581 		 * accessing any PHY registers
   13582 		 */
   13583 		if (sc->sc_type == WM_T_ICH8)
   13584 			wm_gig_downshift_workaround_ich8lan(sc);
   13585 
   13586 		/* Write VR power-down enable */
   13587 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13588 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13589 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13590 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13591 
   13592 		/* Read it back and test */
   13593 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13594 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13595 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13596 			break;
   13597 
   13598 		/* Issue PHY reset and repeat at most one more time */
   13599 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13600 	}
   13601 }
   13602 
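          /*
           * wm_enable_wakeup:
           *
           * Prepare the device for wake-on-LAN, provided it has the PCI
           * power management capability: advertise the wakeup capability,
           * apply the ICH/PCH low power workarounds, keep the laser
           * running on fiber devices, program the wakeup filters and set
           * PME_EN in the PCI power management registers.
           */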
   13603 static void
   13604 wm_enable_wakeup(struct wm_softc *sc)
   13605 {
   13606 	uint32_t reg, pmreg;
   13607 	pcireg_t pmode;
   13608 
   13609 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13610 		device_xname(sc->sc_dev), __func__));
   13611 
   13612 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13613 		&pmreg, NULL) == 0)
   13614 		return;
   13615 
   13616 	/* Advertise the wakeup capability */
   13617 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13618 	    | CTRL_SWDPIN(3));
   13619 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13620 
   13621 	/* ICH workaround */
   13622 	switch (sc->sc_type) {
   13623 	case WM_T_ICH8:
   13624 	case WM_T_ICH9:
   13625 	case WM_T_ICH10:
   13626 	case WM_T_PCH:
   13627 	case WM_T_PCH2:
   13628 	case WM_T_PCH_LPT:
   13629 	case WM_T_PCH_SPT:
   13630 		/* Disable gig during WOL */
   13631 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13632 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13633 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13634 		if (sc->sc_type == WM_T_PCH)
   13635 			wm_gmii_reset(sc);
   13636 
   13637 		/* Power down workaround */
   13638 		if (sc->sc_phytype == WMPHY_82577) {
   13639 			struct mii_softc *child;
   13640 
   13641 			/* Assume that the PHY is copper */
   13642 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13643 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13644 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13645 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25 */
   13646 		}
   13647 		break;
   13648 	default:
   13649 		break;
   13650 	}
   13651 
   13652 	/* Keep the laser running on fiber adapters */
   13653 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13654 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13655 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13656 		reg |= CTRL_EXT_SWDPIN(3);
   13657 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13658 	}
   13659 
   13660 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13661 #if 0	/* for the multicast packet */
   13662 	reg |= WUFC_MC;
   13663 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13664 #endif
   13665 
   13666 	if (sc->sc_type >= WM_T_PCH)
   13667 		wm_enable_phy_wakeup(sc);
   13668 	else {
   13669 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13670 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13671 	}
   13672 
   13673 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13674 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13675 		|| (sc->sc_type == WM_T_PCH2))
   13676 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13677 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13678 
   13679 	/* Request PME */
   13680 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13681 #if 0
   13682 	/* Disable WOL */
   13683 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13684 #else
   13685 	/* For WOL */
   13686 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13687 #endif
   13688 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13689 }
   13690 
   13691 /* LPLU */
   13692 
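          /*
           * wm_lplu_d0_disable:
           *
           * Disable Low Power Link Up in the D0 power state.  The
           * location of the LPLU control differs per device family (the
           * PHY power management register, PHPM, PHY_CTRL or HV_OEM_BITS).
           */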
   13693 static void
   13694 wm_lplu_d0_disable(struct wm_softc *sc)
   13695 {
   13696 	struct mii_data *mii = &sc->sc_mii;
   13697 	uint32_t reg;
   13698 
   13699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13700 		device_xname(sc->sc_dev), __func__));
   13701 
   13702 	if (sc->sc_phytype == WMPHY_IFE)
   13703 		return;
   13704 
   13705 	switch (sc->sc_type) {
   13706 	case WM_T_82571:
   13707 	case WM_T_82572:
   13708 	case WM_T_82573:
   13709 	case WM_T_82575:
   13710 	case WM_T_82576:
   13711 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13712 		reg &= ~PMR_D0_LPLU;
   13713 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13714 		break;
   13715 	case WM_T_82580:
   13716 	case WM_T_I350:
   13717 	case WM_T_I210:
   13718 	case WM_T_I211:
   13719 		reg = CSR_READ(sc, WMREG_PHPM);
   13720 		reg &= ~PHPM_D0A_LPLU;
   13721 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13722 		break;
   13723 	case WM_T_82574:
   13724 	case WM_T_82583:
   13725 	case WM_T_ICH8:
   13726 	case WM_T_ICH9:
   13727 	case WM_T_ICH10:
   13728 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13729 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13730 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13731 		CSR_WRITE_FLUSH(sc);
   13732 		break;
   13733 	case WM_T_PCH:
   13734 	case WM_T_PCH2:
   13735 	case WM_T_PCH_LPT:
   13736 	case WM_T_PCH_SPT:
   13737 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13738 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13739 		if (wm_phy_resetisblocked(sc) == false)
   13740 			reg |= HV_OEM_BITS_ANEGNOW;
   13741 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13742 		break;
   13743 	default:
   13744 		break;
   13745 	}
   13746 }
   13747 
   13748 /* EEE */
   13749 
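          /*
           * wm_set_eee_i350:
           *
           * Enable or disable Energy Efficient Ethernet on I350 class
           * devices according to the WM_F_EEE flag, including the Low
           * Power Idle transmit/receive paths and flow control in LPI.
           */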
   13750 static void
   13751 wm_set_eee_i350(struct wm_softc *sc)
   13752 {
   13753 	uint32_t ipcnfg, eeer;
   13754 
   13755 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13756 	eeer = CSR_READ(sc, WMREG_EEER);
   13757 
   13758 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13759 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13760 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13761 		    | EEER_LPI_FC);
   13762 	} else {
   13763 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13764 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13765 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13766 		    | EEER_LPI_FC);
   13767 	}
   13768 
   13769 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13770 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13771 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13772 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13773 }
   13774 
   13775 /*
   13776  * Workarounds (mainly PHY related).
   13777  * Basically, PHY's workarounds are in the PHY drivers.
   13778  */
   13779 
   13780 /* Work-around for 82566 Kumeran PCS lock loss */
   13781 static void
   13782 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13783 {
   13784 	struct mii_data *mii = &sc->sc_mii;
   13785 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13786 	int i;
   13787 	int reg;
   13788 
   13789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13790 		device_xname(sc->sc_dev), __func__));
   13791 
   13792 	/* If the link is not up, do nothing */
   13793 	if ((status & STATUS_LU) == 0)
   13794 		return;
   13795 
   13796 	/* Nothing to do if the link is other than 1Gbps */
   13797 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13798 		return;
   13799 
   13800 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13801 	for (i = 0; i < 10; i++) {
   13802 		/* read twice */
   13803 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13804 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13805 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13806 			goto out;	/* GOOD! */
   13807 
   13808 		/* Reset the PHY */
   13809 		wm_reset_phy(sc);
   13810 		delay(5*1000);
   13811 	}
   13812 
   13813 	/* Disable GigE link negotiation */
   13814 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13815 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13816 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13817 
   13818 	/*
   13819 	 * Call gig speed drop workaround on Gig disable before accessing
   13820 	 * any PHY registers.
   13821 	 */
   13822 	wm_gig_downshift_workaround_ich8lan(sc);
   13823 
   13824 out:
   13825 	return;
   13826 }
   13827 
   13828 /* WOL from S5 stops working */
   13829 static void
   13830 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13831 {
   13832 	uint16_t kmrn_reg;
   13833 
   13834 	/* Only for igp3 */
   13835 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13836 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13837 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13838 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13839 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13840 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13841 	}
   13842 }
   13843 
   13844 /*
   13845  * Workaround for pch's PHYs
   13846  * XXX should be moved to new PHY driver?
   13847  */
   13848 static void
   13849 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13850 {
   13851 
   13852 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13853 		device_xname(sc->sc_dev), __func__));
   13854 	KASSERT(sc->sc_type == WM_T_PCH);
   13855 
   13856 	if (sc->sc_phytype == WMPHY_82577)
   13857 		wm_set_mdio_slow_mode_hv(sc);
   13858 
    13859 	/* XXX (PCH rev. 2) && (82577 && (PHY rev. 2 or 3)) */
    13860 
    13861 	/* XXX (82577 && (PHY rev. 1 or 2)) || (82578 && PHY rev. 1) */
   13862 
   13863 	/* 82578 */
   13864 	if (sc->sc_phytype == WMPHY_82578) {
   13865 		struct mii_softc *child;
   13866 
   13867 		/*
   13868 		 * Return registers to default by doing a soft reset then
   13869 		 * writing 0x3140 to the control register
   13870 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13871 		 */
   13872 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13873 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13874 			PHY_RESET(child);
   13875 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13876 			    0x3140);
   13877 		}
   13878 	}
   13879 
   13880 	/* Select page 0 */
   13881 	sc->phy.acquire(sc);
   13882 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13883 	sc->phy.release(sc);
   13884 
   13885 	/*
   13886 	 * Configure the K1 Si workaround during phy reset assuming there is
   13887 	 * link so that it disables K1 if link is in 1Gbps.
   13888 	 */
   13889 	wm_k1_gig_workaround_hv(sc, 1);
   13890 }
   13891 
   13892 static void
   13893 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13894 {
   13895 
   13896 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13897 		device_xname(sc->sc_dev), __func__));
   13898 	KASSERT(sc->sc_type == WM_T_PCH2);
   13899 
   13900 	wm_set_mdio_slow_mode_hv(sc);
   13901 }
   13902 
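          /*
           * wm_k1_gig_workaround_hv:
           *
           * K1 workaround for 82577/82578: K1 (a power saving state)
           * must not stay enabled while a 1Gbps link is up, so disable
           * it whenever "link" says a link is assumed to be established,
           * and write the link stall fix values to the KMRN diag register.
           */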
   13903 static int
   13904 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13905 {
   13906 	int k1_enable = sc->sc_nvm_k1_enabled;
   13907 
   13908 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13909 		device_xname(sc->sc_dev), __func__));
   13910 
   13911 	if (sc->phy.acquire(sc) != 0)
   13912 		return -1;
   13913 
   13914 	if (link) {
   13915 		k1_enable = 0;
   13916 
   13917 		/* Link stall fix for link up */
   13918 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13919 	} else {
   13920 		/* Link stall fix for link down */
   13921 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13922 	}
   13923 
   13924 	wm_configure_k1_ich8lan(sc, k1_enable);
   13925 	sc->phy.release(sc);
   13926 
   13927 	return 0;
   13928 }
   13929 
   13930 static void
   13931 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13932 {
   13933 	uint32_t reg;
   13934 
   13935 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13936 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13937 	    reg | HV_KMRN_MDIO_SLOW);
   13938 }
   13939 
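          /*
           * wm_configure_k1_ich8lan:
           *
           * Turn the K1 power saving state on or off via the Kumeran
           * K1_CONFIG register.  The MAC speed is briefly forced while
           * the new setting settles, then restored.
           */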
   13940 static void
   13941 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13942 {
   13943 	uint32_t ctrl, ctrl_ext, tmp;
   13944 	uint16_t kmrn_reg;
   13945 
   13946 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13947 
   13948 	if (k1_enable)
   13949 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13950 	else
   13951 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13952 
   13953 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13954 
   13955 	delay(20);
   13956 
   13957 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13958 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13959 
   13960 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13961 	tmp |= CTRL_FRCSPD;
   13962 
   13963 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13964 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13965 	CSR_WRITE_FLUSH(sc);
   13966 	delay(20);
   13967 
   13968 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13969 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13970 	CSR_WRITE_FLUSH(sc);
   13971 	delay(20);
   13972 }
   13973 
    13974 /* Special case: the 82575 needs a manual init script after reset ... */
   13975 static void
   13976 wm_reset_init_script_82575(struct wm_softc *sc)
   13977 {
   13978 	/*
    13979 	 * Remark: this is untested code; we have no board without EEPROM.
    13980 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   13981 	 */
   13982 
   13983 	/* SerDes configuration via SERDESCTRL */
   13984 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13985 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13986 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13987 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13988 
   13989 	/* CCM configuration via CCMCTL register */
   13990 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13991 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13992 
   13993 	/* PCIe lanes configuration */
   13994 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13995 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13996 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13997 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13998 
   13999 	/* PCIe PLL Configuration */
   14000 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14001 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14002 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14003 }
   14004 
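          /*
           * wm_reset_mdicnfg_82580:
           *
           * Re-derive the MDICNFG destination and shared MDIO bits from
           * this port's CFG3 NVM word after a reset.  Only required in
           * SGMII mode.
           */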
   14005 static void
   14006 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14007 {
   14008 	uint32_t reg;
   14009 	uint16_t nvmword;
   14010 	int rv;
   14011 
   14012 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14013 		return;
   14014 
   14015 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14016 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14017 	if (rv != 0) {
   14018 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14019 		    __func__);
   14020 		return;
   14021 	}
   14022 
   14023 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14024 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14025 		reg |= MDICNFG_DEST;
   14026 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14027 		reg |= MDICNFG_COM_MDIO;
   14028 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14029 }
   14030 
   14031 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14032 
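          /*
           * wm_phy_is_accessible_pchlan:
           *
           * Return true if the PCH PHY answers with a valid PHY ID.  On
           * pre-LPT devices the read is retried in MDIO slow mode; on
           * LPT/SPT, SMBus mode is unforced afterwards if the ME firmware
           * is not active.  Must be called with the PHY semaphore held.
           */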
   14033 static bool
   14034 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14035 {
   14036 	int i;
   14037 	uint32_t reg;
   14038 	uint16_t id1, id2;
   14039 
   14040 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14041 		device_xname(sc->sc_dev), __func__));
   14042 	id1 = id2 = 0xffff;
   14043 	for (i = 0; i < 2; i++) {
   14044 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14045 		if (MII_INVALIDID(id1))
   14046 			continue;
   14047 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14048 		if (MII_INVALIDID(id2))
   14049 			continue;
   14050 		break;
   14051 	}
    14052 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    14053 		goto out;
   14055 
   14056 	if (sc->sc_type < WM_T_PCH_LPT) {
   14057 		sc->phy.release(sc);
   14058 		wm_set_mdio_slow_mode_hv(sc);
   14059 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14060 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14061 		sc->phy.acquire(sc);
   14062 	}
   14063 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14064 		printf("XXX return with false\n");
   14065 		return false;
   14066 	}
   14067 out:
   14068 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14069 		/* Only unforce SMBus if ME is not active */
   14070 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14071 			/* Unforce SMBus mode in PHY */
   14072 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14073 			    CV_SMB_CTRL);
   14074 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14075 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14076 			    CV_SMB_CTRL, reg);
   14077 
   14078 			/* Unforce SMBus mode in MAC */
   14079 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14080 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14081 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14082 		}
   14083 	}
   14084 	return true;
   14085 }
   14086 
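          /*
           * wm_toggle_lanphypc_pch_lpt:
           *
           * Toggle the LANPHYPC pin to power cycle the PHY, then wait for
           * the PHY configuration to finish: a fixed 50ms delay before
           * PCH_LPT, a poll of CTRL_EXT_LPCD on newer devices.
           */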
   14087 static void
   14088 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14089 {
   14090 	uint32_t reg;
   14091 	int i;
   14092 
   14093 	/* Set PHY Config Counter to 50msec */
   14094 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14095 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14096 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14097 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14098 
   14099 	/* Toggle LANPHYPC */
   14100 	reg = CSR_READ(sc, WMREG_CTRL);
   14101 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14102 	reg &= ~CTRL_LANPHYPC_VALUE;
   14103 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14104 	CSR_WRITE_FLUSH(sc);
   14105 	delay(1000);
   14106 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14107 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14108 	CSR_WRITE_FLUSH(sc);
   14109 
   14110 	if (sc->sc_type < WM_T_PCH_LPT)
   14111 		delay(50 * 1000);
   14112 	else {
   14113 		i = 20;
   14114 
   14115 		do {
   14116 			delay(5 * 1000);
   14117 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14118 		    && i--);
   14119 
   14120 		delay(30 * 1000);
   14121 	}
   14122 }
   14123 
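          /*
           * wm_platform_pm_pch_lpt:
           *
           * Set Latency Tolerance Reporting (LTR) and OBFF on LPT/SPT.
           * The reported latency is the time the Rx packet buffer can
           * absorb traffic at the current link speed, clipped to the
           * platform maximum read from PCI config space; the OBFF high
           * water mark is derived from the same numbers.  With the link
           * down, zero latency is reported.
           */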
   14124 static int
   14125 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14126 {
   14127 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14128 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14129 	uint32_t rxa;
   14130 	uint16_t scale = 0, lat_enc = 0;
   14131 	int32_t obff_hwm = 0;
   14132 	int64_t lat_ns, value;
   14133 
   14134 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14135 		device_xname(sc->sc_dev), __func__));
   14136 
   14137 	if (link) {
   14138 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14139 		uint32_t status;
   14140 		uint16_t speed;
   14141 		pcireg_t preg;
   14142 
   14143 		status = CSR_READ(sc, WMREG_STATUS);
   14144 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14145 		case STATUS_SPEED_10:
   14146 			speed = 10;
   14147 			break;
   14148 		case STATUS_SPEED_100:
   14149 			speed = 100;
   14150 			break;
   14151 		case STATUS_SPEED_1000:
   14152 			speed = 1000;
   14153 			break;
   14154 		default:
   14155 			device_printf(sc->sc_dev, "Unknown speed "
   14156 			    "(status = %08x)\n", status);
   14157 			return -1;
   14158 		}
   14159 
   14160 		/* Rx Packet Buffer Allocation size (KB) */
   14161 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14162 
   14163 		/*
   14164 		 * Determine the maximum latency tolerated by the device.
   14165 		 *
   14166 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14167 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14168 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14169 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14170 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14171 		 */
   14172 		lat_ns = ((int64_t)rxa * 1024 -
   14173 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14174 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14175 		if (lat_ns < 0)
   14176 			lat_ns = 0;
   14177 		else
   14178 			lat_ns /= speed;
   14179 		value = lat_ns;
   14180 
   14181 		while (value > LTRV_VALUE) {
    14182 			scale++;
   14183 			value = howmany(value, __BIT(5));
   14184 		}
   14185 		if (scale > LTRV_SCALE_MAX) {
   14186 			printf("%s: Invalid LTR latency scale %d\n",
   14187 			    device_xname(sc->sc_dev), scale);
   14188 			return -1;
   14189 		}
   14190 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14191 
   14192 		/* Determine the maximum latency tolerated by the platform */
   14193 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14194 		    WM_PCI_LTR_CAP_LPT);
   14195 		max_snoop = preg & 0xffff;
   14196 		max_nosnoop = preg >> 16;
   14197 
   14198 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14199 
   14200 		if (lat_enc > max_ltr_enc) {
   14201 			lat_enc = max_ltr_enc;
   14202 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14203 			    * PCI_LTR_SCALETONS(
   14204 				    __SHIFTOUT(lat_enc,
   14205 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14206 		}
   14207 
   14208 		if (lat_ns) {
   14209 			lat_ns *= speed * 1000;
   14210 			lat_ns /= 8;
   14211 			lat_ns /= 1000000000;
   14212 			obff_hwm = (int32_t)(rxa - lat_ns);
   14213 		}
   14214 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   14215 			device_printf(sc->sc_dev, "Invalid high water mark %d"
    14216 			    " (rxa = %d, lat_ns = %d)\n",
   14217 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14218 			return -1;
   14219 		}
   14220 	}
   14221 	/* Snoop and No-Snoop latencies the same */
   14222 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14223 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14224 
   14225 	/* Set OBFF high water mark */
   14226 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14227 	reg |= obff_hwm;
   14228 	CSR_WRITE(sc, WMREG_SVT, reg);
   14229 
   14230 	/* Enable OBFF */
   14231 	reg = CSR_READ(sc, WMREG_SVCR);
   14232 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14233 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14234 
   14235 	return 0;
   14236 }
   14237 
   14238 /*
   14239  * I210 Errata 25 and I211 Errata 10
   14240  * Slow System Clock.
   14241  */
   14242 static void
   14243 wm_pll_workaround_i210(struct wm_softc *sc)
   14244 {
   14245 	uint32_t mdicnfg, wuc;
   14246 	uint32_t reg;
   14247 	pcireg_t pcireg;
   14248 	uint32_t pmreg;
   14249 	uint16_t nvmword, tmp_nvmword;
   14250 	int phyval;
   14251 	bool wa_done = false;
   14252 	int i;
   14253 
   14254 	/* Save WUC and MDICNFG registers */
   14255 	wuc = CSR_READ(sc, WMREG_WUC);
   14256 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14257 
   14258 	reg = mdicnfg & ~MDICNFG_DEST;
   14259 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14260 
   14261 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14262 		nvmword = INVM_DEFAULT_AL;
   14263 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14264 
   14265 	/* Get Power Management cap offset */
   14266 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14267 		&pmreg, NULL) == 0)
   14268 		return;
   14269 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14270 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14271 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14272 
   14273 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14274 			break; /* OK */
   14275 		}
   14276 
   14277 		wa_done = true;
   14278 		/* Directly reset the internal PHY */
   14279 		reg = CSR_READ(sc, WMREG_CTRL);
   14280 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14281 
   14282 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14283 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14284 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14285 
   14286 		CSR_WRITE(sc, WMREG_WUC, 0);
   14287 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14288 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14289 
   14290 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14291 		    pmreg + PCI_PMCSR);
   14292 		pcireg |= PCI_PMCSR_STATE_D3;
   14293 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14294 		    pmreg + PCI_PMCSR, pcireg);
   14295 		delay(1000);
   14296 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14297 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14298 		    pmreg + PCI_PMCSR, pcireg);
   14299 
   14300 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14301 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14302 
   14303 		/* Restore WUC register */
   14304 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14305 	}
   14306 
   14307 	/* Restore MDICNFG setting */
   14308 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14309 	if (wa_done)
   14310 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14311 }
   14312 
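          /*
           * wm_legacy_irq_quirk_spt:
           *
           * Apply the PCH_SPT quirk for legacy (INTx) interrupts: ungate
           * the side clock and disable IOSF sideband clock gating and
           * clock requests.
           */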
   14313 static void
   14314 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14315 {
   14316 	uint32_t reg;
   14317 
   14318 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14319 		device_xname(sc->sc_dev), __func__));
   14320 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14321 
   14322 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14323 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14324 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14325 
   14326 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14327 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14328 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14329 }
   14330