/*	$NetBSD: if_wm.c,v 1.534 2017/07/28 09:12:40 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.534 2017/07/28 09:12:40 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
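
/*
 * Usage sketch (illustrative, not part of the driver): DPRINTF() takes a
 * debug-category mask and a parenthesized printf() argument list, so the
 * call below prints only when WM_DEBUG_LINK is set in wm_debug.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK,
	    ("%s: link is up\n", device_xname(sc->sc_dev)));
#endif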

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts used by this device driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
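
/*
 * Worked example (an assumption based on the definitions above): with the
 * maximum of 16 queue interrupts, MSI-X vectors 0..15 serve the Tx/Rx
 * queues and the one extra vector (hence the "+ 1") serves link
 * interrupts, giving WM_MAX_NINTR == 17 in total.
 */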

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
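
/*
 * A minimal sketch of the ring arithmetic (assumes txq_ndesc == 4096):
 * because ring and job-queue sizes are powers of two, WM_NEXTTX() and
 * WM_NEXTTXS() wrap with a cheap mask instead of a modulo.
 */
#if 0
	int next = WM_NEXTTX(txq, 4095);	/* (4095 + 1) & 4095 == 0 */
#endif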

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
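
/*
 * Worked numbers for the comment above: 256 descriptors with 2k
 * (MCLBYTES) buffers give 512k of Rx space; a full-sized jumbo frame
 * (~9k) spans 5 such buffers, and 256 / 5 leaves room for roughly 50
 * jumbo packets.
 */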

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
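
/*
 * Offset sketch (illustrative): descriptors are laid out back to back in
 * control-data memory, so the byte offset of descriptor x is simply x
 * times the per-queue descriptor size, e.g. 10 * 16 == 160 for a 16-byte
 * legacy Tx descriptor.
 */
#if 0
	bus_size_t off = WM_CDTXOFF(txq, 10);	/* 10 * txq->txq_descsize */
#endif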

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};
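
/*
 * Note (an assumption based on wm_rxpbs_adjust_82580() declared below):
 * on the 82580 the RXPBS register field indexes this table to obtain the
 * actual packet buffer allocation, e.g. index 2 yields 144.
 */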

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (/*CONSTCOND*/0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
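
/*
 * Expansion sketch (hypothetical queue number 0): for a structure that
 * contains WM_Q_EVCNT_DEFINE(txq, txdw), the attach macro formats the
 * counter name "txq00txdw" into txq_txdw_evcnt_name and registers
 * txq_ev_txdw via evcnt_attach_dynamic(); the detach macro undoes it.
 */
#if 0
	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, device_xname(sc->sc_dev));
	WM_Q_EVCNT_DETACH(txq, txdw, txq, 0);
#endif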

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
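
/*
 * Usage sketch: the Rx path grows a per-queue mbuf chain one cluster at
 * a time; WM_RXCHAIN_LINK() appends in O(1) through the tail pointer and
 * WM_RXCHAIN_RESET() re-arms the tail pointer at rxq_head for the next
 * packet.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* empty chain; tailp -> &rxq_head */
	WM_RXCHAIN_LINK(rxq, m);	/* append; rxq_head == rxq_tail == m */
#endif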

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
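
/*
 * Typical read-modify-write sketch: CSRs are accessed through these
 * wrappers, and a posted write can be forced out with CSR_WRITE_FLUSH(),
 * which is just a harmless read of the STATUS register.
 */
#if 0
	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU);	/* set link up */
	CSR_WRITE_FLUSH(sc);
#endif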

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
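
/*
 * Split sketch (illustrative): the hardware takes the 64-bit physical
 * ring address as two 32-bit register writes; with a 32-bit bus_addr_t
 * the HI half is constant 0.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* addr & 0xffffffff */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* addr >> 32, or 0 */
#endif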

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
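         
         /*
          * The all-zero entry above is the table sentinel: wm_lookup() below
          * scans wm_products until it hits a NULL wmp_name.
          */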
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
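         
         /*
          * Usage sketch (illustrative only): since the I/O window is a plain
          * address/data indirection, an indirect register write is a single
          * call, e.g. resetting the chip through I/O space:
          *
          *	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
          */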
   1523 
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
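         
         /*
          * For example, data 0xab destined for 8-bit sub-register 2 is encoded
          * above as (0xab & SCTL_CTL_DATA_MASK) | (2 << SCTL_CTL_ADDR_SHIFT),
          * and the poll loop waits at most SCTL_CTL_POLL_TIMEOUT * 5
          * microseconds for SCTL_CTL_READY before warning.
          */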
   1546 
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
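         
         /*
          * For example, with a 64-bit bus_addr_t, v = 0x123456789 stores
          * wa_low = htole32(0x23456789) and wa_high = htole32(0x1); with a
          * 32-bit bus_addr_t the high word is simply zero.
          */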
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
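         
         /*
          * For example, on a 256-descriptor ring, start = 250 and num = 10
          * turn into two bus_dmamap_sync() calls: one covering descriptors
          * 250-255, then one covering descriptors 0-3 after the wrap.
          */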
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
    1599 	 *
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
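         	/*
         	 * With the 2-byte tweak, the 14-byte Ethernet header ends at
         	 * offset 16, so the IP header that follows starts on a 4-byte
         	 * boundary.
         	 */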
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
   1644 /* Lookup supported device table */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
    1770 	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
    1813 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1814 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    1815 			 * That's no problem, because those newer chips don't
    1816 			 * have this bug.
    1817 			 *
    1818 			 * The i8254x apparently doesn't respond when the
    1819 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1820 			 * been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1857 	counts[PCI_INTR_TYPE_MSI] = 1;
   1858 	counts[PCI_INTR_TYPE_INTX] = 1;
   1859 	/* overridden by disable flags */
   1860 	if (wm_disable_msi != 0) {
   1861 		counts[PCI_INTR_TYPE_MSI] = 0;
   1862 		if (wm_disable_msix != 0) {
   1863 			max_type = PCI_INTR_TYPE_INTX;
   1864 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1865 		}
   1866 	} else if (wm_disable_msix != 0) {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
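         	/*
         	 * Fallback order: try MSI-X first; on failure release the
         	 * vectors and retry with MSI, then with INTx.
         	 */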
   1871 alloc_retry:
   1872 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1873 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1874 		return;
   1875 	}
   1876 
   1877 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1878 		error = wm_setup_msix(sc);
   1879 		if (error) {
   1880 			pci_intr_release(pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSIX]);
   1882 
   1883 			/* Setup for MSI: Disable MSI-X */
   1884 			max_type = PCI_INTR_TYPE_MSI;
   1885 			counts[PCI_INTR_TYPE_MSI] = 1;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
    1889 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_MSI]);
   1895 
   1896 			/* The next try is for INTx: Disable MSI */
   1897 			max_type = PCI_INTR_TYPE_INTX;
   1898 			counts[PCI_INTR_TYPE_INTX] = 1;
   1899 			goto alloc_retry;
   1900 		}
   1901 	} else {
   1902 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1903 		error = wm_setup_legacy(sc);
   1904 		if (error) {
   1905 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1906 			    counts[PCI_INTR_TYPE_INTX]);
   1907 			return;
   1908 		}
   1909 	}
   1910 
   1911 	/*
   1912 	 * Check the function ID (unit number of the chip).
   1913 	 */
   1914 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1915 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1916 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1917 	    || (sc->sc_type == WM_T_82580)
   1918 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1919 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1920 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1921 	else
   1922 		sc->sc_funcid = 0;
   1923 
   1924 	/*
   1925 	 * Determine a few things about the bus we're connected to.
   1926 	 */
   1927 	if (sc->sc_type < WM_T_82543) {
   1928 		/* We don't really know the bus characteristics here. */
   1929 		sc->sc_bus_speed = 33;
   1930 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1931 		/*
   1932 		 * CSA (Communication Streaming Architecture) is about as fast
    1933 		 * as a 32-bit 66MHz PCI bus.
   1934 		 */
   1935 		sc->sc_flags |= WM_F_CSA;
   1936 		sc->sc_bus_speed = 66;
   1937 		aprint_verbose_dev(sc->sc_dev,
   1938 		    "Communication Streaming Architecture\n");
   1939 		if (sc->sc_type == WM_T_82547) {
   1940 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1941 			callout_setfunc(&sc->sc_txfifo_ch,
   1942 					wm_82547_txfifo_stall, sc);
   1943 			aprint_verbose_dev(sc->sc_dev,
   1944 			    "using 82547 Tx FIFO stall work-around\n");
   1945 		}
   1946 	} else if (sc->sc_type >= WM_T_82571) {
   1947 		sc->sc_flags |= WM_F_PCIE;
   1948 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1949 		    && (sc->sc_type != WM_T_ICH10)
   1950 		    && (sc->sc_type != WM_T_PCH)
   1951 		    && (sc->sc_type != WM_T_PCH2)
   1952 		    && (sc->sc_type != WM_T_PCH_LPT)
   1953 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1954 			/* ICH* and PCH* have no PCIe capability registers */
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1957 				NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIe capability\n");
   1960 		}
   1961 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1962 	} else {
   1963 		reg = CSR_READ(sc, WMREG_STATUS);
   1964 		if (reg & STATUS_BUS64)
   1965 			sc->sc_flags |= WM_F_BUS64;
   1966 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1967 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1968 
   1969 			sc->sc_flags |= WM_F_PCIX;
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1972 				aprint_error_dev(sc->sc_dev,
   1973 				    "unable to find PCIX capability\n");
   1974 			else if (sc->sc_type != WM_T_82545_3 &&
   1975 				 sc->sc_type != WM_T_82546_3) {
   1976 				/*
   1977 				 * Work around a problem caused by the BIOS
   1978 				 * setting the max memory read byte count
   1979 				 * incorrectly.
   1980 				 */
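         				/*
         				 * MMRBC is encoded as 512 << n, so e.g.
         				 * a command field of 2 with a status
         				 * maximum of 1 means 2048 bytes were
         				 * requested but only 1024 are supported;
         				 * the code below clamps the command
         				 * field down to the status maximum.
         				 */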
   1981 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1982 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1983 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1984 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1985 
   1986 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1987 				    PCIX_CMD_BYTECNT_SHIFT;
   1988 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1989 				    PCIX_STATUS_MAXB_SHIFT;
   1990 				if (bytecnt > maxb) {
   1991 					aprint_verbose_dev(sc->sc_dev,
   1992 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1993 					    512 << bytecnt, 512 << maxb);
   1994 					pcix_cmd = (pcix_cmd &
   1995 					    ~PCIX_CMD_BYTECNT_MASK) |
   1996 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1997 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1998 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1999 					    pcix_cmd);
   2000 				}
   2001 			}
   2002 		}
   2003 		/*
   2004 		 * The quad port adapter is special; it has a PCIX-PCIX
   2005 		 * bridge on the board, and can run the secondary bus at
   2006 		 * a higher speed.
   2007 		 */
   2008 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2009 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2010 								      : 66;
   2011 		} else if (sc->sc_flags & WM_F_PCIX) {
   2012 			switch (reg & STATUS_PCIXSPD_MASK) {
   2013 			case STATUS_PCIXSPD_50_66:
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			case STATUS_PCIXSPD_66_100:
   2017 				sc->sc_bus_speed = 100;
   2018 				break;
   2019 			case STATUS_PCIXSPD_100_133:
   2020 				sc->sc_bus_speed = 133;
   2021 				break;
   2022 			default:
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2025 				    reg & STATUS_PCIXSPD_MASK);
   2026 				sc->sc_bus_speed = 66;
   2027 				break;
   2028 			}
   2029 		} else
   2030 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2031 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2032 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2033 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2034 	}
   2035 
   2036 	/* clear interesting stat counters */
   2037 	CSR_READ(sc, WMREG_COLC);
   2038 	CSR_READ(sc, WMREG_RXERRC);
   2039 
   2040 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2041 	    || (sc->sc_type >= WM_T_ICH8))
   2042 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2043 	if (sc->sc_type >= WM_T_ICH8)
   2044 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2045 
   2046 	/* Set PHY, NVM mutex related stuff */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82542_2_0:
   2049 	case WM_T_82542_2_1:
   2050 	case WM_T_82543:
   2051 	case WM_T_82544:
   2052 		/* Microwire */
   2053 		sc->nvm.read = wm_nvm_read_uwire;
   2054 		sc->sc_nvm_wordsize = 64;
   2055 		sc->sc_nvm_addrbits = 6;
   2056 		break;
   2057 	case WM_T_82540:
   2058 	case WM_T_82545:
   2059 	case WM_T_82545_3:
   2060 	case WM_T_82546:
   2061 	case WM_T_82546_3:
   2062 		/* Microwire */
   2063 		sc->nvm.read = wm_nvm_read_uwire;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_SIZE) {
   2066 			sc->sc_nvm_wordsize = 256;
   2067 			sc->sc_nvm_addrbits = 8;
   2068 		} else {
   2069 			sc->sc_nvm_wordsize = 64;
   2070 			sc->sc_nvm_addrbits = 6;
   2071 		}
   2072 		sc->sc_flags |= WM_F_LOCK_EECD;
   2073 		sc->nvm.acquire = wm_get_eecd;
   2074 		sc->nvm.release = wm_put_eecd;
   2075 		break;
   2076 	case WM_T_82541:
   2077 	case WM_T_82541_2:
   2078 	case WM_T_82547:
   2079 	case WM_T_82547_2:
   2080 		reg = CSR_READ(sc, WMREG_EECD);
   2081 		/*
    2082 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI EEPROM
    2083 		 * only on the 8254[17]; set the flags and functions first.
   2084 		 */
   2085 		sc->sc_flags |= WM_F_LOCK_EECD;
   2086 		sc->nvm.acquire = wm_get_eecd;
   2087 		sc->nvm.release = wm_put_eecd;
   2088 		if (reg & EECD_EE_TYPE) {
   2089 			/* SPI */
   2090 			sc->nvm.read = wm_nvm_read_spi;
   2091 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 			wm_nvm_set_addrbits_size_eecd(sc);
   2093 		} else {
   2094 			/* Microwire */
   2095 			sc->nvm.read = wm_nvm_read_uwire;
   2096 			if ((reg & EECD_EE_ABITS) != 0) {
   2097 				sc->sc_nvm_wordsize = 256;
   2098 				sc->sc_nvm_addrbits = 8;
   2099 			} else {
   2100 				sc->sc_nvm_wordsize = 64;
   2101 				sc->sc_nvm_addrbits = 6;
   2102 			}
   2103 		}
   2104 		break;
   2105 	case WM_T_82571:
   2106 	case WM_T_82572:
   2107 		/* SPI */
   2108 		sc->nvm.read = wm_nvm_read_eerd;
    2109 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2110 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2111 		wm_nvm_set_addrbits_size_eecd(sc);
   2112 		sc->phy.acquire = wm_get_swsm_semaphore;
   2113 		sc->phy.release = wm_put_swsm_semaphore;
   2114 		sc->nvm.acquire = wm_get_nvm_82571;
   2115 		sc->nvm.release = wm_put_nvm_82571;
   2116 		break;
   2117 	case WM_T_82573:
   2118 	case WM_T_82574:
   2119 	case WM_T_82583:
   2120 		sc->nvm.read = wm_nvm_read_eerd;
    2121 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2122 		if (sc->sc_type == WM_T_82573) {
   2123 			sc->phy.acquire = wm_get_swsm_semaphore;
   2124 			sc->phy.release = wm_put_swsm_semaphore;
   2125 			sc->nvm.acquire = wm_get_nvm_82571;
   2126 			sc->nvm.release = wm_put_nvm_82571;
   2127 		} else {
   2128 			/* Both PHY and NVM use the same semaphore. */
   2129 			sc->phy.acquire = sc->nvm.acquire
   2130 			    = wm_get_swfwhw_semaphore;
   2131 			sc->phy.release = sc->nvm.release
   2132 			    = wm_put_swfwhw_semaphore;
   2133 		}
   2134 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2135 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2136 			sc->sc_nvm_wordsize = 2048;
   2137 		} else {
   2138 			/* SPI */
   2139 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2140 			wm_nvm_set_addrbits_size_eecd(sc);
   2141 		}
   2142 		break;
   2143 	case WM_T_82575:
   2144 	case WM_T_82576:
   2145 	case WM_T_82580:
   2146 	case WM_T_I350:
   2147 	case WM_T_I354:
   2148 	case WM_T_80003:
   2149 		/* SPI */
   2150 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2151 		wm_nvm_set_addrbits_size_eecd(sc);
    2152 		if ((sc->sc_type == WM_T_80003)
   2153 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2154 			sc->nvm.read = wm_nvm_read_eerd;
   2155 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2156 		} else {
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		}
   2160 		sc->phy.acquire = wm_get_phy_82575;
   2161 		sc->phy.release = wm_put_phy_82575;
   2162 		sc->nvm.acquire = wm_get_nvm_80003;
   2163 		sc->nvm.release = wm_put_nvm_80003;
   2164 		break;
   2165 	case WM_T_ICH8:
   2166 	case WM_T_ICH9:
   2167 	case WM_T_ICH10:
   2168 	case WM_T_PCH:
   2169 	case WM_T_PCH2:
   2170 	case WM_T_PCH_LPT:
   2171 		sc->nvm.read = wm_nvm_read_ich8;
   2172 		/* FLASH */
   2173 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2174 		sc->sc_nvm_wordsize = 2048;
   2175 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2176 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2177 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2178 			aprint_error_dev(sc->sc_dev,
   2179 			    "can't map FLASH registers\n");
   2180 			goto out;
   2181 		}
   2182 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2183 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2184 		    ICH_FLASH_SECTOR_SIZE;
   2185 		sc->sc_ich8_flash_bank_size =
   2186 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2187 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2188 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2189 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
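         		/*
         		 * Worked example: a GFPREG base sector of 1 and limit
         		 * sector of 2 give a 2-sector NVM region, i.e.
         		 * 2 * ICH_FLASH_SECTOR_SIZE bytes; halving for the two
         		 * banks and converting bytes to 16-bit words yields the
         		 * per-bank size in words computed above.
         		 */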
   2190 		sc->sc_flashreg_offset = 0;
   2191 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2192 		sc->phy.release = wm_put_swflag_ich8lan;
   2193 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2194 		sc->nvm.release = wm_put_nvm_ich8lan;
   2195 		break;
   2196 	case WM_T_PCH_SPT:
   2197 		sc->nvm.read = wm_nvm_read_spt;
   2198 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2199 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2200 		sc->sc_flasht = sc->sc_st;
   2201 		sc->sc_flashh = sc->sc_sh;
   2202 		sc->sc_ich8_flash_base = 0;
   2203 		sc->sc_nvm_wordsize =
   2204 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2205 			* NVM_SIZE_MULTIPLIER;
    2206 		/* It is the size in bytes; we want words */
   2207 		sc->sc_nvm_wordsize /= 2;
   2208 		/* assume 2 banks */
   2209 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
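         		/*
         		 * For example, a STRAP NVM-size field of 0x1f gives
         		 * (0x1f + 1) * NVM_SIZE_MULTIPLIER bytes, i.e. half that
         		 * many 16-bit words, split evenly across the assumed two
         		 * banks.
         		 */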
   2210 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2211 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2212 		sc->phy.release = wm_put_swflag_ich8lan;
   2213 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2214 		sc->nvm.release = wm_put_nvm_ich8lan;
   2215 		break;
   2216 	case WM_T_I210:
   2217 	case WM_T_I211:
    2218 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2219 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2220 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2221 			sc->nvm.read = wm_nvm_read_eerd;
   2222 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2224 			wm_nvm_set_addrbits_size_eecd(sc);
   2225 		} else {
   2226 			sc->nvm.read = wm_nvm_read_invm;
   2227 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2228 			sc->sc_nvm_wordsize = INVM_SIZE;
   2229 		}
   2230 		sc->phy.acquire = wm_get_phy_82575;
   2231 		sc->phy.release = wm_put_phy_82575;
   2232 		sc->nvm.acquire = wm_get_nvm_80003;
   2233 		sc->nvm.release = wm_put_nvm_80003;
   2234 		break;
   2235 	default:
   2236 		break;
   2237 	}
   2238 
   2239 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2240 	switch (sc->sc_type) {
   2241 	case WM_T_82571:
   2242 	case WM_T_82572:
   2243 		reg = CSR_READ(sc, WMREG_SWSM2);
   2244 		if ((reg & SWSM2_LOCK) == 0) {
   2245 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2246 			force_clear_smbi = true;
   2247 		} else
   2248 			force_clear_smbi = false;
   2249 		break;
   2250 	case WM_T_82573:
   2251 	case WM_T_82574:
   2252 	case WM_T_82583:
   2253 		force_clear_smbi = true;
   2254 		break;
   2255 	default:
   2256 		force_clear_smbi = false;
   2257 		break;
   2258 	}
   2259 	if (force_clear_smbi) {
   2260 		reg = CSR_READ(sc, WMREG_SWSM);
   2261 		if ((reg & SWSM_SMBI) != 0)
   2262 			aprint_error_dev(sc->sc_dev,
   2263 			    "Please update the Bootagent\n");
   2264 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2265 	}
   2266 
   2267 	/*
    2268 	 * Defer printing the EEPROM type until after verifying the checksum.
   2269 	 * This allows the EEPROM type to be printed correctly in the case
   2270 	 * that no EEPROM is attached.
   2271 	 */
   2272 	/*
   2273 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2274 	 * this for later, so we can fail future reads from the EEPROM.
   2275 	 */
   2276 	if (wm_nvm_validate_checksum(sc)) {
   2277 		/*
    2278 		 * Read again, because some PCI-e parts fail the
    2279 		 * first check due to the link being in a sleep state.
   2280 		 */
   2281 		if (wm_nvm_validate_checksum(sc))
   2282 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2283 	}
   2284 
   2285 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2286 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2287 	else {
   2288 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2289 		    sc->sc_nvm_wordsize);
   2290 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2291 			aprint_verbose("iNVM");
   2292 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2293 			aprint_verbose("FLASH(HW)");
   2294 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2295 			aprint_verbose("FLASH");
   2296 		else {
   2297 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2298 				eetype = "SPI";
   2299 			else
   2300 				eetype = "MicroWire";
   2301 			aprint_verbose("(%d address bits) %s EEPROM",
   2302 			    sc->sc_nvm_addrbits, eetype);
   2303 		}
   2304 	}
   2305 	wm_nvm_version(sc);
   2306 	aprint_verbose("\n");
   2307 
   2308 	/*
    2309 	 * XXX This is the first call of wm_gmii_setup_phytype(). The result
    2310 	 * might be incorrect.
   2311 	 */
   2312 	wm_gmii_setup_phytype(sc, 0, 0);
   2313 
   2314 	/* Reset the chip to a known state. */
   2315 	wm_reset(sc);
   2316 
   2317 	/* Check for I21[01] PLL workaround */
   2318 	if (sc->sc_type == WM_T_I210)
   2319 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2320 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2321 		/* NVM image release 3.25 has a workaround */
   2322 		if ((sc->sc_nvm_ver_major < 3)
   2323 		    || ((sc->sc_nvm_ver_major == 3)
   2324 			&& (sc->sc_nvm_ver_minor < 25))) {
   2325 			aprint_verbose_dev(sc->sc_dev,
   2326 			    "ROM image version %d.%d is older than 3.25\n",
   2327 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2328 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2329 		}
   2330 	}
   2331 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2332 		wm_pll_workaround_i210(sc);
   2333 
   2334 	wm_get_wakeup(sc);
   2335 
   2336 	/* Non-AMT based hardware can now take control from firmware */
   2337 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2338 		wm_get_hw_control(sc);
   2339 
   2340 	/*
   2341 	 * Read the Ethernet address from the EEPROM, if not first found
   2342 	 * in device properties.
   2343 	 */
   2344 	ea = prop_dictionary_get(dict, "mac-address");
   2345 	if (ea != NULL) {
   2346 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2347 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2348 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2349 	} else {
   2350 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2351 			aprint_error_dev(sc->sc_dev,
   2352 			    "unable to read Ethernet address\n");
   2353 			goto out;
   2354 		}
   2355 	}
   2356 
   2357 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2358 	    ether_sprintf(enaddr));
   2359 
   2360 	/*
   2361 	 * Read the config info from the EEPROM, and set up various
   2362 	 * bits in the control registers based on their contents.
   2363 	 */
   2364 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2365 	if (pn != NULL) {
   2366 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2367 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2368 	} else {
   2369 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2370 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2371 			goto out;
   2372 		}
   2373 	}
   2374 
   2375 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2376 	if (pn != NULL) {
   2377 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2378 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2379 	} else {
   2380 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2381 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2382 			goto out;
   2383 		}
   2384 	}
   2385 
   2386 	/* check for WM_F_WOL */
   2387 	switch (sc->sc_type) {
   2388 	case WM_T_82542_2_0:
   2389 	case WM_T_82542_2_1:
   2390 	case WM_T_82543:
   2391 		/* dummy? */
   2392 		eeprom_data = 0;
   2393 		apme_mask = NVM_CFG3_APME;
   2394 		break;
   2395 	case WM_T_82544:
   2396 		apme_mask = NVM_CFG2_82544_APM_EN;
   2397 		eeprom_data = cfg2;
   2398 		break;
   2399 	case WM_T_82546:
   2400 	case WM_T_82546_3:
   2401 	case WM_T_82571:
   2402 	case WM_T_82572:
   2403 	case WM_T_82573:
   2404 	case WM_T_82574:
   2405 	case WM_T_82583:
   2406 	case WM_T_80003:
   2407 	default:
   2408 		apme_mask = NVM_CFG3_APME;
   2409 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2410 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2411 		break;
   2412 	case WM_T_82575:
   2413 	case WM_T_82576:
   2414 	case WM_T_82580:
   2415 	case WM_T_I350:
   2416 	case WM_T_I354: /* XXX ok? */
   2417 	case WM_T_ICH8:
   2418 	case WM_T_ICH9:
   2419 	case WM_T_ICH10:
   2420 	case WM_T_PCH:
   2421 	case WM_T_PCH2:
   2422 	case WM_T_PCH_LPT:
   2423 	case WM_T_PCH_SPT:
   2424 		/* XXX The funcid should be checked on some devices */
   2425 		apme_mask = WUC_APME;
   2426 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2427 		break;
   2428 	}
   2429 
   2430 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2431 	if ((eeprom_data & apme_mask) != 0)
   2432 		sc->sc_flags |= WM_F_WOL;
   2433 
   2434 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2435 		/* Check NVM for autonegotiation */
   2436 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2437 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2438 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2439 		}
   2440 	}
   2441 
   2442 	/*
    2443 	 * XXX need special handling for some multiple-port cards
    2444 	 * to disable a particular port.
   2445 	 */
   2446 
   2447 	if (sc->sc_type >= WM_T_82544) {
   2448 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2449 		if (pn != NULL) {
   2450 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2451 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2452 		} else {
   2453 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2454 				aprint_error_dev(sc->sc_dev,
   2455 				    "unable to read SWDPIN\n");
   2456 				goto out;
   2457 			}
   2458 		}
   2459 	}
   2460 
   2461 	if (cfg1 & NVM_CFG1_ILOS)
   2462 		sc->sc_ctrl |= CTRL_ILOS;
   2463 
   2464 	/*
   2465 	 * XXX
    2466 	 * This code isn't correct, because pins 2 and 3 are located
    2467 	 * in different positions on newer chips. Check all datasheets.
    2468 	 *
    2469 	 * Until this is resolved, only do this on chips up to the 82580.
   2470 	 */
   2471 	if (sc->sc_type <= WM_T_82580) {
   2472 		if (sc->sc_type >= WM_T_82544) {
   2473 			sc->sc_ctrl |=
   2474 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2475 			    CTRL_SWDPIO_SHIFT;
   2476 			sc->sc_ctrl |=
   2477 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2478 			    CTRL_SWDPINS_SHIFT;
   2479 		} else {
   2480 			sc->sc_ctrl |=
   2481 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2482 			    CTRL_SWDPIO_SHIFT;
   2483 		}
   2484 	}
   2485 
   2486 	/* XXX For other than 82580? */
   2487 	if (sc->sc_type == WM_T_82580) {
   2488 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2489 		if (nvmword & __BIT(13))
   2490 			sc->sc_ctrl |= CTRL_ILOS;
   2491 	}
   2492 
   2493 #if 0
   2494 	if (sc->sc_type >= WM_T_82544) {
   2495 		if (cfg1 & NVM_CFG1_IPS0)
   2496 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2497 		if (cfg1 & NVM_CFG1_IPS1)
   2498 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2499 		sc->sc_ctrl_ext |=
   2500 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2501 		    CTRL_EXT_SWDPIO_SHIFT;
   2502 		sc->sc_ctrl_ext |=
   2503 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2504 		    CTRL_EXT_SWDPINS_SHIFT;
   2505 	} else {
   2506 		sc->sc_ctrl_ext |=
   2507 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2508 		    CTRL_EXT_SWDPIO_SHIFT;
   2509 	}
   2510 #endif
   2511 
   2512 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2513 #if 0
   2514 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2515 #endif
   2516 
   2517 	if (sc->sc_type == WM_T_PCH) {
   2518 		uint16_t val;
   2519 
   2520 		/* Save the NVM K1 bit setting */
   2521 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2522 
   2523 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2524 			sc->sc_nvm_k1_enabled = 1;
   2525 		else
   2526 			sc->sc_nvm_k1_enabled = 0;
   2527 	}
   2528 
   2529 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
   2530 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2531 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2532 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2533 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2534 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2535 		/* Copper only */
   2536 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2537 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2538 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2539 	    || (sc->sc_type == WM_T_I211)) {
   2540 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2541 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2542 		switch (link_mode) {
   2543 		case CTRL_EXT_LINK_MODE_1000KX:
   2544 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2545 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2546 			break;
   2547 		case CTRL_EXT_LINK_MODE_SGMII:
   2548 			if (wm_sgmii_uses_mdio(sc)) {
   2549 				aprint_verbose_dev(sc->sc_dev,
   2550 				    "SGMII(MDIO)\n");
   2551 				sc->sc_flags |= WM_F_SGMII;
   2552 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2553 				break;
   2554 			}
   2555 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2556 			/*FALLTHROUGH*/
   2557 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2558 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2559 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2560 				if (link_mode
   2561 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2562 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2563 					sc->sc_flags |= WM_F_SGMII;
   2564 				} else {
   2565 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2566 					aprint_verbose_dev(sc->sc_dev,
   2567 					    "SERDES\n");
   2568 				}
   2569 				break;
   2570 			}
   2571 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2572 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2573 
   2574 			/* Change current link mode setting */
   2575 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2576 			switch (sc->sc_mediatype) {
   2577 			case WM_MEDIATYPE_COPPER:
   2578 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2579 				break;
   2580 			case WM_MEDIATYPE_SERDES:
   2581 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2582 				break;
   2583 			default:
   2584 				break;
   2585 			}
   2586 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2587 			break;
   2588 		case CTRL_EXT_LINK_MODE_GMII:
   2589 		default:
   2590 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2591 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2592 			break;
   2593 		}
   2594 
   2595 		reg &= ~CTRL_EXT_I2C_ENA;
   2596 		if ((sc->sc_flags & WM_F_SGMII) != 0)
   2597 			reg |= CTRL_EXT_I2C_ENA;
   2598 		else
   2599 			reg &= ~CTRL_EXT_I2C_ENA;
   2600 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2601 	} else if (sc->sc_type < WM_T_82543 ||
   2602 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2603 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2604 			aprint_error_dev(sc->sc_dev,
   2605 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2606 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2607 		}
   2608 	} else {
   2609 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2610 			aprint_error_dev(sc->sc_dev,
   2611 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2612 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2613 		}
   2614 	}
   2615 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2616 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2617 
   2618 	/* Set device properties (macflags) */
   2619 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2620 
   2621 	/* Initialize the media structures accordingly. */
   2622 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2623 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2624 	else
   2625 		wm_tbi_mediainit(sc); /* All others */
   2626 
   2627 	ifp = &sc->sc_ethercom.ec_if;
   2628 	xname = device_xname(sc->sc_dev);
   2629 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2630 	ifp->if_softc = sc;
   2631 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2632 #ifdef WM_MPSAFE
   2633 	ifp->if_extflags = IFEF_START_MPSAFE;
   2634 #endif
   2635 	ifp->if_ioctl = wm_ioctl;
   2636 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2637 		ifp->if_start = wm_nq_start;
   2638 		/*
    2639 		 * When the number of CPUs is one and the controller can use
    2640 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2641 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2642 		 * the other for link status changes.
   2643 		 * In this situation, wm_nq_transmit() is disadvantageous
   2644 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2645 		 */
   2646 		if (wm_is_using_multiqueue(sc))
   2647 			ifp->if_transmit = wm_nq_transmit;
   2648 	} else {
   2649 		ifp->if_start = wm_start;
   2650 		/*
    2651 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2652 		 */
   2653 		if (wm_is_using_multiqueue(sc))
   2654 			ifp->if_transmit = wm_transmit;
   2655 	}
   2656 	ifp->if_watchdog = wm_watchdog;
   2657 	ifp->if_init = wm_init;
   2658 	ifp->if_stop = wm_stop;
   2659 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2660 	IFQ_SET_READY(&ifp->if_snd);
   2661 
   2662 	/* Check for jumbo frame */
   2663 	switch (sc->sc_type) {
   2664 	case WM_T_82573:
   2665 		/* XXX limited to 9234 if ASPM is disabled */
   2666 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2667 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2668 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2669 		break;
   2670 	case WM_T_82571:
   2671 	case WM_T_82572:
   2672 	case WM_T_82574:
   2673 	case WM_T_82575:
   2674 	case WM_T_82576:
   2675 	case WM_T_82580:
   2676 	case WM_T_I350:
    2677 	case WM_T_I354: /* XXX ok? */
   2678 	case WM_T_I210:
   2679 	case WM_T_I211:
   2680 	case WM_T_80003:
   2681 	case WM_T_ICH9:
   2682 	case WM_T_ICH10:
   2683 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2684 	case WM_T_PCH_LPT:
   2685 	case WM_T_PCH_SPT:
   2686 		/* XXX limited to 9234 */
   2687 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2688 		break;
   2689 	case WM_T_PCH:
   2690 		/* XXX limited to 4096 */
   2691 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2692 		break;
   2693 	case WM_T_82542_2_0:
   2694 	case WM_T_82542_2_1:
   2695 	case WM_T_82583:
   2696 	case WM_T_ICH8:
   2697 		/* No support for jumbo frame */
   2698 		break;
   2699 	default:
   2700 		/* ETHER_MAX_LEN_JUMBO */
   2701 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2702 		break;
   2703 	}
   2704 
    2705 	/* If we're an i82543 or greater, we can support VLANs. */
   2706 	if (sc->sc_type >= WM_T_82543)
   2707 		sc->sc_ethercom.ec_capabilities |=
   2708 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2709 
   2710 	/*
    2711 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2712 	 * on i82543 and later.
   2713 	 */
   2714 	if (sc->sc_type >= WM_T_82543) {
   2715 		ifp->if_capabilities |=
   2716 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2717 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2718 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2719 		    IFCAP_CSUM_TCPv6_Tx |
   2720 		    IFCAP_CSUM_UDPv6_Tx;
   2721 	}
   2722 
   2723 	/*
   2724 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2725 	 *
   2726 	 *	82541GI (8086:1076) ... no
   2727 	 *	82572EI (8086:10b9) ... yes
   2728 	 */
   2729 	if (sc->sc_type >= WM_T_82571) {
   2730 		ifp->if_capabilities |=
   2731 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2732 	}
   2733 
   2734 	/*
    2735 	 * If we're an i82544 or greater (except i82547), we can do
   2736 	 * TCP segmentation offload.
   2737 	 */
   2738 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2739 		ifp->if_capabilities |= IFCAP_TSOv4;
   2740 	}
   2741 
   2742 	if (sc->sc_type >= WM_T_82571) {
   2743 		ifp->if_capabilities |= IFCAP_TSOv6;
   2744 	}
   2745 
   2746 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2747 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2748 
   2749 #ifdef WM_MPSAFE
   2750 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2751 #else
   2752 	sc->sc_core_lock = NULL;
   2753 #endif
   2754 
   2755 	/* Attach the interface. */
   2756 	if_initialize(ifp);
   2757 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2758 	ether_ifattach(ifp, enaddr);
   2759 	if_register(ifp);
   2760 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2761 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2762 			  RND_FLAG_DEFAULT);
   2763 
   2764 #ifdef WM_EVENT_COUNTERS
   2765 	/* Attach event counters. */
   2766 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2767 	    NULL, xname, "linkintr");
   2768 
   2769 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2770 	    NULL, xname, "tx_xoff");
   2771 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2772 	    NULL, xname, "tx_xon");
   2773 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2774 	    NULL, xname, "rx_xoff");
   2775 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2776 	    NULL, xname, "rx_xon");
   2777 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2778 	    NULL, xname, "rx_macctl");
   2779 #endif /* WM_EVENT_COUNTERS */
   2780 
   2781 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2782 		pmf_class_network_register(self, ifp);
   2783 	else
   2784 		aprint_error_dev(self, "couldn't establish power handler\n");
   2785 
   2786 	sc->sc_flags |= WM_F_ATTACHED;
   2787  out:
   2788 	return;
   2789 }
   2790 
   2791 /* The detach function (ca_detach) */
   2792 static int
   2793 wm_detach(device_t self, int flags __unused)
   2794 {
   2795 	struct wm_softc *sc = device_private(self);
   2796 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2797 	int i;
   2798 
   2799 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2800 		return 0;
   2801 
    2802 	/* Stop the interface; callouts are stopped inside it. */
   2803 	wm_stop(ifp, 1);
   2804 
   2805 	pmf_device_deregister(self);
   2806 
   2807 #ifdef WM_EVENT_COUNTERS
   2808 	evcnt_detach(&sc->sc_ev_linkintr);
   2809 
   2810 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2811 	evcnt_detach(&sc->sc_ev_tx_xon);
   2812 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2813 	evcnt_detach(&sc->sc_ev_rx_xon);
   2814 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2815 #endif /* WM_EVENT_COUNTERS */
   2816 
   2817 	/* Tell the firmware about the release */
   2818 	WM_CORE_LOCK(sc);
   2819 	wm_release_manageability(sc);
   2820 	wm_release_hw_control(sc);
   2821 	wm_enable_wakeup(sc);
   2822 	WM_CORE_UNLOCK(sc);
   2823 
   2824 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2825 
   2826 	/* Delete all remaining media. */
   2827 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2828 
   2829 	ether_ifdetach(ifp);
   2830 	if_detach(ifp);
   2831 	if_percpuq_destroy(sc->sc_ipq);
   2832 
   2833 	/* Unload RX dmamaps and free mbufs */
   2834 	for (i = 0; i < sc->sc_nqueues; i++) {
   2835 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2836 		mutex_enter(rxq->rxq_lock);
   2837 		wm_rxdrain(rxq);
   2838 		mutex_exit(rxq->rxq_lock);
   2839 	}
    2840 	/* The rxq locks must not be held when the queues are freed below */
   2841 
   2842 	/* Disestablish the interrupt handler */
   2843 	for (i = 0; i < sc->sc_nintrs; i++) {
   2844 		if (sc->sc_ihs[i] != NULL) {
   2845 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2846 			sc->sc_ihs[i] = NULL;
   2847 		}
   2848 	}
   2849 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2850 
   2851 	wm_free_txrx_queues(sc);
   2852 
   2853 	/* Unmap the registers */
   2854 	if (sc->sc_ss) {
   2855 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2856 		sc->sc_ss = 0;
   2857 	}
   2858 	if (sc->sc_ios) {
   2859 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2860 		sc->sc_ios = 0;
   2861 	}
   2862 	if (sc->sc_flashs) {
   2863 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2864 		sc->sc_flashs = 0;
   2865 	}
   2866 
   2867 	if (sc->sc_core_lock)
   2868 		mutex_obj_free(sc->sc_core_lock);
   2869 	if (sc->sc_ich_phymtx)
   2870 		mutex_obj_free(sc->sc_ich_phymtx);
   2871 	if (sc->sc_ich_nvmmtx)
   2872 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2873 
   2874 	return 0;
   2875 }
   2876 
   2877 static bool
   2878 wm_suspend(device_t self, const pmf_qual_t *qual)
   2879 {
   2880 	struct wm_softc *sc = device_private(self);
   2881 
   2882 	wm_release_manageability(sc);
   2883 	wm_release_hw_control(sc);
   2884 	wm_enable_wakeup(sc);
   2885 
   2886 	return true;
   2887 }
   2888 
   2889 static bool
   2890 wm_resume(device_t self, const pmf_qual_t *qual)
   2891 {
   2892 	struct wm_softc *sc = device_private(self);
   2893 
   2894 	wm_init_manageability(sc);
   2895 
   2896 	return true;
   2897 }
   2898 
   2899 /*
   2900  * wm_watchdog:		[ifnet interface function]
   2901  *
   2902  *	Watchdog timer handler.
   2903  */
   2904 static void
   2905 wm_watchdog(struct ifnet *ifp)
   2906 {
   2907 	int qid;
   2908 	struct wm_softc *sc = ifp->if_softc;
   2909 
   2910 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2911 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2912 
   2913 		wm_watchdog_txq(ifp, txq);
   2914 	}
   2915 
   2916 	/* Reset the interface. */
   2917 	(void) wm_init(ifp);
   2918 
   2919 	/*
    2920 	 * Some upper layer processing still calls ifp->if_start()
    2921 	 * directly, e.g. ALTQ or single CPU systems.
   2922 	 */
   2923 	/* Try to get more packets going. */
   2924 	ifp->if_start(ifp);
   2925 }
   2926 
   2927 static void
   2928 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2929 {
   2930 	struct wm_softc *sc = ifp->if_softc;
   2931 
   2932 	/*
   2933 	 * Since we're using delayed interrupts, sweep up
   2934 	 * before we report an error.
   2935 	 */
   2936 	mutex_enter(txq->txq_lock);
   2937 	wm_txeof(sc, txq);
   2938 	mutex_exit(txq->txq_lock);
   2939 
   2940 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2941 #ifdef WM_DEBUG
   2942 		int i, j;
   2943 		struct wm_txsoft *txs;
   2944 #endif
   2945 		log(LOG_ERR,
   2946 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2947 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2948 		    txq->txq_next);
   2949 		ifp->if_oerrors++;
   2950 #ifdef WM_DEBUG
    2951 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2952 		    i = WM_NEXTTXS(txq, i)) {
    2953 			txs = &txq->txq_soft[i];
    2954 			printf("txs %d tx %d -> %d\n",
    2955 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2956 			for (j = txs->txs_firstdesc; ;
    2957 			    j = WM_NEXTTX(txq, j)) {
    2958 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2959 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2960 				printf("\t %#08x%08x\n",
    2961 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2962 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2963 				if (j == txs->txs_lastdesc)
    2964 					break;
    2965 			}
    2966 		}
   2967 #endif
   2968 	}
   2969 }
   2970 
   2971 /*
   2972  * wm_tick:
   2973  *
   2974  *	One second timer, used to check link status, sweep up
   2975  *	completed transmit jobs, etc.
   2976  */
   2977 static void
   2978 wm_tick(void *arg)
   2979 {
   2980 	struct wm_softc *sc = arg;
   2981 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2982 #ifndef WM_MPSAFE
   2983 	int s = splnet();
   2984 #endif
   2985 
   2986 	WM_CORE_LOCK(sc);
   2987 
   2988 	if (sc->sc_core_stopping)
   2989 		goto out;
   2990 
   2991 	if (sc->sc_type >= WM_T_82542_2_1) {
   2992 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2993 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2994 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2995 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2996 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2997 	}
   2998 
   2999 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3000 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3001 	    + CSR_READ(sc, WMREG_CRCERRS)
   3002 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3003 	    + CSR_READ(sc, WMREG_SYMERRC)
   3004 	    + CSR_READ(sc, WMREG_RXERRC)
   3005 	    + CSR_READ(sc, WMREG_SEC)
   3006 	    + CSR_READ(sc, WMREG_CEXTERR)
   3007 	    + CSR_READ(sc, WMREG_RLEC);
    3008 	/*
    3009 	 * WMREG_RNBC is incremented when there are no available buffers
    3010 	 * in host memory. It does not count dropped packets, because the
    3011 	 * ethernet controller can still receive packets in that case if
    3012 	 * there is space in the PHY's FIFO.
    3013 	 *
    3014 	 * If you want to count WMREG_RNBC events, use a dedicated EVCNT
    3015 	 * instead of if_iqdrops.
    3016 	 */
   3017 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3018 
   3019 	if (sc->sc_flags & WM_F_HAS_MII)
   3020 		mii_tick(&sc->sc_mii);
   3021 	else if ((sc->sc_type >= WM_T_82575)
   3022 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3023 		wm_serdes_tick(sc);
   3024 	else
   3025 		wm_tbi_tick(sc);
   3026 
   3027 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3028 out:
   3029 	WM_CORE_UNLOCK(sc);
   3030 #ifndef WM_MPSAFE
   3031 	splx(s);
   3032 #endif
   3033 }
   3034 
   3035 static int
   3036 wm_ifflags_cb(struct ethercom *ec)
   3037 {
   3038 	struct ifnet *ifp = &ec->ec_if;
   3039 	struct wm_softc *sc = ifp->if_softc;
   3040 	int rc = 0;
   3041 
   3042 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3043 		device_xname(sc->sc_dev), __func__));
   3044 
   3045 	WM_CORE_LOCK(sc);
   3046 
   3047 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3048 	sc->sc_if_flags = ifp->if_flags;
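         	/*
         	 * The XOR above isolates exactly the flag bits that changed.
         	 * E.g. if only IFF_PROMISC was toggled (via ifpromisc(9)),
         	 * change == IFF_PROMISC, which is masked out of the ENETRESET
         	 * test below, so only the receive filter is reprogrammed.
         	 */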
   3049 
   3050 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3051 		rc = ENETRESET;
   3052 		goto out;
   3053 	}
   3054 
   3055 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3056 		wm_set_filter(sc);
   3057 
   3058 	wm_set_vlan(sc);
   3059 
   3060 out:
   3061 	WM_CORE_UNLOCK(sc);
   3062 
   3063 	return rc;
   3064 }
   3065 
   3066 /*
   3067  * wm_ioctl:		[ifnet interface function]
   3068  *
   3069  *	Handle control requests from the operator.
   3070  */
   3071 static int
   3072 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3073 {
   3074 	struct wm_softc *sc = ifp->if_softc;
   3075 	struct ifreq *ifr = (struct ifreq *) data;
   3076 	struct ifaddr *ifa = (struct ifaddr *)data;
   3077 	struct sockaddr_dl *sdl;
   3078 	int s, error;
   3079 
   3080 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3081 		device_xname(sc->sc_dev), __func__));
   3082 
   3083 #ifndef WM_MPSAFE
   3084 	s = splnet();
   3085 #endif
   3086 	switch (cmd) {
   3087 	case SIOCSIFMEDIA:
   3088 	case SIOCGIFMEDIA:
   3089 		WM_CORE_LOCK(sc);
   3090 		/* Flow control requires full-duplex mode. */
   3091 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3092 		    (ifr->ifr_media & IFM_FDX) == 0)
   3093 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3094 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3095 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3096 				/* We can do both TXPAUSE and RXPAUSE. */
   3097 				ifr->ifr_media |=
   3098 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3099 			}
   3100 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3101 		}
   3102 		WM_CORE_UNLOCK(sc);
   3103 #ifdef WM_MPSAFE
   3104 		s = splnet();
   3105 #endif
   3106 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3107 #ifdef WM_MPSAFE
   3108 		splx(s);
   3109 #endif
   3110 		break;
   3111 	case SIOCINITIFADDR:
   3112 		WM_CORE_LOCK(sc);
   3113 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3114 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3115 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3116 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    3117 			/* The unicast address is the first receive filter entry */
   3118 			wm_set_filter(sc);
   3119 			error = 0;
   3120 			WM_CORE_UNLOCK(sc);
   3121 			break;
   3122 		}
   3123 		WM_CORE_UNLOCK(sc);
   3124 		/*FALLTHROUGH*/
   3125 	default:
   3126 #ifdef WM_MPSAFE
   3127 		s = splnet();
   3128 #endif
    3129 		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3130 		error = ether_ioctl(ifp, cmd, data);
   3131 #ifdef WM_MPSAFE
   3132 		splx(s);
   3133 #endif
   3134 		if (error != ENETRESET)
   3135 			break;
   3136 
   3137 		error = 0;
   3138 
   3139 		if (cmd == SIOCSIFCAP) {
   3140 			error = (*ifp->if_init)(ifp);
   3141 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3142 			;
   3143 		else if (ifp->if_flags & IFF_RUNNING) {
   3144 			/*
   3145 			 * Multicast list has changed; set the hardware filter
   3146 			 * accordingly.
   3147 			 */
   3148 			WM_CORE_LOCK(sc);
   3149 			wm_set_filter(sc);
   3150 			WM_CORE_UNLOCK(sc);
   3151 		}
   3152 		break;
   3153 	}
   3154 
   3155 #ifndef WM_MPSAFE
   3156 	splx(s);
   3157 #endif
   3158 	return error;
   3159 }
   3160 
   3161 /* MAC address related */
   3162 
   3163 /*
    3164  * Get the offset of the MAC address and return it.
    3165  * If an error occurs, offset 0 is used.
   3166  */
   3167 static uint16_t
   3168 wm_check_alt_mac_addr(struct wm_softc *sc)
   3169 {
   3170 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3171 	uint16_t offset = NVM_OFF_MACADDR;
   3172 
   3173 	/* Try to read alternative MAC address pointer */
   3174 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3175 		return 0;
   3176 
    3177 	/* Check whether the pointer is valid. */
   3178 	if ((offset == 0x0000) || (offset == 0xffff))
   3179 		return 0;
   3180 
   3181 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3182 	/*
    3183 	 * Check whether the alternative MAC address is valid.
    3184 	 * Some cards have a non-0xffff pointer but don't actually
    3185 	 * use an alternative MAC address.
    3186 	 *
    3187 	 * A valid unicast address must have the multicast bit clear.
    3188 	 */
   3189 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3190 		if (((myea[0] & 0xff) & 0x01) == 0)
   3191 			return offset; /* Found */
   3192 
   3193 	/* Not found */
   3194 	return 0;
   3195 }
   3196 
   3197 static int
   3198 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3199 {
   3200 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3201 	uint16_t offset = NVM_OFF_MACADDR;
   3202 	int do_invert = 0;
   3203 
   3204 	switch (sc->sc_type) {
   3205 	case WM_T_82580:
   3206 	case WM_T_I350:
   3207 	case WM_T_I354:
   3208 		/* EEPROM Top Level Partitioning */
   3209 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3210 		break;
   3211 	case WM_T_82571:
   3212 	case WM_T_82575:
   3213 	case WM_T_82576:
   3214 	case WM_T_80003:
   3215 	case WM_T_I210:
   3216 	case WM_T_I211:
   3217 		offset = wm_check_alt_mac_addr(sc);
   3218 		if (offset == 0)
   3219 			if ((sc->sc_funcid & 0x01) == 1)
   3220 				do_invert = 1;
   3221 		break;
   3222 	default:
   3223 		if ((sc->sc_funcid & 0x01) == 1)
   3224 			do_invert = 1;
   3225 		break;
   3226 	}
   3227 
   3228 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3229 		goto bad;
   3230 
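         	/*
         	 * Each 16-bit NVM word holds two address octets, low byte
         	 * first. For illustration, myea[] = { 0x3412, 0x7856, 0xbc9a }
         	 * unpacks to 12:34:56:78:9a:bc (and do_invert below would
         	 * yield ...:bd on the second port).
         	 */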
   3231 	enaddr[0] = myea[0] & 0xff;
   3232 	enaddr[1] = myea[0] >> 8;
   3233 	enaddr[2] = myea[1] & 0xff;
   3234 	enaddr[3] = myea[1] >> 8;
   3235 	enaddr[4] = myea[2] & 0xff;
   3236 	enaddr[5] = myea[2] >> 8;
   3237 
   3238 	/*
   3239 	 * Toggle the LSB of the MAC address on the second port
   3240 	 * of some dual port cards.
   3241 	 */
   3242 	if (do_invert != 0)
   3243 		enaddr[5] ^= 1;
   3244 
   3245 	return 0;
   3246 
   3247  bad:
   3248 	return -1;
   3249 }
   3250 
   3251 /*
   3252  * wm_set_ral:
   3253  *
    3254  *	Set an entry in the receive address list.
   3255  */
   3256 static void
   3257 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3258 {
   3259 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3260 	uint32_t wlock_mac;
   3261 	int rv;
   3262 
   3263 	if (enaddr != NULL) {
   3264 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3265 		    (enaddr[3] << 24);
   3266 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3267 		ral_hi |= RAL_AV;
   3268 	} else {
   3269 		ral_lo = 0;
   3270 		ral_hi = 0;
   3271 	}
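         	/*
         	 * For illustration: 00:11:22:33:44:55 packs as
         	 * ral_lo == 0x33221100 and ral_hi == RAL_AV | 0x5544,
         	 * where RAL_AV marks the entry as valid.
         	 */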
   3272 
   3273 	switch (sc->sc_type) {
   3274 	case WM_T_82542_2_0:
   3275 	case WM_T_82542_2_1:
   3276 	case WM_T_82543:
   3277 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3278 		CSR_WRITE_FLUSH(sc);
   3279 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3280 		CSR_WRITE_FLUSH(sc);
   3281 		break;
   3282 	case WM_T_PCH2:
   3283 	case WM_T_PCH_LPT:
   3284 	case WM_T_PCH_SPT:
   3285 		if (idx == 0) {
   3286 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3287 			CSR_WRITE_FLUSH(sc);
   3288 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3289 			CSR_WRITE_FLUSH(sc);
   3290 			return;
   3291 		}
   3292 		if (sc->sc_type != WM_T_PCH2) {
   3293 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3294 			    FWSM_WLOCK_MAC);
   3295 			addrl = WMREG_SHRAL(idx - 1);
   3296 			addrh = WMREG_SHRAH(idx - 1);
   3297 		} else {
   3298 			wlock_mac = 0;
   3299 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3300 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3301 		}
   3302 
   3303 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3304 			rv = wm_get_swflag_ich8lan(sc);
   3305 			if (rv != 0)
   3306 				return;
   3307 			CSR_WRITE(sc, addrl, ral_lo);
   3308 			CSR_WRITE_FLUSH(sc);
   3309 			CSR_WRITE(sc, addrh, ral_hi);
   3310 			CSR_WRITE_FLUSH(sc);
   3311 			wm_put_swflag_ich8lan(sc);
   3312 		}
   3313 
   3314 		break;
   3315 	default:
   3316 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3317 		CSR_WRITE_FLUSH(sc);
   3318 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3319 		CSR_WRITE_FLUSH(sc);
   3320 		break;
   3321 	}
   3322 }
   3323 
   3324 /*
   3325  * wm_mchash:
   3326  *
    3327  *	Compute the hash of the multicast address for the 4096-bit
    3328  *	multicast filter (1024-bit on the ICH/PCH variants).
   3329  */
   3330 static uint32_t
   3331 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3332 {
   3333 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3334 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3335 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3336 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3337 	uint32_t hash;
   3338 
   3339 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3340 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3341 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3342 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3343 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3344 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3345 		return (hash & 0x3ff);
   3346 	}
   3347 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3348 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
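         	/*
         	 * Worked example for the default filter type 0: for the
         	 * all-hosts multicast address 01:00:5e:00:00:01,
         	 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010, which selects
         	 * bit 0x10 of MTA word 0 in wm_set_filter() below.
         	 */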
   3349 
   3350 	return (hash & 0xfff);
   3351 }
   3352 
   3353 /*
   3354  * wm_set_filter:
   3355  *
   3356  *	Set up the receive filter.
   3357  */
   3358 static void
   3359 wm_set_filter(struct wm_softc *sc)
   3360 {
   3361 	struct ethercom *ec = &sc->sc_ethercom;
   3362 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3363 	struct ether_multi *enm;
   3364 	struct ether_multistep step;
   3365 	bus_addr_t mta_reg;
   3366 	uint32_t hash, reg, bit;
   3367 	int i, size, ralmax;
   3368 
   3369 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3370 		device_xname(sc->sc_dev), __func__));
   3371 
   3372 	if (sc->sc_type >= WM_T_82544)
   3373 		mta_reg = WMREG_CORDOVA_MTA;
   3374 	else
   3375 		mta_reg = WMREG_MTA;
   3376 
   3377 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3378 
   3379 	if (ifp->if_flags & IFF_BROADCAST)
   3380 		sc->sc_rctl |= RCTL_BAM;
   3381 	if (ifp->if_flags & IFF_PROMISC) {
   3382 		sc->sc_rctl |= RCTL_UPE;
   3383 		goto allmulti;
   3384 	}
   3385 
   3386 	/*
   3387 	 * Set the station address in the first RAL slot, and
   3388 	 * clear the remaining slots.
   3389 	 */
   3390 	if (sc->sc_type == WM_T_ICH8)
    3391 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3392 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3393 	    || (sc->sc_type == WM_T_PCH))
   3394 		size = WM_RAL_TABSIZE_ICH8;
   3395 	else if (sc->sc_type == WM_T_PCH2)
   3396 		size = WM_RAL_TABSIZE_PCH2;
    3397 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3398 		size = WM_RAL_TABSIZE_PCH_LPT;
   3399 	else if (sc->sc_type == WM_T_82575)
   3400 		size = WM_RAL_TABSIZE_82575;
   3401 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3402 		size = WM_RAL_TABSIZE_82576;
   3403 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3404 		size = WM_RAL_TABSIZE_I350;
   3405 	else
   3406 		size = WM_RAL_TABSIZE;
   3407 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3408 
   3409 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3410 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3411 		switch (i) {
   3412 		case 0:
   3413 			/* We can use all entries */
   3414 			ralmax = size;
   3415 			break;
   3416 		case 1:
   3417 			/* Only RAR[0] */
   3418 			ralmax = 1;
   3419 			break;
   3420 		default:
   3421 			/* available SHRA + RAR[0] */
   3422 			ralmax = i + 1;
   3423 		}
   3424 	} else
   3425 		ralmax = size;
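         	/*
         	 * E.g. a FWSM_WLOCK_MAC value of 3 gives ralmax == 4:
         	 * RAR[0] plus three shared (SHRA) entries; locked entries
         	 * above that are left untouched by the loop below.
         	 */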
   3426 	for (i = 1; i < size; i++) {
   3427 		if (i < ralmax)
   3428 			wm_set_ral(sc, NULL, i);
   3429 	}
   3430 
   3431 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3432 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3433 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3434 	    || (sc->sc_type == WM_T_PCH_SPT))
   3435 		size = WM_ICH8_MC_TABSIZE;
   3436 	else
   3437 		size = WM_MC_TABSIZE;
   3438 	/* Clear out the multicast table. */
   3439 	for (i = 0; i < size; i++) {
   3440 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3441 		CSR_WRITE_FLUSH(sc);
   3442 	}
   3443 
   3444 	ETHER_LOCK(ec);
   3445 	ETHER_FIRST_MULTI(step, ec, enm);
   3446 	while (enm != NULL) {
   3447 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3448 			ETHER_UNLOCK(ec);
   3449 			/*
   3450 			 * We must listen to a range of multicast addresses.
   3451 			 * For now, just accept all multicasts, rather than
   3452 			 * trying to set only those filter bits needed to match
   3453 			 * the range.  (At this time, the only use of address
   3454 			 * ranges is for IP multicast routing, for which the
   3455 			 * range is big enough to require all bits set.)
   3456 			 */
   3457 			goto allmulti;
   3458 		}
   3459 
   3460 		hash = wm_mchash(sc, enm->enm_addrlo);
   3461 
   3462 		reg = (hash >> 5);
   3463 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3464 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3465 		    || (sc->sc_type == WM_T_PCH2)
   3466 		    || (sc->sc_type == WM_T_PCH_LPT)
   3467 		    || (sc->sc_type == WM_T_PCH_SPT))
   3468 			reg &= 0x1f;
   3469 		else
   3470 			reg &= 0x7f;
   3471 		bit = hash & 0x1f;
   3472 
   3473 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3474 		hash |= 1U << bit;
   3475 
   3476 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3477 			/*
   3478 			 * 82544 Errata 9: Certain register cannot be written
   3479 			 * with particular alignments in PCI-X bus operation
   3480 			 * (FCAH, MTA and VFTA).
   3481 			 */
   3482 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3483 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3484 			CSR_WRITE_FLUSH(sc);
   3485 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3486 			CSR_WRITE_FLUSH(sc);
   3487 		} else {
   3488 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3489 			CSR_WRITE_FLUSH(sc);
   3490 		}
   3491 
   3492 		ETHER_NEXT_MULTI(step, enm);
   3493 	}
   3494 	ETHER_UNLOCK(ec);
   3495 
   3496 	ifp->if_flags &= ~IFF_ALLMULTI;
   3497 	goto setit;
   3498 
   3499  allmulti:
   3500 	ifp->if_flags |= IFF_ALLMULTI;
   3501 	sc->sc_rctl |= RCTL_MPE;
   3502 
   3503  setit:
   3504 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3505 }
   3506 
   3507 /* Reset and init related */
   3508 
   3509 static void
   3510 wm_set_vlan(struct wm_softc *sc)
   3511 {
   3512 
   3513 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3514 		device_xname(sc->sc_dev), __func__));
   3515 
   3516 	/* Deal with VLAN enables. */
   3517 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3518 		sc->sc_ctrl |= CTRL_VME;
   3519 	else
   3520 		sc->sc_ctrl &= ~CTRL_VME;
   3521 
   3522 	/* Write the control registers. */
   3523 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3524 }
   3525 
   3526 static void
   3527 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3528 {
   3529 	uint32_t gcr;
   3530 	pcireg_t ctrl2;
   3531 
   3532 	gcr = CSR_READ(sc, WMREG_GCR);
   3533 
    3534 	/* Only take action if the timeout value is at its default of 0 */
   3535 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3536 		goto out;
   3537 
   3538 	if ((gcr & GCR_CAP_VER2) == 0) {
   3539 		gcr |= GCR_CMPL_TMOUT_10MS;
   3540 		goto out;
   3541 	}
   3542 
   3543 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3544 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3545 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3546 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3547 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3548 
   3549 out:
   3550 	/* Disable completion timeout resend */
   3551 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3552 
   3553 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3554 }
   3555 
   3556 void
   3557 wm_get_auto_rd_done(struct wm_softc *sc)
   3558 {
   3559 	int i;
   3560 
    3561 	/* Wait for eeprom to reload */
   3562 	switch (sc->sc_type) {
   3563 	case WM_T_82571:
   3564 	case WM_T_82572:
   3565 	case WM_T_82573:
   3566 	case WM_T_82574:
   3567 	case WM_T_82583:
   3568 	case WM_T_82575:
   3569 	case WM_T_82576:
   3570 	case WM_T_82580:
   3571 	case WM_T_I350:
   3572 	case WM_T_I354:
   3573 	case WM_T_I210:
   3574 	case WM_T_I211:
   3575 	case WM_T_80003:
   3576 	case WM_T_ICH8:
   3577 	case WM_T_ICH9:
   3578 		for (i = 0; i < 10; i++) {
   3579 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3580 				break;
   3581 			delay(1000);
   3582 		}
   3583 		if (i == 10) {
   3584 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3585 			    "complete\n", device_xname(sc->sc_dev));
   3586 		}
   3587 		break;
   3588 	default:
   3589 		break;
   3590 	}
   3591 }
   3592 
   3593 void
   3594 wm_lan_init_done(struct wm_softc *sc)
   3595 {
   3596 	uint32_t reg = 0;
   3597 	int i;
   3598 
   3599 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3600 		device_xname(sc->sc_dev), __func__));
   3601 
   3602 	/* Wait for eeprom to reload */
   3603 	switch (sc->sc_type) {
   3604 	case WM_T_ICH10:
   3605 	case WM_T_PCH:
   3606 	case WM_T_PCH2:
   3607 	case WM_T_PCH_LPT:
   3608 	case WM_T_PCH_SPT:
   3609 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3610 			reg = CSR_READ(sc, WMREG_STATUS);
   3611 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3612 				break;
   3613 			delay(100);
   3614 		}
   3615 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3616 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3617 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3618 		}
   3619 		break;
   3620 	default:
   3621 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3622 		    __func__);
   3623 		break;
   3624 	}
   3625 
   3626 	reg &= ~STATUS_LAN_INIT_DONE;
   3627 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3628 }
   3629 
   3630 void
   3631 wm_get_cfg_done(struct wm_softc *sc)
   3632 {
   3633 	int mask;
   3634 	uint32_t reg;
   3635 	int i;
   3636 
   3637 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3638 		device_xname(sc->sc_dev), __func__));
   3639 
   3640 	/* Wait for eeprom to reload */
   3641 	switch (sc->sc_type) {
   3642 	case WM_T_82542_2_0:
   3643 	case WM_T_82542_2_1:
   3644 		/* null */
   3645 		break;
   3646 	case WM_T_82543:
   3647 	case WM_T_82544:
   3648 	case WM_T_82540:
   3649 	case WM_T_82545:
   3650 	case WM_T_82545_3:
   3651 	case WM_T_82546:
   3652 	case WM_T_82546_3:
   3653 	case WM_T_82541:
   3654 	case WM_T_82541_2:
   3655 	case WM_T_82547:
   3656 	case WM_T_82547_2:
   3657 	case WM_T_82573:
   3658 	case WM_T_82574:
   3659 	case WM_T_82583:
   3660 		/* generic */
   3661 		delay(10*1000);
   3662 		break;
   3663 	case WM_T_80003:
   3664 	case WM_T_82571:
   3665 	case WM_T_82572:
   3666 	case WM_T_82575:
   3667 	case WM_T_82576:
   3668 	case WM_T_82580:
   3669 	case WM_T_I350:
   3670 	case WM_T_I354:
   3671 	case WM_T_I210:
   3672 	case WM_T_I211:
   3673 		if (sc->sc_type == WM_T_82571) {
    3674 			/* On 82571, all functions share port 0's CFGDONE bit */
   3675 			mask = EEMNGCTL_CFGDONE_0;
   3676 		} else
   3677 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
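         		/*
         		 * E.g. the LAN device at PCI function 1 polls
         		 * EEMNGCTL_CFGDONE_0 << 1, i.e. port 1's CFGDONE bit.
         		 */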
   3678 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3679 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3680 				break;
   3681 			delay(1000);
   3682 		}
   3683 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3684 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3685 				device_xname(sc->sc_dev), __func__));
   3686 		}
   3687 		break;
   3688 	case WM_T_ICH8:
   3689 	case WM_T_ICH9:
   3690 	case WM_T_ICH10:
   3691 	case WM_T_PCH:
   3692 	case WM_T_PCH2:
   3693 	case WM_T_PCH_LPT:
   3694 	case WM_T_PCH_SPT:
   3695 		delay(10*1000);
   3696 		if (sc->sc_type >= WM_T_ICH10)
   3697 			wm_lan_init_done(sc);
   3698 		else
   3699 			wm_get_auto_rd_done(sc);
   3700 
   3701 		reg = CSR_READ(sc, WMREG_STATUS);
   3702 		if ((reg & STATUS_PHYRA) != 0)
   3703 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3704 		break;
   3705 	default:
   3706 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3707 		    __func__);
   3708 		break;
   3709 	}
   3710 }
   3711 
   3712 void
   3713 wm_phy_post_reset(struct wm_softc *sc)
   3714 {
   3715 	uint32_t reg;
   3716 
   3717 	/* This function is only for ICH8 and newer. */
   3718 	if (sc->sc_type < WM_T_ICH8)
   3719 		return;
   3720 
   3721 	if (wm_phy_resetisblocked(sc)) {
   3722 		/* XXX */
   3723 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3724 		return;
   3725 	}
   3726 
   3727 	/* Allow time for h/w to get to quiescent state after reset */
   3728 	delay(10*1000);
   3729 
   3730 	/* Perform any necessary post-reset workarounds */
   3731 	if (sc->sc_type == WM_T_PCH)
   3732 		wm_hv_phy_workaround_ich8lan(sc);
   3733 	if (sc->sc_type == WM_T_PCH2)
   3734 		wm_lv_phy_workaround_ich8lan(sc);
   3735 
   3736 	/* Clear the host wakeup bit after lcd reset */
   3737 	if (sc->sc_type >= WM_T_PCH) {
   3738 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3739 		    BM_PORT_GEN_CFG);
   3740 		reg &= ~BM_WUC_HOST_WU_BIT;
   3741 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3742 		    BM_PORT_GEN_CFG, reg);
   3743 	}
   3744 
   3745 	/* Configure the LCD with the extended configuration region in NVM */
   3746 	wm_init_lcd_from_nvm(sc);
   3747 
   3748 	/* Configure the LCD with the OEM bits in NVM */
   3749 }
   3750 
   3751 /* Only for PCH and newer */
   3752 static void
   3753 wm_write_smbus_addr(struct wm_softc *sc)
   3754 {
   3755 	uint32_t strap, freq;
   3756 	uint32_t phy_data;
   3757 
   3758 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3759 		device_xname(sc->sc_dev), __func__));
   3760 
   3761 	strap = CSR_READ(sc, WMREG_STRAP);
   3762 	freq = __SHIFTOUT(strap, STRAP_FREQ);
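         	/*
         	 * __SHIFTOUT() extracts a bit field and right-justifies it,
         	 * e.g. __SHIFTOUT(0x30, __BITS(5, 4)) == 3.
         	 */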
   3763 
   3764 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3765 
   3766 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3767 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3768 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3769 
   3770 	if (sc->sc_phytype == WMPHY_I217) {
   3771 		/* Restore SMBus frequency */
    3772 		if (freq--) {
   3773 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3774 			    | HV_SMB_ADDR_FREQ_HIGH);
   3775 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3776 			    HV_SMB_ADDR_FREQ_LOW);
   3777 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3778 			    HV_SMB_ADDR_FREQ_HIGH);
   3779 		} else {
   3780 			DPRINTF(WM_DEBUG_INIT,
   3781 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3782 				device_xname(sc->sc_dev), __func__));
   3783 		}
   3784 	}
   3785 
   3786 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3787 }
   3788 
   3789 void
   3790 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3791 {
   3792 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3793 	uint16_t phy_page = 0;
   3794 
   3795 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3796 		device_xname(sc->sc_dev), __func__));
   3797 
   3798 	switch (sc->sc_type) {
   3799 	case WM_T_ICH8:
   3800 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3801 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3802 			return;
   3803 
   3804 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3805 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3806 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3807 			break;
   3808 		}
   3809 		/* FALLTHROUGH */
   3810 	case WM_T_PCH:
   3811 	case WM_T_PCH2:
   3812 	case WM_T_PCH_LPT:
   3813 	case WM_T_PCH_SPT:
   3814 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3815 		break;
   3816 	default:
   3817 		return;
   3818 	}
   3819 
   3820 	sc->phy.acquire(sc);
   3821 
   3822 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3823 	if ((reg & sw_cfg_mask) == 0)
   3824 		goto release;
   3825 
   3826 	/*
   3827 	 * Make sure HW does not configure LCD from PHY extended configuration
   3828 	 * before SW configuration
   3829 	 */
   3830 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3831 	if ((sc->sc_type < WM_T_PCH2)
   3832 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3833 		goto release;
   3834 
   3835 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3836 		device_xname(sc->sc_dev), __func__));
    3837 	/* The pointer is in DWORDs; convert it to an NVM word address */
   3838 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3839 
   3840 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3841 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3842 
   3843 	if (((sc->sc_type == WM_T_PCH)
   3844 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3845 	    || (sc->sc_type > WM_T_PCH)) {
   3846 		/*
   3847 		 * HW configures the SMBus address and LEDs when the OEM and
   3848 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3849 		 * are cleared, SW will configure them instead.
   3850 		 */
   3851 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3852 			device_xname(sc->sc_dev), __func__));
   3853 		wm_write_smbus_addr(sc);
   3854 
   3855 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3856 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3857 	}
   3858 
   3859 	/* Configure LCD from extended configuration region. */
   3860 	for (i = 0; i < cnf_size; i++) {
   3861 		uint16_t reg_data, reg_addr;
   3862 
   3863 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3864 			goto release;
   3865 
   3866 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3867 			goto release;
   3868 
   3869 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3870 			phy_page = reg_data;
   3871 
   3872 		reg_addr &= IGPHY_MAXREGADDR;
   3873 		reg_addr |= phy_page;
   3874 
   3875 		sc->phy.release(sc); /* XXX */
   3876 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3877 		sc->phy.acquire(sc); /* XXX */
   3878 	}
   3879 
   3880 release:
   3881 	sc->phy.release(sc);
   3882 	return;
   3883 }
   3884 
   3885 
   3886 /* Init hardware bits */
   3887 void
   3888 wm_initialize_hardware_bits(struct wm_softc *sc)
   3889 {
   3890 	uint32_t tarc0, tarc1, reg;
   3891 
   3892 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3893 		device_xname(sc->sc_dev), __func__));
   3894 
   3895 	/* For 82571 variant, 80003 and ICHs */
   3896 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3897 	    || (sc->sc_type >= WM_T_80003)) {
   3898 
   3899 		/* Transmit Descriptor Control 0 */
   3900 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3901 		reg |= TXDCTL_COUNT_DESC;
   3902 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3903 
   3904 		/* Transmit Descriptor Control 1 */
   3905 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3906 		reg |= TXDCTL_COUNT_DESC;
   3907 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3908 
   3909 		/* TARC0 */
   3910 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3911 		switch (sc->sc_type) {
   3912 		case WM_T_82571:
   3913 		case WM_T_82572:
   3914 		case WM_T_82573:
   3915 		case WM_T_82574:
   3916 		case WM_T_82583:
   3917 		case WM_T_80003:
   3918 			/* Clear bits 30..27 */
   3919 			tarc0 &= ~__BITS(30, 27);
   3920 			break;
   3921 		default:
   3922 			break;
   3923 		}
   3924 
   3925 		switch (sc->sc_type) {
   3926 		case WM_T_82571:
   3927 		case WM_T_82572:
   3928 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3929 
   3930 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3931 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3932 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3933 			/* 8257[12] Errata No.7 */
    3934 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3935 
   3936 			/* TARC1 bit 28 */
   3937 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3938 				tarc1 &= ~__BIT(28);
   3939 			else
   3940 				tarc1 |= __BIT(28);
   3941 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3942 
   3943 			/*
   3944 			 * 8257[12] Errata No.13
    3945 			 * Disable Dynamic Clock Gating.
   3946 			 */
   3947 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3948 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3949 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3950 			break;
   3951 		case WM_T_82573:
   3952 		case WM_T_82574:
   3953 		case WM_T_82583:
   3954 			if ((sc->sc_type == WM_T_82574)
   3955 			    || (sc->sc_type == WM_T_82583))
   3956 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3957 
   3958 			/* Extended Device Control */
   3959 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3960 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3961 			reg |= __BIT(22);	/* Set bit 22 */
   3962 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3963 
   3964 			/* Device Control */
   3965 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3966 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3967 
   3968 			/* PCIe Control Register */
   3969 			/*
   3970 			 * 82573 Errata (unknown).
   3971 			 *
   3972 			 * 82574 Errata 25 and 82583 Errata 12
   3973 			 * "Dropped Rx Packets":
    3974 			 *   NVM Image Version 2.1.4 and newer is free of this bug.
   3975 			 */
   3976 			reg = CSR_READ(sc, WMREG_GCR);
   3977 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3978 			CSR_WRITE(sc, WMREG_GCR, reg);
   3979 
   3980 			if ((sc->sc_type == WM_T_82574)
   3981 			    || (sc->sc_type == WM_T_82583)) {
   3982 				/*
   3983 				 * Document says this bit must be set for
   3984 				 * proper operation.
   3985 				 */
   3986 				reg = CSR_READ(sc, WMREG_GCR);
   3987 				reg |= __BIT(22);
   3988 				CSR_WRITE(sc, WMREG_GCR, reg);
   3989 
   3990 				/*
    3991 				 * Apply a workaround for a hardware erratum
    3992 				 * documented in the errata sheets: some
    3993 				 * error-prone or unreliable PCIe completions
    3994 				 * may occur, particularly with ASPM enabled.
    3995 				 * Without the fix, this can cause Tx
    3996 				 * timeouts.
   3997 				 */
   3998 				reg = CSR_READ(sc, WMREG_GCR2);
   3999 				reg |= __BIT(0);
   4000 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4001 			}
   4002 			break;
   4003 		case WM_T_80003:
   4004 			/* TARC0 */
   4005 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4006 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4007 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4008 
   4009 			/* TARC1 bit 28 */
   4010 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4011 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4012 				tarc1 &= ~__BIT(28);
   4013 			else
   4014 				tarc1 |= __BIT(28);
   4015 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4016 			break;
   4017 		case WM_T_ICH8:
   4018 		case WM_T_ICH9:
   4019 		case WM_T_ICH10:
   4020 		case WM_T_PCH:
   4021 		case WM_T_PCH2:
   4022 		case WM_T_PCH_LPT:
   4023 		case WM_T_PCH_SPT:
   4024 			/* TARC0 */
   4025 			if ((sc->sc_type == WM_T_ICH8)
   4026 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   4027 				/* Set TARC0 bits 29 and 28 */
   4028 				tarc0 |= __BITS(29, 28);
   4029 			}
   4030 			/* Set TARC0 bits 23,24,26,27 */
   4031 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4032 
   4033 			/* CTRL_EXT */
   4034 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4035 			reg |= __BIT(22);	/* Set bit 22 */
   4036 			/*
   4037 			 * Enable PHY low-power state when MAC is at D3
   4038 			 * w/o WoL
   4039 			 */
   4040 			if (sc->sc_type >= WM_T_PCH)
   4041 				reg |= CTRL_EXT_PHYPDEN;
   4042 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4043 
   4044 			/* TARC1 */
   4045 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4046 			/* bit 28 */
   4047 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4048 				tarc1 &= ~__BIT(28);
   4049 			else
   4050 				tarc1 |= __BIT(28);
   4051 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4052 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4053 
   4054 			/* Device Status */
   4055 			if (sc->sc_type == WM_T_ICH8) {
   4056 				reg = CSR_READ(sc, WMREG_STATUS);
   4057 				reg &= ~__BIT(31);
   4058 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4059 
   4060 			}
   4061 
   4062 			/* IOSFPC */
   4063 			if (sc->sc_type == WM_T_PCH_SPT) {
   4064 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4065 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4066 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4067 			}
   4068 			/*
    4069 			 * To work around a descriptor data corruption issue
    4070 			 * with NFSv2 UDP traffic, simply disable the NFS
    4071 			 * filtering capability.
   4072 			 */
   4073 			reg = CSR_READ(sc, WMREG_RFCTL);
   4074 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4075 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4076 			break;
   4077 		default:
   4078 			break;
   4079 		}
   4080 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4081 
   4082 		switch (sc->sc_type) {
   4083 		/*
   4084 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4085 		 * Avoid RSS Hash Value bug.
   4086 		 */
   4087 		case WM_T_82571:
   4088 		case WM_T_82572:
   4089 		case WM_T_82573:
   4090 		case WM_T_80003:
   4091 		case WM_T_ICH8:
   4092 			reg = CSR_READ(sc, WMREG_RFCTL);
    4093 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4094 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4095 			break;
   4096 		case WM_T_82574:
    4097 			/* Use extended Rx descriptors. */
   4098 			reg = CSR_READ(sc, WMREG_RFCTL);
   4099 			reg |= WMREG_RFCTL_EXSTEN;
   4100 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4101 			break;
   4102 		default:
   4103 			break;
   4104 		}
   4105 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4106 		/*
   4107 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4108 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4109 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4110 		 * Correctly by the Device"
   4111 		 *
   4112 		 * I354(C2000) Errata AVR53:
   4113 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4114 		 * Hang"
   4115 		 */
   4116 		reg = CSR_READ(sc, WMREG_RFCTL);
   4117 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4118 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4119 	}
   4120 }
   4121 
   4122 static uint32_t
   4123 wm_rxpbs_adjust_82580(uint32_t val)
   4124 {
   4125 	uint32_t rv = 0;
   4126 
   4127 	if (val < __arraycount(wm_82580_rxpbs_table))
   4128 		rv = wm_82580_rxpbs_table[val];
   4129 
   4130 	return rv;
   4131 }
   4132 
   4133 /*
   4134  * wm_reset_phy:
   4135  *
    4136  *	Generic PHY reset function.
   4137  *	Same as e1000_phy_hw_reset_generic()
   4138  */
   4139 static void
   4140 wm_reset_phy(struct wm_softc *sc)
   4141 {
   4142 	uint32_t reg;
   4143 
   4144 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4145 		device_xname(sc->sc_dev), __func__));
   4146 	if (wm_phy_resetisblocked(sc))
   4147 		return;
   4148 
   4149 	sc->phy.acquire(sc);
   4150 
   4151 	reg = CSR_READ(sc, WMREG_CTRL);
   4152 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4153 	CSR_WRITE_FLUSH(sc);
   4154 
   4155 	delay(sc->phy.reset_delay_us);
   4156 
   4157 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4158 	CSR_WRITE_FLUSH(sc);
   4159 
   4160 	delay(150);
   4161 
   4162 	sc->phy.release(sc);
   4163 
   4164 	wm_get_cfg_done(sc);
   4165 	wm_phy_post_reset(sc);
   4166 }
   4167 
   4168 static void
   4169 wm_flush_desc_rings(struct wm_softc *sc)
   4170 {
   4171 	pcireg_t preg;
   4172 	uint32_t reg;
   4173 	struct wm_txqueue *txq;
   4174 	wiseman_txdesc_t *txd;
   4175 	int nexttx;
   4176 	uint32_t rctl;
   4177 
   4178 	/* First, disable MULR fix in FEXTNVM11 */
   4179 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4180 	reg |= FEXTNVM11_DIS_MULRFIX;
   4181 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4182 
   4183 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4184 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4185 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4186 		return;
   4187 
   4188 	/* TX */
   4189 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4190 	    device_xname(sc->sc_dev), preg, reg);
   4191 	reg = CSR_READ(sc, WMREG_TCTL);
   4192 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4193 
   4194 	txq = &sc->sc_queue[0].wmq_txq;
   4195 	nexttx = txq->txq_next;
   4196 	txd = &txq->txq_descs[nexttx];
   4197 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4198 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4199 	txd->wtx_fields.wtxu_status = 0;
   4200 	txd->wtx_fields.wtxu_options = 0;
   4201 	txd->wtx_fields.wtxu_vlan = 0;
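         	/*
         	 * The descriptor above describes a single dummy 512-byte
         	 * frame (WTX_CMD_IFCS makes the MAC append the FCS); bumping
         	 * TDT below makes the hardware fetch it so that the stuck
         	 * ring can drain.
         	 */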
   4202 
   4203 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4204 	    BUS_SPACE_BARRIER_WRITE);
   4205 
   4206 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4207 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4208 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4209 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4210 	delay(250);
   4211 
   4212 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4213 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4214 		return;
   4215 
   4216 	/* RX */
   4217 	printf("%s: Need RX flush (reg = %08x)\n",
   4218 	    device_xname(sc->sc_dev), preg);
   4219 	rctl = CSR_READ(sc, WMREG_RCTL);
   4220 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4221 	CSR_WRITE_FLUSH(sc);
   4222 	delay(150);
   4223 
   4224 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4225 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4226 	reg &= 0xffffc000;
   4227 	/*
   4228 	 * update thresholds: prefetch threshold to 31, host threshold
   4229 	 * to 1 and make sure the granularity is "descriptors" and not
   4230 	 * "cache lines"
   4231 	 */
   4232 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4233 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
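         	/*
         	 * For illustration: 0x1f is the prefetch threshold in the
         	 * low six bits and (1 << 8) is the host threshold in bits
         	 * 13:8, matching the values described above.
         	 */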
   4234 
   4235 	/*
   4236 	 * momentarily enable the RX ring for the changes to take
   4237 	 * effect
   4238 	 */
   4239 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4240 	CSR_WRITE_FLUSH(sc);
   4241 	delay(150);
   4242 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4243 }
   4244 
   4245 /*
   4246  * wm_reset:
   4247  *
   4248  *	Reset the i82542 chip.
   4249  */
   4250 static void
   4251 wm_reset(struct wm_softc *sc)
   4252 {
   4253 	int phy_reset = 0;
   4254 	int i, error = 0;
   4255 	uint32_t reg;
   4256 	uint16_t kmreg;
   4257 	int rv;
   4258 
   4259 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4260 		device_xname(sc->sc_dev), __func__));
   4261 	KASSERT(sc->sc_type != 0);
   4262 
   4263 	/*
   4264 	 * Allocate on-chip memory according to the MTU size.
   4265 	 * The Packet Buffer Allocation register must be written
   4266 	 * before the chip is reset.
   4267 	 */
   4268 	switch (sc->sc_type) {
   4269 	case WM_T_82547:
   4270 	case WM_T_82547_2:
   4271 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4272 		    PBA_22K : PBA_30K;
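         		/*
         		 * E.g. with a standard MTU, sc_pba == PBA_30K, so the
         		 * Tx FIFO below gets (PBA_40K - PBA_30K) == 10KB of the
         		 * 40KB packet buffer.
         		 */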
   4273 		for (i = 0; i < sc->sc_nqueues; i++) {
   4274 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4275 			txq->txq_fifo_head = 0;
   4276 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4277 			txq->txq_fifo_size =
   4278 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4279 			txq->txq_fifo_stall = 0;
   4280 		}
   4281 		break;
   4282 	case WM_T_82571:
   4283 	case WM_T_82572:
    4284 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4285 	case WM_T_80003:
   4286 		sc->sc_pba = PBA_32K;
   4287 		break;
   4288 	case WM_T_82573:
   4289 		sc->sc_pba = PBA_12K;
   4290 		break;
   4291 	case WM_T_82574:
   4292 	case WM_T_82583:
   4293 		sc->sc_pba = PBA_20K;
   4294 		break;
   4295 	case WM_T_82576:
   4296 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4297 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4298 		break;
   4299 	case WM_T_82580:
   4300 	case WM_T_I350:
   4301 	case WM_T_I354:
   4302 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4303 		break;
   4304 	case WM_T_I210:
   4305 	case WM_T_I211:
   4306 		sc->sc_pba = PBA_34K;
   4307 		break;
   4308 	case WM_T_ICH8:
   4309 		/* Workaround for a bit corruption issue in FIFO memory */
   4310 		sc->sc_pba = PBA_8K;
   4311 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4312 		break;
   4313 	case WM_T_ICH9:
   4314 	case WM_T_ICH10:
   4315 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4316 		    PBA_14K : PBA_10K;
   4317 		break;
   4318 	case WM_T_PCH:
   4319 	case WM_T_PCH2:
   4320 	case WM_T_PCH_LPT:
   4321 	case WM_T_PCH_SPT:
   4322 		sc->sc_pba = PBA_26K;
   4323 		break;
   4324 	default:
   4325 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4326 		    PBA_40K : PBA_48K;
   4327 		break;
   4328 	}
   4329 	/*
   4330 	 * Only old or non-multiqueue devices have the PBA register
   4331 	 * XXX Need special handling for 82575.
   4332 	 */
   4333 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4334 	    || (sc->sc_type == WM_T_82575))
   4335 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4336 
   4337 	/* Prevent the PCI-E bus from sticking */
   4338 	if (sc->sc_flags & WM_F_PCIE) {
   4339 		int timeout = 800;
   4340 
   4341 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4342 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4343 
   4344 		while (timeout--) {
   4345 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4346 			    == 0)
   4347 				break;
   4348 			delay(100);
   4349 		}
   4350 		if (timeout == 0)
   4351 			device_printf(sc->sc_dev,
   4352 			    "failed to disable busmastering\n");
   4353 	}
   4354 
   4355 	/* Set the completion timeout for interface */
   4356 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4357 	    || (sc->sc_type == WM_T_82580)
   4358 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4359 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4360 		wm_set_pcie_completion_timeout(sc);
   4361 
   4362 	/* Clear interrupt */
   4363 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4364 	if (wm_is_using_msix(sc)) {
   4365 		if (sc->sc_type != WM_T_82574) {
   4366 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4367 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4368 		} else {
   4369 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4370 		}
   4371 	}
   4372 
   4373 	/* Stop the transmit and receive processes. */
   4374 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4375 	sc->sc_rctl &= ~RCTL_EN;
   4376 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4377 	CSR_WRITE_FLUSH(sc);
   4378 
   4379 	/* XXX set_tbi_sbp_82543() */
   4380 
   4381 	delay(10*1000);
   4382 
   4383 	/* Must acquire the MDIO ownership before MAC reset */
   4384 	switch (sc->sc_type) {
   4385 	case WM_T_82573:
   4386 	case WM_T_82574:
   4387 	case WM_T_82583:
   4388 		error = wm_get_hw_semaphore_82573(sc);
   4389 		break;
   4390 	default:
   4391 		break;
   4392 	}
   4393 
   4394 	/*
   4395 	 * 82541 Errata 29? & 82547 Errata 28?
   4396 	 * See also the description about PHY_RST bit in CTRL register
   4397 	 * in 8254x_GBe_SDM.pdf.
   4398 	 */
   4399 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4400 		CSR_WRITE(sc, WMREG_CTRL,
   4401 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4402 		CSR_WRITE_FLUSH(sc);
   4403 		delay(5000);
   4404 	}
   4405 
   4406 	switch (sc->sc_type) {
   4407 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4408 	case WM_T_82541:
   4409 	case WM_T_82541_2:
   4410 	case WM_T_82547:
   4411 	case WM_T_82547_2:
   4412 		/*
   4413 		 * On some chipsets, a reset through a memory-mapped write
   4414 		 * cycle can cause the chip to reset before completing the
   4415 		 * write cycle.  This causes major headache that can be
   4416 		 * avoided by issuing the reset via indirect register writes
   4417 		 * through I/O space.
   4418 		 *
   4419 		 * So, if we successfully mapped the I/O BAR at attach time,
   4420 		 * use that.  Otherwise, try our luck with a memory-mapped
   4421 		 * reset.
   4422 		 */
   4423 		if (sc->sc_flags & WM_F_IOH_VALID)
   4424 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4425 		else
   4426 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4427 		break;
   4428 	case WM_T_82545_3:
   4429 	case WM_T_82546_3:
   4430 		/* Use the shadow control register on these chips. */
   4431 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4432 		break;
   4433 	case WM_T_80003:
   4434 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4435 		sc->phy.acquire(sc);
   4436 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4437 		sc->phy.release(sc);
   4438 		break;
   4439 	case WM_T_ICH8:
   4440 	case WM_T_ICH9:
   4441 	case WM_T_ICH10:
   4442 	case WM_T_PCH:
   4443 	case WM_T_PCH2:
   4444 	case WM_T_PCH_LPT:
   4445 	case WM_T_PCH_SPT:
   4446 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4447 		if (wm_phy_resetisblocked(sc) == false) {
   4448 			/*
   4449 			 * Gate automatic PHY configuration by hardware on
   4450 			 * non-managed 82579
   4451 			 */
   4452 			if ((sc->sc_type == WM_T_PCH2)
   4453 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4454 				== 0))
   4455 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4456 
   4457 			reg |= CTRL_PHY_RESET;
   4458 			phy_reset = 1;
   4459 		} else
   4460 			printf("XXX reset is blocked!!!\n");
   4461 		sc->phy.acquire(sc);
   4462 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4463 		/* Don't insert a completion barrier when resetting */
   4464 		delay(20*1000);
   4465 		mutex_exit(sc->sc_ich_phymtx);
   4466 		break;
   4467 	case WM_T_82580:
   4468 	case WM_T_I350:
   4469 	case WM_T_I354:
   4470 	case WM_T_I210:
   4471 	case WM_T_I211:
   4472 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4473 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4474 			CSR_WRITE_FLUSH(sc);
   4475 		delay(5000);
   4476 		break;
   4477 	case WM_T_82542_2_0:
   4478 	case WM_T_82542_2_1:
   4479 	case WM_T_82543:
   4480 	case WM_T_82540:
   4481 	case WM_T_82545:
   4482 	case WM_T_82546:
   4483 	case WM_T_82571:
   4484 	case WM_T_82572:
   4485 	case WM_T_82573:
   4486 	case WM_T_82574:
   4487 	case WM_T_82575:
   4488 	case WM_T_82576:
   4489 	case WM_T_82583:
   4490 	default:
   4491 		/* Everything else can safely use the documented method. */
   4492 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4493 		break;
   4494 	}
   4495 
   4496 	/* Must release the MDIO ownership after MAC reset */
   4497 	switch (sc->sc_type) {
   4498 	case WM_T_82573:
   4499 	case WM_T_82574:
   4500 	case WM_T_82583:
   4501 		if (error == 0)
   4502 			wm_put_hw_semaphore_82573(sc);
   4503 		break;
   4504 	default:
   4505 		break;
   4506 	}
   4507 
   4508 	if (phy_reset != 0)
   4509 		wm_get_cfg_done(sc);
   4510 
   4511 	/* reload EEPROM */
   4512 	switch (sc->sc_type) {
   4513 	case WM_T_82542_2_0:
   4514 	case WM_T_82542_2_1:
   4515 	case WM_T_82543:
   4516 	case WM_T_82544:
   4517 		delay(10);
   4518 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4519 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4520 		CSR_WRITE_FLUSH(sc);
   4521 		delay(2000);
   4522 		break;
   4523 	case WM_T_82540:
   4524 	case WM_T_82545:
   4525 	case WM_T_82545_3:
   4526 	case WM_T_82546:
   4527 	case WM_T_82546_3:
   4528 		delay(5*1000);
   4529 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4530 		break;
   4531 	case WM_T_82541:
   4532 	case WM_T_82541_2:
   4533 	case WM_T_82547:
   4534 	case WM_T_82547_2:
   4535 		delay(20000);
   4536 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4537 		break;
   4538 	case WM_T_82571:
   4539 	case WM_T_82572:
   4540 	case WM_T_82573:
   4541 	case WM_T_82574:
   4542 	case WM_T_82583:
   4543 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4544 			delay(10);
   4545 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4546 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4547 			CSR_WRITE_FLUSH(sc);
   4548 		}
   4549 		/* check EECD_EE_AUTORD */
   4550 		wm_get_auto_rd_done(sc);
   4551 		/*
    4552 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
    4553 		 * is set.
   4554 		 */
   4555 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4556 		    || (sc->sc_type == WM_T_82583))
   4557 			delay(25*1000);
   4558 		break;
   4559 	case WM_T_82575:
   4560 	case WM_T_82576:
   4561 	case WM_T_82580:
   4562 	case WM_T_I350:
   4563 	case WM_T_I354:
   4564 	case WM_T_I210:
   4565 	case WM_T_I211:
   4566 	case WM_T_80003:
   4567 		/* check EECD_EE_AUTORD */
   4568 		wm_get_auto_rd_done(sc);
   4569 		break;
   4570 	case WM_T_ICH8:
   4571 	case WM_T_ICH9:
   4572 	case WM_T_ICH10:
   4573 	case WM_T_PCH:
   4574 	case WM_T_PCH2:
   4575 	case WM_T_PCH_LPT:
   4576 	case WM_T_PCH_SPT:
   4577 		break;
   4578 	default:
   4579 		panic("%s: unknown type\n", __func__);
   4580 	}
   4581 
   4582 	/* Check whether EEPROM is present or not */
   4583 	switch (sc->sc_type) {
   4584 	case WM_T_82575:
   4585 	case WM_T_82576:
   4586 	case WM_T_82580:
   4587 	case WM_T_I350:
   4588 	case WM_T_I354:
   4589 	case WM_T_ICH8:
   4590 	case WM_T_ICH9:
   4591 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4592 			/* Not found */
   4593 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4594 			if (sc->sc_type == WM_T_82575)
   4595 				wm_reset_init_script_82575(sc);
   4596 		}
   4597 		break;
   4598 	default:
   4599 		break;
   4600 	}
   4601 
   4602 	if (phy_reset != 0)
   4603 		wm_phy_post_reset(sc);
   4604 
   4605 	if ((sc->sc_type == WM_T_82580)
   4606 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4607 		/* clear global device reset status bit */
   4608 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4609 	}
   4610 
   4611 	/* Clear any pending interrupt events. */
   4612 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4613 	reg = CSR_READ(sc, WMREG_ICR);
   4614 	if (wm_is_using_msix(sc)) {
   4615 		if (sc->sc_type != WM_T_82574) {
   4616 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4617 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4618 		} else
   4619 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4620 	}
   4621 
   4622 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4623 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4624 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4625 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4626 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4627 		reg |= KABGTXD_BGSQLBIAS;
   4628 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4629 	}
   4630 
   4631 	/* reload sc_ctrl */
   4632 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4633 
   4634 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4635 		wm_set_eee_i350(sc);
   4636 
   4637 	/*
   4638 	 * For PCH, this write will make sure that any noise will be detected
   4639 	 * as a CRC error and be dropped rather than show up as a bad packet
   4640 	 * to the DMA engine
   4641 	 */
   4642 	if (sc->sc_type == WM_T_PCH)
   4643 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4644 
   4645 	if (sc->sc_type >= WM_T_82544)
   4646 		CSR_WRITE(sc, WMREG_WUC, 0);
   4647 
   4648 	wm_reset_mdicnfg_82580(sc);
   4649 
   4650 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4651 		wm_pll_workaround_i210(sc);
   4652 
   4653 	if (sc->sc_type == WM_T_80003) {
   4654 		/* default to TRUE to enable the MDIC W/A */
   4655 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4656 
   4657 		rv = wm_kmrn_readreg(sc,
   4658 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4659 		if (rv == 0) {
   4660 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4661 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4662 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4663 			else
   4664 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4665 		}
   4666 	}
   4667 }
   4668 
   4669 /*
   4670  * wm_add_rxbuf:
   4671  *
    4672  *	Add a receive buffer to the indicated descriptor.
   4673  */
   4674 static int
   4675 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4676 {
   4677 	struct wm_softc *sc = rxq->rxq_sc;
   4678 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4679 	struct mbuf *m;
   4680 	int error;
   4681 
   4682 	KASSERT(mutex_owned(rxq->rxq_lock));
   4683 
   4684 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4685 	if (m == NULL)
   4686 		return ENOBUFS;
   4687 
   4688 	MCLGET(m, M_DONTWAIT);
   4689 	if ((m->m_flags & M_EXT) == 0) {
   4690 		m_freem(m);
   4691 		return ENOBUFS;
   4692 	}
   4693 
   4694 	if (rxs->rxs_mbuf != NULL)
   4695 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4696 
   4697 	rxs->rxs_mbuf = m;
   4698 
   4699 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4700 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4701 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4702 	if (error) {
   4703 		/* XXX XXX XXX */
   4704 		aprint_error_dev(sc->sc_dev,
   4705 		    "unable to load rx DMA map %d, error = %d\n",
   4706 		    idx, error);
   4707 		panic("wm_add_rxbuf");
   4708 	}
   4709 
   4710 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4711 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4712 
   4713 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4714 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4715 			wm_init_rxdesc(rxq, idx);
   4716 	} else
   4717 		wm_init_rxdesc(rxq, idx);
   4718 
   4719 	return 0;
   4720 }
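
/*
 * Illustrative (hypothetical) caller sketch, not actual driver code:
 *
 *	mutex_enter(rxq->rxq_lock);
 *	if (wm_add_rxbuf(rxq, idx) != 0)
 *		wm_init_rxdesc(rxq, idx);
 *	mutex_exit(rxq->rxq_lock);
 *
 * On ENOBUFS the slot's old mbuf and DMA map are left untouched, so a
 * caller can simply re-initialize the descriptor and let the hardware
 * reuse the old buffer.
 */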
   4721 
   4722 /*
   4723  * wm_rxdrain:
   4724  *
   4725  *	Drain the receive queue.
   4726  */
   4727 static void
   4728 wm_rxdrain(struct wm_rxqueue *rxq)
   4729 {
   4730 	struct wm_softc *sc = rxq->rxq_sc;
   4731 	struct wm_rxsoft *rxs;
   4732 	int i;
   4733 
   4734 	KASSERT(mutex_owned(rxq->rxq_lock));
   4735 
   4736 	for (i = 0; i < WM_NRXDESC; i++) {
   4737 		rxs = &rxq->rxq_soft[i];
   4738 		if (rxs->rxs_mbuf != NULL) {
   4739 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4740 			m_freem(rxs->rxs_mbuf);
   4741 			rxs->rxs_mbuf = NULL;
   4742 		}
   4743 	}
   4744 }
   4745 
   4746 
   4747 /*
   4748  * XXX copy from FreeBSD's sys/net/rss_config.c
   4749  */
   4750 /*
   4751  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4752  * effectiveness may be limited by algorithm choice and available entropy
   4753  * during the boot.
   4754  *
   4755  * XXXRW: And that we don't randomize it yet!
   4756  *
   4757  * This is the default Microsoft RSS specification key which is also
   4758  * the Chelsio T5 firmware default key.
   4759  */
   4760 #define RSS_KEYSIZE 40
   4761 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4762 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4763 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4764 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4765 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4766 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4767 };
   4768 
   4769 /*
    4770  * Caller must pass an array of size sizeof(wm_rss_key).
   4771  *
   4772  * XXX
    4773  * As if_ixgbe may also use this function, it should not be an
    4774  * if_wm specific function.
   4775  */
   4776 static void
   4777 wm_rss_getkey(uint8_t *key)
   4778 {
   4779 
   4780 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4781 }
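
/*
 * A minimal usage sketch (mirroring wm_init_rss() below); the caller
 * must supply a buffer of exactly sizeof(wm_rss_key) == RSS_KEYSIZE
 * bytes, typically as RSSRK_NUM_REGS 32-bit words:
 *
 *	uint32_t rss_key[RSSRK_NUM_REGS];
 *
 *	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
 *	wm_rss_getkey((uint8_t *)rss_key);
 */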
   4782 
   4783 /*
   4784  * Setup registers for RSS.
   4785  *
    4786  * XXX VMDq is not yet supported.
   4787  */
   4788 static void
   4789 wm_init_rss(struct wm_softc *sc)
   4790 {
   4791 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4792 	int i;
   4793 
   4794 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4795 
   4796 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4797 		int qid, reta_ent;
   4798 
   4799 		qid  = i % sc->sc_nqueues;
    4800 		switch (sc->sc_type) {
   4801 		case WM_T_82574:
   4802 			reta_ent = __SHIFTIN(qid,
   4803 			    RETA_ENT_QINDEX_MASK_82574);
   4804 			break;
   4805 		case WM_T_82575:
   4806 			reta_ent = __SHIFTIN(qid,
   4807 			    RETA_ENT_QINDEX1_MASK_82575);
   4808 			break;
   4809 		default:
   4810 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4811 			break;
   4812 		}
   4813 
   4814 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4815 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4816 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4817 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4818 	}
   4819 
   4820 	wm_rss_getkey((uint8_t *)rss_key);
   4821 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4822 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4823 
   4824 	if (sc->sc_type == WM_T_82574)
   4825 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4826 	else
   4827 		mrqc = MRQC_ENABLE_RSS_MQ;
   4828 
   4829 	/*
   4830 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4831 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4832 	 */
   4833 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4834 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4835 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4836 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4837 
   4838 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4839 }
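
/*
 * Worked example of the setup above (illustrative only): with
 * sc_nqueues == 4, redirection table entry i is set to queue i % 4,
 * i.e. the repeating pattern 0, 1, 2, 3, 0, 1, 2, 3, ...  The 40-byte
 * wm_rss_key is then written as ten 32-bit RSSRK registers, and MRQC
 * enables hashing on the IPv4/IPv6 TCP and UDP fields listed above.
 */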
   4840 
   4841 /*
    4842  * Adjust the numbers of TX and RX queues which the system actually uses.
    4843  *
    4844  * The numbers are affected by the following parameters:
    4845  *     - The number of hardware queues
   4846  *     - The number of MSI-X vectors (= "nvectors" argument)
   4847  *     - ncpu
   4848  */
   4849 static void
   4850 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4851 {
   4852 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4853 
   4854 	if (nvectors < 2) {
   4855 		sc->sc_nqueues = 1;
   4856 		return;
   4857 	}
   4858 
    4859 	switch (sc->sc_type) {
   4860 	case WM_T_82572:
   4861 		hw_ntxqueues = 2;
   4862 		hw_nrxqueues = 2;
   4863 		break;
   4864 	case WM_T_82574:
   4865 		hw_ntxqueues = 2;
   4866 		hw_nrxqueues = 2;
   4867 		break;
   4868 	case WM_T_82575:
   4869 		hw_ntxqueues = 4;
   4870 		hw_nrxqueues = 4;
   4871 		break;
   4872 	case WM_T_82576:
   4873 		hw_ntxqueues = 16;
   4874 		hw_nrxqueues = 16;
   4875 		break;
   4876 	case WM_T_82580:
   4877 	case WM_T_I350:
   4878 	case WM_T_I354:
   4879 		hw_ntxqueues = 8;
   4880 		hw_nrxqueues = 8;
   4881 		break;
   4882 	case WM_T_I210:
   4883 		hw_ntxqueues = 4;
   4884 		hw_nrxqueues = 4;
   4885 		break;
   4886 	case WM_T_I211:
   4887 		hw_ntxqueues = 2;
   4888 		hw_nrxqueues = 2;
   4889 		break;
   4890 		/*
    4891 		 * As the following Ethernet controllers do not support
    4892 		 * MSI-X, this driver does not use multiqueue on them.
   4893 		 *     - WM_T_80003
   4894 		 *     - WM_T_ICH8
   4895 		 *     - WM_T_ICH9
   4896 		 *     - WM_T_ICH10
   4897 		 *     - WM_T_PCH
   4898 		 *     - WM_T_PCH2
   4899 		 *     - WM_T_PCH_LPT
   4900 		 */
   4901 	default:
   4902 		hw_ntxqueues = 1;
   4903 		hw_nrxqueues = 1;
   4904 		break;
   4905 	}
   4906 
   4907 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4908 
   4909 	/*
    4910 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4911 	 * limit the number of queues actually used.
   4912 	 */
   4913 	if (nvectors < hw_nqueues + 1) {
   4914 		sc->sc_nqueues = nvectors - 1;
   4915 	} else {
   4916 		sc->sc_nqueues = hw_nqueues;
   4917 	}
   4918 
   4919 	/*
    4920 	 * As more queues than CPUs cannot improve scaling, we limit
    4921 	 * the number of queues actually used.
   4922 	 */
   4923 	if (ncpu < sc->sc_nqueues)
   4924 		sc->sc_nqueues = ncpu;
   4925 }
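
/*
 * Worked example (illustrative only): an 82576 has 16 hardware TX/RX
 * queue pairs, so with nvectors == 5 we get sc_nqueues = 5 - 1 = 4
 * (one vector is reserved for the link interrupt).  If the machine
 * has only two CPUs, sc_nqueues is further clamped to ncpu == 2.
 */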
   4926 
   4927 static inline bool
   4928 wm_is_using_msix(struct wm_softc *sc)
   4929 {
   4930 
   4931 	return (sc->sc_nintrs > 1);
   4932 }
   4933 
   4934 static inline bool
   4935 wm_is_using_multiqueue(struct wm_softc *sc)
   4936 {
   4937 
   4938 	return (sc->sc_nqueues > 1);
   4939 }
   4940 
   4941 static int
   4942 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4943 {
   4944 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4945 	wmq->wmq_id = qidx;
   4946 	wmq->wmq_intr_idx = intr_idx;
   4947 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4948 #ifdef WM_MPSAFE
   4949 	    | SOFTINT_MPSAFE
   4950 #endif
   4951 	    , wm_handle_queue, wmq);
   4952 	if (wmq->wmq_si != NULL)
   4953 		return 0;
   4954 
   4955 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4956 	    wmq->wmq_id);
   4957 
   4958 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4959 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4960 	return ENOMEM;
   4961 }
   4962 
   4963 /*
   4964  * Both single interrupt MSI and INTx can use this function.
   4965  */
   4966 static int
   4967 wm_setup_legacy(struct wm_softc *sc)
   4968 {
   4969 	pci_chipset_tag_t pc = sc->sc_pc;
   4970 	const char *intrstr = NULL;
   4971 	char intrbuf[PCI_INTRSTR_LEN];
   4972 	int error;
   4973 
   4974 	error = wm_alloc_txrx_queues(sc);
   4975 	if (error) {
   4976 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4977 		    error);
   4978 		return ENOMEM;
   4979 	}
   4980 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4981 	    sizeof(intrbuf));
   4982 #ifdef WM_MPSAFE
   4983 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4984 #endif
   4985 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4986 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4987 	if (sc->sc_ihs[0] == NULL) {
   4988 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4989 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4990 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4991 		return ENOMEM;
   4992 	}
   4993 
   4994 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4995 	sc->sc_nintrs = 1;
   4996 
   4997 	return wm_softint_establish(sc, 0, 0);
   4998 }
   4999 
   5000 static int
   5001 wm_setup_msix(struct wm_softc *sc)
   5002 {
   5003 	void *vih;
   5004 	kcpuset_t *affinity;
   5005 	int qidx, error, intr_idx, txrx_established;
   5006 	pci_chipset_tag_t pc = sc->sc_pc;
   5007 	const char *intrstr = NULL;
   5008 	char intrbuf[PCI_INTRSTR_LEN];
   5009 	char intr_xname[INTRDEVNAMEBUF];
   5010 
   5011 	if (sc->sc_nqueues < ncpu) {
   5012 		/*
    5013 		 * To avoid other devices' interrupts, the affinity of the
    5014 		 * Tx/Rx interrupts starts from CPU#1.
   5015 		 */
   5016 		sc->sc_affinity_offset = 1;
   5017 	} else {
   5018 		/*
    5019 		 * In this case, this device uses all CPUs, so we unify the
    5020 		 * affinity cpu_index with the MSI-X vector number for readability.
   5021 		 */
   5022 		sc->sc_affinity_offset = 0;
   5023 	}
   5024 
   5025 	error = wm_alloc_txrx_queues(sc);
   5026 	if (error) {
   5027 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5028 		    error);
   5029 		return ENOMEM;
   5030 	}
   5031 
   5032 	kcpuset_create(&affinity, false);
   5033 	intr_idx = 0;
   5034 
   5035 	/*
   5036 	 * TX and RX
   5037 	 */
   5038 	txrx_established = 0;
   5039 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5040 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5041 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5042 
   5043 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5044 		    sizeof(intrbuf));
   5045 #ifdef WM_MPSAFE
   5046 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5047 		    PCI_INTR_MPSAFE, true);
   5048 #endif
   5049 		memset(intr_xname, 0, sizeof(intr_xname));
   5050 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5051 		    device_xname(sc->sc_dev), qidx);
   5052 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5053 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5054 		if (vih == NULL) {
   5055 			aprint_error_dev(sc->sc_dev,
   5056 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5057 			    intrstr ? " at " : "",
   5058 			    intrstr ? intrstr : "");
   5059 
   5060 			goto fail;
   5061 		}
   5062 		kcpuset_zero(affinity);
   5063 		/* Round-robin affinity */
   5064 		kcpuset_set(affinity, affinity_to);
   5065 		error = interrupt_distribute(vih, affinity, NULL);
   5066 		if (error == 0) {
   5067 			aprint_normal_dev(sc->sc_dev,
   5068 			    "for TX and RX interrupting at %s affinity to %u\n",
   5069 			    intrstr, affinity_to);
   5070 		} else {
   5071 			aprint_normal_dev(sc->sc_dev,
   5072 			    "for TX and RX interrupting at %s\n", intrstr);
   5073 		}
   5074 		sc->sc_ihs[intr_idx] = vih;
   5075 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5076 			goto fail;
   5077 		txrx_established++;
   5078 		intr_idx++;
   5079 	}
   5080 
   5081 	/*
   5082 	 * LINK
   5083 	 */
   5084 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5085 	    sizeof(intrbuf));
   5086 #ifdef WM_MPSAFE
   5087 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5088 #endif
   5089 	memset(intr_xname, 0, sizeof(intr_xname));
   5090 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5091 	    device_xname(sc->sc_dev));
   5092 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5093 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5094 	if (vih == NULL) {
   5095 		aprint_error_dev(sc->sc_dev,
   5096 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5097 		    intrstr ? " at " : "",
   5098 		    intrstr ? intrstr : "");
   5099 
   5100 		goto fail;
   5101 	}
    5102 	/* Keep the default affinity for the LINK interrupt */
   5103 	aprint_normal_dev(sc->sc_dev,
   5104 	    "for LINK interrupting at %s\n", intrstr);
   5105 	sc->sc_ihs[intr_idx] = vih;
   5106 	sc->sc_link_intr_idx = intr_idx;
   5107 
   5108 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5109 	kcpuset_destroy(affinity);
   5110 	return 0;
   5111 
   5112  fail:
   5113 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5114 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5115 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5116 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5117 	}
   5118 
   5119 	kcpuset_destroy(affinity);
   5120 	return ENOMEM;
   5121 }
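
/*
 * Resulting vector layout (illustrative only, for a hypothetical wm0
 * with sc_nqueues == 2 and ncpu > 2):
 *	vector 0: wm0TXRX0, queue 0 TX+RX, affinity CPU#1
 *	vector 1: wm0TXRX1, queue 1 TX+RX, affinity CPU#2
 *	vector 2: wm0LINK, default affinity
 * so sc_nintrs == sc_nqueues + 1 == 3.
 */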
   5122 
   5123 static void
   5124 wm_turnon(struct wm_softc *sc)
   5125 {
   5126 	int i;
   5127 
   5128 	KASSERT(WM_CORE_LOCKED(sc));
   5129 
   5130 	/*
    5131 	 * Must unset the stopping flags in ascending order.
    5132 	 */
    5133 	for (i = 0; i < sc->sc_nqueues; i++) {
   5134 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5135 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5136 
   5137 		mutex_enter(txq->txq_lock);
   5138 		txq->txq_stopping = false;
   5139 		mutex_exit(txq->txq_lock);
   5140 
   5141 		mutex_enter(rxq->rxq_lock);
   5142 		rxq->rxq_stopping = false;
   5143 		mutex_exit(rxq->rxq_lock);
   5144 	}
   5145 
   5146 	sc->sc_core_stopping = false;
   5147 }
   5148 
   5149 static void
   5150 wm_turnoff(struct wm_softc *sc)
   5151 {
   5152 	int i;
   5153 
   5154 	KASSERT(WM_CORE_LOCKED(sc));
   5155 
   5156 	sc->sc_core_stopping = true;
   5157 
   5158 	/*
    5159 	 * Must set the stopping flags in ascending order.
    5160 	 */
    5161 	for (i = 0; i < sc->sc_nqueues; i++) {
   5162 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5163 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5164 
   5165 		mutex_enter(rxq->rxq_lock);
   5166 		rxq->rxq_stopping = true;
   5167 		mutex_exit(rxq->rxq_lock);
   5168 
   5169 		mutex_enter(txq->txq_lock);
   5170 		txq->txq_stopping = true;
   5171 		mutex_exit(txq->txq_lock);
   5172 	}
   5173 }
   5174 
   5175 /*
    5176  * Write the interrupt interval value to the ITR or EITR register.
   5177  */
   5178 static void
   5179 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5180 {
   5181 
   5182 	if (!wmq->wmq_set_itr)
   5183 		return;
   5184 
   5185 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5186 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5187 
   5188 		/*
    5189 		 * The 82575 doesn't have the CNT_INGR field, so
    5190 		 * overwrite the counter field in software.
   5191 		 */
   5192 		if (sc->sc_type == WM_T_82575)
   5193 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5194 		else
   5195 			eitr |= EITR_CNT_INGR;
   5196 
   5197 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5198 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5199 		/*
    5200 		 * The 82574 has both ITR and EITR.  Set EITR when we use
    5201 		 * the multiqueue function with MSI-X.
   5202 		 */
   5203 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5204 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5205 	} else {
   5206 		KASSERT(wmq->wmq_id == 0);
   5207 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5208 	}
   5209 
   5210 	wmq->wmq_set_itr = false;
   5211 }
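
/*
 * Summary of the register selection above:
 *	NEWQUEUE (82575 and newer):	EITR(intr_idx)
 *	82574 with MSI-X:		EITR_82574(intr_idx)
 *	everything else:		ITR (one register, queue 0 only)
 */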
   5212 
   5213 /*
   5214  * TODO
    5215  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5216  * but it does not fit wm(4), so AIM is disabled until we find an
    5217  * appropriate ITR calculation for wm(4).
   5218  */
   5219 /*
    5220  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5221  * write.  This function itself does not write the ITR/EITR register.
   5222  */
   5223 static void
   5224 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5225 {
   5226 #ifdef NOTYET
   5227 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5228 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5229 	uint32_t avg_size = 0;
   5230 	uint32_t new_itr;
   5231 
   5232 	if (rxq->rxq_packets)
   5233 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5234 	if (txq->txq_packets)
   5235 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5236 
   5237 	if (avg_size == 0) {
   5238 		new_itr = 450; /* restore default value */
   5239 		goto out;
   5240 	}
   5241 
   5242 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5243 	avg_size += 24;
   5244 
   5245 	/* Don't starve jumbo frames */
   5246 	avg_size = min(avg_size, 3000);
   5247 
   5248 	/* Give a little boost to mid-size frames */
   5249 	if ((avg_size > 300) && (avg_size < 1200))
   5250 		new_itr = avg_size / 3;
   5251 	else
   5252 		new_itr = avg_size / 2;
   5253 
   5254 out:
   5255 	/*
    5256 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5257 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5258 	 */
   5259 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5260 		new_itr *= 4;
   5261 
   5262 	if (new_itr != wmq->wmq_itr) {
   5263 		wmq->wmq_itr = new_itr;
   5264 		wmq->wmq_set_itr = true;
   5265 	} else
   5266 		wmq->wmq_set_itr = false;
   5267 
   5268 	rxq->rxq_packets = 0;
   5269 	rxq->rxq_bytes = 0;
   5270 	txq->txq_packets = 0;
   5271 	txq->txq_bytes = 0;
   5272 #endif
   5273 }
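
/*
 * Worked example of the disabled calculation above (illustrative
 * only): with rxq_bytes == 60000, rxq_packets == 100, and an idle TX
 * side, avg_size is 600; adding 24 bytes of overhead gives 624, which
 * is in the mid-size range (300, 1200), so new_itr = 624 / 3 = 208.
 * On everything except the 82575 this is then multiplied by 4,
 * giving 832.
 */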
   5274 
   5275 /*
   5276  * wm_init:		[ifnet interface function]
   5277  *
   5278  *	Initialize the interface.
   5279  */
   5280 static int
   5281 wm_init(struct ifnet *ifp)
   5282 {
   5283 	struct wm_softc *sc = ifp->if_softc;
   5284 	int ret;
   5285 
   5286 	WM_CORE_LOCK(sc);
   5287 	ret = wm_init_locked(ifp);
   5288 	WM_CORE_UNLOCK(sc);
   5289 
   5290 	return ret;
   5291 }
   5292 
   5293 static int
   5294 wm_init_locked(struct ifnet *ifp)
   5295 {
   5296 	struct wm_softc *sc = ifp->if_softc;
   5297 	int i, j, trynum, error = 0;
   5298 	uint32_t reg;
   5299 
   5300 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5301 		device_xname(sc->sc_dev), __func__));
   5302 	KASSERT(WM_CORE_LOCKED(sc));
   5303 
   5304 	/*
    5305 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5306 	 * There is a small but measurable benefit to avoiding the adjustment
   5307 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5308 	 * on such platforms.  One possibility is that the DMA itself is
   5309 	 * slightly more efficient if the front of the entire packet (instead
   5310 	 * of the front of the headers) is aligned.
   5311 	 *
   5312 	 * Note we must always set align_tweak to 0 if we are using
   5313 	 * jumbo frames.
   5314 	 */
   5315 #ifdef __NO_STRICT_ALIGNMENT
   5316 	sc->sc_align_tweak = 0;
   5317 #else
   5318 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5319 		sc->sc_align_tweak = 0;
   5320 	else
   5321 		sc->sc_align_tweak = 2;
   5322 #endif /* __NO_STRICT_ALIGNMENT */
   5323 
   5324 	/* Cancel any pending I/O. */
   5325 	wm_stop_locked(ifp, 0);
   5326 
    5327 	/* Update statistics before reset */
   5328 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5329 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5330 
   5331 	/* PCH_SPT hardware workaround */
   5332 	if (sc->sc_type == WM_T_PCH_SPT)
   5333 		wm_flush_desc_rings(sc);
   5334 
   5335 	/* Reset the chip to a known state. */
   5336 	wm_reset(sc);
   5337 
   5338 	/*
   5339 	 * AMT based hardware can now take control from firmware
   5340 	 * Do this after reset.
   5341 	 */
   5342 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5343 		wm_get_hw_control(sc);
   5344 
   5345 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5346 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5347 		wm_legacy_irq_quirk_spt(sc);
   5348 
   5349 	/* Init hardware bits */
   5350 	wm_initialize_hardware_bits(sc);
   5351 
   5352 	/* Reset the PHY. */
   5353 	if (sc->sc_flags & WM_F_HAS_MII)
   5354 		wm_gmii_reset(sc);
   5355 
   5356 	/* Calculate (E)ITR value */
   5357 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5358 		/*
    5359 		 * For NEWQUEUE's EITR (except for the 82575).
    5360 		 * The 82575's EITR should be set to the same throttling
    5361 		 * value as other old controllers' ITR because the
    5362 		 * interrupt/sec calculation is the same, that is,
    5363 		 * 1,000,000,000 / (N * 256).
    5364 		 *
    5365 		 * The 82574's EITR should be set to the same throttling
    5366 		 * value as the ITR.  For N interrupts/sec, set this value
    5367 		 * to 1,000,000 / N, in contrast to the ITR throttling value.
   5368 		 */
   5369 		sc->sc_itr_init = 450;
   5370 	} else if (sc->sc_type >= WM_T_82543) {
   5371 		/*
   5372 		 * Set up the interrupt throttling register (units of 256ns)
   5373 		 * Note that a footnote in Intel's documentation says this
   5374 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5375 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5376 		 * that that is also true for the 1024ns units of the other
   5377 		 * interrupt-related timer registers -- so, really, we ought
   5378 		 * to divide this value by 4 when the link speed is low.
   5379 		 *
   5380 		 * XXX implement this division at link speed change!
   5381 		 */
   5382 
   5383 		/*
   5384 		 * For N interrupts/sec, set this value to:
   5385 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5386 		 * absolute and packet timer values to this value
   5387 		 * divided by 4 to get "simple timer" behavior.
   5388 		 */
   5389 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5390 	}
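
	/*
	 * A quick check of the arithmetic above (illustrative): with
	 * sc_itr_init == 1500, N = 1,000,000,000 / (1500 * 256) is about
	 * 2604 interrupts/sec; with the NEWQUEUE value of 450 and the
	 * 1,000,000 / N formula, N is about 2222 interrupts/sec.
	 */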
   5391 
   5392 	error = wm_init_txrx_queues(sc);
   5393 	if (error)
   5394 		goto out;
   5395 
   5396 	/*
   5397 	 * Clear out the VLAN table -- we don't use it (yet).
   5398 	 */
   5399 	CSR_WRITE(sc, WMREG_VET, 0);
   5400 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5401 		trynum = 10; /* Due to hw errata */
   5402 	else
   5403 		trynum = 1;
   5404 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5405 		for (j = 0; j < trynum; j++)
   5406 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5407 
   5408 	/*
   5409 	 * Set up flow-control parameters.
   5410 	 *
   5411 	 * XXX Values could probably stand some tuning.
   5412 	 */
   5413 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5414 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5415 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5416 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5417 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5418 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5419 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5420 	}
   5421 
   5422 	sc->sc_fcrtl = FCRTL_DFLT;
   5423 	if (sc->sc_type < WM_T_82543) {
   5424 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5425 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5426 	} else {
   5427 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5428 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5429 	}
   5430 
   5431 	if (sc->sc_type == WM_T_80003)
   5432 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5433 	else
   5434 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5435 
   5436 	/* Writes the control register. */
   5437 	wm_set_vlan(sc);
   5438 
   5439 	if (sc->sc_flags & WM_F_HAS_MII) {
   5440 		uint16_t kmreg;
   5441 
   5442 		switch (sc->sc_type) {
   5443 		case WM_T_80003:
   5444 		case WM_T_ICH8:
   5445 		case WM_T_ICH9:
   5446 		case WM_T_ICH10:
   5447 		case WM_T_PCH:
   5448 		case WM_T_PCH2:
   5449 		case WM_T_PCH_LPT:
   5450 		case WM_T_PCH_SPT:
   5451 			/*
   5452 			 * Set the mac to wait the maximum time between each
   5453 			 * iteration and increase the max iterations when
   5454 			 * polling the phy; this fixes erroneous timeouts at
   5455 			 * 10Mbps.
   5456 			 */
   5457 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5458 			    0xFFFF);
   5459 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5460 			    &kmreg);
   5461 			kmreg |= 0x3F;
   5462 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5463 			    kmreg);
   5464 			break;
   5465 		default:
   5466 			break;
   5467 		}
   5468 
   5469 		if (sc->sc_type == WM_T_80003) {
   5470 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5471 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5472 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5473 
   5474 			/* Bypass RX and TX FIFO's */
   5475 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5476 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5477 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5478 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5479 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5480 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5481 		}
   5482 	}
   5483 #if 0
   5484 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5485 #endif
   5486 
   5487 	/* Set up checksum offload parameters. */
   5488 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5489 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5490 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5491 		reg |= RXCSUM_IPOFL;
   5492 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5493 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5494 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5495 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5496 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5497 
   5498 	/* Set registers about MSI-X */
   5499 	if (wm_is_using_msix(sc)) {
   5500 		uint32_t ivar;
   5501 		struct wm_queue *wmq;
   5502 		int qid, qintr_idx;
   5503 
   5504 		if (sc->sc_type == WM_T_82575) {
   5505 			/* Interrupt control */
   5506 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5507 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5508 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5509 
   5510 			/* TX and RX */
   5511 			for (i = 0; i < sc->sc_nqueues; i++) {
   5512 				wmq = &sc->sc_queue[i];
   5513 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5514 				    EITR_TX_QUEUE(wmq->wmq_id)
   5515 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5516 			}
   5517 			/* Link status */
   5518 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5519 			    EITR_OTHER);
   5520 		} else if (sc->sc_type == WM_T_82574) {
   5521 			/* Interrupt control */
   5522 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5523 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5524 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5525 
   5526 			/*
    5527 			 * Work around an issue with spurious interrupts
    5528 			 * in MSI-X mode.  At wm_initialize_hardware_bits(),
    5529 			 * sc_nintrs has not been initialized yet, so
    5530 			 * re-initialize WMREG_RFCTL here.
   5531 			 */
   5532 			reg = CSR_READ(sc, WMREG_RFCTL);
   5533 			reg |= WMREG_RFCTL_ACKDIS;
   5534 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5535 
   5536 			ivar = 0;
   5537 			/* TX and RX */
   5538 			for (i = 0; i < sc->sc_nqueues; i++) {
   5539 				wmq = &sc->sc_queue[i];
   5540 				qid = wmq->wmq_id;
   5541 				qintr_idx = wmq->wmq_intr_idx;
   5542 
   5543 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5544 				    IVAR_TX_MASK_Q_82574(qid));
   5545 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5546 				    IVAR_RX_MASK_Q_82574(qid));
   5547 			}
   5548 			/* Link status */
   5549 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5550 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5551 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5552 		} else {
   5553 			/* Interrupt control */
   5554 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5555 			    | GPIE_EIAME | GPIE_PBA);
   5556 
   5557 			switch (sc->sc_type) {
   5558 			case WM_T_82580:
   5559 			case WM_T_I350:
   5560 			case WM_T_I354:
   5561 			case WM_T_I210:
   5562 			case WM_T_I211:
   5563 				/* TX and RX */
   5564 				for (i = 0; i < sc->sc_nqueues; i++) {
   5565 					wmq = &sc->sc_queue[i];
   5566 					qid = wmq->wmq_id;
   5567 					qintr_idx = wmq->wmq_intr_idx;
   5568 
   5569 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5570 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5571 					ivar |= __SHIFTIN((qintr_idx
   5572 						| IVAR_VALID),
   5573 					    IVAR_TX_MASK_Q(qid));
   5574 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5575 					ivar |= __SHIFTIN((qintr_idx
   5576 						| IVAR_VALID),
   5577 					    IVAR_RX_MASK_Q(qid));
   5578 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5579 				}
   5580 				break;
   5581 			case WM_T_82576:
   5582 				/* TX and RX */
   5583 				for (i = 0; i < sc->sc_nqueues; i++) {
   5584 					wmq = &sc->sc_queue[i];
   5585 					qid = wmq->wmq_id;
   5586 					qintr_idx = wmq->wmq_intr_idx;
   5587 
   5588 					ivar = CSR_READ(sc,
   5589 					    WMREG_IVAR_Q_82576(qid));
   5590 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5591 					ivar |= __SHIFTIN((qintr_idx
   5592 						| IVAR_VALID),
   5593 					    IVAR_TX_MASK_Q_82576(qid));
   5594 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5595 					ivar |= __SHIFTIN((qintr_idx
   5596 						| IVAR_VALID),
   5597 					    IVAR_RX_MASK_Q_82576(qid));
   5598 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5599 					    ivar);
   5600 				}
   5601 				break;
   5602 			default:
   5603 				break;
   5604 			}
   5605 
   5606 			/* Link status */
   5607 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5608 			    IVAR_MISC_OTHER);
   5609 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5610 		}
   5611 
   5612 		if (wm_is_using_multiqueue(sc)) {
   5613 			wm_init_rss(sc);
   5614 
    5615 			/*
    5616 			 * NOTE: Receive Full-Packet Checksum Offload is
    5617 			 * mutually exclusive with multiqueue.  However, this
    5618 			 * is not the same as the TCP/IP checksum offloads,
    5619 			 * which still work.
    5620 			 */
   5621 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5622 			reg |= RXCSUM_PCSD;
   5623 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5624 		}
   5625 	}
   5626 
   5627 	/* Set up the interrupt registers. */
   5628 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5629 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5630 	    ICR_RXO | ICR_RXT0;
   5631 	if (wm_is_using_msix(sc)) {
   5632 		uint32_t mask;
   5633 		struct wm_queue *wmq;
   5634 
   5635 		switch (sc->sc_type) {
   5636 		case WM_T_82574:
   5637 			mask = 0;
   5638 			for (i = 0; i < sc->sc_nqueues; i++) {
   5639 				wmq = &sc->sc_queue[i];
   5640 				mask |= ICR_TXQ(wmq->wmq_id);
   5641 				mask |= ICR_RXQ(wmq->wmq_id);
   5642 			}
   5643 			mask |= ICR_OTHER;
   5644 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5645 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5646 			break;
   5647 		default:
   5648 			if (sc->sc_type == WM_T_82575) {
   5649 				mask = 0;
   5650 				for (i = 0; i < sc->sc_nqueues; i++) {
   5651 					wmq = &sc->sc_queue[i];
   5652 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5653 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5654 				}
   5655 				mask |= EITR_OTHER;
   5656 			} else {
   5657 				mask = 0;
   5658 				for (i = 0; i < sc->sc_nqueues; i++) {
   5659 					wmq = &sc->sc_queue[i];
   5660 					mask |= 1 << wmq->wmq_intr_idx;
   5661 				}
   5662 				mask |= 1 << sc->sc_link_intr_idx;
   5663 			}
   5664 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5665 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5666 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5667 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5668 			break;
   5669 		}
   5670 	} else
   5671 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5672 
   5673 	/* Set up the inter-packet gap. */
   5674 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5675 
   5676 	if (sc->sc_type >= WM_T_82543) {
   5677 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5678 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5679 			wm_itrs_writereg(sc, wmq);
   5680 		}
   5681 		/*
    5682 		 * Link interrupts occur much less frequently than TX and
    5683 		 * RX interrupts, so we don't tune the
    5684 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5685 		 * if_igb does.
   5686 		 */
   5687 	}
   5688 
   5689 	/* Set the VLAN ethernetype. */
   5690 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5691 
   5692 	/*
   5693 	 * Set up the transmit control register; we start out with
    5694 	 * a collision distance suitable for FDX, but update it when
   5695 	 * we resolve the media type.
   5696 	 */
   5697 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5698 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5699 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5700 	if (sc->sc_type >= WM_T_82571)
   5701 		sc->sc_tctl |= TCTL_MULR;
   5702 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5703 
   5704 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5705 		/* Write TDT after TCTL.EN is set. See the document. */
   5706 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5707 	}
   5708 
   5709 	if (sc->sc_type == WM_T_80003) {
   5710 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5711 		reg &= ~TCTL_EXT_GCEX_MASK;
   5712 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5713 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5714 	}
   5715 
   5716 	/* Set the media. */
   5717 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5718 		goto out;
   5719 
   5720 	/* Configure for OS presence */
   5721 	wm_init_manageability(sc);
   5722 
   5723 	/*
   5724 	 * Set up the receive control register; we actually program
   5725 	 * the register when we set the receive filter.  Use multicast
   5726 	 * address offset type 0.
   5727 	 *
   5728 	 * Only the i82544 has the ability to strip the incoming
   5729 	 * CRC, so we don't enable that feature.
   5730 	 */
   5731 	sc->sc_mchash_type = 0;
   5732 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5733 	    | RCTL_MO(sc->sc_mchash_type);
   5734 
   5735 	/*
    5736 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5737 	 */
   5738 	if (sc->sc_type == WM_T_82574)
   5739 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5740 
   5741 	/*
   5742 	 * The I350 has a bug where it always strips the CRC whether
    5743 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5744 	 */
   5745 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5746 	    || (sc->sc_type == WM_T_I210))
   5747 		sc->sc_rctl |= RCTL_SECRC;
   5748 
   5749 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5750 	    && (ifp->if_mtu > ETHERMTU)) {
   5751 		sc->sc_rctl |= RCTL_LPE;
   5752 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5753 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5754 	}
   5755 
   5756 	if (MCLBYTES == 2048) {
   5757 		sc->sc_rctl |= RCTL_2k;
   5758 	} else {
   5759 		if (sc->sc_type >= WM_T_82543) {
   5760 			switch (MCLBYTES) {
   5761 			case 4096:
   5762 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5763 				break;
   5764 			case 8192:
   5765 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5766 				break;
   5767 			case 16384:
   5768 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5769 				break;
   5770 			default:
   5771 				panic("wm_init: MCLBYTES %d unsupported",
   5772 				    MCLBYTES);
   5773 				break;
   5774 			}
    5775 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5776 	}
   5777 
   5778 	/* Enable ECC */
   5779 	switch (sc->sc_type) {
   5780 	case WM_T_82571:
   5781 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5782 		reg |= PBA_ECC_CORR_EN;
   5783 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5784 		break;
   5785 	case WM_T_PCH_LPT:
   5786 	case WM_T_PCH_SPT:
   5787 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5788 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5789 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5790 
   5791 		sc->sc_ctrl |= CTRL_MEHE;
   5792 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5793 		break;
   5794 	default:
   5795 		break;
   5796 	}
   5797 
   5798 	/* On 575 and later set RDT only if RX enabled */
   5799 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5800 		int qidx;
   5801 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5802 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5803 			for (i = 0; i < WM_NRXDESC; i++) {
   5804 				mutex_enter(rxq->rxq_lock);
   5805 				wm_init_rxdesc(rxq, i);
   5806 				mutex_exit(rxq->rxq_lock);
   5807 
   5808 			}
   5809 		}
   5810 	}
   5811 
   5812 	/* Set the receive filter. */
   5813 	wm_set_filter(sc);
   5814 
   5815 	wm_turnon(sc);
   5816 
   5817 	/* Start the one second link check clock. */
   5818 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5819 
   5820 	/* ...all done! */
   5821 	ifp->if_flags |= IFF_RUNNING;
   5822 	ifp->if_flags &= ~IFF_OACTIVE;
   5823 
   5824  out:
   5825 	sc->sc_if_flags = ifp->if_flags;
   5826 	if (error)
   5827 		log(LOG_ERR, "%s: interface not running\n",
   5828 		    device_xname(sc->sc_dev));
   5829 	return error;
   5830 }
   5831 
   5832 /*
   5833  * wm_stop:		[ifnet interface function]
   5834  *
   5835  *	Stop transmission on the interface.
   5836  */
   5837 static void
   5838 wm_stop(struct ifnet *ifp, int disable)
   5839 {
   5840 	struct wm_softc *sc = ifp->if_softc;
   5841 
   5842 	WM_CORE_LOCK(sc);
   5843 	wm_stop_locked(ifp, disable);
   5844 	WM_CORE_UNLOCK(sc);
   5845 }
   5846 
   5847 static void
   5848 wm_stop_locked(struct ifnet *ifp, int disable)
   5849 {
   5850 	struct wm_softc *sc = ifp->if_softc;
   5851 	struct wm_txsoft *txs;
   5852 	int i, qidx;
   5853 
   5854 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5855 		device_xname(sc->sc_dev), __func__));
   5856 	KASSERT(WM_CORE_LOCKED(sc));
   5857 
   5858 	wm_turnoff(sc);
   5859 
   5860 	/* Stop the one second clock. */
   5861 	callout_stop(&sc->sc_tick_ch);
   5862 
   5863 	/* Stop the 82547 Tx FIFO stall check timer. */
   5864 	if (sc->sc_type == WM_T_82547)
   5865 		callout_stop(&sc->sc_txfifo_ch);
   5866 
   5867 	if (sc->sc_flags & WM_F_HAS_MII) {
   5868 		/* Down the MII. */
   5869 		mii_down(&sc->sc_mii);
   5870 	} else {
   5871 #if 0
   5872 		/* Should we clear PHY's status properly? */
   5873 		wm_reset(sc);
   5874 #endif
   5875 	}
   5876 
   5877 	/* Stop the transmit and receive processes. */
   5878 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5879 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5880 	sc->sc_rctl &= ~RCTL_EN;
   5881 
   5882 	/*
   5883 	 * Clear the interrupt mask to ensure the device cannot assert its
   5884 	 * interrupt line.
   5885 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5886 	 * service any currently pending or shared interrupt.
   5887 	 */
   5888 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5889 	sc->sc_icr = 0;
   5890 	if (wm_is_using_msix(sc)) {
   5891 		if (sc->sc_type != WM_T_82574) {
   5892 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5893 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5894 		} else
   5895 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5896 	}
   5897 
   5898 	/* Release any queued transmit buffers. */
   5899 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5900 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5901 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5902 		mutex_enter(txq->txq_lock);
   5903 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5904 			txs = &txq->txq_soft[i];
   5905 			if (txs->txs_mbuf != NULL) {
   5906 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5907 				m_freem(txs->txs_mbuf);
   5908 				txs->txs_mbuf = NULL;
   5909 			}
   5910 		}
   5911 		mutex_exit(txq->txq_lock);
   5912 	}
   5913 
   5914 	/* Mark the interface as down and cancel the watchdog timer. */
   5915 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5916 	ifp->if_timer = 0;
   5917 
   5918 	if (disable) {
   5919 		for (i = 0; i < sc->sc_nqueues; i++) {
   5920 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5921 			mutex_enter(rxq->rxq_lock);
   5922 			wm_rxdrain(rxq);
   5923 			mutex_exit(rxq->rxq_lock);
   5924 		}
   5925 	}
   5926 
   5927 #if 0 /* notyet */
   5928 	if (sc->sc_type >= WM_T_82544)
   5929 		CSR_WRITE(sc, WMREG_WUC, 0);
   5930 #endif
   5931 }
   5932 
   5933 static void
   5934 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5935 {
   5936 	struct mbuf *m;
   5937 	int i;
   5938 
   5939 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5940 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5941 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5942 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5943 		    m->m_data, m->m_len, m->m_flags);
   5944 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5945 	    i, i == 1 ? "" : "s");
   5946 }
   5947 
   5948 /*
   5949  * wm_82547_txfifo_stall:
   5950  *
   5951  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5952  *	reset the FIFO pointers, and restart packet transmission.
   5953  */
   5954 static void
   5955 wm_82547_txfifo_stall(void *arg)
   5956 {
   5957 	struct wm_softc *sc = arg;
   5958 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5959 
   5960 	mutex_enter(txq->txq_lock);
   5961 
   5962 	if (txq->txq_stopping)
   5963 		goto out;
   5964 
   5965 	if (txq->txq_fifo_stall) {
   5966 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5967 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5968 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5969 			/*
   5970 			 * Packets have drained.  Stop transmitter, reset
   5971 			 * FIFO pointers, restart transmitter, and kick
   5972 			 * the packet queue.
   5973 			 */
   5974 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5975 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5976 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5977 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5978 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5979 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5980 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5981 			CSR_WRITE_FLUSH(sc);
   5982 
   5983 			txq->txq_fifo_head = 0;
   5984 			txq->txq_fifo_stall = 0;
   5985 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5986 		} else {
   5987 			/*
   5988 			 * Still waiting for packets to drain; try again in
   5989 			 * another tick.
   5990 			 */
   5991 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5992 		}
   5993 	}
   5994 
   5995 out:
   5996 	mutex_exit(txq->txq_lock);
   5997 }
   5998 
   5999 /*
   6000  * wm_82547_txfifo_bugchk:
   6001  *
   6002  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6003  *	prevent enqueueing a packet that would wrap around the end
    6004  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6005  *
   6006  *	We do this by checking the amount of space before the end
   6007  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6008  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6009  *	the internal FIFO pointers to the beginning, and restart
   6010  *	transmission on the interface.
   6011  */
   6012 #define	WM_FIFO_HDR		0x10
   6013 #define	WM_82547_PAD_LEN	0x3e0
   6014 static int
   6015 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6016 {
   6017 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6018 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6019 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6020 
   6021 	/* Just return if already stalled. */
   6022 	if (txq->txq_fifo_stall)
   6023 		return 1;
   6024 
   6025 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6026 		/* Stall only occurs in half-duplex mode. */
   6027 		goto send_packet;
   6028 	}
   6029 
   6030 	if (len >= WM_82547_PAD_LEN + space) {
   6031 		txq->txq_fifo_stall = 1;
   6032 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6033 		return 1;
   6034 	}
   6035 
   6036  send_packet:
   6037 	txq->txq_fifo_head += len;
   6038 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6039 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6040 
   6041 	return 0;
   6042 }
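
/*
 * Worked example (illustrative only): for a 1514-byte frame,
 * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536.  With
 * WM_82547_PAD_LEN == 0x3e0 (992), the stall path is taken in
 * half-duplex mode whenever the space left before the FIFO wrap is
 * 544 bytes or less, since then 1536 >= 992 + space.
 */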
   6043 
   6044 static int
   6045 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6046 {
   6047 	int error;
   6048 
   6049 	/*
   6050 	 * Allocate the control data structures, and create and load the
   6051 	 * DMA map for it.
   6052 	 *
   6053 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6054 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6055 	 * both sets within the same 4G segment.
   6056 	 */
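	/*
	 * The 4 GiB "boundary" argument to bus_dmamem_alloc() below is
	 * what enforces this single-4G-segment requirement.
	 */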
   6057 	if (sc->sc_type < WM_T_82544)
   6058 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6059 	else
   6060 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6061 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6062 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6063 	else
   6064 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6065 
   6066 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6067 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6068 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6069 		aprint_error_dev(sc->sc_dev,
   6070 		    "unable to allocate TX control data, error = %d\n",
   6071 		    error);
   6072 		goto fail_0;
   6073 	}
   6074 
   6075 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6076 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6077 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6078 		aprint_error_dev(sc->sc_dev,
   6079 		    "unable to map TX control data, error = %d\n", error);
   6080 		goto fail_1;
   6081 	}
   6082 
   6083 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6084 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6085 		aprint_error_dev(sc->sc_dev,
   6086 		    "unable to create TX control data DMA map, error = %d\n",
   6087 		    error);
   6088 		goto fail_2;
   6089 	}
   6090 
   6091 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6092 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6093 		aprint_error_dev(sc->sc_dev,
   6094 		    "unable to load TX control data DMA map, error = %d\n",
   6095 		    error);
   6096 		goto fail_3;
   6097 	}
   6098 
   6099 	return 0;
   6100 
   6101  fail_3:
   6102 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6103  fail_2:
   6104 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6105 	    WM_TXDESCS_SIZE(txq));
   6106  fail_1:
   6107 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6108  fail_0:
   6109 	return error;
   6110 }
   6111 
   6112 static void
   6113 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6114 {
   6115 
   6116 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6117 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6118 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6119 	    WM_TXDESCS_SIZE(txq));
   6120 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6121 }
   6122 
   6123 static int
   6124 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6125 {
   6126 	int error;
   6127 	size_t rxq_descs_size;
   6128 
   6129 	/*
   6130 	 * Allocate the control data structures, and create and load the
   6131 	 * DMA map for it.
   6132 	 *
   6133 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6134 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6135 	 * both sets within the same 4G segment.
   6136 	 */
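	/*
	 * As on the TX side, the 4 GiB "boundary" argument to
	 * bus_dmamem_alloc() below keeps the descriptors within one
	 * 4G segment.
	 */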
   6137 	rxq->rxq_ndesc = WM_NRXDESC;
   6138 	if (sc->sc_type == WM_T_82574)
   6139 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6140 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6141 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6142 	else
   6143 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6144 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6145 
   6146 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6147 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6148 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6149 		aprint_error_dev(sc->sc_dev,
   6150 		    "unable to allocate RX control data, error = %d\n",
   6151 		    error);
   6152 		goto fail_0;
   6153 	}
   6154 
   6155 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6156 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6157 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6158 		aprint_error_dev(sc->sc_dev,
   6159 		    "unable to map RX control data, error = %d\n", error);
   6160 		goto fail_1;
   6161 	}
   6162 
   6163 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6164 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6165 		aprint_error_dev(sc->sc_dev,
   6166 		    "unable to create RX control data DMA map, error = %d\n",
   6167 		    error);
   6168 		goto fail_2;
   6169 	}
   6170 
   6171 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6172 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6173 		aprint_error_dev(sc->sc_dev,
   6174 		    "unable to load RX control data DMA map, error = %d\n",
   6175 		    error);
   6176 		goto fail_3;
   6177 	}
   6178 
   6179 	return 0;
   6180 
   6181  fail_3:
   6182 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6183  fail_2:
   6184 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6185 	    rxq_descs_size);
   6186  fail_1:
   6187 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6188  fail_0:
   6189 	return error;
   6190 }
   6191 
   6192 static void
   6193 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6194 {
   6195 
   6196 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6197 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6198 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6199 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6200 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6201 }
   6202 
   6203 
   6204 static int
   6205 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6206 {
   6207 	int i, error;
   6208 
   6209 	/* Create the transmit buffer DMA maps. */
   6210 	WM_TXQUEUELEN(txq) =
   6211 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6212 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6213 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6214 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6215 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6216 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6217 			aprint_error_dev(sc->sc_dev,
   6218 			    "unable to create Tx DMA map %d, error = %d\n",
   6219 			    i, error);
   6220 			goto fail;
   6221 		}
   6222 	}
   6223 
   6224 	return 0;
   6225 
   6226  fail:
   6227 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6228 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6229 			bus_dmamap_destroy(sc->sc_dmat,
   6230 			    txq->txq_soft[i].txs_dmamap);
   6231 	}
   6232 	return error;
   6233 }
   6234 
   6235 static void
   6236 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6237 {
   6238 	int i;
   6239 
   6240 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6241 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6242 			bus_dmamap_destroy(sc->sc_dmat,
   6243 			    txq->txq_soft[i].txs_dmamap);
   6244 	}
   6245 }
   6246 
   6247 static int
   6248 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6249 {
   6250 	int i, error;
   6251 
   6252 	/* Create the receive buffer DMA maps. */
   6253 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6254 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6255 			    MCLBYTES, 0, 0,
   6256 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6257 			aprint_error_dev(sc->sc_dev,
   6258 			    "unable to create Rx DMA map %d error = %d\n",
   6259 			    i, error);
   6260 			goto fail;
   6261 		}
   6262 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6263 	}
   6264 
   6265 	return 0;
   6266 
   6267  fail:
   6268 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6269 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6270 			bus_dmamap_destroy(sc->sc_dmat,
   6271 			    rxq->rxq_soft[i].rxs_dmamap);
   6272 	}
   6273 	return error;
   6274 }
   6275 
   6276 static void
   6277 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6278 {
   6279 	int i;
   6280 
   6281 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6282 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6283 			bus_dmamap_destroy(sc->sc_dmat,
   6284 			    rxq->rxq_soft[i].rxs_dmamap);
   6285 	}
   6286 }
   6287 
   6288 /*
    6289  * wm_alloc_txrx_queues:
   6290  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6291  */
   6292 static int
   6293 wm_alloc_txrx_queues(struct wm_softc *sc)
   6294 {
   6295 	int i, error, tx_done, rx_done;
   6296 
   6297 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6298 	    KM_SLEEP);
   6299 	if (sc->sc_queue == NULL) {
    6300 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6301 		error = ENOMEM;
   6302 		goto fail_0;
   6303 	}
   6304 
   6305 	/*
   6306 	 * For transmission
   6307 	 */
   6308 	error = 0;
   6309 	tx_done = 0;
   6310 	for (i = 0; i < sc->sc_nqueues; i++) {
   6311 #ifdef WM_EVENT_COUNTERS
   6312 		int j;
   6313 		const char *xname;
   6314 #endif
   6315 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6316 		txq->txq_sc = sc;
   6317 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6318 
   6319 		error = wm_alloc_tx_descs(sc, txq);
   6320 		if (error)
   6321 			break;
   6322 		error = wm_alloc_tx_buffer(sc, txq);
   6323 		if (error) {
   6324 			wm_free_tx_descs(sc, txq);
   6325 			break;
   6326 		}
   6327 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6328 		if (txq->txq_interq == NULL) {
   6329 			wm_free_tx_descs(sc, txq);
   6330 			wm_free_tx_buffer(sc, txq);
   6331 			error = ENOMEM;
   6332 			break;
   6333 		}
   6334 
   6335 #ifdef WM_EVENT_COUNTERS
   6336 		xname = device_xname(sc->sc_dev);
   6337 
   6338 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6339 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6340 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6341 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6342 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6343 
   6344 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6345 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6346 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6347 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6348 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6349 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6350 
   6351 		for (j = 0; j < WM_NTXSEGS; j++) {
   6352 			snprintf(txq->txq_txseg_evcnt_names[j],
   6353 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6354 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6355 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6356 		}
   6357 
   6358 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6359 
   6360 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6361 #endif /* WM_EVENT_COUNTERS */
   6362 
   6363 		tx_done++;
   6364 	}
   6365 	if (error)
   6366 		goto fail_1;
   6367 
   6368 	/*
    6369 	 * For receive
   6370 	 */
   6371 	error = 0;
   6372 	rx_done = 0;
   6373 	for (i = 0; i < sc->sc_nqueues; i++) {
   6374 #ifdef WM_EVENT_COUNTERS
   6375 		const char *xname;
   6376 #endif
   6377 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6378 		rxq->rxq_sc = sc;
   6379 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6380 
   6381 		error = wm_alloc_rx_descs(sc, rxq);
   6382 		if (error)
   6383 			break;
   6384 
   6385 		error = wm_alloc_rx_buffer(sc, rxq);
   6386 		if (error) {
   6387 			wm_free_rx_descs(sc, rxq);
   6388 			break;
   6389 		}
   6390 
   6391 #ifdef WM_EVENT_COUNTERS
   6392 		xname = device_xname(sc->sc_dev);
   6393 
   6394 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6395 
   6396 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6397 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6398 #endif /* WM_EVENT_COUNTERS */
   6399 
   6400 		rx_done++;
   6401 	}
   6402 	if (error)
   6403 		goto fail_2;
   6404 
   6405 	return 0;
   6406 
   6407  fail_2:
   6408 	for (i = 0; i < rx_done; i++) {
   6409 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6410 		wm_free_rx_buffer(sc, rxq);
   6411 		wm_free_rx_descs(sc, rxq);
   6412 		if (rxq->rxq_lock)
   6413 			mutex_obj_free(rxq->rxq_lock);
   6414 	}
   6415  fail_1:
   6416 	for (i = 0; i < tx_done; i++) {
   6417 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6418 		pcq_destroy(txq->txq_interq);
   6419 		wm_free_tx_buffer(sc, txq);
   6420 		wm_free_tx_descs(sc, txq);
   6421 		if (txq->txq_lock)
   6422 			mutex_obj_free(txq->txq_lock);
   6423 	}
   6424 
   6425 	kmem_free(sc->sc_queue,
   6426 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6427  fail_0:
   6428 	return error;
   6429 }
   6430 
   6431 /*
    6432  * wm_free_txrx_queues:
   6433  *	Free {tx,rx}descs and {tx,rx} buffers
   6434  */
   6435 static void
   6436 wm_free_txrx_queues(struct wm_softc *sc)
   6437 {
   6438 	int i;
   6439 
   6440 	for (i = 0; i < sc->sc_nqueues; i++) {
   6441 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6442 
   6443 #ifdef WM_EVENT_COUNTERS
   6444 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6445 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6446 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6447 #endif /* WM_EVENT_COUNTERS */
   6448 
   6449 		wm_free_rx_buffer(sc, rxq);
   6450 		wm_free_rx_descs(sc, rxq);
   6451 		if (rxq->rxq_lock)
   6452 			mutex_obj_free(rxq->rxq_lock);
   6453 	}
   6454 
   6455 	for (i = 0; i < sc->sc_nqueues; i++) {
   6456 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6457 		struct mbuf *m;
   6458 #ifdef WM_EVENT_COUNTERS
   6459 		int j;
   6460 
   6461 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6462 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6463 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6464 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6465 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6466 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6467 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6468 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6469 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6470 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6471 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6472 
   6473 		for (j = 0; j < WM_NTXSEGS; j++)
   6474 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6475 
   6476 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6477 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6478 #endif /* WM_EVENT_COUNTERS */
   6479 
   6480 		/* drain txq_interq */
   6481 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6482 			m_freem(m);
   6483 		pcq_destroy(txq->txq_interq);
   6484 
   6485 		wm_free_tx_buffer(sc, txq);
   6486 		wm_free_tx_descs(sc, txq);
   6487 		if (txq->txq_lock)
   6488 			mutex_obj_free(txq->txq_lock);
   6489 	}
   6490 
   6491 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6492 }
   6493 
   6494 static void
   6495 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6496 {
   6497 
   6498 	KASSERT(mutex_owned(txq->txq_lock));
   6499 
   6500 	/* Initialize the transmit descriptor ring. */
   6501 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6502 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6503 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6504 	txq->txq_free = WM_NTXDESC(txq);
   6505 	txq->txq_next = 0;
   6506 }
   6507 
   6508 static void
   6509 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6510     struct wm_txqueue *txq)
   6511 {
   6512 
   6513 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6514 		device_xname(sc->sc_dev), __func__));
   6515 	KASSERT(mutex_owned(txq->txq_lock));
   6516 
   6517 	if (sc->sc_type < WM_T_82543) {
   6518 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6519 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6520 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6521 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6522 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6523 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6524 	} else {
   6525 		int qid = wmq->wmq_id;
   6526 
   6527 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6528 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6529 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6530 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6531 
   6532 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6533 			/*
   6534 			 * Don't write TDT before TCTL.EN is set.
    6535 			 * See the documentation.
   6536 			 */
   6537 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6538 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6539 			    | TXDCTL_WTHRESH(0));
   6540 		else {
   6541 			/* XXX should update with AIM? */
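         			/*
         			 * The division by 4 assumes ITR values are kept
         			 * in 256 ns units while TIDV/TADV take 1.024 us
         			 * units.
         			 */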
   6542 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6543 			if (sc->sc_type >= WM_T_82540) {
   6544 				/* should be same */
   6545 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6546 			}
   6547 
   6548 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6549 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6550 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6551 		}
   6552 	}
   6553 }
   6554 
   6555 static void
   6556 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6557 {
   6558 	int i;
   6559 
   6560 	KASSERT(mutex_owned(txq->txq_lock));
   6561 
   6562 	/* Initialize the transmit job descriptors. */
   6563 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6564 		txq->txq_soft[i].txs_mbuf = NULL;
   6565 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6566 	txq->txq_snext = 0;
   6567 	txq->txq_sdirty = 0;
   6568 }
   6569 
   6570 static void
   6571 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6572     struct wm_txqueue *txq)
   6573 {
   6574 
   6575 	KASSERT(mutex_owned(txq->txq_lock));
   6576 
   6577 	/*
   6578 	 * Set up some register offsets that are different between
   6579 	 * the i82542 and the i82543 and later chips.
   6580 	 */
   6581 	if (sc->sc_type < WM_T_82543)
   6582 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6583 	else
   6584 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6585 
   6586 	wm_init_tx_descs(sc, txq);
   6587 	wm_init_tx_regs(sc, wmq, txq);
   6588 	wm_init_tx_buffer(sc, txq);
   6589 }
   6590 
   6591 static void
   6592 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6593     struct wm_rxqueue *rxq)
   6594 {
   6595 
   6596 	KASSERT(mutex_owned(rxq->rxq_lock));
   6597 
   6598 	/*
   6599 	 * Initialize the receive descriptor and receive job
   6600 	 * descriptor rings.
   6601 	 */
   6602 	if (sc->sc_type < WM_T_82543) {
   6603 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6604 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6605 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6606 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6607 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6608 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6609 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6610 
   6611 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6612 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6613 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6614 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6615 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6616 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6617 	} else {
   6618 		int qid = wmq->wmq_id;
   6619 
   6620 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6621 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6622 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6623 
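         		/*
         		 * SRRCTL.BSIZEPKT is in units of
         		 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES must be
         		 * a multiple of that unit; otherwise the buffer size
         		 * programmed below would be silently rounded down.
         		 */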
   6624 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6625 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6626 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6627 
   6628 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6629 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6630 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6631 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6632 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6633 			    | RXDCTL_WTHRESH(1));
   6634 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6635 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6636 		} else {
   6637 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6638 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6639 			/* XXX should update with AIM? */
   6640 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6641 			/* MUST be same */
   6642 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6643 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6644 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6645 		}
   6646 	}
   6647 }
   6648 
   6649 static int
   6650 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6651 {
   6652 	struct wm_rxsoft *rxs;
   6653 	int error, i;
   6654 
   6655 	KASSERT(mutex_owned(rxq->rxq_lock));
   6656 
   6657 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6658 		rxs = &rxq->rxq_soft[i];
   6659 		if (rxs->rxs_mbuf == NULL) {
   6660 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6661 				log(LOG_ERR, "%s: unable to allocate or map "
   6662 				    "rx buffer %d, error = %d\n",
   6663 				    device_xname(sc->sc_dev), i, error);
   6664 				/*
   6665 				 * XXX Should attempt to run with fewer receive
   6666 				 * XXX buffers instead of just failing.
   6667 				 */
   6668 				wm_rxdrain(rxq);
   6669 				return ENOMEM;
   6670 			}
   6671 		} else {
   6672 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6673 				wm_init_rxdesc(rxq, i);
   6674 			/*
    6675 			 * For 82575 and newer devices, the RX descriptors
    6676 			 * must be initialized after RCTL.EN is set in
    6677 			 * wm_set_filter().
   6678 			 */
   6679 		}
   6680 	}
   6681 	rxq->rxq_ptr = 0;
   6682 	rxq->rxq_discard = 0;
   6683 	WM_RXCHAIN_RESET(rxq);
   6684 
   6685 	return 0;
   6686 }
   6687 
   6688 static int
   6689 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6690     struct wm_rxqueue *rxq)
   6691 {
   6692 
   6693 	KASSERT(mutex_owned(rxq->rxq_lock));
   6694 
   6695 	/*
   6696 	 * Set up some register offsets that are different between
   6697 	 * the i82542 and the i82543 and later chips.
   6698 	 */
   6699 	if (sc->sc_type < WM_T_82543)
   6700 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6701 	else
   6702 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6703 
   6704 	wm_init_rx_regs(sc, wmq, rxq);
   6705 	return wm_init_rx_buffer(sc, rxq);
   6706 }
   6707 
   6708 /*
    6709  * wm_init_txrx_queues:
   6710  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6711  */
   6712 static int
   6713 wm_init_txrx_queues(struct wm_softc *sc)
   6714 {
   6715 	int i, error = 0;
   6716 
   6717 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6718 		device_xname(sc->sc_dev), __func__));
   6719 
   6720 	for (i = 0; i < sc->sc_nqueues; i++) {
   6721 		struct wm_queue *wmq = &sc->sc_queue[i];
   6722 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6723 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6724 
   6725 		/*
   6726 		 * TODO
    6727 		 * Currently, a constant value is used instead of AIM.
    6728 		 * Furthermore, the interrupt interval of a multiqueue
    6729 		 * setup, which uses polling mode, is less than the default
    6730 		 * value. More tuning and AIM are required.
   6731 		 */
   6732 		if (wm_is_using_multiqueue(sc))
   6733 			wmq->wmq_itr = 50;
   6734 		else
   6735 			wmq->wmq_itr = sc->sc_itr_init;
   6736 		wmq->wmq_set_itr = true;
   6737 
   6738 		mutex_enter(txq->txq_lock);
   6739 		wm_init_tx_queue(sc, wmq, txq);
   6740 		mutex_exit(txq->txq_lock);
   6741 
   6742 		mutex_enter(rxq->rxq_lock);
   6743 		error = wm_init_rx_queue(sc, wmq, rxq);
   6744 		mutex_exit(rxq->rxq_lock);
   6745 		if (error)
   6746 			break;
   6747 	}
   6748 
   6749 	return error;
   6750 }
   6751 
   6752 /*
   6753  * wm_tx_offload:
   6754  *
   6755  *	Set up TCP/IP checksumming parameters for the
   6756  *	specified packet.
   6757  */
   6758 static int
   6759 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6760     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6761 {
   6762 	struct mbuf *m0 = txs->txs_mbuf;
   6763 	struct livengood_tcpip_ctxdesc *t;
   6764 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6765 	uint32_t ipcse;
   6766 	struct ether_header *eh;
   6767 	int offset, iphl;
   6768 	uint8_t fields;
   6769 
   6770 	/*
   6771 	 * XXX It would be nice if the mbuf pkthdr had offset
   6772 	 * fields for the protocol headers.
   6773 	 */
   6774 
   6775 	eh = mtod(m0, struct ether_header *);
   6776 	switch (htons(eh->ether_type)) {
   6777 	case ETHERTYPE_IP:
   6778 	case ETHERTYPE_IPV6:
   6779 		offset = ETHER_HDR_LEN;
   6780 		break;
   6781 
   6782 	case ETHERTYPE_VLAN:
   6783 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6784 		break;
   6785 
   6786 	default:
   6787 		/*
   6788 		 * Don't support this protocol or encapsulation.
   6789 		 */
   6790 		*fieldsp = 0;
   6791 		*cmdp = 0;
   6792 		return 0;
   6793 	}
   6794 
   6795 	if ((m0->m_pkthdr.csum_flags &
   6796 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6797 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6798 	} else {
   6799 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6800 	}
   6801 	ipcse = offset + iphl - 1;
   6802 
   6803 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6804 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6805 	seg = 0;
   6806 	fields = 0;
   6807 
   6808 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6809 		int hlen = offset + iphl;
   6810 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6811 
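         		/*
         		 * For TSO the controller rewrites the IP and TCP headers
         		 * of every segment it emits, so the IP total length (or
         		 * IPv6 payload length) is cleared and the TCP checksum
         		 * field is pre-seeded with a pseudo-header checksum
         		 * computed with a zero length.  Both the slow path
         		 * (headers split across mbufs) and the fast path below
         		 * do exactly that.
         		 */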
   6812 		if (__predict_false(m0->m_len <
   6813 				    (hlen + sizeof(struct tcphdr)))) {
   6814 			/*
   6815 			 * TCP/IP headers are not in the first mbuf; we need
   6816 			 * to do this the slow and painful way.  Let's just
   6817 			 * hope this doesn't happen very often.
   6818 			 */
   6819 			struct tcphdr th;
   6820 
   6821 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6822 
   6823 			m_copydata(m0, hlen, sizeof(th), &th);
   6824 			if (v4) {
   6825 				struct ip ip;
   6826 
   6827 				m_copydata(m0, offset, sizeof(ip), &ip);
   6828 				ip.ip_len = 0;
   6829 				m_copyback(m0,
   6830 				    offset + offsetof(struct ip, ip_len),
   6831 				    sizeof(ip.ip_len), &ip.ip_len);
   6832 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6833 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6834 			} else {
   6835 				struct ip6_hdr ip6;
   6836 
   6837 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6838 				ip6.ip6_plen = 0;
   6839 				m_copyback(m0,
   6840 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6841 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6842 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6843 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6844 			}
   6845 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6846 			    sizeof(th.th_sum), &th.th_sum);
   6847 
   6848 			hlen += th.th_off << 2;
   6849 		} else {
   6850 			/*
   6851 			 * TCP/IP headers are in the first mbuf; we can do
   6852 			 * this the easy way.
   6853 			 */
   6854 			struct tcphdr *th;
   6855 
   6856 			if (v4) {
   6857 				struct ip *ip =
   6858 				    (void *)(mtod(m0, char *) + offset);
   6859 				th = (void *)(mtod(m0, char *) + hlen);
   6860 
   6861 				ip->ip_len = 0;
   6862 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6863 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6864 			} else {
   6865 				struct ip6_hdr *ip6 =
   6866 				    (void *)(mtod(m0, char *) + offset);
   6867 				th = (void *)(mtod(m0, char *) + hlen);
   6868 
   6869 				ip6->ip6_plen = 0;
   6870 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6871 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6872 			}
   6873 			hlen += th->th_off << 2;
   6874 		}
   6875 
   6876 		if (v4) {
   6877 			WM_Q_EVCNT_INCR(txq, txtso);
   6878 			cmdlen |= WTX_TCPIP_CMD_IP;
   6879 		} else {
   6880 			WM_Q_EVCNT_INCR(txq, txtso6);
   6881 			ipcse = 0;
   6882 		}
   6883 		cmd |= WTX_TCPIP_CMD_TSE;
   6884 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6885 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6886 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6887 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6888 	}
   6889 
   6890 	/*
   6891 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6892 	 * offload feature, if we load the context descriptor, we
   6893 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6894 	 */
   6895 
   6896 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6897 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6898 	    WTX_TCPIP_IPCSE(ipcse);
   6899 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6900 		WM_Q_EVCNT_INCR(txq, txipsum);
   6901 		fields |= WTX_IXSM;
   6902 	}
   6903 
   6904 	offset += iphl;
   6905 
   6906 	if (m0->m_pkthdr.csum_flags &
   6907 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6908 		WM_Q_EVCNT_INCR(txq, txtusum);
   6909 		fields |= WTX_TXSM;
   6910 		tucs = WTX_TCPIP_TUCSS(offset) |
   6911 		    WTX_TCPIP_TUCSO(offset +
   6912 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6913 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6914 	} else if ((m0->m_pkthdr.csum_flags &
   6915 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6916 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6917 		fields |= WTX_TXSM;
   6918 		tucs = WTX_TCPIP_TUCSS(offset) |
   6919 		    WTX_TCPIP_TUCSO(offset +
   6920 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6921 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6922 	} else {
   6923 		/* Just initialize it to a valid TCP context. */
   6924 		tucs = WTX_TCPIP_TUCSS(offset) |
   6925 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6926 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6927 	}
   6928 
   6929 	/*
    6930 	 * We don't have to write a context descriptor for every packet,
    6931 	 * except on the 82574. On the 82574, we must write a context
    6932 	 * descriptor for every packet when we use two descriptor queues.
    6933 	 * Writing a context descriptor for every packet adds overhead,
    6934 	 * but it does not cause problems.
   6935 	 */
   6936 	/* Fill in the context descriptor. */
   6937 	t = (struct livengood_tcpip_ctxdesc *)
   6938 	    &txq->txq_descs[txq->txq_next];
   6939 	t->tcpip_ipcs = htole32(ipcs);
   6940 	t->tcpip_tucs = htole32(tucs);
   6941 	t->tcpip_cmdlen = htole32(cmdlen);
   6942 	t->tcpip_seg = htole32(seg);
   6943 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6944 
   6945 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6946 	txs->txs_ndesc++;
   6947 
   6948 	*cmdp = cmd;
   6949 	*fieldsp = fields;
   6950 
   6951 	return 0;
   6952 }
   6953 
   6954 static inline int
   6955 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6956 {
   6957 	struct wm_softc *sc = ifp->if_softc;
   6958 	u_int cpuid = cpu_index(curcpu());
   6959 
   6960 	/*
    6961 	 * Currently, a simple distribution strategy.
    6962 	 * TODO:
    6963 	 * distribute by flowid (RSS hash value).
   6964 	 */
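         	/*
         	 * Adding ncpu keeps the dividend non-negative when
         	 * sc_affinity_offset exceeds cpuid.  With (hypothetical
         	 * values) ncpu = 4, sc_affinity_offset = 3 and sc_nqueues = 2,
         	 * CPU 0 maps to queue (0 + 4 - 3) % 2 = 1 and CPU 1 to
         	 * (1 + 4 - 3) % 2 = 0.
         	 */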
    6965 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6966 }
   6967 
   6968 /*
   6969  * wm_start:		[ifnet interface function]
   6970  *
   6971  *	Start packet transmission on the interface.
   6972  */
   6973 static void
   6974 wm_start(struct ifnet *ifp)
   6975 {
   6976 	struct wm_softc *sc = ifp->if_softc;
   6977 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6978 
   6979 #ifdef WM_MPSAFE
   6980 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6981 #endif
   6982 	/*
   6983 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6984 	 */
   6985 
   6986 	mutex_enter(txq->txq_lock);
   6987 	if (!txq->txq_stopping)
   6988 		wm_start_locked(ifp);
   6989 	mutex_exit(txq->txq_lock);
   6990 }
   6991 
   6992 static void
   6993 wm_start_locked(struct ifnet *ifp)
   6994 {
   6995 	struct wm_softc *sc = ifp->if_softc;
   6996 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6997 
   6998 	wm_send_common_locked(ifp, txq, false);
   6999 }
   7000 
   7001 static int
   7002 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7003 {
   7004 	int qid;
   7005 	struct wm_softc *sc = ifp->if_softc;
   7006 	struct wm_txqueue *txq;
   7007 
   7008 	qid = wm_select_txqueue(ifp, m);
   7009 	txq = &sc->sc_queue[qid].wmq_txq;
   7010 
   7011 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7012 		m_freem(m);
   7013 		WM_Q_EVCNT_INCR(txq, txdrop);
   7014 		return ENOBUFS;
   7015 	}
   7016 
   7017 	/*
   7018 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7019 	 */
   7020 	ifp->if_obytes += m->m_pkthdr.len;
   7021 	if (m->m_flags & M_MCAST)
   7022 		ifp->if_omcasts++;
   7023 
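         	/*
         	 * If mutex_tryenter() fails here, the packet queued above is
         	 * still dequeued later by wm_deferred_start_locked(); see the
         	 * longer explanation in wm_nq_transmit() below.
         	 */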
   7024 	if (mutex_tryenter(txq->txq_lock)) {
   7025 		if (!txq->txq_stopping)
   7026 			wm_transmit_locked(ifp, txq);
   7027 		mutex_exit(txq->txq_lock);
   7028 	}
   7029 
   7030 	return 0;
   7031 }
   7032 
   7033 static void
   7034 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7035 {
   7036 
   7037 	wm_send_common_locked(ifp, txq, true);
   7038 }
   7039 
   7040 static void
   7041 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7042     bool is_transmit)
   7043 {
   7044 	struct wm_softc *sc = ifp->if_softc;
   7045 	struct mbuf *m0;
   7046 	struct m_tag *mtag;
   7047 	struct wm_txsoft *txs;
   7048 	bus_dmamap_t dmamap;
   7049 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7050 	bus_addr_t curaddr;
   7051 	bus_size_t seglen, curlen;
   7052 	uint32_t cksumcmd;
   7053 	uint8_t cksumfields;
   7054 
   7055 	KASSERT(mutex_owned(txq->txq_lock));
   7056 
   7057 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7058 		return;
   7059 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7060 		return;
   7061 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7062 		return;
   7063 
   7064 	/* Remember the previous number of free descriptors. */
   7065 	ofree = txq->txq_free;
   7066 
   7067 	/*
   7068 	 * Loop through the send queue, setting up transmit descriptors
   7069 	 * until we drain the queue, or use up all available transmit
   7070 	 * descriptors.
   7071 	 */
   7072 	for (;;) {
   7073 		m0 = NULL;
   7074 
   7075 		/* Get a work queue entry. */
   7076 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7077 			wm_txeof(sc, txq);
   7078 			if (txq->txq_sfree == 0) {
   7079 				DPRINTF(WM_DEBUG_TX,
   7080 				    ("%s: TX: no free job descriptors\n",
   7081 					device_xname(sc->sc_dev)));
   7082 				WM_Q_EVCNT_INCR(txq, txsstall);
   7083 				break;
   7084 			}
   7085 		}
   7086 
   7087 		/* Grab a packet off the queue. */
   7088 		if (is_transmit)
   7089 			m0 = pcq_get(txq->txq_interq);
   7090 		else
   7091 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7092 		if (m0 == NULL)
   7093 			break;
   7094 
   7095 		DPRINTF(WM_DEBUG_TX,
   7096 		    ("%s: TX: have packet to transmit: %p\n",
   7097 		    device_xname(sc->sc_dev), m0));
   7098 
   7099 		txs = &txq->txq_soft[txq->txq_snext];
   7100 		dmamap = txs->txs_dmamap;
   7101 
   7102 		use_tso = (m0->m_pkthdr.csum_flags &
   7103 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7104 
   7105 		/*
   7106 		 * So says the Linux driver:
   7107 		 * The controller does a simple calculation to make sure
   7108 		 * there is enough room in the FIFO before initiating the
   7109 		 * DMA for each buffer.  The calc is:
   7110 		 *	4 = ceil(buffer len / MSS)
   7111 		 * To make sure we don't overrun the FIFO, adjust the max
   7112 		 * buffer len if the MSS drops.
   7113 		 */
   7114 		dmamap->dm_maxsegsz =
   7115 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7116 		    ? m0->m_pkthdr.segsz << 2
   7117 		    : WTX_MAX_LEN;
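         		/*
         		 * E.g. (hypothetical MSS): with segsz = 1448, each DMA
         		 * segment is capped at 4 * 1448 = 5792 bytes, satisfying
         		 * the ceil(buffer len / MSS) <= 4 rule quoted above.
         		 */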
   7118 
   7119 		/*
   7120 		 * Load the DMA map.  If this fails, the packet either
   7121 		 * didn't fit in the allotted number of segments, or we
   7122 		 * were short on resources.  For the too-many-segments
   7123 		 * case, we simply report an error and drop the packet,
   7124 		 * since we can't sanely copy a jumbo packet to a single
   7125 		 * buffer.
   7126 		 */
   7127 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7128 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7129 		if (error) {
   7130 			if (error == EFBIG) {
   7131 				WM_Q_EVCNT_INCR(txq, txdrop);
   7132 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7133 				    "DMA segments, dropping...\n",
   7134 				    device_xname(sc->sc_dev));
   7135 				wm_dump_mbuf_chain(sc, m0);
   7136 				m_freem(m0);
   7137 				continue;
   7138 			}
    7139 			/* Short on resources, just stop for now. */
   7140 			DPRINTF(WM_DEBUG_TX,
   7141 			    ("%s: TX: dmamap load failed: %d\n",
   7142 			    device_xname(sc->sc_dev), error));
   7143 			break;
   7144 		}
   7145 
   7146 		segs_needed = dmamap->dm_nsegs;
   7147 		if (use_tso) {
   7148 			/* For sentinel descriptor; see below. */
   7149 			segs_needed++;
   7150 		}
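         		/*
         		 * The extra descriptor reserved above is produced further
         		 * down by shaving 4 bytes off the final TSO segment so
         		 * that those bytes land in a descriptor of their own.
         		 */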
   7151 
   7152 		/*
   7153 		 * Ensure we have enough descriptors free to describe
   7154 		 * the packet.  Note, we always reserve one descriptor
   7155 		 * at the end of the ring due to the semantics of the
   7156 		 * TDT register, plus one more in the event we need
   7157 		 * to load offload context.
   7158 		 */
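         		/*
         		 * Never letting the ring fill completely also keeps
         		 * head and tail unambiguous: the chip treats TDH == TDT
         		 * as an empty ring, so a full ring would look empty.
         		 */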
   7159 		if (segs_needed > txq->txq_free - 2) {
   7160 			/*
   7161 			 * Not enough free descriptors to transmit this
   7162 			 * packet.  We haven't committed anything yet,
   7163 			 * so just unload the DMA map, put the packet
   7164 			 * pack on the queue, and punt.  Notify the upper
   7165 			 * layer that there are no more slots left.
   7166 			 */
   7167 			DPRINTF(WM_DEBUG_TX,
   7168 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7169 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7170 			    segs_needed, txq->txq_free - 1));
   7171 			if (!is_transmit)
   7172 				ifp->if_flags |= IFF_OACTIVE;
   7173 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7174 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7175 			WM_Q_EVCNT_INCR(txq, txdstall);
   7176 			break;
   7177 		}
   7178 
   7179 		/*
   7180 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7181 		 * once we know we can transmit the packet, since we
   7182 		 * do some internal FIFO space accounting here.
   7183 		 */
   7184 		if (sc->sc_type == WM_T_82547 &&
   7185 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7186 			DPRINTF(WM_DEBUG_TX,
   7187 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7188 			    device_xname(sc->sc_dev)));
   7189 			if (!is_transmit)
   7190 				ifp->if_flags |= IFF_OACTIVE;
   7191 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7192 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7193 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7194 			break;
   7195 		}
   7196 
   7197 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7198 
   7199 		DPRINTF(WM_DEBUG_TX,
   7200 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7201 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7202 
   7203 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7204 
   7205 		/*
   7206 		 * Store a pointer to the packet so that we can free it
   7207 		 * later.
   7208 		 *
   7209 		 * Initially, we consider the number of descriptors the
   7210 		 * packet uses the number of DMA segments.  This may be
   7211 		 * incremented by 1 if we do checksum offload (a descriptor
   7212 		 * is used to set the checksum context).
   7213 		 */
   7214 		txs->txs_mbuf = m0;
   7215 		txs->txs_firstdesc = txq->txq_next;
   7216 		txs->txs_ndesc = segs_needed;
   7217 
   7218 		/* Set up offload parameters for this packet. */
   7219 		if (m0->m_pkthdr.csum_flags &
   7220 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7221 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7222 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7223 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7224 					  &cksumfields) != 0) {
   7225 				/* Error message already displayed. */
   7226 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7227 				continue;
   7228 			}
   7229 		} else {
   7230 			cksumcmd = 0;
   7231 			cksumfields = 0;
   7232 		}
   7233 
   7234 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7235 
   7236 		/* Sync the DMA map. */
   7237 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7238 		    BUS_DMASYNC_PREWRITE);
   7239 
   7240 		/* Initialize the transmit descriptor. */
   7241 		for (nexttx = txq->txq_next, seg = 0;
   7242 		     seg < dmamap->dm_nsegs; seg++) {
   7243 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7244 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7245 			     seglen != 0;
   7246 			     curaddr += curlen, seglen -= curlen,
   7247 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7248 				curlen = seglen;
   7249 
   7250 				/*
   7251 				 * So says the Linux driver:
   7252 				 * Work around for premature descriptor
   7253 				 * write-backs in TSO mode.  Append a
   7254 				 * 4-byte sentinel descriptor.
   7255 				 */
   7256 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7257 				    curlen > 8)
   7258 					curlen -= 4;
   7259 
   7260 				wm_set_dma_addr(
   7261 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7262 				txq->txq_descs[nexttx].wtx_cmdlen
   7263 				    = htole32(cksumcmd | curlen);
   7264 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7265 				    = 0;
   7266 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7267 				    = cksumfields;
   7268 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7269 				lasttx = nexttx;
   7270 
   7271 				DPRINTF(WM_DEBUG_TX,
   7272 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7273 				     "len %#04zx\n",
   7274 				    device_xname(sc->sc_dev), nexttx,
   7275 				    (uint64_t)curaddr, curlen));
   7276 			}
   7277 		}
   7278 
   7279 		KASSERT(lasttx != -1);
   7280 
   7281 		/*
   7282 		 * Set up the command byte on the last descriptor of
   7283 		 * the packet.  If we're in the interrupt delay window,
   7284 		 * delay the interrupt.
   7285 		 */
   7286 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7287 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7288 
   7289 		/*
   7290 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7291 		 * up the descriptor to encapsulate the packet for us.
   7292 		 *
   7293 		 * This is only valid on the last descriptor of the packet.
   7294 		 */
   7295 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7296 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7297 			    htole32(WTX_CMD_VLE);
   7298 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7299 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7300 		}
   7301 
   7302 		txs->txs_lastdesc = lasttx;
   7303 
   7304 		DPRINTF(WM_DEBUG_TX,
   7305 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7306 		    device_xname(sc->sc_dev),
   7307 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7308 
   7309 		/* Sync the descriptors we're using. */
   7310 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7311 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7312 
   7313 		/* Give the packet to the chip. */
   7314 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7315 
   7316 		DPRINTF(WM_DEBUG_TX,
   7317 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7318 
   7319 		DPRINTF(WM_DEBUG_TX,
   7320 		    ("%s: TX: finished transmitting packet, job %d\n",
   7321 		    device_xname(sc->sc_dev), txq->txq_snext));
   7322 
   7323 		/* Advance the tx pointer. */
   7324 		txq->txq_free -= txs->txs_ndesc;
   7325 		txq->txq_next = nexttx;
   7326 
   7327 		txq->txq_sfree--;
   7328 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7329 
   7330 		/* Pass the packet to any BPF listeners. */
   7331 		bpf_mtap(ifp, m0);
   7332 	}
   7333 
   7334 	if (m0 != NULL) {
   7335 		if (!is_transmit)
   7336 			ifp->if_flags |= IFF_OACTIVE;
   7337 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7338 		WM_Q_EVCNT_INCR(txq, txdrop);
   7339 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7340 			__func__));
   7341 		m_freem(m0);
   7342 	}
   7343 
   7344 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7345 		/* No more slots; notify upper layer. */
   7346 		if (!is_transmit)
   7347 			ifp->if_flags |= IFF_OACTIVE;
   7348 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7349 	}
   7350 
   7351 	if (txq->txq_free != ofree) {
   7352 		/* Set a watchdog timer in case the chip flakes out. */
   7353 		ifp->if_timer = 5;
   7354 	}
   7355 }
   7356 
   7357 /*
   7358  * wm_nq_tx_offload:
   7359  *
   7360  *	Set up TCP/IP checksumming parameters for the
   7361  *	specified packet, for NEWQUEUE devices
   7362  */
   7363 static int
   7364 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7365     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7366 {
   7367 	struct mbuf *m0 = txs->txs_mbuf;
   7368 	struct m_tag *mtag;
   7369 	uint32_t vl_len, mssidx, cmdc;
   7370 	struct ether_header *eh;
   7371 	int offset, iphl;
   7372 
   7373 	/*
   7374 	 * XXX It would be nice if the mbuf pkthdr had offset
   7375 	 * fields for the protocol headers.
   7376 	 */
   7377 	*cmdlenp = 0;
   7378 	*fieldsp = 0;
   7379 
   7380 	eh = mtod(m0, struct ether_header *);
   7381 	switch (htons(eh->ether_type)) {
   7382 	case ETHERTYPE_IP:
   7383 	case ETHERTYPE_IPV6:
   7384 		offset = ETHER_HDR_LEN;
   7385 		break;
   7386 
   7387 	case ETHERTYPE_VLAN:
   7388 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7389 		break;
   7390 
   7391 	default:
   7392 		/* Don't support this protocol or encapsulation. */
   7393 		*do_csum = false;
   7394 		return 0;
   7395 	}
   7396 	*do_csum = true;
   7397 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7398 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7399 
   7400 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7401 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7402 
   7403 	if ((m0->m_pkthdr.csum_flags &
   7404 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7405 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7406 	} else {
   7407 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7408 	}
   7409 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7410 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7411 
   7412 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7413 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7414 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7415 		*cmdlenp |= NQTX_CMD_VLE;
   7416 	}
   7417 
   7418 	mssidx = 0;
   7419 
   7420 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7421 		int hlen = offset + iphl;
   7422 		int tcp_hlen;
   7423 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7424 
   7425 		if (__predict_false(m0->m_len <
   7426 				    (hlen + sizeof(struct tcphdr)))) {
   7427 			/*
   7428 			 * TCP/IP headers are not in the first mbuf; we need
   7429 			 * to do this the slow and painful way.  Let's just
   7430 			 * hope this doesn't happen very often.
   7431 			 */
   7432 			struct tcphdr th;
   7433 
   7434 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7435 
   7436 			m_copydata(m0, hlen, sizeof(th), &th);
   7437 			if (v4) {
   7438 				struct ip ip;
   7439 
   7440 				m_copydata(m0, offset, sizeof(ip), &ip);
   7441 				ip.ip_len = 0;
   7442 				m_copyback(m0,
   7443 				    offset + offsetof(struct ip, ip_len),
   7444 				    sizeof(ip.ip_len), &ip.ip_len);
   7445 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7446 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7447 			} else {
   7448 				struct ip6_hdr ip6;
   7449 
   7450 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7451 				ip6.ip6_plen = 0;
   7452 				m_copyback(m0,
   7453 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7454 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7455 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7456 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7457 			}
   7458 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7459 			    sizeof(th.th_sum), &th.th_sum);
   7460 
   7461 			tcp_hlen = th.th_off << 2;
   7462 		} else {
   7463 			/*
   7464 			 * TCP/IP headers are in the first mbuf; we can do
   7465 			 * this the easy way.
   7466 			 */
   7467 			struct tcphdr *th;
   7468 
   7469 			if (v4) {
   7470 				struct ip *ip =
   7471 				    (void *)(mtod(m0, char *) + offset);
   7472 				th = (void *)(mtod(m0, char *) + hlen);
   7473 
   7474 				ip->ip_len = 0;
   7475 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7476 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7477 			} else {
   7478 				struct ip6_hdr *ip6 =
   7479 				    (void *)(mtod(m0, char *) + offset);
   7480 				th = (void *)(mtod(m0, char *) + hlen);
   7481 
   7482 				ip6->ip6_plen = 0;
   7483 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7484 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7485 			}
   7486 			tcp_hlen = th->th_off << 2;
   7487 		}
   7488 		hlen += tcp_hlen;
   7489 		*cmdlenp |= NQTX_CMD_TSE;
   7490 
   7491 		if (v4) {
   7492 			WM_Q_EVCNT_INCR(txq, txtso);
   7493 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7494 		} else {
   7495 			WM_Q_EVCNT_INCR(txq, txtso6);
   7496 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7497 		}
   7498 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7499 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7500 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7501 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7502 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7503 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7504 	} else {
   7505 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7506 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7507 	}
   7508 
   7509 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7510 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7511 		cmdc |= NQTXC_CMD_IP4;
   7512 	}
   7513 
   7514 	if (m0->m_pkthdr.csum_flags &
   7515 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7516 		WM_Q_EVCNT_INCR(txq, txtusum);
   7517 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7518 			cmdc |= NQTXC_CMD_TCP;
   7519 		} else {
   7520 			cmdc |= NQTXC_CMD_UDP;
   7521 		}
   7522 		cmdc |= NQTXC_CMD_IP4;
   7523 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7524 	}
   7525 	if (m0->m_pkthdr.csum_flags &
   7526 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7527 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7528 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7529 			cmdc |= NQTXC_CMD_TCP;
   7530 		} else {
   7531 			cmdc |= NQTXC_CMD_UDP;
   7532 		}
   7533 		cmdc |= NQTXC_CMD_IP6;
   7534 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7535 	}
   7536 
   7537 	/*
    7538 	 * We don't have to write a context descriptor for every packet to
    7539 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7540 	 * I210 and I211. It is enough to write once per Tx queue for these
    7541 	 * controllers.
    7542 	 * Writing a context descriptor for every packet adds overhead,
    7543 	 * but it does not cause problems.
   7544 	 */
   7545 	/* Fill in the context descriptor. */
   7546 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7547 	    htole32(vl_len);
   7548 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7549 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7550 	    htole32(cmdc);
   7551 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7552 	    htole32(mssidx);
   7553 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7554 	DPRINTF(WM_DEBUG_TX,
   7555 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7556 	    txq->txq_next, 0, vl_len));
   7557 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7558 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7559 	txs->txs_ndesc++;
   7560 	return 0;
   7561 }
   7562 
   7563 /*
   7564  * wm_nq_start:		[ifnet interface function]
   7565  *
   7566  *	Start packet transmission on the interface for NEWQUEUE devices
   7567  */
   7568 static void
   7569 wm_nq_start(struct ifnet *ifp)
   7570 {
   7571 	struct wm_softc *sc = ifp->if_softc;
   7572 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7573 
   7574 #ifdef WM_MPSAFE
   7575 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7576 #endif
   7577 	/*
   7578 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7579 	 */
   7580 
   7581 	mutex_enter(txq->txq_lock);
   7582 	if (!txq->txq_stopping)
   7583 		wm_nq_start_locked(ifp);
   7584 	mutex_exit(txq->txq_lock);
   7585 }
   7586 
   7587 static void
   7588 wm_nq_start_locked(struct ifnet *ifp)
   7589 {
   7590 	struct wm_softc *sc = ifp->if_softc;
   7591 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7592 
   7593 	wm_nq_send_common_locked(ifp, txq, false);
   7594 }
   7595 
   7596 static int
   7597 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7598 {
   7599 	int qid;
   7600 	struct wm_softc *sc = ifp->if_softc;
   7601 	struct wm_txqueue *txq;
   7602 
   7603 	qid = wm_select_txqueue(ifp, m);
   7604 	txq = &sc->sc_queue[qid].wmq_txq;
   7605 
   7606 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7607 		m_freem(m);
   7608 		WM_Q_EVCNT_INCR(txq, txdrop);
   7609 		return ENOBUFS;
   7610 	}
   7611 
   7612 	/*
   7613 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7614 	 */
   7615 	ifp->if_obytes += m->m_pkthdr.len;
   7616 	if (m->m_flags & M_MCAST)
   7617 		ifp->if_omcasts++;
   7618 
   7619 	/*
    7620 	 * There are two situations in which this mutex_tryenter() can
    7621 	 * fail at run time:
    7622 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7623 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    7624 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7625 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7626 	 * In case (2), the last packet enqueued to txq->txq_interq is
    7627 	 * likewise dequeued by wm_deferred_start_locked(), so it is not stuck either.
   7628 	 */
   7629 	if (mutex_tryenter(txq->txq_lock)) {
   7630 		if (!txq->txq_stopping)
   7631 			wm_nq_transmit_locked(ifp, txq);
   7632 		mutex_exit(txq->txq_lock);
   7633 	}
   7634 
   7635 	return 0;
   7636 }
   7637 
   7638 static void
   7639 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7640 {
   7641 
   7642 	wm_nq_send_common_locked(ifp, txq, true);
   7643 }
   7644 
   7645 static void
   7646 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7647     bool is_transmit)
   7648 {
   7649 	struct wm_softc *sc = ifp->if_softc;
   7650 	struct mbuf *m0;
   7651 	struct m_tag *mtag;
   7652 	struct wm_txsoft *txs;
   7653 	bus_dmamap_t dmamap;
   7654 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7655 	bool do_csum, sent;
   7656 
   7657 	KASSERT(mutex_owned(txq->txq_lock));
   7658 
   7659 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7660 		return;
   7661 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7662 		return;
   7663 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7664 		return;
   7665 
   7666 	sent = false;
   7667 
   7668 	/*
   7669 	 * Loop through the send queue, setting up transmit descriptors
   7670 	 * until we drain the queue, or use up all available transmit
   7671 	 * descriptors.
   7672 	 */
   7673 	for (;;) {
   7674 		m0 = NULL;
   7675 
   7676 		/* Get a work queue entry. */
   7677 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7678 			wm_txeof(sc, txq);
   7679 			if (txq->txq_sfree == 0) {
   7680 				DPRINTF(WM_DEBUG_TX,
   7681 				    ("%s: TX: no free job descriptors\n",
   7682 					device_xname(sc->sc_dev)));
   7683 				WM_Q_EVCNT_INCR(txq, txsstall);
   7684 				break;
   7685 			}
   7686 		}
   7687 
   7688 		/* Grab a packet off the queue. */
   7689 		if (is_transmit)
   7690 			m0 = pcq_get(txq->txq_interq);
   7691 		else
   7692 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7693 		if (m0 == NULL)
   7694 			break;
   7695 
   7696 		DPRINTF(WM_DEBUG_TX,
   7697 		    ("%s: TX: have packet to transmit: %p\n",
   7698 		    device_xname(sc->sc_dev), m0));
   7699 
   7700 		txs = &txq->txq_soft[txq->txq_snext];
   7701 		dmamap = txs->txs_dmamap;
   7702 
   7703 		/*
   7704 		 * Load the DMA map.  If this fails, the packet either
   7705 		 * didn't fit in the allotted number of segments, or we
   7706 		 * were short on resources.  For the too-many-segments
   7707 		 * case, we simply report an error and drop the packet,
   7708 		 * since we can't sanely copy a jumbo packet to a single
   7709 		 * buffer.
   7710 		 */
   7711 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7712 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7713 		if (error) {
   7714 			if (error == EFBIG) {
   7715 				WM_Q_EVCNT_INCR(txq, txdrop);
   7716 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7717 				    "DMA segments, dropping...\n",
   7718 				    device_xname(sc->sc_dev));
   7719 				wm_dump_mbuf_chain(sc, m0);
   7720 				m_freem(m0);
   7721 				continue;
   7722 			}
   7723 			/* Short on resources, just stop for now. */
   7724 			DPRINTF(WM_DEBUG_TX,
   7725 			    ("%s: TX: dmamap load failed: %d\n",
   7726 			    device_xname(sc->sc_dev), error));
   7727 			break;
   7728 		}
   7729 
   7730 		segs_needed = dmamap->dm_nsegs;
   7731 
   7732 		/*
   7733 		 * Ensure we have enough descriptors free to describe
   7734 		 * the packet.  Note, we always reserve one descriptor
   7735 		 * at the end of the ring due to the semantics of the
   7736 		 * TDT register, plus one more in the event we need
   7737 		 * to load offload context.
   7738 		 */
   7739 		if (segs_needed > txq->txq_free - 2) {
   7740 			/*
   7741 			 * Not enough free descriptors to transmit this
   7742 			 * packet.  We haven't committed anything yet,
   7743 			 * so just unload the DMA map, put the packet
   7744 			 * pack on the queue, and punt.  Notify the upper
   7745 			 * layer that there are no more slots left.
   7746 			 */
   7747 			DPRINTF(WM_DEBUG_TX,
   7748 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7749 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7750 			    segs_needed, txq->txq_free - 1));
   7751 			if (!is_transmit)
   7752 				ifp->if_flags |= IFF_OACTIVE;
   7753 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7754 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7755 			WM_Q_EVCNT_INCR(txq, txdstall);
   7756 			break;
   7757 		}
   7758 
   7759 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7760 
   7761 		DPRINTF(WM_DEBUG_TX,
   7762 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7763 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7764 
   7765 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7766 
   7767 		/*
   7768 		 * Store a pointer to the packet so that we can free it
   7769 		 * later.
   7770 		 *
   7771 		 * Initially, we consider the number of descriptors the
   7772 		 * packet uses the number of DMA segments.  This may be
   7773 		 * incremented by 1 if we do checksum offload (a descriptor
   7774 		 * is used to set the checksum context).
   7775 		 */
   7776 		txs->txs_mbuf = m0;
   7777 		txs->txs_firstdesc = txq->txq_next;
   7778 		txs->txs_ndesc = segs_needed;
   7779 
   7780 		/* Set up offload parameters for this packet. */
   7781 		uint32_t cmdlen, fields, dcmdlen;
   7782 		if (m0->m_pkthdr.csum_flags &
   7783 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7784 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7785 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7786 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7787 			    &do_csum) != 0) {
   7788 				/* Error message already displayed. */
   7789 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7790 				continue;
   7791 			}
   7792 		} else {
   7793 			do_csum = false;
   7794 			cmdlen = 0;
   7795 			fields = 0;
   7796 		}
   7797 
   7798 		/* Sync the DMA map. */
   7799 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7800 		    BUS_DMASYNC_PREWRITE);
   7801 
   7802 		/* Initialize the first transmit descriptor. */
   7803 		nexttx = txq->txq_next;
   7804 		if (!do_csum) {
   7805 			/* setup a legacy descriptor */
   7806 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7807 			    dmamap->dm_segs[0].ds_addr);
   7808 			txq->txq_descs[nexttx].wtx_cmdlen =
   7809 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7810 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7811 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7812 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7813 			    NULL) {
   7814 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7815 				    htole32(WTX_CMD_VLE);
   7816 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7817 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7818 			} else {
   7819 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7820 			}
   7821 			dcmdlen = 0;
   7822 		} else {
   7823 			/* setup an advanced data descriptor */
   7824 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7825 			    htole64(dmamap->dm_segs[0].ds_addr);
   7826 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7827 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7828 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   7829 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7830 			    htole32(fields);
   7831 			DPRINTF(WM_DEBUG_TX,
   7832 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7833 			    device_xname(sc->sc_dev), nexttx,
   7834 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7835 			DPRINTF(WM_DEBUG_TX,
   7836 			    ("\t 0x%08x%08x\n", fields,
   7837 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7838 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7839 		}
   7840 
   7841 		lasttx = nexttx;
   7842 		nexttx = WM_NEXTTX(txq, nexttx);
   7843 		/*
    7844 		 * Fill in the next descriptors. The legacy and advanced
    7845 		 * formats are the same from here on.
   7846 		 */
   7847 		for (seg = 1; seg < dmamap->dm_nsegs;
   7848 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7849 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7850 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7851 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7852 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7853 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7854 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7855 			lasttx = nexttx;
   7856 
   7857 			DPRINTF(WM_DEBUG_TX,
   7858 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7859 			     "len %#04zx\n",
   7860 			    device_xname(sc->sc_dev), nexttx,
   7861 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7862 			    dmamap->dm_segs[seg].ds_len));
   7863 		}
   7864 
   7865 		KASSERT(lasttx != -1);
   7866 
   7867 		/*
   7868 		 * Set up the command byte on the last descriptor of
   7869 		 * the packet.  If we're in the interrupt delay window,
   7870 		 * delay the interrupt.
   7871 		 */
   7872 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7873 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7874 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7875 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7876 
   7877 		txs->txs_lastdesc = lasttx;
   7878 
   7879 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7880 		    device_xname(sc->sc_dev),
   7881 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7882 
   7883 		/* Sync the descriptors we're using. */
   7884 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7885 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7886 
   7887 		/* Give the packet to the chip. */
   7888 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7889 		sent = true;
   7890 
   7891 		DPRINTF(WM_DEBUG_TX,
   7892 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7893 
   7894 		DPRINTF(WM_DEBUG_TX,
   7895 		    ("%s: TX: finished transmitting packet, job %d\n",
   7896 		    device_xname(sc->sc_dev), txq->txq_snext));
   7897 
   7898 		/* Advance the tx pointer. */
   7899 		txq->txq_free -= txs->txs_ndesc;
   7900 		txq->txq_next = nexttx;
   7901 
   7902 		txq->txq_sfree--;
   7903 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7904 
   7905 		/* Pass the packet to any BPF listeners. */
   7906 		bpf_mtap(ifp, m0);
   7907 	}
   7908 
   7909 	if (m0 != NULL) {
   7910 		if (!is_transmit)
   7911 			ifp->if_flags |= IFF_OACTIVE;
   7912 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7913 		WM_Q_EVCNT_INCR(txq, txdrop);
   7914 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7915 			__func__));
   7916 		m_freem(m0);
   7917 	}
   7918 
   7919 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7920 		/* No more slots; notify upper layer. */
   7921 		if (!is_transmit)
   7922 			ifp->if_flags |= IFF_OACTIVE;
   7923 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7924 	}
   7925 
   7926 	if (sent) {
   7927 		/* Set a watchdog timer in case the chip flakes out. */
   7928 		ifp->if_timer = 5;
   7929 	}
   7930 }
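
         /*
          * Illustrative sketch (not compiled): txq_descs and txq_nq_descs
          * above are two views of the same descriptor ring memory, so each
          * slot is written either as a legacy descriptor or as an advanced
          * (NQ) data descriptor, never both ("n" and "pa" are hypothetical
          * names here):
          *
          *	wm_set_dma_addr(&txq->txq_descs[n].wtx_addr, pa);
          *	txq->txq_nq_descs[n].nqtx_data.nqtxd_addr = htole64(pa);
          *
          * The KASSERT above depends on this overlay: WTX_CMD_EOP/WTX_CMD_RS
          * and NQTX_CMD_EOP/NQTX_CMD_RS occupy the same bits, so the final
          * cmdlen update through txq_descs[] is valid for either format.
          */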
   7931 
   7932 static void
   7933 wm_deferred_start_locked(struct wm_txqueue *txq)
   7934 {
   7935 	struct wm_softc *sc = txq->txq_sc;
   7936 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7937 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7938 	int qid = wmq->wmq_id;
   7939 
   7940 	KASSERT(mutex_owned(txq->txq_lock));
   7941 
   7942 	if (txq->txq_stopping) {
   7943 		mutex_exit(txq->txq_lock);
   7944 		return;
   7945 	}
   7946 
   7947 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7948 		/* XXX needed for ALTQ or single-CPU systems */
   7949 		if (qid == 0)
   7950 			wm_nq_start_locked(ifp);
   7951 		wm_nq_transmit_locked(ifp, txq);
   7952 	} else {
    7953 		/* XXX needed for ALTQ or single-CPU systems */
   7954 		if (qid == 0)
   7955 			wm_start_locked(ifp);
   7956 		wm_transmit_locked(ifp, txq);
   7957 	}
   7958 }
   7959 
   7960 /* Interrupt */
   7961 
   7962 /*
   7963  * wm_txeof:
   7964  *
   7965  *	Helper; handle transmit interrupts.
   7966  */
   7967 static int
   7968 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7969 {
   7970 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7971 	struct wm_txsoft *txs;
   7972 	bool processed = false;
   7973 	int count = 0;
   7974 	int i;
   7975 	uint8_t status;
   7976 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7977 
   7978 	KASSERT(mutex_owned(txq->txq_lock));
   7979 
   7980 	if (txq->txq_stopping)
   7981 		return 0;
   7982 
   7983 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7984 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7985 	if (wmq->wmq_id == 0)
   7986 		ifp->if_flags &= ~IFF_OACTIVE;
   7987 
   7988 	/*
   7989 	 * Go through the Tx list and free mbufs for those
   7990 	 * frames which have been transmitted.
   7991 	 */
   7992 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7993 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7994 		txs = &txq->txq_soft[i];
   7995 
   7996 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7997 			device_xname(sc->sc_dev), i));
   7998 
   7999 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8000 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8001 
   8002 		status =
   8003 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8004 		if ((status & WTX_ST_DD) == 0) {
   8005 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8006 			    BUS_DMASYNC_PREREAD);
   8007 			break;
   8008 		}
   8009 
   8010 		processed = true;
   8011 		count++;
   8012 		DPRINTF(WM_DEBUG_TX,
   8013 		    ("%s: TX: job %d done: descs %d..%d\n",
   8014 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8015 		    txs->txs_lastdesc));
   8016 
   8017 		/*
   8018 		 * XXX We should probably be using the statistics
   8019 		 * XXX registers, but I don't know if they exist
   8020 		 * XXX on chips before the i82544.
   8021 		 */
   8022 
   8023 #ifdef WM_EVENT_COUNTERS
   8024 		if (status & WTX_ST_TU)
   8025 			WM_Q_EVCNT_INCR(txq, tu);
   8026 #endif /* WM_EVENT_COUNTERS */
   8027 
   8028 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8029 			ifp->if_oerrors++;
   8030 			if (status & WTX_ST_LC)
   8031 				log(LOG_WARNING, "%s: late collision\n",
   8032 				    device_xname(sc->sc_dev));
   8033 			else if (status & WTX_ST_EC) {
   8034 				ifp->if_collisions += 16;
   8035 				log(LOG_WARNING, "%s: excessive collisions\n",
   8036 				    device_xname(sc->sc_dev));
   8037 			}
   8038 		} else
   8039 			ifp->if_opackets++;
   8040 
   8041 		txq->txq_packets++;
   8042 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8043 
   8044 		txq->txq_free += txs->txs_ndesc;
   8045 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8046 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8047 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8048 		m_freem(txs->txs_mbuf);
   8049 		txs->txs_mbuf = NULL;
   8050 	}
   8051 
   8052 	/* Update the dirty transmit buffer pointer. */
   8053 	txq->txq_sdirty = i;
   8054 	DPRINTF(WM_DEBUG_TX,
   8055 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8056 
   8057 	if (count != 0)
   8058 		rnd_add_uint32(&sc->rnd_source, count);
   8059 
   8060 	/*
   8061 	 * If there are no more pending transmissions, cancel the watchdog
   8062 	 * timer.
   8063 	 */
   8064 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8065 		ifp->if_timer = 0;
   8066 
   8067 	return processed;
   8068 }
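
         /*
          * Locking sketch: wm_txeof() must be called with txq->txq_lock
          * held, as its callers below do (see wm_intr_legacy() and
          * wm_txrxintr_msix()):
          *
          *	mutex_enter(txq->txq_lock);
          *	if (!txq->txq_stopping)
          *		wm_txeof(sc, txq);
          *	mutex_exit(txq->txq_lock);
          */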
   8069 
   8070 static inline uint32_t
   8071 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8072 {
   8073 	struct wm_softc *sc = rxq->rxq_sc;
   8074 
   8075 	if (sc->sc_type == WM_T_82574)
   8076 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8077 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8078 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8079 	else
   8080 		return rxq->rxq_descs[idx].wrx_status;
   8081 }
   8082 
   8083 static inline uint32_t
   8084 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8085 {
   8086 	struct wm_softc *sc = rxq->rxq_sc;
   8087 
   8088 	if (sc->sc_type == WM_T_82574)
   8089 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8090 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8091 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8092 	else
   8093 		return rxq->rxq_descs[idx].wrx_errors;
   8094 }
   8095 
   8096 static inline uint16_t
   8097 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8098 {
   8099 	struct wm_softc *sc = rxq->rxq_sc;
   8100 
   8101 	if (sc->sc_type == WM_T_82574)
   8102 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8103 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8104 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8105 	else
   8106 		return rxq->rxq_descs[idx].wrx_special;
   8107 }
   8108 
   8109 static inline int
   8110 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8111 {
   8112 	struct wm_softc *sc = rxq->rxq_sc;
   8113 
   8114 	if (sc->sc_type == WM_T_82574)
   8115 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8116 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8117 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8118 	else
   8119 		return rxq->rxq_descs[idx].wrx_len;
   8120 }
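
         /*
          * The wm_rxdesc_get_*() accessors above hide three receive
          * descriptor layouts behind one interface: the 82574 extended
          * format (erx_*), the newqueue format (nqrx_*) and the legacy
          * format (wrx_*).  Callers never test the chip type themselves;
          * wm_rxeof() below just does, e.g.:
          *
          *	len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
          */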
   8121 
   8122 #ifdef WM_DEBUG
   8123 static inline uint32_t
   8124 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8125 {
   8126 	struct wm_softc *sc = rxq->rxq_sc;
   8127 
   8128 	if (sc->sc_type == WM_T_82574)
   8129 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8130 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8131 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8132 	else
   8133 		return 0;
   8134 }
   8135 
   8136 static inline uint8_t
   8137 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8138 {
   8139 	struct wm_softc *sc = rxq->rxq_sc;
   8140 
   8141 	if (sc->sc_type == WM_T_82574)
   8142 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8143 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8144 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8145 	else
   8146 		return 0;
   8147 }
   8148 #endif /* WM_DEBUG */
   8149 
   8150 static inline bool
   8151 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8152     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8153 {
   8154 
   8155 	if (sc->sc_type == WM_T_82574)
   8156 		return (status & ext_bit) != 0;
   8157 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8158 		return (status & nq_bit) != 0;
   8159 	else
   8160 		return (status & legacy_bit) != 0;
   8161 }
   8162 
   8163 static inline bool
   8164 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8165     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8166 {
   8167 
   8168 	if (sc->sc_type == WM_T_82574)
   8169 		return (error & ext_bit) != 0;
   8170 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8171 		return (error & nq_bit) != 0;
   8172 	else
   8173 		return (error & legacy_bit) != 0;
   8174 }
   8175 
   8176 static inline bool
   8177 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8178 {
   8179 
   8180 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8181 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8182 		return true;
   8183 	else
   8184 		return false;
   8185 }
   8186 
   8187 static inline bool
   8188 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8189 {
   8190 	struct wm_softc *sc = rxq->rxq_sc;
   8191 
    8192 	/* XXX missing error bit for newqueue? */
   8193 	if (wm_rxdesc_is_set_error(sc, errors,
   8194 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8195 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8196 		NQRXC_ERROR_RXE)) {
   8197 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8198 			log(LOG_WARNING, "%s: symbol error\n",
   8199 			    device_xname(sc->sc_dev));
   8200 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8201 			log(LOG_WARNING, "%s: receive sequence error\n",
   8202 			    device_xname(sc->sc_dev));
   8203 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8204 			log(LOG_WARNING, "%s: CRC error\n",
   8205 			    device_xname(sc->sc_dev));
   8206 		return true;
   8207 	}
   8208 
   8209 	return false;
   8210 }
   8211 
   8212 static inline bool
   8213 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8214 {
   8215 	struct wm_softc *sc = rxq->rxq_sc;
   8216 
   8217 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8218 		NQRXC_STATUS_DD)) {
   8219 		/* We have processed all of the receive descriptors. */
   8220 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8221 		return false;
   8222 	}
   8223 
   8224 	return true;
   8225 }
   8226 
   8227 static inline bool
   8228 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8229     struct mbuf *m)
   8230 {
   8231 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8232 
   8233 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8234 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8235 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8236 	}
   8237 
   8238 	return true;
   8239 }
   8240 
   8241 static inline void
   8242 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8243     uint32_t errors, struct mbuf *m)
   8244 {
   8245 	struct wm_softc *sc = rxq->rxq_sc;
   8246 
   8247 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8248 		if (wm_rxdesc_is_set_status(sc, status,
   8249 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8250 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8251 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8252 			if (wm_rxdesc_is_set_error(sc, errors,
   8253 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8254 				m->m_pkthdr.csum_flags |=
   8255 					M_CSUM_IPv4_BAD;
   8256 		}
   8257 		if (wm_rxdesc_is_set_status(sc, status,
   8258 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8259 			/*
   8260 			 * Note: we don't know if this was TCP or UDP,
   8261 			 * so we just set both bits, and expect the
   8262 			 * upper layers to deal.
   8263 			 */
   8264 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8265 			m->m_pkthdr.csum_flags |=
   8266 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8267 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8268 			if (wm_rxdesc_is_set_error(sc, errors,
   8269 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8270 				m->m_pkthdr.csum_flags |=
   8271 					M_CSUM_TCP_UDP_BAD;
   8272 		}
   8273 	}
   8274 }
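
         /*
          * Sketch of how an upper layer might consume the flags set above
          * (illustrative only, not this driver's code): M_CSUM_IPv4 with
          * M_CSUM_IPv4_BAD clear means the hardware verified the IP header
          * checksum; with M_CSUM_IPv4_BAD set, the packet must be re-checked
          * in software or dropped ("hw_verified" is a hypothetical name):
          *
          *	if ((m->m_pkthdr.csum_flags &
          *	    (M_CSUM_IPv4 | M_CSUM_IPv4_BAD)) == M_CSUM_IPv4)
          *		hw_verified = true;
          */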
   8275 
   8276 /*
   8277  * wm_rxeof:
   8278  *
   8279  *	Helper; handle receive interrupts.
   8280  */
   8281 static void
   8282 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8283 {
   8284 	struct wm_softc *sc = rxq->rxq_sc;
   8285 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8286 	struct wm_rxsoft *rxs;
   8287 	struct mbuf *m;
   8288 	int i, len;
   8289 	int count = 0;
   8290 	uint32_t status, errors;
   8291 	uint16_t vlantag;
   8292 
   8293 	KASSERT(mutex_owned(rxq->rxq_lock));
   8294 
   8295 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8296 		if (limit-- == 0) {
   8297 			rxq->rxq_ptr = i;
   8298 			break;
   8299 		}
   8300 
   8301 		rxs = &rxq->rxq_soft[i];
   8302 
   8303 		DPRINTF(WM_DEBUG_RX,
   8304 		    ("%s: RX: checking descriptor %d\n",
   8305 		    device_xname(sc->sc_dev), i));
   8306 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8307 
   8308 		status = wm_rxdesc_get_status(rxq, i);
   8309 		errors = wm_rxdesc_get_errors(rxq, i);
   8310 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8311 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8312 #ifdef WM_DEBUG
   8313 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8314 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8315 #endif
   8316 
   8317 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8318 			/*
    8319 			 * Update the receive pointer while still holding
    8320 			 * rxq_lock, so it stays consistent with the counters.
    8321 			 */
   8322 			rxq->rxq_ptr = i;
   8323 			break;
   8324 		}
   8325 
   8326 		count++;
   8327 		if (__predict_false(rxq->rxq_discard)) {
   8328 			DPRINTF(WM_DEBUG_RX,
   8329 			    ("%s: RX: discarding contents of descriptor %d\n",
   8330 			    device_xname(sc->sc_dev), i));
   8331 			wm_init_rxdesc(rxq, i);
   8332 			if (wm_rxdesc_is_eop(rxq, status)) {
   8333 				/* Reset our state. */
   8334 				DPRINTF(WM_DEBUG_RX,
   8335 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8336 				    device_xname(sc->sc_dev)));
   8337 				rxq->rxq_discard = 0;
   8338 			}
   8339 			continue;
   8340 		}
   8341 
   8342 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8343 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8344 
   8345 		m = rxs->rxs_mbuf;
   8346 
   8347 		/*
   8348 		 * Add a new receive buffer to the ring, unless of
   8349 		 * course the length is zero. Treat the latter as a
   8350 		 * failed mapping.
   8351 		 */
   8352 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8353 			/*
   8354 			 * Failed, throw away what we've done so
   8355 			 * far, and discard the rest of the packet.
   8356 			 */
   8357 			ifp->if_ierrors++;
   8358 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8359 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8360 			wm_init_rxdesc(rxq, i);
   8361 			if (!wm_rxdesc_is_eop(rxq, status))
   8362 				rxq->rxq_discard = 1;
   8363 			if (rxq->rxq_head != NULL)
   8364 				m_freem(rxq->rxq_head);
   8365 			WM_RXCHAIN_RESET(rxq);
   8366 			DPRINTF(WM_DEBUG_RX,
   8367 			    ("%s: RX: Rx buffer allocation failed, "
   8368 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8369 			    rxq->rxq_discard ? " (discard)" : ""));
   8370 			continue;
   8371 		}
   8372 
   8373 		m->m_len = len;
   8374 		rxq->rxq_len += len;
   8375 		DPRINTF(WM_DEBUG_RX,
   8376 		    ("%s: RX: buffer at %p len %d\n",
   8377 		    device_xname(sc->sc_dev), m->m_data, len));
   8378 
   8379 		/* If this is not the end of the packet, keep looking. */
   8380 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8381 			WM_RXCHAIN_LINK(rxq, m);
   8382 			DPRINTF(WM_DEBUG_RX,
   8383 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8384 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8385 			continue;
   8386 		}
   8387 
    8388 		/*
    8389 		 * Okay, we have the entire packet now.  The chip is
    8390 		 * configured to include the FCS except on I350, I354
    8391 		 * and I21[01] (not all chips can be configured to
    8392 		 * strip it), so we need to trim it.  We may need to
    8393 		 * adjust the length of the previous mbuf in the
    8394 		 * chain if the current mbuf is too short.
    8395 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8396 		 * register is always set on I350, so we don't trim it.
    8397 		 */
   8398 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8399 		    && (sc->sc_type != WM_T_I210)
   8400 		    && (sc->sc_type != WM_T_I211)) {
   8401 			if (m->m_len < ETHER_CRC_LEN) {
   8402 				rxq->rxq_tail->m_len
   8403 				    -= (ETHER_CRC_LEN - m->m_len);
   8404 				m->m_len = 0;
   8405 			} else
   8406 				m->m_len -= ETHER_CRC_LEN;
   8407 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8408 		} else
   8409 			len = rxq->rxq_len;
   8410 
   8411 		WM_RXCHAIN_LINK(rxq, m);
   8412 
   8413 		*rxq->rxq_tailp = NULL;
   8414 		m = rxq->rxq_head;
   8415 
   8416 		WM_RXCHAIN_RESET(rxq);
   8417 
   8418 		DPRINTF(WM_DEBUG_RX,
   8419 		    ("%s: RX: have entire packet, len -> %d\n",
   8420 		    device_xname(sc->sc_dev), len));
   8421 
   8422 		/* If an error occurred, update stats and drop the packet. */
   8423 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8424 			m_freem(m);
   8425 			continue;
   8426 		}
   8427 
   8428 		/* No errors.  Receive the packet. */
   8429 		m_set_rcvif(m, ifp);
   8430 		m->m_pkthdr.len = len;
    8431 		/*
    8432 		 * TODO
    8433 		 * We should save the RSS hash and RSS type in this mbuf.
    8434 		 */
   8435 		DPRINTF(WM_DEBUG_RX,
   8436 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8437 			device_xname(sc->sc_dev), rsstype, rsshash));
   8438 
   8439 		/*
   8440 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8441 		 * for us.  Associate the tag with the packet.
   8442 		 */
   8443 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8444 			continue;
   8445 
   8446 		/* Set up checksum info for this packet. */
   8447 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8448 		/*
    8449 		 * Update the receive pointer while still holding rxq_lock,
    8450 		 * so it stays consistent with the counters.
    8451 		 */
   8452 		rxq->rxq_ptr = i;
   8453 		rxq->rxq_packets++;
   8454 		rxq->rxq_bytes += len;
   8455 		mutex_exit(rxq->rxq_lock);
   8456 
   8457 		/* Pass it on. */
   8458 		if_percpuq_enqueue(sc->sc_ipq, m);
   8459 
   8460 		mutex_enter(rxq->rxq_lock);
   8461 
   8462 		if (rxq->rxq_stopping)
   8463 			break;
   8464 	}
   8465 
   8466 	if (count != 0)
   8467 		rnd_add_uint32(&sc->rnd_source, count);
   8468 
   8469 	DPRINTF(WM_DEBUG_RX,
   8470 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8471 }
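
         /*
          * The "limit" argument bounds how many descriptors one wm_rxeof()
          * call may consume: wm_intr_legacy() below passes UINT_MAX, while
          * the MSI-X path passes sc_rx_intr_process_limit from the hard
          * interrupt and sc_rx_process_limit from the softint, e.g.:
          *
          *	wm_rxeof(rxq, sc->sc_rx_intr_process_limit);
          *
          * so a flooded ring is drained incrementally.
          */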
   8472 
   8473 /*
   8474  * wm_linkintr_gmii:
   8475  *
   8476  *	Helper; handle link interrupts for GMII.
   8477  */
   8478 static void
   8479 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8480 {
   8481 
   8482 	KASSERT(WM_CORE_LOCKED(sc));
   8483 
   8484 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8485 		__func__));
   8486 
   8487 	if (icr & ICR_LSC) {
   8488 		uint32_t reg;
   8489 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8490 
   8491 		if ((status & STATUS_LU) != 0) {
   8492 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8493 				device_xname(sc->sc_dev),
   8494 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8495 		} else {
   8496 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8497 				device_xname(sc->sc_dev)));
   8498 		}
   8499 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8500 			wm_gig_downshift_workaround_ich8lan(sc);
   8501 
   8502 		if ((sc->sc_type == WM_T_ICH8)
   8503 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8504 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8505 		}
   8506 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8507 			device_xname(sc->sc_dev)));
   8508 		mii_pollstat(&sc->sc_mii);
   8509 		if (sc->sc_type == WM_T_82543) {
   8510 			int miistatus, active;
   8511 
   8512 			/*
   8513 			 * With 82543, we need to force speed and
   8514 			 * duplex on the MAC equal to what the PHY
   8515 			 * speed and duplex configuration is.
   8516 			 */
   8517 			miistatus = sc->sc_mii.mii_media_status;
   8518 
   8519 			if (miistatus & IFM_ACTIVE) {
   8520 				active = sc->sc_mii.mii_media_active;
   8521 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8522 				switch (IFM_SUBTYPE(active)) {
   8523 				case IFM_10_T:
   8524 					sc->sc_ctrl |= CTRL_SPEED_10;
   8525 					break;
   8526 				case IFM_100_TX:
   8527 					sc->sc_ctrl |= CTRL_SPEED_100;
   8528 					break;
   8529 				case IFM_1000_T:
   8530 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8531 					break;
   8532 				default:
   8533 					/*
    8534 					 * Fiber?
    8535 					 * Should not enter here.
   8536 					 */
   8537 					printf("unknown media (%x)\n", active);
   8538 					break;
   8539 				}
   8540 				if (active & IFM_FDX)
   8541 					sc->sc_ctrl |= CTRL_FD;
   8542 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8543 			}
   8544 		} else if (sc->sc_type == WM_T_PCH) {
   8545 			wm_k1_gig_workaround_hv(sc,
   8546 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8547 		}
   8548 
   8549 		if ((sc->sc_phytype == WMPHY_82578)
   8550 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8551 			== IFM_1000_T)) {
   8552 
   8553 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8554 				delay(200*1000); /* XXX too big */
   8555 
   8556 				/* Link stall fix for link up */
   8557 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8558 				    HV_MUX_DATA_CTRL,
   8559 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8560 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8561 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8562 				    HV_MUX_DATA_CTRL,
   8563 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8564 			}
   8565 		}
   8566 		/*
   8567 		 * I217 Packet Loss issue:
   8568 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8569 		 * on power up.
   8570 		 * Set the Beacon Duration for I217 to 8 usec
   8571 		 */
   8572 		if ((sc->sc_type == WM_T_PCH_LPT)
   8573 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8574 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8575 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8576 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8577 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8578 		}
   8579 
    8580 		/* XXX Work around the I218 hang issue */
   8581 		/* e1000_k1_workaround_lpt_lp() */
   8582 
   8583 		if ((sc->sc_type == WM_T_PCH_LPT)
   8584 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8585 			/*
   8586 			 * Set platform power management values for Latency
   8587 			 * Tolerance Reporting (LTR)
   8588 			 */
   8589 			wm_platform_pm_pch_lpt(sc,
   8590 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8591 				    != 0));
   8592 		}
   8593 
   8594 		/* FEXTNVM6 K1-off workaround */
   8595 		if (sc->sc_type == WM_T_PCH_SPT) {
   8596 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8597 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8598 			    & FEXTNVM6_K1_OFF_ENABLE)
   8599 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8600 			else
   8601 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8602 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8603 		}
   8604 	} else if (icr & ICR_RXSEQ) {
   8605 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8606 			device_xname(sc->sc_dev)));
   8607 	}
   8608 }
   8609 
   8610 /*
   8611  * wm_linkintr_tbi:
   8612  *
   8613  *	Helper; handle link interrupts for TBI mode.
   8614  */
   8615 static void
   8616 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8617 {
   8618 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8619 	uint32_t status;
   8620 
   8621 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8622 		__func__));
   8623 
   8624 	status = CSR_READ(sc, WMREG_STATUS);
   8625 	if (icr & ICR_LSC) {
   8626 		if (status & STATUS_LU) {
   8627 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8628 			    device_xname(sc->sc_dev),
   8629 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8630 			/*
   8631 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8632 			 * so we should update sc->sc_ctrl
   8633 			 */
   8634 
   8635 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8636 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8637 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8638 			if (status & STATUS_FD)
   8639 				sc->sc_tctl |=
   8640 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8641 			else
   8642 				sc->sc_tctl |=
   8643 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8644 			if (sc->sc_ctrl & CTRL_TFCE)
   8645 				sc->sc_fcrtl |= FCRTL_XONE;
   8646 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8647 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8648 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8649 				      sc->sc_fcrtl);
   8650 			sc->sc_tbi_linkup = 1;
   8651 			if_link_state_change(ifp, LINK_STATE_UP);
   8652 		} else {
   8653 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8654 			    device_xname(sc->sc_dev)));
   8655 			sc->sc_tbi_linkup = 0;
   8656 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8657 		}
   8658 		/* Update LED */
   8659 		wm_tbi_serdes_set_linkled(sc);
   8660 	} else if (icr & ICR_RXSEQ) {
   8661 		DPRINTF(WM_DEBUG_LINK,
   8662 		    ("%s: LINK: Receive sequence error\n",
   8663 		    device_xname(sc->sc_dev)));
   8664 	}
   8665 }
   8666 
   8667 /*
   8668  * wm_linkintr_serdes:
   8669  *
    8670  *	Helper; handle link interrupts for SERDES mode.
   8671  */
   8672 static void
   8673 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8674 {
   8675 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8676 	struct mii_data *mii = &sc->sc_mii;
   8677 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8678 	uint32_t pcs_adv, pcs_lpab, reg;
   8679 
   8680 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8681 		__func__));
   8682 
   8683 	if (icr & ICR_LSC) {
   8684 		/* Check PCS */
   8685 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8686 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8687 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8688 				device_xname(sc->sc_dev)));
   8689 			mii->mii_media_status |= IFM_ACTIVE;
   8690 			sc->sc_tbi_linkup = 1;
   8691 			if_link_state_change(ifp, LINK_STATE_UP);
   8692 		} else {
   8693 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8694 				device_xname(sc->sc_dev)));
    8695 			mii->mii_media_active |= IFM_NONE;
   8696 			sc->sc_tbi_linkup = 0;
   8697 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8698 			wm_tbi_serdes_set_linkled(sc);
   8699 			return;
   8700 		}
   8701 		mii->mii_media_active |= IFM_1000_SX;
   8702 		if ((reg & PCS_LSTS_FDX) != 0)
   8703 			mii->mii_media_active |= IFM_FDX;
   8704 		else
   8705 			mii->mii_media_active |= IFM_HDX;
   8706 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8707 			/* Check flow */
   8708 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8709 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8710 				DPRINTF(WM_DEBUG_LINK,
   8711 				    ("XXX LINKOK but not ACOMP\n"));
   8712 				return;
   8713 			}
   8714 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8715 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8716 			DPRINTF(WM_DEBUG_LINK,
   8717 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8718 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8719 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8720 				mii->mii_media_active |= IFM_FLOW
   8721 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8722 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8723 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8724 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8725 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8726 				mii->mii_media_active |= IFM_FLOW
   8727 				    | IFM_ETH_TXPAUSE;
   8728 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8729 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8730 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8731 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8732 				mii->mii_media_active |= IFM_FLOW
   8733 				    | IFM_ETH_RXPAUSE;
   8734 		}
   8735 		/* Update LED */
   8736 		wm_tbi_serdes_set_linkled(sc);
   8737 	} else {
   8738 		DPRINTF(WM_DEBUG_LINK,
   8739 		    ("%s: LINK: Receive sequence error\n",
   8740 		    device_xname(sc->sc_dev)));
   8741 	}
   8742 }
   8743 
   8744 /*
   8745  * wm_linkintr:
   8746  *
   8747  *	Helper; handle link interrupts.
   8748  */
   8749 static void
   8750 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8751 {
   8752 
   8753 	KASSERT(WM_CORE_LOCKED(sc));
   8754 
   8755 	if (sc->sc_flags & WM_F_HAS_MII)
   8756 		wm_linkintr_gmii(sc, icr);
   8757 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8758 	    && (sc->sc_type >= WM_T_82575))
   8759 		wm_linkintr_serdes(sc, icr);
   8760 	else
   8761 		wm_linkintr_tbi(sc, icr);
   8762 }
   8763 
   8764 /*
   8765  * wm_intr_legacy:
   8766  *
   8767  *	Interrupt service routine for INTx and MSI.
   8768  */
   8769 static int
   8770 wm_intr_legacy(void *arg)
   8771 {
   8772 	struct wm_softc *sc = arg;
   8773 	struct wm_queue *wmq = &sc->sc_queue[0];
   8774 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8775 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8776 	uint32_t icr, rndval = 0;
   8777 	int handled = 0;
   8778 
   8779 	while (1 /* CONSTCOND */) {
   8780 		icr = CSR_READ(sc, WMREG_ICR);
   8781 		if ((icr & sc->sc_icr) == 0)
   8782 			break;
   8783 		if (handled == 0) {
   8784 			DPRINTF(WM_DEBUG_TX,
   8785 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
   8786 		}
   8787 		if (rndval == 0)
   8788 			rndval = icr;
   8789 
   8790 		mutex_enter(rxq->rxq_lock);
   8791 
   8792 		if (rxq->rxq_stopping) {
   8793 			mutex_exit(rxq->rxq_lock);
   8794 			break;
   8795 		}
   8796 
   8797 		handled = 1;
   8798 
   8799 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8800 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8801 			DPRINTF(WM_DEBUG_RX,
   8802 			    ("%s: RX: got Rx intr 0x%08x\n",
   8803 			    device_xname(sc->sc_dev),
   8804 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8805 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8806 		}
   8807 #endif
   8808 		wm_rxeof(rxq, UINT_MAX);
   8809 
   8810 		mutex_exit(rxq->rxq_lock);
   8811 		mutex_enter(txq->txq_lock);
   8812 
   8813 		if (txq->txq_stopping) {
   8814 			mutex_exit(txq->txq_lock);
   8815 			break;
   8816 		}
   8817 
   8818 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8819 		if (icr & ICR_TXDW) {
   8820 			DPRINTF(WM_DEBUG_TX,
   8821 			    ("%s: TX: got TXDW interrupt\n",
   8822 			    device_xname(sc->sc_dev)));
   8823 			WM_Q_EVCNT_INCR(txq, txdw);
   8824 		}
   8825 #endif
   8826 		wm_txeof(sc, txq);
   8827 
   8828 		mutex_exit(txq->txq_lock);
   8829 		WM_CORE_LOCK(sc);
   8830 
   8831 		if (sc->sc_core_stopping) {
   8832 			WM_CORE_UNLOCK(sc);
   8833 			break;
   8834 		}
   8835 
   8836 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8837 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8838 			wm_linkintr(sc, icr);
   8839 		}
   8840 
   8841 		WM_CORE_UNLOCK(sc);
   8842 
   8843 		if (icr & ICR_RXO) {
   8844 #if defined(WM_DEBUG)
   8845 			log(LOG_WARNING, "%s: Receive overrun\n",
   8846 			    device_xname(sc->sc_dev));
   8847 #endif /* defined(WM_DEBUG) */
   8848 		}
   8849 	}
   8850 
   8851 	rnd_add_uint32(&sc->rnd_source, rndval);
   8852 
   8853 	if (handled) {
   8854 		/* Try to get more packets going. */
   8855 		softint_schedule(wmq->wmq_si);
   8856 	}
   8857 
   8858 	return handled;
   8859 }
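
         /*
          * Note: on these controllers reading ICR acknowledges (clears) the
          * asserted cause bits, which is why wm_intr_legacy() above simply
          * re-reads ICR until no interesting bits remain.
          */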
   8860 
   8861 static inline void
   8862 wm_txrxintr_disable(struct wm_queue *wmq)
   8863 {
   8864 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8865 
   8866 	if (sc->sc_type == WM_T_82574)
   8867 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8868 	else if (sc->sc_type == WM_T_82575)
   8869 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8870 	else
   8871 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8872 }
   8873 
   8874 static inline void
   8875 wm_txrxintr_enable(struct wm_queue *wmq)
   8876 {
   8877 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8878 
   8879 	wm_itrs_calculate(sc, wmq);
   8880 
   8881 	if (sc->sc_type == WM_T_82574)
   8882 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8883 	else if (sc->sc_type == WM_T_82575)
   8884 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8885 	else
   8886 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8887 }
   8888 
   8889 static int
   8890 wm_txrxintr_msix(void *arg)
   8891 {
   8892 	struct wm_queue *wmq = arg;
   8893 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8894 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8895 	struct wm_softc *sc = txq->txq_sc;
   8896 	u_int limit = sc->sc_rx_intr_process_limit;
   8897 
   8898 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8899 
   8900 	DPRINTF(WM_DEBUG_TX,
   8901 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8902 
   8903 	wm_txrxintr_disable(wmq);
   8904 
   8905 	mutex_enter(txq->txq_lock);
   8906 
   8907 	if (txq->txq_stopping) {
   8908 		mutex_exit(txq->txq_lock);
   8909 		return 0;
   8910 	}
   8911 
   8912 	WM_Q_EVCNT_INCR(txq, txdw);
   8913 	wm_txeof(sc, txq);
    8914 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   8915 	mutex_exit(txq->txq_lock);
   8916 
   8917 	DPRINTF(WM_DEBUG_RX,
   8918 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8919 	mutex_enter(rxq->rxq_lock);
   8920 
   8921 	if (rxq->rxq_stopping) {
   8922 		mutex_exit(rxq->rxq_lock);
   8923 		return 0;
   8924 	}
   8925 
   8926 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8927 	wm_rxeof(rxq, limit);
   8928 	mutex_exit(rxq->rxq_lock);
   8929 
   8930 	wm_itrs_writereg(sc, wmq);
   8931 
   8932 	softint_schedule(wmq->wmq_si);
   8933 
   8934 	return 1;
   8935 }
   8936 
   8937 static void
   8938 wm_handle_queue(void *arg)
   8939 {
   8940 	struct wm_queue *wmq = arg;
   8941 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8942 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8943 	struct wm_softc *sc = txq->txq_sc;
   8944 	u_int limit = sc->sc_rx_process_limit;
   8945 
   8946 	mutex_enter(txq->txq_lock);
   8947 	if (txq->txq_stopping) {
   8948 		mutex_exit(txq->txq_lock);
   8949 		return;
   8950 	}
   8951 	wm_txeof(sc, txq);
   8952 	wm_deferred_start_locked(txq);
   8953 	mutex_exit(txq->txq_lock);
   8954 
   8955 	mutex_enter(rxq->rxq_lock);
   8956 	if (rxq->rxq_stopping) {
   8957 		mutex_exit(rxq->rxq_lock);
   8958 		return;
   8959 	}
   8960 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8961 	wm_rxeof(rxq, limit);
   8962 	mutex_exit(rxq->rxq_lock);
   8963 
   8964 	wm_txrxintr_enable(wmq);
   8965 }
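
         /*
          * Interrupt flow sketch for one queue pair, as wired up above:
          *
          *	wm_txrxintr_msix()		(hard interrupt)
          *		wm_txrxintr_disable(wmq);
          *		wm_txeof(sc, txq);
          *		wm_rxeof(rxq, sc->sc_rx_intr_process_limit);
          *		softint_schedule(wmq->wmq_si);
          *	wm_handle_queue()		(softint)
          *		wm_txeof(sc, txq);
          *		wm_deferred_start_locked(txq);
          *		wm_rxeof(rxq, sc->sc_rx_process_limit);
          *		wm_txrxintr_enable(wmq);
          */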
   8966 
   8967 /*
   8968  * wm_linkintr_msix:
   8969  *
   8970  *	Interrupt service routine for link status change for MSI-X.
   8971  */
   8972 static int
   8973 wm_linkintr_msix(void *arg)
   8974 {
   8975 	struct wm_softc *sc = arg;
   8976 	uint32_t reg;
   8977 
   8978 	DPRINTF(WM_DEBUG_LINK,
   8979 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8980 
   8981 	reg = CSR_READ(sc, WMREG_ICR);
   8982 	WM_CORE_LOCK(sc);
   8983 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8984 		goto out;
   8985 
   8986 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8987 	wm_linkintr(sc, ICR_LSC);
   8988 
   8989 out:
   8990 	WM_CORE_UNLOCK(sc);
   8991 
   8992 	if (sc->sc_type == WM_T_82574)
   8993 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8994 	else if (sc->sc_type == WM_T_82575)
   8995 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8996 	else
   8997 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8998 
   8999 	return 1;
   9000 }
   9001 
   9002 /*
   9003  * Media related.
   9004  * GMII, SGMII, TBI (and SERDES)
   9005  */
   9006 
   9007 /* Common */
   9008 
   9009 /*
   9010  * wm_tbi_serdes_set_linkled:
   9011  *
   9012  *	Update the link LED on TBI and SERDES devices.
   9013  */
   9014 static void
   9015 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9016 {
   9017 
   9018 	if (sc->sc_tbi_linkup)
   9019 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9020 	else
   9021 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9022 
   9023 	/* 82540 or newer devices are active low */
   9024 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9025 
   9026 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9027 }
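
         /*
          * Worked example for the XOR above: on a pre-82540 chip the pin is
          * driven as set, so link-up leaves CTRL_SWDPIN(0) high; on 82540 or
          * newer the LED is active-low, so the XOR inverts the bit:
          *
          *	linkup == 1, type >= 82540: bit set, then XOR -> pin low
          *	linkup == 0, type >= 82540: bit clear, then XOR -> pin high
          */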
   9028 
   9029 /* GMII related */
   9030 
   9031 /*
   9032  * wm_gmii_reset:
   9033  *
   9034  *	Reset the PHY.
   9035  */
   9036 static void
   9037 wm_gmii_reset(struct wm_softc *sc)
   9038 {
   9039 	uint32_t reg;
   9040 	int rv;
   9041 
   9042 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9043 		device_xname(sc->sc_dev), __func__));
   9044 
   9045 	rv = sc->phy.acquire(sc);
   9046 	if (rv != 0) {
   9047 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9048 		    __func__);
   9049 		return;
   9050 	}
   9051 
   9052 	switch (sc->sc_type) {
   9053 	case WM_T_82542_2_0:
   9054 	case WM_T_82542_2_1:
   9055 		/* null */
   9056 		break;
   9057 	case WM_T_82543:
   9058 		/*
   9059 		 * With 82543, we need to force speed and duplex on the MAC
   9060 		 * equal to what the PHY speed and duplex configuration is.
   9061 		 * In addition, we need to perform a hardware reset on the PHY
   9062 		 * to take it out of reset.
   9063 		 */
   9064 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9065 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9066 
   9067 		/* The PHY reset pin is active-low. */
   9068 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9069 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9070 		    CTRL_EXT_SWDPIN(4));
   9071 		reg |= CTRL_EXT_SWDPIO(4);
   9072 
   9073 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9074 		CSR_WRITE_FLUSH(sc);
   9075 		delay(10*1000);
   9076 
   9077 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9078 		CSR_WRITE_FLUSH(sc);
   9079 		delay(150);
   9080 #if 0
   9081 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9082 #endif
   9083 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9084 		break;
   9085 	case WM_T_82544:	/* reset 10000us */
   9086 	case WM_T_82540:
   9087 	case WM_T_82545:
   9088 	case WM_T_82545_3:
   9089 	case WM_T_82546:
   9090 	case WM_T_82546_3:
   9091 	case WM_T_82541:
   9092 	case WM_T_82541_2:
   9093 	case WM_T_82547:
   9094 	case WM_T_82547_2:
   9095 	case WM_T_82571:	/* reset 100us */
   9096 	case WM_T_82572:
   9097 	case WM_T_82573:
   9098 	case WM_T_82574:
   9099 	case WM_T_82575:
   9100 	case WM_T_82576:
   9101 	case WM_T_82580:
   9102 	case WM_T_I350:
   9103 	case WM_T_I354:
   9104 	case WM_T_I210:
   9105 	case WM_T_I211:
   9106 	case WM_T_82583:
   9107 	case WM_T_80003:
   9108 		/* generic reset */
   9109 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9110 		CSR_WRITE_FLUSH(sc);
   9111 		delay(20000);
   9112 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9113 		CSR_WRITE_FLUSH(sc);
   9114 		delay(20000);
   9115 
   9116 		if ((sc->sc_type == WM_T_82541)
   9117 		    || (sc->sc_type == WM_T_82541_2)
   9118 		    || (sc->sc_type == WM_T_82547)
   9119 		    || (sc->sc_type == WM_T_82547_2)) {
    9120 			/* Workarounds for IGP are done in igp_reset() */
   9121 			/* XXX add code to set LED after phy reset */
   9122 		}
   9123 		break;
   9124 	case WM_T_ICH8:
   9125 	case WM_T_ICH9:
   9126 	case WM_T_ICH10:
   9127 	case WM_T_PCH:
   9128 	case WM_T_PCH2:
   9129 	case WM_T_PCH_LPT:
   9130 	case WM_T_PCH_SPT:
   9131 		/* generic reset */
   9132 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9133 		CSR_WRITE_FLUSH(sc);
   9134 		delay(100);
   9135 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9136 		CSR_WRITE_FLUSH(sc);
   9137 		delay(150);
   9138 		break;
   9139 	default:
   9140 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9141 		    __func__);
   9142 		break;
   9143 	}
   9144 
   9145 	sc->phy.release(sc);
   9146 
   9147 	/* get_cfg_done */
   9148 	wm_get_cfg_done(sc);
   9149 
   9150 	/* extra setup */
   9151 	switch (sc->sc_type) {
   9152 	case WM_T_82542_2_0:
   9153 	case WM_T_82542_2_1:
   9154 	case WM_T_82543:
   9155 	case WM_T_82544:
   9156 	case WM_T_82540:
   9157 	case WM_T_82545:
   9158 	case WM_T_82545_3:
   9159 	case WM_T_82546:
   9160 	case WM_T_82546_3:
   9161 	case WM_T_82541_2:
   9162 	case WM_T_82547_2:
   9163 	case WM_T_82571:
   9164 	case WM_T_82572:
   9165 	case WM_T_82573:
   9166 	case WM_T_82574:
   9167 	case WM_T_82583:
   9168 	case WM_T_82575:
   9169 	case WM_T_82576:
   9170 	case WM_T_82580:
   9171 	case WM_T_I350:
   9172 	case WM_T_I354:
   9173 	case WM_T_I210:
   9174 	case WM_T_I211:
   9175 	case WM_T_80003:
   9176 		/* null */
   9177 		break;
   9178 	case WM_T_82541:
   9179 	case WM_T_82547:
    9180 		/* XXX Configure the LED after PHY reset */
   9181 		break;
   9182 	case WM_T_ICH8:
   9183 	case WM_T_ICH9:
   9184 	case WM_T_ICH10:
   9185 	case WM_T_PCH:
   9186 	case WM_T_PCH2:
   9187 	case WM_T_PCH_LPT:
   9188 	case WM_T_PCH_SPT:
   9189 		wm_phy_post_reset(sc);
   9190 		break;
   9191 	default:
   9192 		panic("%s: unknown type\n", __func__);
   9193 		break;
   9194 	}
   9195 }
   9196 
    9197 /*
    9198  * Set up sc_phytype and mii_{read|write}reg.
    9199  *
    9200  *  To identify the PHY type, the correct read/write functions must be
    9201  * selected. To select them, the PCI ID or MAC type is required, since
    9202  * the PHY registers cannot be accessed yet.
    9203  *
    9204  *  On the first call of this function, the PHY ID is not yet known, so
    9205  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9206  * so the result might be incorrect.
    9207  *
    9208  *  On the second call, the PHY OUI and model are used to identify the
    9209  * PHY type. The comparison table may be incomplete, so the result may
    9210  * still not be perfect, but it is better than the first call's.
    9211  *
    9212  *  If the newly detected result differs from the previous assumption,
    9213  * a diagnostic message is printed.
    9214  */
   9215 static void
   9216 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9217     uint16_t phy_model)
   9218 {
   9219 	device_t dev = sc->sc_dev;
   9220 	struct mii_data *mii = &sc->sc_mii;
   9221 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9222 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9223 	mii_readreg_t new_readreg;
   9224 	mii_writereg_t new_writereg;
   9225 
   9226 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9227 		device_xname(sc->sc_dev), __func__));
   9228 
   9229 	if (mii->mii_readreg == NULL) {
   9230 		/*
   9231 		 *  This is the first call of this function. For ICH and PCH
   9232 		 * variants, it's difficult to determine the PHY access method
   9233 		 * by sc_type, so use the PCI product ID for some devices.
   9234 		 */
   9235 
   9236 		switch (sc->sc_pcidevid) {
   9237 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9238 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9239 			/* 82577 */
   9240 			new_phytype = WMPHY_82577;
   9241 			break;
   9242 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9243 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9244 			/* 82578 */
   9245 			new_phytype = WMPHY_82578;
   9246 			break;
   9247 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9248 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9249 			/* 82579 */
   9250 			new_phytype = WMPHY_82579;
   9251 			break;
   9252 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9253 		case PCI_PRODUCT_INTEL_82801I_BM:
   9254 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9255 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9256 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9257 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9258 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9259 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9260 			/* ICH8, 9, 10 with 82567 */
   9261 			new_phytype = WMPHY_BM;
   9262 			break;
   9263 		default:
   9264 			break;
   9265 		}
   9266 	} else {
   9267 		/* It's not the first call. Use PHY OUI and model */
   9268 		switch (phy_oui) {
   9269 		case MII_OUI_ATHEROS: /* XXX ??? */
   9270 			switch (phy_model) {
   9271 			case 0x0004: /* XXX */
   9272 				new_phytype = WMPHY_82578;
   9273 				break;
   9274 			default:
   9275 				break;
   9276 			}
   9277 			break;
   9278 		case MII_OUI_xxMARVELL:
   9279 			switch (phy_model) {
   9280 			case MII_MODEL_xxMARVELL_I210:
   9281 				new_phytype = WMPHY_I210;
   9282 				break;
   9283 			case MII_MODEL_xxMARVELL_E1011:
   9284 			case MII_MODEL_xxMARVELL_E1000_3:
   9285 			case MII_MODEL_xxMARVELL_E1000_5:
   9286 			case MII_MODEL_xxMARVELL_E1112:
   9287 				new_phytype = WMPHY_M88;
   9288 				break;
   9289 			case MII_MODEL_xxMARVELL_E1149:
   9290 				new_phytype = WMPHY_BM;
   9291 				break;
   9292 			case MII_MODEL_xxMARVELL_E1111:
   9293 			case MII_MODEL_xxMARVELL_I347:
   9294 			case MII_MODEL_xxMARVELL_E1512:
   9295 			case MII_MODEL_xxMARVELL_E1340M:
   9296 			case MII_MODEL_xxMARVELL_E1543:
   9297 				new_phytype = WMPHY_M88;
   9298 				break;
   9299 			case MII_MODEL_xxMARVELL_I82563:
   9300 				new_phytype = WMPHY_GG82563;
   9301 				break;
   9302 			default:
   9303 				break;
   9304 			}
   9305 			break;
   9306 		case MII_OUI_INTEL:
   9307 			switch (phy_model) {
   9308 			case MII_MODEL_INTEL_I82577:
   9309 				new_phytype = WMPHY_82577;
   9310 				break;
   9311 			case MII_MODEL_INTEL_I82579:
   9312 				new_phytype = WMPHY_82579;
   9313 				break;
   9314 			case MII_MODEL_INTEL_I217:
   9315 				new_phytype = WMPHY_I217;
   9316 				break;
   9317 			case MII_MODEL_INTEL_I82580:
   9318 			case MII_MODEL_INTEL_I350:
   9319 				new_phytype = WMPHY_82580;
   9320 				break;
   9321 			default:
   9322 				break;
   9323 			}
   9324 			break;
   9325 		case MII_OUI_yyINTEL:
   9326 			switch (phy_model) {
   9327 			case MII_MODEL_yyINTEL_I82562G:
   9328 			case MII_MODEL_yyINTEL_I82562EM:
   9329 			case MII_MODEL_yyINTEL_I82562ET:
   9330 				new_phytype = WMPHY_IFE;
   9331 				break;
   9332 			case MII_MODEL_yyINTEL_IGP01E1000:
   9333 				new_phytype = WMPHY_IGP;
   9334 				break;
   9335 			case MII_MODEL_yyINTEL_I82566:
   9336 				new_phytype = WMPHY_IGP_3;
   9337 				break;
   9338 			default:
   9339 				break;
   9340 			}
   9341 			break;
   9342 		default:
   9343 			break;
   9344 		}
   9345 		if (new_phytype == WMPHY_UNKNOWN)
   9346 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9347 			    __func__);
   9348 
   9349 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9350 		    && (sc->sc_phytype != new_phytype)) {
    9351 			aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    9352 			    " was incorrect. PHY type from PHY ID = %u\n",
   9353 			    sc->sc_phytype, new_phytype);
   9354 		}
   9355 	}
   9356 
   9357 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9358 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9359 		/* SGMII */
   9360 		new_readreg = wm_sgmii_readreg;
   9361 		new_writereg = wm_sgmii_writereg;
   9362 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9363 		/* BM2 (phyaddr == 1) */
   9364 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9365 		    && (new_phytype != WMPHY_BM)
   9366 		    && (new_phytype != WMPHY_UNKNOWN))
   9367 			doubt_phytype = new_phytype;
   9368 		new_phytype = WMPHY_BM;
   9369 		new_readreg = wm_gmii_bm_readreg;
   9370 		new_writereg = wm_gmii_bm_writereg;
   9371 	} else if (sc->sc_type >= WM_T_PCH) {
   9372 		/* All PCH* use _hv_ */
   9373 		new_readreg = wm_gmii_hv_readreg;
   9374 		new_writereg = wm_gmii_hv_writereg;
   9375 	} else if (sc->sc_type >= WM_T_ICH8) {
   9376 		/* non-82567 ICH8, 9 and 10 */
   9377 		new_readreg = wm_gmii_i82544_readreg;
   9378 		new_writereg = wm_gmii_i82544_writereg;
   9379 	} else if (sc->sc_type >= WM_T_80003) {
   9380 		/* 80003 */
   9381 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9382 		    && (new_phytype != WMPHY_GG82563)
   9383 		    && (new_phytype != WMPHY_UNKNOWN))
   9384 			doubt_phytype = new_phytype;
   9385 		new_phytype = WMPHY_GG82563;
   9386 		new_readreg = wm_gmii_i80003_readreg;
   9387 		new_writereg = wm_gmii_i80003_writereg;
   9388 	} else if (sc->sc_type >= WM_T_I210) {
   9389 		/* I210 and I211 */
   9390 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9391 		    && (new_phytype != WMPHY_I210)
   9392 		    && (new_phytype != WMPHY_UNKNOWN))
   9393 			doubt_phytype = new_phytype;
   9394 		new_phytype = WMPHY_I210;
   9395 		new_readreg = wm_gmii_gs40g_readreg;
   9396 		new_writereg = wm_gmii_gs40g_writereg;
   9397 	} else if (sc->sc_type >= WM_T_82580) {
   9398 		/* 82580, I350 and I354 */
   9399 		new_readreg = wm_gmii_82580_readreg;
   9400 		new_writereg = wm_gmii_82580_writereg;
   9401 	} else if (sc->sc_type >= WM_T_82544) {
    9402 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9403 		new_readreg = wm_gmii_i82544_readreg;
   9404 		new_writereg = wm_gmii_i82544_writereg;
   9405 	} else {
   9406 		new_readreg = wm_gmii_i82543_readreg;
   9407 		new_writereg = wm_gmii_i82543_writereg;
   9408 	}
   9409 
   9410 	if (new_phytype == WMPHY_BM) {
   9411 		/* All BM use _bm_ */
   9412 		new_readreg = wm_gmii_bm_readreg;
   9413 		new_writereg = wm_gmii_bm_writereg;
   9414 	}
   9415 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9416 		/* All PCH* use _hv_ */
   9417 		new_readreg = wm_gmii_hv_readreg;
   9418 		new_writereg = wm_gmii_hv_writereg;
   9419 	}
   9420 
   9421 	/* Diag output */
   9422 	if (doubt_phytype != WMPHY_UNKNOWN)
   9423 		aprint_error_dev(dev, "Assumed new PHY type was "
   9424 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9425 		    new_phytype);
   9426 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9427 	    && (sc->sc_phytype != new_phytype))
    9428 		aprint_error_dev(dev, "Previously assumed PHY type (%u)"
    9429 		    " was incorrect. New PHY type = %u\n",
   9430 		    sc->sc_phytype, new_phytype);
   9431 
   9432 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9433 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9434 
   9435 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9436 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9437 		    "function was incorrect.\n");
   9438 
   9439 	/* Update now */
   9440 	sc->sc_phytype = new_phytype;
   9441 	mii->mii_readreg = new_readreg;
   9442 	mii->mii_writereg = new_writereg;
   9443 }
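
         /*
          * Call-pattern sketch: the first call is made while mii_readreg is
          * still NULL, so only the PCI ID/MAC-type guesses apply; the second
          * call is made from wm_gmii_mediainit() below once a PHY has been
          * probed:
          *
          *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
          *	    child->mii_mpd_model);
          */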
   9444 
   9445 /*
   9446  * wm_get_phy_id_82575:
   9447  *
    9448  *	Return the PHY ID, or -1 if it failed.
   9449  */
   9450 static int
   9451 wm_get_phy_id_82575(struct wm_softc *sc)
   9452 {
   9453 	uint32_t reg;
   9454 	int phyid = -1;
   9455 
   9456 	/* XXX */
   9457 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9458 		return -1;
   9459 
   9460 	if (wm_sgmii_uses_mdio(sc)) {
   9461 		switch (sc->sc_type) {
   9462 		case WM_T_82575:
   9463 		case WM_T_82576:
   9464 			reg = CSR_READ(sc, WMREG_MDIC);
   9465 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9466 			break;
   9467 		case WM_T_82580:
   9468 		case WM_T_I350:
   9469 		case WM_T_I354:
   9470 		case WM_T_I210:
   9471 		case WM_T_I211:
   9472 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9473 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9474 			break;
   9475 		default:
   9476 			return -1;
   9477 		}
   9478 	}
   9479 
   9480 	return phyid;
   9481 }
   9482 
   9483 
   9484 /*
   9485  * wm_gmii_mediainit:
   9486  *
   9487  *	Initialize media for use on 1000BASE-T devices.
   9488  */
   9489 static void
   9490 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9491 {
   9492 	device_t dev = sc->sc_dev;
   9493 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9494 	struct mii_data *mii = &sc->sc_mii;
   9495 	uint32_t reg;
   9496 
   9497 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9498 		device_xname(sc->sc_dev), __func__));
   9499 
   9500 	/* We have GMII. */
   9501 	sc->sc_flags |= WM_F_HAS_MII;
   9502 
   9503 	if (sc->sc_type == WM_T_80003)
    9504 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9505 	else
   9506 		sc->sc_tipg = TIPG_1000T_DFLT;
   9507 
   9508 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9509 	if ((sc->sc_type == WM_T_82580)
   9510 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9511 	    || (sc->sc_type == WM_T_I211)) {
   9512 		reg = CSR_READ(sc, WMREG_PHPM);
   9513 		reg &= ~PHPM_GO_LINK_D;
   9514 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9515 	}
   9516 
   9517 	/*
   9518 	 * Let the chip set speed/duplex on its own based on
   9519 	 * signals from the PHY.
   9520 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9521 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9522 	 */
   9523 	sc->sc_ctrl |= CTRL_SLU;
   9524 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9525 
   9526 	/* Initialize our media structures and probe the GMII. */
   9527 	mii->mii_ifp = ifp;
   9528 
   9529 	mii->mii_statchg = wm_gmii_statchg;
   9530 
    9531 	/* Switch PHY control from SMBus to PCIe */
   9532 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9533 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9534 		wm_smbustopci(sc);
   9535 
   9536 	wm_gmii_reset(sc);
   9537 
   9538 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9539 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9540 	    wm_gmii_mediastatus);
   9541 
   9542 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9543 	    || (sc->sc_type == WM_T_82580)
   9544 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9545 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9546 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9547 			/* Attach only one port */
   9548 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9549 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9550 		} else {
   9551 			int i, id;
   9552 			uint32_t ctrl_ext;
   9553 
   9554 			id = wm_get_phy_id_82575(sc);
   9555 			if (id != -1) {
   9556 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9557 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9558 			}
   9559 			if ((id == -1)
   9560 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9561 				/* Power on sgmii phy if it is disabled */
   9562 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9563 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9564 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9565 				CSR_WRITE_FLUSH(sc);
   9566 				delay(300*1000); /* XXX too long */
   9567 
   9568 				/* Try PHY addresses 1 through 7 */
   9569 				for (i = 1; i < 8; i++)
   9570 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9571 					    0xffffffff, i, MII_OFFSET_ANY,
   9572 					    MIIF_DOPAUSE);
   9573 
   9574 				/* restore previous sfp cage power state */
   9575 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9576 			}
   9577 		}
   9578 	} else {
   9579 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9580 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9581 	}
   9582 
   9583 	/*
   9584 	 * If the MAC is PCH2 or PCH_LPT and no MII PHY was detected, apply
   9585 	 * the wm_set_mdio_slow_mode_hv() workaround and retry.
   9586 	 */
   9587 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9588 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9589 		wm_set_mdio_slow_mode_hv(sc);
   9590 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9591 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9592 	}
   9593 
   9594 	/*
   9595 	 * (For ICH8 variants)
   9596 	 * If PHY detection failed, use BM's r/w function and retry.
   9597 	 */
   9598 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9599 		/* if failed, retry with *_bm_* */
   9600 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9601 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9602 		    sc->sc_phytype);
   9603 		sc->sc_phytype = WMPHY_BM;
   9604 		mii->mii_readreg = wm_gmii_bm_readreg;
   9605 		mii->mii_writereg = wm_gmii_bm_writereg;
   9606 
   9607 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9608 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9609 	}
   9610 
   9611 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9612 		/* No PHY was found */
   9613 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9614 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9615 		sc->sc_phytype = WMPHY_NONE;
   9616 	} else {
   9617 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9618 
   9619 		/*
   9620 		 * A PHY was found.  Check the PHY type again with a second
   9621 		 * call to wm_gmii_setup_phytype().
   9622 		 */
   9623 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9624 		    child->mii_mpd_model);
   9625 
   9626 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9627 	}
   9628 }
   9629 
   9630 /*
   9631  * wm_gmii_mediachange:	[ifmedia interface function]
   9632  *
   9633  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9634  */
   9635 static int
   9636 wm_gmii_mediachange(struct ifnet *ifp)
   9637 {
   9638 	struct wm_softc *sc = ifp->if_softc;
   9639 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9640 	int rc;
   9641 
   9642 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9643 		device_xname(sc->sc_dev), __func__));
   9644 	if ((ifp->if_flags & IFF_UP) == 0)
   9645 		return 0;
   9646 
   9647 	/* Disable D0 LPLU. */
   9648 	wm_lplu_d0_disable(sc);
   9649 
   9650 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9651 	sc->sc_ctrl |= CTRL_SLU;
   9652 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9653 	    || (sc->sc_type > WM_T_82543)) {
   9654 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9655 	} else {
   9656 		sc->sc_ctrl &= ~CTRL_ASDE;
   9657 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9658 		if (ife->ifm_media & IFM_FDX)
   9659 			sc->sc_ctrl |= CTRL_FD;
   9660 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9661 		case IFM_10_T:
   9662 			sc->sc_ctrl |= CTRL_SPEED_10;
   9663 			break;
   9664 		case IFM_100_TX:
   9665 			sc->sc_ctrl |= CTRL_SPEED_100;
   9666 			break;
   9667 		case IFM_1000_T:
   9668 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9669 			break;
   9670 		default:
   9671 			panic("wm_gmii_mediachange: bad media 0x%x",
   9672 			    ife->ifm_media);
   9673 		}
   9674 	}
   9675 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9676 	CSR_WRITE_FLUSH(sc);
   9677 	if (sc->sc_type <= WM_T_82543)
   9678 		wm_gmii_reset(sc);
   9679 
   9680 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9681 		return 0;
   9682 	return rc;
   9683 }
   9684 
   9685 /*
   9686  * wm_gmii_mediastatus:	[ifmedia interface function]
   9687  *
   9688  *	Get the current interface media status on a 1000BASE-T device.
   9689  */
   9690 static void
   9691 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9692 {
   9693 	struct wm_softc *sc = ifp->if_softc;
   9694 
   9695 	ether_mediastatus(ifp, ifmr);
   9696 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9697 	    | sc->sc_flowflags;
   9698 }
   9699 
   9700 #define	MDI_IO		CTRL_SWDPIN(2)
   9701 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9702 #define	MDI_CLK		CTRL_SWDPIN(3)
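        
        /*
         * The i82543 has no MDIC register: MII management frames are
         * bit-banged through the software-definable pins defined above.
         * MDI_IO carries the data, MDI_DIR sets its direction (host -> PHY)
         * and MDI_CLK supplies the management clock.  Each bit cell below
         * takes three 10us phases (data setup, clock high, clock low), so
         * the effective management clock runs at roughly 33kHz.
         */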
   9703 
   9704 static void
   9705 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9706 {
   9707 	uint32_t i, v;
   9708 
   9709 	v = CSR_READ(sc, WMREG_CTRL);
   9710 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9711 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9712 
   9713 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9714 		if (data & i)
   9715 			v |= MDI_IO;
   9716 		else
   9717 			v &= ~MDI_IO;
   9718 		CSR_WRITE(sc, WMREG_CTRL, v);
   9719 		CSR_WRITE_FLUSH(sc);
   9720 		delay(10);
   9721 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9722 		CSR_WRITE_FLUSH(sc);
   9723 		delay(10);
   9724 		CSR_WRITE(sc, WMREG_CTRL, v);
   9725 		CSR_WRITE_FLUSH(sc);
   9726 		delay(10);
   9727 	}
   9728 }
   9729 
   9730 static uint32_t
   9731 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9732 {
   9733 	uint32_t v, i, data = 0;
   9734 
   9735 	v = CSR_READ(sc, WMREG_CTRL);
   9736 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9737 	v |= CTRL_SWDPIO(3);
   9738 
   9739 	CSR_WRITE(sc, WMREG_CTRL, v);
   9740 	CSR_WRITE_FLUSH(sc);
   9741 	delay(10);
   9742 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9743 	CSR_WRITE_FLUSH(sc);
   9744 	delay(10);
   9745 	CSR_WRITE(sc, WMREG_CTRL, v);
   9746 	CSR_WRITE_FLUSH(sc);
   9747 	delay(10);
   9748 
   9749 	for (i = 0; i < 16; i++) {
   9750 		data <<= 1;
   9751 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9752 		CSR_WRITE_FLUSH(sc);
   9753 		delay(10);
   9754 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9755 			data |= 1;
   9756 		CSR_WRITE(sc, WMREG_CTRL, v);
   9757 		CSR_WRITE_FLUSH(sc);
   9758 		delay(10);
   9759 	}
   9760 
   9761 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9762 	CSR_WRITE_FLUSH(sc);
   9763 	delay(10);
   9764 	CSR_WRITE(sc, WMREG_CTRL, v);
   9765 	CSR_WRITE_FLUSH(sc);
   9766 	delay(10);
   9767 
   9768 	return data;
   9769 }
   9770 
   9771 #undef MDI_IO
   9772 #undef MDI_DIR
   9773 #undef MDI_CLK
   9774 
   9775 /*
   9776  * wm_gmii_i82543_readreg:	[mii interface function]
   9777  *
   9778  *	Read a PHY register on the GMII (i82543 version).
   9779  */
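        /*
         * This is the IEEE 802.3 clause 22 read frame as assembled below:
         * a 32-bit preamble of all ones, then 14 bits holding the start
         * delimiter (bits 13:12), the read opcode (bits 11:10), the PHY
         * address (bits 9:5) and the register address (bits 4:0); the bus
         * then turns around and 16 data bits are clocked back in by
         * wm_i82543_mii_recvbits().
         */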
   9780 static int
   9781 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9782 {
   9783 	struct wm_softc *sc = device_private(dev);
   9784 	int rv;
   9785 
   9786 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9787 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9788 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9789 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9790 
   9791 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9792 	    device_xname(dev), phy, reg, rv));
   9793 
   9794 	return rv;
   9795 }
   9796 
   9797 /*
   9798  * wm_gmii_i82543_writereg:	[mii interface function]
   9799  *
   9800  *	Write a PHY register on the GMII (i82543 version).
   9801  */
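        /*
         * The corresponding clause 22 write frame goes out as a single
         * 32-bit word: start delimiter (bits 31:30), write opcode
         * (bits 29:28), PHY address (bits 27:23), register address
         * (bits 22:18), turnaround (bits 17:16) and the 16 data bits.
         */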
   9802 static void
   9803 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9804 {
   9805 	struct wm_softc *sc = device_private(dev);
   9806 
   9807 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9808 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9809 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9810 	    (MII_COMMAND_START << 30), 32);
   9811 }
   9812 
   9813 /*
   9814  * wm_gmii_mdic_readreg:	[mii interface function]
   9815  *
   9816  *	Read a PHY register on the GMII.
   9817  */
   9818 static int
   9819 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9820 {
   9821 	struct wm_softc *sc = device_private(dev);
   9822 	uint32_t mdic = 0;
   9823 	int i, rv;
   9824 
   9825 	if (reg > MII_ADDRMASK) {
   9826 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9827 		    __func__, sc->sc_phytype, reg);
   9828 		reg &= MII_ADDRMASK;
   9829 	}
   9830 
   9831 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9832 	    MDIC_REGADD(reg));
   9833 
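        	/*
        	 * Poll for the transaction to complete (MDIC_READY),
        	 * checking every 50us.
        	 */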
   9834 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9835 		mdic = CSR_READ(sc, WMREG_MDIC);
   9836 		if (mdic & MDIC_READY)
   9837 			break;
   9838 		delay(50);
   9839 	}
   9840 
   9841 	if ((mdic & MDIC_READY) == 0) {
   9842 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9843 		    device_xname(dev), phy, reg);
   9844 		rv = 0;
   9845 	} else if (mdic & MDIC_E) {
   9846 #if 0 /* This is normal if no PHY is present. */
   9847 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9848 		    device_xname(dev), phy, reg);
   9849 #endif
   9850 		rv = 0;
   9851 	} else {
   9852 		rv = MDIC_DATA(mdic);
   9853 		if (rv == 0xffff)
   9854 			rv = 0;
   9855 	}
   9856 
   9857 	return rv;
   9858 }
   9859 
   9860 /*
   9861  * wm_gmii_mdic_writereg:	[mii interface function]
   9862  *
   9863  *	Write a PHY register on the GMII.
   9864  */
   9865 static void
   9866 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9867 {
   9868 	struct wm_softc *sc = device_private(dev);
   9869 	uint32_t mdic = 0;
   9870 	int i;
   9871 
   9872 	if (reg > MII_ADDRMASK) {
   9873 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9874 		    __func__, sc->sc_phytype, reg);
   9875 		reg &= MII_ADDRMASK;
   9876 	}
   9877 
   9878 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9879 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9880 
   9881 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9882 		mdic = CSR_READ(sc, WMREG_MDIC);
   9883 		if (mdic & MDIC_READY)
   9884 			break;
   9885 		delay(50);
   9886 	}
   9887 
   9888 	if ((mdic & MDIC_READY) == 0)
   9889 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9890 		    device_xname(dev), phy, reg);
   9891 	else if (mdic & MDIC_E)
   9892 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9893 		    device_xname(dev), phy, reg);
   9894 }
   9895 
   9896 /*
   9897  * wm_gmii_i82544_readreg:	[mii interface function]
   9898  *
   9899  *	Read a PHY register on the GMII.
   9900  */
   9901 static int
   9902 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9903 {
   9904 	struct wm_softc *sc = device_private(dev);
   9905 	int rv;
   9906 
   9907 	if (sc->phy.acquire(sc)) {
   9908 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9909 		return 0;
   9910 	}
   9911 
   9912 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9913 		switch (sc->sc_phytype) {
   9914 		case WMPHY_IGP:
   9915 		case WMPHY_IGP_2:
   9916 		case WMPHY_IGP_3:
   9917 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9918 			break;
   9919 		default:
   9920 #ifdef WM_DEBUG
   9921 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9922 			    __func__, sc->sc_phytype, reg);
   9923 #endif
   9924 			break;
   9925 		}
   9926 	}
   9927 
   9928 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9929 	sc->phy.release(sc);
   9930 
   9931 	return rv;
   9932 }
   9933 
   9934 /*
   9935  * wm_gmii_i82544_writereg:	[mii interface function]
   9936  *
   9937  *	Write a PHY register on the GMII.
   9938  */
   9939 static void
   9940 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9941 {
   9942 	struct wm_softc *sc = device_private(dev);
   9943 
   9944 	if (sc->phy.acquire(sc)) {
   9945 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9946 		return;
   9947 	}
   9948 
   9949 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9950 		switch (sc->sc_phytype) {
   9951 		case WMPHY_IGP:
   9952 		case WMPHY_IGP_2:
   9953 		case WMPHY_IGP_3:
   9954 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9955 			break;
   9956 		default:
   9957 #ifdef WM_DEBUG
   9958 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9959 			    __func__, sc->sc_phytype, reg);
   9960 #endif
   9961 			break;
   9962 		}
   9963 	}
   9964 
   9965 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9966 	sc->phy.release(sc);
   9967 }
   9968 
   9969 /*
   9970  * wm_gmii_i80003_readreg:	[mii interface function]
   9971  *
   9972  *	Read a PHY register on the Kumeran interface.
   9973  * This could be handled by the PHY layer if we didn't have to lock the
   9974  * resource ...
   9975  */
   9976 static int
   9977 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9978 {
   9979 	struct wm_softc *sc = device_private(dev);
   9980 	int page_select, temp;
   9981 	int rv;
   9982 
   9983 	if (phy != 1) /* only one PHY on kumeran bus */
   9984 		return 0;
   9985 
   9986 	if (sc->phy.acquire(sc)) {
   9987 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9988 		return 0;
   9989 	}
   9990 
   9991 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   9992 		page_select = GG82563_PHY_PAGE_SELECT;
   9993 	else {
   9994 		/*
   9995 		 * Use Alternative Page Select register to access registers
   9996 		 * 30 and 31.
   9997 		 */
   9998 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   9999 	}
   10000 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10001 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10002 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10003 		/*
  10004 		 * Wait another 200us to work around a bug with the ready
  10005 		 * bit in the MDIC register.
   10006 		 */
   10007 		delay(200);
   10008 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10009 			device_printf(dev, "%s failed\n", __func__);
   10010 			rv = 0; /* XXX */
   10011 			goto out;
   10012 		}
   10013 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10014 		delay(200);
   10015 	} else
   10016 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10017 
   10018 out:
   10019 	sc->phy.release(sc);
   10020 	return rv;
   10021 }
   10022 
   10023 /*
   10024  * wm_gmii_i80003_writereg:	[mii interface function]
   10025  *
  10026  *	Write a PHY register on the Kumeran interface.
  10027  * This could be handled by the PHY layer if we didn't have to lock the
  10028  * resource ...
   10029  */
   10030 static void
   10031 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10032 {
   10033 	struct wm_softc *sc = device_private(dev);
   10034 	int page_select, temp;
   10035 
   10036 	if (phy != 1) /* only one PHY on kumeran bus */
   10037 		return;
   10038 
   10039 	if (sc->phy.acquire(sc)) {
   10040 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10041 		return;
   10042 	}
   10043 
   10044 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10045 		page_select = GG82563_PHY_PAGE_SELECT;
   10046 	else {
   10047 		/*
   10048 		 * Use Alternative Page Select register to access registers
   10049 		 * 30 and 31.
   10050 		 */
   10051 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10052 	}
   10053 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10054 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10055 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10056 		/*
  10057 		 * Wait another 200us to work around a bug with the ready
  10058 		 * bit in the MDIC register.
   10059 		 */
   10060 		delay(200);
   10061 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10062 			device_printf(dev, "%s failed\n", __func__);
   10063 			goto out;
   10064 		}
   10065 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10066 		delay(200);
   10067 	} else
   10068 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10069 
   10070 out:
   10071 	sc->phy.release(sc);
   10072 }
   10073 
   10074 /*
   10075  * wm_gmii_bm_readreg:	[mii interface function]
   10076  *
  10077  *	Read a PHY register on the BM PHY.
  10078  * This could be handled by the PHY layer if we didn't have to lock the
  10079  * resource ...
   10080  */
   10081 static int
   10082 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10083 {
   10084 	struct wm_softc *sc = device_private(dev);
   10085 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10086 	uint16_t val;
   10087 	int rv;
   10088 
   10089 	if (sc->phy.acquire(sc)) {
   10090 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10091 		return 0;
   10092 	}
   10093 
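        	/*
        	 * The register argument encodes the page in its upper bits and
        	 * the register offset in the low five bits.  Registers on pages
        	 * >= 768, register 25 on page 0 and register 31 are only
        	 * reachable at PHY address 1, hence the remap below.
        	 */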
   10094 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10095 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10096 		    || (reg == 31)) ? 1 : phy;
   10097 	/* Page 800 works differently than the rest so it has its own func */
   10098 	if (page == BM_WUC_PAGE) {
   10099 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10100 		rv = val;
   10101 		goto release;
   10102 	}
   10103 
   10104 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10105 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10106 		    && (sc->sc_type != WM_T_82583))
   10107 			wm_gmii_mdic_writereg(dev, phy,
   10108 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10109 		else
   10110 			wm_gmii_mdic_writereg(dev, phy,
   10111 			    BME1000_PHY_PAGE_SELECT, page);
   10112 	}
   10113 
   10114 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10115 
   10116 release:
   10117 	sc->phy.release(sc);
   10118 	return rv;
   10119 }
   10120 
   10121 /*
   10122  * wm_gmii_bm_writereg:	[mii interface function]
   10123  *
  10124  *	Write a PHY register on the BM PHY.
  10125  * This could be handled by the PHY layer if we didn't have to lock the
  10126  * resource ...
   10127  */
   10128 static void
   10129 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10130 {
   10131 	struct wm_softc *sc = device_private(dev);
   10132 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10133 
   10134 	if (sc->phy.acquire(sc)) {
   10135 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10136 		return;
   10137 	}
   10138 
   10139 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10140 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10141 		    || (reg == 31)) ? 1 : phy;
   10142 	/* Page 800 works differently than the rest so it has its own func */
   10143 	if (page == BM_WUC_PAGE) {
   10144 		uint16_t tmp;
   10145 
   10146 		tmp = val;
   10147 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10148 		goto release;
   10149 	}
   10150 
   10151 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10152 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10153 		    && (sc->sc_type != WM_T_82583))
   10154 			wm_gmii_mdic_writereg(dev, phy,
   10155 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10156 		else
   10157 			wm_gmii_mdic_writereg(dev, phy,
   10158 			    BME1000_PHY_PAGE_SELECT, page);
   10159 	}
   10160 
   10161 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10162 
   10163 release:
   10164 	sc->phy.release(sc);
   10165 }
   10166 
   10167 static void
  10168 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10169 {
   10170 	struct wm_softc *sc = device_private(dev);
   10171 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10172 	uint16_t wuce, reg;
   10173 
   10174 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10175 		device_xname(dev), __func__));
   10176 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10177 	if (sc->sc_type == WM_T_PCH) {
  10178 		/* XXX The e1000 driver does nothing here... why? */
   10179 	}
   10180 
   10181 	/*
   10182 	 * 1) Enable PHY wakeup register first.
   10183 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10184 	 */
   10185 
   10186 	/* Set page 769 */
   10187 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10188 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10189 
   10190 	/* Read WUCE and save it */
   10191 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10192 
   10193 	reg = wuce | BM_WUC_ENABLE_BIT;
   10194 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10195 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10196 
   10197 	/* Select page 800 */
   10198 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10199 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10200 
   10201 	/*
   10202 	 * 2) Access PHY wakeup register.
   10203 	 * See e1000_access_phy_wakeup_reg_bm.
   10204 	 */
   10205 
   10206 	/* Write page 800 */
   10207 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10208 
   10209 	if (rd)
   10210 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10211 	else
   10212 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10213 
   10214 	/*
   10215 	 * 3) Disable PHY wakeup register.
   10216 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10217 	 */
   10218 	/* Set page 769 */
   10219 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10220 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10221 
   10222 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10223 }
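        
        /*
         * All wakeup-register (page BM_WUC_PAGE, i.e. 800) accesses from the
         * BM and HV read/write paths funnel through the helper above, which
         * brackets each access with the page-769 enable/disable sequence.
         */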
   10224 
   10225 /*
   10226  * wm_gmii_hv_readreg:	[mii interface function]
   10227  *
  10228  *	Read a PHY register on the HV (PCH) PHY.
  10229  * This could be handled by the PHY layer if we didn't have to lock the
  10230  * resource ...
   10231  */
   10232 static int
   10233 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10234 {
   10235 	struct wm_softc *sc = device_private(dev);
   10236 	int rv;
   10237 
   10238 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10239 		device_xname(dev), __func__));
   10240 	if (sc->phy.acquire(sc)) {
   10241 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10242 		return 0;
   10243 	}
   10244 
   10245 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10246 	sc->phy.release(sc);
   10247 	return rv;
   10248 }
   10249 
   10250 static int
   10251 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10252 {
   10253 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10254 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10255 	uint16_t val;
   10256 	int rv;
   10257 
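        	/*
        	 * Registers on pages at or above HV_INTC_FC_PAGE_START (768)
        	 * are only accessible at PHY address 1.
        	 */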
   10258 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10259 
   10260 	/* Page 800 works differently than the rest so it has its own func */
   10261 	if (page == BM_WUC_PAGE) {
   10262 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10263 		return val;
   10264 	}
   10265 
   10266 	/*
  10267 	 * Pages lower than 768 work differently than the rest and would
  10268 	 * need their own function; they are not handled here.
   10269 	 */
   10270 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10271 		printf("gmii_hv_readreg!!!\n");
   10272 		return 0;
   10273 	}
   10274 
   10275 	/*
   10276 	 * XXX I21[789] documents say that the SMBus Address register is at
   10277 	 * PHY address 01, Page 0 (not 768), Register 26.
   10278 	 */
   10279 	if (page == HV_INTC_FC_PAGE_START)
   10280 		page = 0;
   10281 
   10282 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10283 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10284 		    page << BME1000_PAGE_SHIFT);
   10285 	}
   10286 
   10287 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10288 	return rv;
   10289 }
   10290 
   10291 /*
   10292  * wm_gmii_hv_writereg:	[mii interface function]
   10293  *
  10294  *	Write a PHY register on the HV (PCH) PHY.
  10295  * This could be handled by the PHY layer if we didn't have to lock the
  10296  * resource ...
   10297  */
   10298 static void
   10299 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10300 {
   10301 	struct wm_softc *sc = device_private(dev);
   10302 
   10303 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10304 		device_xname(dev), __func__));
   10305 
   10306 	if (sc->phy.acquire(sc)) {
   10307 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10308 		return;
   10309 	}
   10310 
   10311 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10312 	sc->phy.release(sc);
   10313 }
   10314 
   10315 static void
   10316 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10317 {
   10318 	struct wm_softc *sc = device_private(dev);
   10319 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10320 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10321 
   10322 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10323 
   10324 	/* Page 800 works differently than the rest so it has its own func */
   10325 	if (page == BM_WUC_PAGE) {
   10326 		uint16_t tmp;
   10327 
   10328 		tmp = val;
   10329 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10330 		return;
   10331 	}
   10332 
   10333 	/*
  10334 	 * Pages lower than 768 work differently than the rest and would
  10335 	 * need their own function; they are not handled here.
   10336 	 */
   10337 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10338 		printf("gmii_hv_writereg!!!\n");
   10339 		return;
   10340 	}
   10341 
   10342 	{
   10343 		/*
   10344 		 * XXX I21[789] documents say that the SMBus Address register
   10345 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10346 		 */
   10347 		if (page == HV_INTC_FC_PAGE_START)
   10348 			page = 0;
   10349 
   10350 		/*
   10351 		 * XXX Workaround MDIO accesses being disabled after entering
   10352 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10353 		 * register is set)
   10354 		 */
   10355 		if (sc->sc_phytype == WMPHY_82578) {
   10356 			struct mii_softc *child;
   10357 
   10358 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10359 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10360 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10361 			    && ((val & (1 << 11)) != 0)) {
   10362 				printf("XXX need workaround\n");
   10363 			}
   10364 		}
   10365 
   10366 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10367 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10368 			    page << BME1000_PAGE_SHIFT);
   10369 		}
   10370 	}
   10371 
   10372 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10373 }
   10374 
   10375 /*
   10376  * wm_gmii_82580_readreg:	[mii interface function]
   10377  *
   10378  *	Read a PHY register on the 82580 and I350.
   10379  * This could be handled by the PHY layer if we didn't have to lock the
  10380  * resource ...
   10381  */
   10382 static int
   10383 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10384 {
   10385 	struct wm_softc *sc = device_private(dev);
   10386 	int rv;
   10387 
   10388 	if (sc->phy.acquire(sc) != 0) {
   10389 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10390 		return 0;
   10391 	}
   10392 
   10393 #ifdef DIAGNOSTIC
   10394 	if (reg > MII_ADDRMASK) {
   10395 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10396 		    __func__, sc->sc_phytype, reg);
   10397 		reg &= MII_ADDRMASK;
   10398 	}
   10399 #endif
   10400 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10401 
   10402 	sc->phy.release(sc);
   10403 	return rv;
   10404 }
   10405 
   10406 /*
   10407  * wm_gmii_82580_writereg:	[mii interface function]
   10408  *
   10409  *	Write a PHY register on the 82580 and I350.
   10410  * This could be handled by the PHY layer if we didn't have to lock the
  10411  * resource ...
   10412  */
   10413 static void
   10414 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10415 {
   10416 	struct wm_softc *sc = device_private(dev);
   10417 
   10418 	if (sc->phy.acquire(sc) != 0) {
   10419 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10420 		return;
   10421 	}
   10422 
   10423 #ifdef DIAGNOSTIC
   10424 	if (reg > MII_ADDRMASK) {
   10425 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10426 		    __func__, sc->sc_phytype, reg);
   10427 		reg &= MII_ADDRMASK;
   10428 	}
   10429 #endif
   10430 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10431 
   10432 	sc->phy.release(sc);
   10433 }
   10434 
   10435 /*
   10436  * wm_gmii_gs40g_readreg:	[mii interface function]
   10437  *
  10438  *	Read a PHY register on the I210 and I211.
  10439  * This could be handled by the PHY layer if we didn't have to lock the
  10440  * resource ...
   10441  */
   10442 static int
   10443 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10444 {
   10445 	struct wm_softc *sc = device_private(dev);
   10446 	int page, offset;
   10447 	int rv;
   10448 
   10449 	/* Acquire semaphore */
   10450 	if (sc->phy.acquire(sc)) {
   10451 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10452 		return 0;
   10453 	}
   10454 
   10455 	/* Page select */
   10456 	page = reg >> GS40G_PAGE_SHIFT;
   10457 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10458 
   10459 	/* Read reg */
   10460 	offset = reg & GS40G_OFFSET_MASK;
   10461 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10462 
   10463 	sc->phy.release(sc);
   10464 	return rv;
   10465 }
   10466 
   10467 /*
   10468  * wm_gmii_gs40g_writereg:	[mii interface function]
   10469  *
   10470  *	Write a PHY register on the I210 and I211.
   10471  * This could be handled by the PHY layer if we didn't have to lock the
  10472  * resource ...
   10473  */
   10474 static void
   10475 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10476 {
   10477 	struct wm_softc *sc = device_private(dev);
   10478 	int page, offset;
   10479 
   10480 	/* Acquire semaphore */
   10481 	if (sc->phy.acquire(sc)) {
   10482 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10483 		return;
   10484 	}
   10485 
   10486 	/* Page select */
   10487 	page = reg >> GS40G_PAGE_SHIFT;
   10488 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10489 
   10490 	/* Write reg */
   10491 	offset = reg & GS40G_OFFSET_MASK;
   10492 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10493 
   10494 	/* Release semaphore */
   10495 	sc->phy.release(sc);
   10496 }
   10497 
   10498 /*
   10499  * wm_gmii_statchg:	[mii interface function]
   10500  *
   10501  *	Callback from MII layer when media changes.
   10502  */
   10503 static void
   10504 wm_gmii_statchg(struct ifnet *ifp)
   10505 {
   10506 	struct wm_softc *sc = ifp->if_softc;
   10507 	struct mii_data *mii = &sc->sc_mii;
   10508 
   10509 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10510 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10511 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10512 
   10513 	/*
   10514 	 * Get flow control negotiation result.
   10515 	 */
   10516 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10517 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10518 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10519 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10520 	}
   10521 
   10522 	if (sc->sc_flowflags & IFM_FLOW) {
   10523 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10524 			sc->sc_ctrl |= CTRL_TFCE;
   10525 			sc->sc_fcrtl |= FCRTL_XONE;
   10526 		}
   10527 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10528 			sc->sc_ctrl |= CTRL_RFCE;
   10529 	}
   10530 
   10531 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10532 		DPRINTF(WM_DEBUG_LINK,
   10533 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10534 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10535 	} else {
   10536 		DPRINTF(WM_DEBUG_LINK,
   10537 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10538 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10539 	}
   10540 
   10541 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10542 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10543 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10544 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10545 	if (sc->sc_type == WM_T_80003) {
   10546 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10547 		case IFM_1000_T:
   10548 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10549 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10550 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10551 			break;
   10552 		default:
   10553 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10554 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10555 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10556 			break;
   10557 		}
   10558 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10559 	}
   10560 }
   10561 
   10562 /* kumeran related (80003, ICH* and PCH*) */
   10563 
   10564 /*
   10565  * wm_kmrn_readreg:
   10566  *
  10567  *	Read a Kumeran register.
   10568  */
   10569 static int
   10570 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10571 {
   10572 	int rv;
   10573 
   10574 	if (sc->sc_type == WM_T_80003)
   10575 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10576 	else
   10577 		rv = sc->phy.acquire(sc);
   10578 	if (rv != 0) {
   10579 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10580 		    __func__);
   10581 		return rv;
   10582 	}
   10583 
   10584 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10585 
   10586 	if (sc->sc_type == WM_T_80003)
   10587 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10588 	else
   10589 		sc->phy.release(sc);
   10590 
   10591 	return rv;
   10592 }
   10593 
   10594 static int
   10595 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10596 {
   10597 
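        	/*
        	 * A Kumeran read: write the register offset with the
        	 * read-enable (REN) bit set, give the MAC a moment to complete
        	 * the transaction, then read the data back from KUMCTRLSTA.
        	 */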
   10598 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10599 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10600 	    KUMCTRLSTA_REN);
   10601 	CSR_WRITE_FLUSH(sc);
   10602 	delay(2);
   10603 
   10604 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10605 
   10606 	return 0;
   10607 }
   10608 
   10609 /*
   10610  * wm_kmrn_writereg:
   10611  *
  10612  *	Write a Kumeran register.
   10613  */
   10614 static int
   10615 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10616 {
   10617 	int rv;
   10618 
   10619 	if (sc->sc_type == WM_T_80003)
   10620 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10621 	else
   10622 		rv = sc->phy.acquire(sc);
   10623 	if (rv != 0) {
   10624 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10625 		    __func__);
   10626 		return rv;
   10627 	}
   10628 
   10629 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10630 
   10631 	if (sc->sc_type == WM_T_80003)
   10632 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10633 	else
   10634 		sc->phy.release(sc);
   10635 
   10636 	return rv;
   10637 }
   10638 
   10639 static int
   10640 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10641 {
   10642 
   10643 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10644 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10645 
   10646 	return 0;
   10647 }
   10648 
   10649 /* SGMII related */
   10650 
   10651 /*
   10652  * wm_sgmii_uses_mdio
   10653  *
   10654  * Check whether the transaction is to the internal PHY or the external
   10655  * MDIO interface. Return true if it's MDIO.
   10656  */
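        /*
         * The destination bit lives in MDIC on the 82575/82576 and in
         * MDICNFG on the 82580 and later; when it is set, management
         * transactions go to the external MDIO interface instead of the
         * internal PHY.
         */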
   10657 static bool
   10658 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10659 {
   10660 	uint32_t reg;
   10661 	bool ismdio = false;
   10662 
   10663 	switch (sc->sc_type) {
   10664 	case WM_T_82575:
   10665 	case WM_T_82576:
   10666 		reg = CSR_READ(sc, WMREG_MDIC);
   10667 		ismdio = ((reg & MDIC_DEST) != 0);
   10668 		break;
   10669 	case WM_T_82580:
   10670 	case WM_T_I350:
   10671 	case WM_T_I354:
   10672 	case WM_T_I210:
   10673 	case WM_T_I211:
   10674 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10675 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10676 		break;
   10677 	default:
   10678 		break;
   10679 	}
   10680 
   10681 	return ismdio;
   10682 }
   10683 
   10684 /*
   10685  * wm_sgmii_readreg:	[mii interface function]
   10686  *
  10687  *	Read a PHY register on the SGMII.
  10688  * This could be handled by the PHY layer if we didn't have to lock the
  10689  * resource ...
   10690  */
   10691 static int
   10692 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10693 {
   10694 	struct wm_softc *sc = device_private(dev);
   10695 	uint32_t i2ccmd;
   10696 	int i, rv;
   10697 
   10698 	if (sc->phy.acquire(sc)) {
   10699 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10700 		return 0;
   10701 	}
   10702 
   10703 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10704 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10705 	    | I2CCMD_OPCODE_READ;
   10706 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10707 
   10708 	/* Poll the ready bit */
   10709 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10710 		delay(50);
   10711 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10712 		if (i2ccmd & I2CCMD_READY)
   10713 			break;
   10714 	}
   10715 	if ((i2ccmd & I2CCMD_READY) == 0)
   10716 		device_printf(dev, "I2CCMD Read did not complete\n");
   10717 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10718 		device_printf(dev, "I2CCMD Error bit set\n");
   10719 
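        	/*
        	 * The I2C interface returns the two data bytes swapped, so put
        	 * them back in host order (mirroring the swap on the write
        	 * path).
        	 */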
   10720 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10721 
   10722 	sc->phy.release(sc);
   10723 	return rv;
   10724 }
   10725 
   10726 /*
   10727  * wm_sgmii_writereg:	[mii interface function]
   10728  *
   10729  *	Write a PHY register on the SGMII.
   10730  * This could be handled by the PHY layer if we didn't have to lock the
  10731  * resource ...
   10732  */
   10733 static void
   10734 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10735 {
   10736 	struct wm_softc *sc = device_private(dev);
   10737 	uint32_t i2ccmd;
   10738 	int i;
   10739 	int val_swapped;
   10740 
   10741 	if (sc->phy.acquire(sc) != 0) {
   10742 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10743 		return;
   10744 	}
   10745 	/* Swap the data bytes for the I2C interface */
   10746 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10747 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10748 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10749 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10750 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10751 
   10752 	/* Poll the ready bit */
   10753 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10754 		delay(50);
   10755 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10756 		if (i2ccmd & I2CCMD_READY)
   10757 			break;
   10758 	}
   10759 	if ((i2ccmd & I2CCMD_READY) == 0)
   10760 		device_printf(dev, "I2CCMD Write did not complete\n");
   10761 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10762 		device_printf(dev, "I2CCMD Error bit set\n");
   10763 
   10764 	sc->phy.release(sc);
   10765 }
   10766 
   10767 /* TBI related */
   10768 
   10769 /*
   10770  * wm_tbi_mediainit:
   10771  *
   10772  *	Initialize media for use on 1000BASE-X devices.
   10773  */
   10774 static void
   10775 wm_tbi_mediainit(struct wm_softc *sc)
   10776 {
   10777 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10778 	const char *sep = "";
   10779 
   10780 	if (sc->sc_type < WM_T_82543)
   10781 		sc->sc_tipg = TIPG_WM_DFLT;
   10782 	else
   10783 		sc->sc_tipg = TIPG_LG_DFLT;
   10784 
   10785 	sc->sc_tbi_serdes_anegticks = 5;
   10786 
   10787 	/* Initialize our media structures */
   10788 	sc->sc_mii.mii_ifp = ifp;
   10789 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10790 
   10791 	if ((sc->sc_type >= WM_T_82575)
   10792 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10793 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10794 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10795 	else
   10796 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10797 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10798 
   10799 	/*
   10800 	 * SWD Pins:
   10801 	 *
   10802 	 *	0 = Link LED (output)
   10803 	 *	1 = Loss Of Signal (input)
   10804 	 */
   10805 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10806 
   10807 	/* XXX Perhaps this is only for TBI */
   10808 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10809 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10810 
   10811 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10812 		sc->sc_ctrl &= ~CTRL_LRST;
   10813 
   10814 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10815 
   10816 #define	ADD(ss, mm, dd)							\
   10817 do {									\
   10818 	aprint_normal("%s%s", sep, ss);					\
   10819 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10820 	sep = ", ";							\
   10821 } while (/*CONSTCOND*/0)
   10822 
   10823 	aprint_normal_dev(sc->sc_dev, "");
   10824 
   10825 	if (sc->sc_type == WM_T_I354) {
   10826 		uint32_t status;
   10827 
   10828 		status = CSR_READ(sc, WMREG_STATUS);
   10829 		if (((status & STATUS_2P5_SKU) != 0)
   10830 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
  10831 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
  10832 		} else
  10833 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10834 	} else if (sc->sc_type == WM_T_82545) {
   10835 		/* Only 82545 is LX (XXX except SFP) */
   10836 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10837 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10838 	} else {
   10839 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10840 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10841 	}
   10842 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10843 	aprint_normal("\n");
   10844 
   10845 #undef ADD
   10846 
   10847 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10848 }
   10849 
   10850 /*
   10851  * wm_tbi_mediachange:	[ifmedia interface function]
   10852  *
   10853  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10854  */
   10855 static int
   10856 wm_tbi_mediachange(struct ifnet *ifp)
   10857 {
   10858 	struct wm_softc *sc = ifp->if_softc;
   10859 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10860 	uint32_t status;
   10861 	int i;
   10862 
   10863 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10864 		/* XXX need some work for >= 82571 and < 82575 */
   10865 		if (sc->sc_type < WM_T_82575)
   10866 			return 0;
   10867 	}
   10868 
   10869 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10870 	    || (sc->sc_type >= WM_T_82575))
   10871 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10872 
   10873 	sc->sc_ctrl &= ~CTRL_LRST;
   10874 	sc->sc_txcw = TXCW_ANE;
   10875 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10876 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10877 	else if (ife->ifm_media & IFM_FDX)
   10878 		sc->sc_txcw |= TXCW_FD;
   10879 	else
   10880 		sc->sc_txcw |= TXCW_HD;
   10881 
   10882 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10883 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10884 
   10885 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10886 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10887 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10888 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10889 	CSR_WRITE_FLUSH(sc);
   10890 	delay(1000);
   10891 
   10892 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10893 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10894 
   10895 	/*
  10896 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
  10897 	 * set if the optics detect a signal, and clear if they don't.
   10898 	 */
   10899 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10900 		/* Have signal; wait for the link to come up. */
   10901 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10902 			delay(10000);
   10903 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10904 				break;
   10905 		}
   10906 
   10907 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10908 			    device_xname(sc->sc_dev),i));
   10909 
   10910 		status = CSR_READ(sc, WMREG_STATUS);
   10911 		DPRINTF(WM_DEBUG_LINK,
   10912 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10913 			device_xname(sc->sc_dev),status, STATUS_LU));
   10914 		if (status & STATUS_LU) {
   10915 			/* Link is up. */
   10916 			DPRINTF(WM_DEBUG_LINK,
   10917 			    ("%s: LINK: set media -> link up %s\n",
   10918 			    device_xname(sc->sc_dev),
   10919 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10920 
   10921 			/*
   10922 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10923 			 * so we should update sc->sc_ctrl
   10924 			 */
   10925 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10926 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10927 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10928 			if (status & STATUS_FD)
   10929 				sc->sc_tctl |=
   10930 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10931 			else
   10932 				sc->sc_tctl |=
   10933 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10934 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10935 				sc->sc_fcrtl |= FCRTL_XONE;
   10936 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10937 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10938 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10939 				      sc->sc_fcrtl);
   10940 			sc->sc_tbi_linkup = 1;
   10941 		} else {
   10942 			if (i == WM_LINKUP_TIMEOUT)
   10943 				wm_check_for_link(sc);
   10944 			/* Link is down. */
   10945 			DPRINTF(WM_DEBUG_LINK,
   10946 			    ("%s: LINK: set media -> link down\n",
   10947 			    device_xname(sc->sc_dev)));
   10948 			sc->sc_tbi_linkup = 0;
   10949 		}
   10950 	} else {
   10951 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10952 		    device_xname(sc->sc_dev)));
   10953 		sc->sc_tbi_linkup = 0;
   10954 	}
   10955 
   10956 	wm_tbi_serdes_set_linkled(sc);
   10957 
   10958 	return 0;
   10959 }
   10960 
   10961 /*
   10962  * wm_tbi_mediastatus:	[ifmedia interface function]
   10963  *
   10964  *	Get the current interface media status on a 1000BASE-X device.
   10965  */
   10966 static void
   10967 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10968 {
   10969 	struct wm_softc *sc = ifp->if_softc;
   10970 	uint32_t ctrl, status;
   10971 
   10972 	ifmr->ifm_status = IFM_AVALID;
   10973 	ifmr->ifm_active = IFM_ETHER;
   10974 
   10975 	status = CSR_READ(sc, WMREG_STATUS);
   10976 	if ((status & STATUS_LU) == 0) {
   10977 		ifmr->ifm_active |= IFM_NONE;
   10978 		return;
   10979 	}
   10980 
   10981 	ifmr->ifm_status |= IFM_ACTIVE;
   10982 	/* Only 82545 is LX */
   10983 	if (sc->sc_type == WM_T_82545)
   10984 		ifmr->ifm_active |= IFM_1000_LX;
   10985 	else
   10986 		ifmr->ifm_active |= IFM_1000_SX;
   10987 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10988 		ifmr->ifm_active |= IFM_FDX;
   10989 	else
   10990 		ifmr->ifm_active |= IFM_HDX;
   10991 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10992 	if (ctrl & CTRL_RFCE)
   10993 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10994 	if (ctrl & CTRL_TFCE)
   10995 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10996 }
   10997 
   10998 /* XXX TBI only */
   10999 static int
   11000 wm_check_for_link(struct wm_softc *sc)
   11001 {
   11002 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11003 	uint32_t rxcw;
   11004 	uint32_t ctrl;
   11005 	uint32_t status;
   11006 	uint32_t sig;
   11007 
   11008 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11009 		/* XXX need some work for >= 82571 */
   11010 		if (sc->sc_type >= WM_T_82571) {
   11011 			sc->sc_tbi_linkup = 1;
   11012 			return 0;
   11013 		}
   11014 	}
   11015 
   11016 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11017 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11018 	status = CSR_READ(sc, WMREG_STATUS);
   11019 
   11020 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11021 
   11022 	DPRINTF(WM_DEBUG_LINK,
   11023 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11024 		device_xname(sc->sc_dev), __func__,
   11025 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11026 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11027 
   11028 	/*
   11029 	 * SWDPIN   LU RXCW
   11030 	 *      0    0    0
   11031 	 *      0    0    1	(should not happen)
   11032 	 *      0    1    0	(should not happen)
   11033 	 *      0    1    1	(should not happen)
   11034 	 *      1    0    0	Disable autonego and force linkup
   11035 	 *      1    0    1	got /C/ but not linkup yet
   11036 	 *      1    1    0	(linkup)
   11037 	 *      1    1    1	If IFM_AUTO, back to autonego
   11038 	 *
   11039 	 */
   11040 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11041 	    && ((status & STATUS_LU) == 0)
   11042 	    && ((rxcw & RXCW_C) == 0)) {
   11043 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11044 			__func__));
   11045 		sc->sc_tbi_linkup = 0;
   11046 		/* Disable auto-negotiation in the TXCW register */
   11047 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11048 
   11049 		/*
   11050 		 * Force link-up and also force full-duplex.
   11051 		 *
  11052 		 * NOTE: The hardware updates TFCE and RFCE in CTRL
  11053 		 * automatically, so we should update sc->sc_ctrl.
   11054 		 */
   11055 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11056 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11057 	} else if (((status & STATUS_LU) != 0)
   11058 	    && ((rxcw & RXCW_C) != 0)
   11059 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11060 		sc->sc_tbi_linkup = 1;
   11061 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11062 			__func__));
   11063 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11064 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11065 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11066 	    && ((rxcw & RXCW_C) != 0)) {
   11067 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11068 	} else {
   11069 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11070 			status));
   11071 	}
   11072 
   11073 	return 0;
   11074 }
   11075 
   11076 /*
   11077  * wm_tbi_tick:
   11078  *
   11079  *	Check the link on TBI devices.
   11080  *	This function acts as mii_tick().
   11081  */
   11082 static void
   11083 wm_tbi_tick(struct wm_softc *sc)
   11084 {
   11085 	struct mii_data *mii = &sc->sc_mii;
   11086 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11087 	uint32_t status;
   11088 
   11089 	KASSERT(WM_CORE_LOCKED(sc));
   11090 
   11091 	status = CSR_READ(sc, WMREG_STATUS);
   11092 
   11093 	/* XXX is this needed? */
   11094 	(void)CSR_READ(sc, WMREG_RXCW);
   11095 	(void)CSR_READ(sc, WMREG_CTRL);
   11096 
   11097 	/* set link status */
   11098 	if ((status & STATUS_LU) == 0) {
   11099 		DPRINTF(WM_DEBUG_LINK,
   11100 		    ("%s: LINK: checklink -> down\n",
   11101 			device_xname(sc->sc_dev)));
   11102 		sc->sc_tbi_linkup = 0;
   11103 	} else if (sc->sc_tbi_linkup == 0) {
   11104 		DPRINTF(WM_DEBUG_LINK,
   11105 		    ("%s: LINK: checklink -> up %s\n",
   11106 			device_xname(sc->sc_dev),
   11107 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11108 		sc->sc_tbi_linkup = 1;
   11109 		sc->sc_tbi_serdes_ticks = 0;
   11110 	}
   11111 
   11112 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11113 		goto setled;
   11114 
   11115 	if ((status & STATUS_LU) == 0) {
   11116 		sc->sc_tbi_linkup = 0;
   11117 		/* If the timer expired, retry autonegotiation */
   11118 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11119 		    && (++sc->sc_tbi_serdes_ticks
   11120 			>= sc->sc_tbi_serdes_anegticks)) {
   11121 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11122 			sc->sc_tbi_serdes_ticks = 0;
   11123 			/*
   11124 			 * Reset the link, and let autonegotiation do
   11125 			 * its thing
   11126 			 */
   11127 			sc->sc_ctrl |= CTRL_LRST;
   11128 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11129 			CSR_WRITE_FLUSH(sc);
   11130 			delay(1000);
   11131 			sc->sc_ctrl &= ~CTRL_LRST;
   11132 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11133 			CSR_WRITE_FLUSH(sc);
   11134 			delay(1000);
   11135 			CSR_WRITE(sc, WMREG_TXCW,
   11136 			    sc->sc_txcw & ~TXCW_ANE);
   11137 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11138 		}
   11139 	}
   11140 
   11141 setled:
   11142 	wm_tbi_serdes_set_linkled(sc);
   11143 }
   11144 
   11145 /* SERDES related */
   11146 static void
   11147 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11148 {
   11149 	uint32_t reg;
   11150 
   11151 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11152 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11153 		return;
   11154 
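        	/*
        	 * Enable the PCS and drive SWDPIN 3 low; on these adapters
        	 * that pin gates power to the SFP cage / serdes link (cf. the
        	 * SFP cage power handling in wm_gmii_mediainit()).
        	 */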
   11155 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11156 	reg |= PCS_CFG_PCS_EN;
   11157 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11158 
   11159 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11160 	reg &= ~CTRL_EXT_SWDPIN(3);
   11161 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11162 	CSR_WRITE_FLUSH(sc);
   11163 }
   11164 
   11165 static int
   11166 wm_serdes_mediachange(struct ifnet *ifp)
   11167 {
   11168 	struct wm_softc *sc = ifp->if_softc;
   11169 	bool pcs_autoneg = true; /* XXX */
   11170 	uint32_t ctrl_ext, pcs_lctl, reg;
   11171 
   11172 	/* XXX Currently, this function is not called on 8257[12] */
   11173 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11174 	    || (sc->sc_type >= WM_T_82575))
   11175 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11176 
   11177 	wm_serdes_power_up_link_82575(sc);
   11178 
   11179 	sc->sc_ctrl |= CTRL_SLU;
   11180 
   11181 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11182 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11183 
   11184 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11185 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11186 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11187 	case CTRL_EXT_LINK_MODE_SGMII:
   11188 		pcs_autoneg = true;
   11189 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11190 		break;
   11191 	case CTRL_EXT_LINK_MODE_1000KX:
   11192 		pcs_autoneg = false;
   11193 		/* FALLTHROUGH */
   11194 	default:
   11195 		if ((sc->sc_type == WM_T_82575)
   11196 		    || (sc->sc_type == WM_T_82576)) {
   11197 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11198 				pcs_autoneg = false;
   11199 		}
   11200 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11201 		    | CTRL_FRCFDX;
   11202 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11203 	}
   11204 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11205 
   11206 	if (pcs_autoneg) {
   11207 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11208 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11209 
   11210 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11211 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11212 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11213 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11214 	} else
   11215 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11216 
   11217 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
  11218 
   11220 	return 0;
   11221 }
   11222 
   11223 static void
   11224 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11225 {
   11226 	struct wm_softc *sc = ifp->if_softc;
   11227 	struct mii_data *mii = &sc->sc_mii;
   11228 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11229 	uint32_t pcs_adv, pcs_lpab, reg;
   11230 
   11231 	ifmr->ifm_status = IFM_AVALID;
   11232 	ifmr->ifm_active = IFM_ETHER;
   11233 
   11234 	/* Check PCS */
   11235 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11236 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11237 		ifmr->ifm_active |= IFM_NONE;
   11238 		sc->sc_tbi_linkup = 0;
   11239 		goto setled;
   11240 	}
   11241 
   11242 	sc->sc_tbi_linkup = 1;
   11243 	ifmr->ifm_status |= IFM_ACTIVE;
   11244 	if (sc->sc_type == WM_T_I354) {
   11245 		uint32_t status;
   11246 
   11247 		status = CSR_READ(sc, WMREG_STATUS);
   11248 		if (((status & STATUS_2P5_SKU) != 0)
   11249 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11250 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11251 		} else
   11252 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11253 	} else {
   11254 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11255 		case PCS_LSTS_SPEED_10:
   11256 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11257 			break;
   11258 		case PCS_LSTS_SPEED_100:
   11259 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11260 			break;
   11261 		case PCS_LSTS_SPEED_1000:
   11262 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11263 			break;
   11264 		default:
   11265 			device_printf(sc->sc_dev, "Unknown speed\n");
   11266 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11267 			break;
   11268 		}
   11269 	}
   11270 	if ((reg & PCS_LSTS_FDX) != 0)
   11271 		ifmr->ifm_active |= IFM_FDX;
   11272 	else
   11273 		ifmr->ifm_active |= IFM_HDX;
   11274 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11275 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11276 		/* Check flow */
   11277 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11278 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11279 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11280 			goto setled;
   11281 		}
   11282 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11283 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11284 		DPRINTF(WM_DEBUG_LINK,
   11285 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
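          		/*
          		 * Resolve flow control from our advertisement and the
          		 * link partner ability words, in the spirit of the
          		 * IEEE 802.3 Annex 28B pause resolution: symmetric
          		 * pause on both sides enables both TX and RX pause,
          		 * and the asymmetric combinations enable pause in one
          		 * direction only.
          		 */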
   11286 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11287 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11288 			mii->mii_media_active |= IFM_FLOW
   11289 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11290 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11291 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11292 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11293 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11294 			mii->mii_media_active |= IFM_FLOW
   11295 			    | IFM_ETH_TXPAUSE;
   11296 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11297 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11298 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11299 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11300 			mii->mii_media_active |= IFM_FLOW
   11301 			    | IFM_ETH_RXPAUSE;
   11302 		}
   11303 	}
   11304 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11305 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11306 setled:
   11307 	wm_tbi_serdes_set_linkled(sc);
   11308 }
   11309 
   11310 /*
   11311  * wm_serdes_tick:
   11312  *
   11313  *	Check the link on serdes devices.
   11314  */
   11315 static void
   11316 wm_serdes_tick(struct wm_softc *sc)
   11317 {
   11318 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11319 	struct mii_data *mii = &sc->sc_mii;
   11320 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11321 	uint32_t reg;
   11322 
   11323 	KASSERT(WM_CORE_LOCKED(sc));
   11324 
   11325 	mii->mii_media_status = IFM_AVALID;
   11326 	mii->mii_media_active = IFM_ETHER;
   11327 
   11328 	/* Check PCS */
   11329 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11330 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11331 		mii->mii_media_status |= IFM_ACTIVE;
   11332 		sc->sc_tbi_linkup = 1;
   11333 		sc->sc_tbi_serdes_ticks = 0;
   11334 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11335 		if ((reg & PCS_LSTS_FDX) != 0)
   11336 			mii->mii_media_active |= IFM_FDX;
   11337 		else
   11338 			mii->mii_media_active |= IFM_HDX;
   11339 	} else {
    11340 		mii->mii_media_active |= IFM_NONE;
   11341 		sc->sc_tbi_linkup = 0;
   11342 		/* If the timer expired, retry autonegotiation */
   11343 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11344 		    && (++sc->sc_tbi_serdes_ticks
   11345 			>= sc->sc_tbi_serdes_anegticks)) {
   11346 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11347 			sc->sc_tbi_serdes_ticks = 0;
   11348 			/* XXX */
   11349 			wm_serdes_mediachange(ifp);
   11350 		}
   11351 	}
   11352 
   11353 	wm_tbi_serdes_set_linkled(sc);
   11354 }
   11355 
   11356 /* SFP related */
   11357 
   11358 static int
   11359 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11360 {
   11361 	uint32_t i2ccmd;
   11362 	int i;
   11363 
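          	/*
          	 * Start a one-byte read on the chip's I2C interface via the
          	 * I2CCMD register, then poll below until it completes.
          	 */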
   11364 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11365 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11366 
   11367 	/* Poll the ready bit */
   11368 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11369 		delay(50);
   11370 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11371 		if (i2ccmd & I2CCMD_READY)
   11372 			break;
   11373 	}
   11374 	if ((i2ccmd & I2CCMD_READY) == 0)
   11375 		return -1;
   11376 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11377 		return -1;
   11378 
   11379 	*data = i2ccmd & 0x00ff;
   11380 
   11381 	return 0;
   11382 }
   11383 
   11384 static uint32_t
   11385 wm_sfp_get_media_type(struct wm_softc *sc)
   11386 {
   11387 	uint32_t ctrl_ext;
   11388 	uint8_t val = 0;
   11389 	int timeout = 3;
   11390 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11391 	int rv = -1;
   11392 
   11393 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11394 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11395 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11396 	CSR_WRITE_FLUSH(sc);
   11397 
   11398 	/* Read SFP module data */
   11399 	while (timeout) {
   11400 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11401 		if (rv == 0)
   11402 			break;
   11403 		delay(100*1000); /* XXX too big */
   11404 		timeout--;
   11405 	}
   11406 	if (rv != 0)
   11407 		goto out;
   11408 	switch (val) {
   11409 	case SFF_SFP_ID_SFF:
   11410 		aprint_normal_dev(sc->sc_dev,
   11411 		    "Module/Connector soldered to board\n");
   11412 		break;
   11413 	case SFF_SFP_ID_SFP:
   11414 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11415 		break;
   11416 	case SFF_SFP_ID_UNKNOWN:
   11417 		goto out;
   11418 	default:
   11419 		break;
   11420 	}
   11421 
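          	/*
          	 * Read the Ethernet compliance code byte (presumably byte 6
          	 * of the module EEPROM, per SFF-8472) to tell 1000BASE-SX/LX,
          	 * 1000BASE-T and 100BASE-FX modules apart.
          	 */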
   11422 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11423 	if (rv != 0) {
   11424 		goto out;
   11425 	}
   11426 
   11427 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11428 		mediatype = WM_MEDIATYPE_SERDES;
    11429 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11430 		sc->sc_flags |= WM_F_SGMII;
   11431 		mediatype = WM_MEDIATYPE_COPPER;
    11432 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11433 		sc->sc_flags |= WM_F_SGMII;
   11434 		mediatype = WM_MEDIATYPE_SERDES;
   11435 	}
   11436 
   11437 out:
   11438 	/* Restore I2C interface setting */
   11439 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11440 
   11441 	return mediatype;
   11442 }
   11443 
   11444 /*
   11445  * NVM related.
    11446  * Microwire, SPI (with or without EERD) and Flash.
   11447  */
   11448 
   11449 /* Both spi and uwire */
   11450 
   11451 /*
   11452  * wm_eeprom_sendbits:
   11453  *
   11454  *	Send a series of bits to the EEPROM.
   11455  */
   11456 static void
   11457 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11458 {
   11459 	uint32_t reg;
   11460 	int x;
   11461 
   11462 	reg = CSR_READ(sc, WMREG_EECD);
   11463 
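          	/*
          	 * Bit-bang each bit out, MSB first: present the bit on DI,
          	 * then pulse SK to clock it into the EEPROM.
          	 */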
   11464 	for (x = nbits; x > 0; x--) {
   11465 		if (bits & (1U << (x - 1)))
   11466 			reg |= EECD_DI;
   11467 		else
   11468 			reg &= ~EECD_DI;
   11469 		CSR_WRITE(sc, WMREG_EECD, reg);
   11470 		CSR_WRITE_FLUSH(sc);
   11471 		delay(2);
   11472 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11473 		CSR_WRITE_FLUSH(sc);
   11474 		delay(2);
   11475 		CSR_WRITE(sc, WMREG_EECD, reg);
   11476 		CSR_WRITE_FLUSH(sc);
   11477 		delay(2);
   11478 	}
   11479 }
   11480 
   11481 /*
   11482  * wm_eeprom_recvbits:
   11483  *
   11484  *	Receive a series of bits from the EEPROM.
   11485  */
   11486 static void
   11487 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11488 {
   11489 	uint32_t reg, val;
   11490 	int x;
   11491 
   11492 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11493 
   11494 	val = 0;
   11495 	for (x = nbits; x > 0; x--) {
   11496 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11497 		CSR_WRITE_FLUSH(sc);
   11498 		delay(2);
   11499 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11500 			val |= (1U << (x - 1));
   11501 		CSR_WRITE(sc, WMREG_EECD, reg);
   11502 		CSR_WRITE_FLUSH(sc);
   11503 		delay(2);
   11504 	}
   11505 	*valp = val;
   11506 }
   11507 
   11508 /* Microwire */
   11509 
   11510 /*
   11511  * wm_nvm_read_uwire:
   11512  *
   11513  *	Read a word from the EEPROM using the MicroWire protocol.
   11514  */
   11515 static int
   11516 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11517 {
   11518 	uint32_t reg, val;
   11519 	int i;
   11520 
   11521 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11522 		device_xname(sc->sc_dev), __func__));
   11523 
   11524 	if (sc->nvm.acquire(sc) != 0)
   11525 		return -1;
   11526 
   11527 	for (i = 0; i < wordcnt; i++) {
   11528 		/* Clear SK and DI. */
   11529 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11530 		CSR_WRITE(sc, WMREG_EECD, reg);
   11531 
    11532 		/*
    11533 		 * XXX: workaround for a bug in qemu-0.12.x and prior,
    11534 		 * and in Xen.
    11535 		 *
    11536 		 * We use this workaround only for the 82540 because
    11537 		 * qemu's e1000 emulation acts as an 82540.
    11538 		 */
   11539 		if (sc->sc_type == WM_T_82540) {
   11540 			reg |= EECD_SK;
   11541 			CSR_WRITE(sc, WMREG_EECD, reg);
   11542 			reg &= ~EECD_SK;
   11543 			CSR_WRITE(sc, WMREG_EECD, reg);
   11544 			CSR_WRITE_FLUSH(sc);
   11545 			delay(2);
   11546 		}
   11547 		/* XXX: end of workaround */
   11548 
   11549 		/* Set CHIP SELECT. */
   11550 		reg |= EECD_CS;
   11551 		CSR_WRITE(sc, WMREG_EECD, reg);
   11552 		CSR_WRITE_FLUSH(sc);
   11553 		delay(2);
   11554 
   11555 		/* Shift in the READ command. */
   11556 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11557 
   11558 		/* Shift in address. */
   11559 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11560 
   11561 		/* Shift out the data. */
   11562 		wm_eeprom_recvbits(sc, &val, 16);
   11563 		data[i] = val & 0xffff;
   11564 
   11565 		/* Clear CHIP SELECT. */
   11566 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11567 		CSR_WRITE(sc, WMREG_EECD, reg);
   11568 		CSR_WRITE_FLUSH(sc);
   11569 		delay(2);
   11570 	}
   11571 
   11572 	sc->nvm.release(sc);
   11573 	return 0;
   11574 }
   11575 
   11576 /* SPI */
   11577 
   11578 /*
   11579  * Set SPI and FLASH related information from the EECD register.
   11580  * For 82541 and 82547, the word size is taken from EEPROM.
   11581  */
   11582 static int
   11583 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11584 {
   11585 	int size;
   11586 	uint32_t reg;
   11587 	uint16_t data;
   11588 
   11589 	reg = CSR_READ(sc, WMREG_EECD);
   11590 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11591 
   11592 	/* Read the size of NVM from EECD by default */
   11593 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11594 	switch (sc->sc_type) {
   11595 	case WM_T_82541:
   11596 	case WM_T_82541_2:
   11597 	case WM_T_82547:
   11598 	case WM_T_82547_2:
   11599 		/* Set dummy value to access EEPROM */
   11600 		sc->sc_nvm_wordsize = 64;
   11601 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11602 		reg = data;
   11603 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11604 		if (size == 0)
   11605 			size = 6; /* 64 word size */
   11606 		else
   11607 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11608 		break;
   11609 	case WM_T_80003:
   11610 	case WM_T_82571:
   11611 	case WM_T_82572:
   11612 	case WM_T_82573: /* SPI case */
   11613 	case WM_T_82574: /* SPI case */
   11614 	case WM_T_82583: /* SPI case */
   11615 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11616 		if (size > 14)
   11617 			size = 14;
   11618 		break;
   11619 	case WM_T_82575:
   11620 	case WM_T_82576:
   11621 	case WM_T_82580:
   11622 	case WM_T_I350:
   11623 	case WM_T_I354:
   11624 	case WM_T_I210:
   11625 	case WM_T_I211:
   11626 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11627 		if (size > 15)
   11628 			size = 15;
   11629 		break;
   11630 	default:
   11631 		aprint_error_dev(sc->sc_dev,
   11632 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11633 		return -1;
   11634 		break;
   11635 	}
   11636 
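          	/*
          	 * At this point "size" holds log2 of the word count; e.g.
          	 * with NVM_WORD_SIZE_BASE_SHIFT == 6 (illustrative figure),
          	 * a raw size field of 2 would yield size = 8 and thus a
          	 * 256 word NVM.
          	 */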
   11637 	sc->sc_nvm_wordsize = 1 << size;
   11638 
   11639 	return 0;
   11640 }
   11641 
   11642 /*
   11643  * wm_nvm_ready_spi:
   11644  *
   11645  *	Wait for a SPI EEPROM to be ready for commands.
   11646  */
   11647 static int
   11648 wm_nvm_ready_spi(struct wm_softc *sc)
   11649 {
   11650 	uint32_t val;
   11651 	int usec;
   11652 
   11653 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11654 		device_xname(sc->sc_dev), __func__));
   11655 
   11656 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11657 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11658 		wm_eeprom_recvbits(sc, &val, 8);
   11659 		if ((val & SPI_SR_RDY) == 0)
   11660 			break;
   11661 	}
   11662 	if (usec >= SPI_MAX_RETRIES) {
    11663 		aprint_error_dev(sc->sc_dev,
          		    "EEPROM failed to become ready\n");
   11664 		return -1;
   11665 	}
   11666 	return 0;
   11667 }
   11668 
   11669 /*
   11670  * wm_nvm_read_spi:
   11671  *
    11672  *	Read a word from the EEPROM using the SPI protocol.
   11673  */
   11674 static int
   11675 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11676 {
   11677 	uint32_t reg, val;
   11678 	int i;
   11679 	uint8_t opc;
   11680 	int rv = 0;
   11681 
   11682 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11683 		device_xname(sc->sc_dev), __func__));
   11684 
   11685 	if (sc->nvm.acquire(sc) != 0)
   11686 		return -1;
   11687 
   11688 	/* Clear SK and CS. */
   11689 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11690 	CSR_WRITE(sc, WMREG_EECD, reg);
   11691 	CSR_WRITE_FLUSH(sc);
   11692 	delay(2);
   11693 
   11694 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11695 		goto out;
   11696 
   11697 	/* Toggle CS to flush commands. */
   11698 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11699 	CSR_WRITE_FLUSH(sc);
   11700 	delay(2);
   11701 	CSR_WRITE(sc, WMREG_EECD, reg);
   11702 	CSR_WRITE_FLUSH(sc);
   11703 	delay(2);
   11704 
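          	/*
          	 * SPI parts with 8 address bits but more than 256 bytes carry
          	 * the ninth address bit (A8) in the opcode.  The word address
          	 * is shifted left by one below because the part is byte
          	 * addressed.
          	 */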
   11705 	opc = SPI_OPC_READ;
   11706 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11707 		opc |= SPI_OPC_A8;
   11708 
   11709 	wm_eeprom_sendbits(sc, opc, 8);
   11710 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11711 
   11712 	for (i = 0; i < wordcnt; i++) {
   11713 		wm_eeprom_recvbits(sc, &val, 16);
   11714 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11715 	}
   11716 
   11717 	/* Raise CS and clear SK. */
   11718 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11719 	CSR_WRITE(sc, WMREG_EECD, reg);
   11720 	CSR_WRITE_FLUSH(sc);
   11721 	delay(2);
   11722 
   11723 out:
   11724 	sc->nvm.release(sc);
   11725 	return rv;
   11726 }
   11727 
    11728 /* Reading via the EERD register */
   11729 
   11730 static int
   11731 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11732 {
   11733 	uint32_t attempts = 100000;
   11734 	uint32_t i, reg = 0;
   11735 	int32_t done = -1;
   11736 
   11737 	for (i = 0; i < attempts; i++) {
   11738 		reg = CSR_READ(sc, rw);
   11739 
   11740 		if (reg & EERD_DONE) {
   11741 			done = 0;
   11742 			break;
   11743 		}
   11744 		delay(5);
   11745 	}
   11746 
   11747 	return done;
   11748 }
   11749 
   11750 static int
   11751 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11752     uint16_t *data)
   11753 {
   11754 	int i, eerd = 0;
   11755 	int rv = 0;
   11756 
   11757 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11758 		device_xname(sc->sc_dev), __func__));
   11759 
   11760 	if (sc->nvm.acquire(sc) != 0)
   11761 		return -1;
   11762 
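          	/*
          	 * EERD read protocol: write the word address together with
          	 * the START bit, poll until DONE comes back, then pull the
          	 * data out of the upper bits of the register.
          	 */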
   11763 	for (i = 0; i < wordcnt; i++) {
   11764 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11765 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11766 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11767 		if (rv != 0) {
   11768 			aprint_error_dev(sc->sc_dev, "EERD polling failed\n");
   11769 			break;
   11770 		}
   11771 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11772 	}
   11773 
   11774 	sc->nvm.release(sc);
   11775 	return rv;
   11776 }
   11777 
   11778 /* Flash */
   11779 
   11780 static int
   11781 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11782 {
   11783 	uint32_t eecd;
   11784 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11785 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11786 	uint8_t sig_byte = 0;
   11787 
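          	/*
          	 * The flash holds two NVM banks and exactly one of them
          	 * should carry a valid signature byte.  SPT reports the
          	 * valid bank in CTRL_EXT and ICH8/9 may report it in EECD;
          	 * otherwise probe the signature byte of each bank.  If
          	 * neither looks valid, the caller falls back to bank 0.
          	 */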
   11788 	switch (sc->sc_type) {
   11789 	case WM_T_PCH_SPT:
   11790 		/*
   11791 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11792 		 * sector valid bits from the NVM.
   11793 		 */
   11794 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11795 		if ((*bank == 0) || (*bank == 1)) {
   11796 			aprint_error_dev(sc->sc_dev,
   11797 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11798 				*bank);
   11799 			return -1;
   11800 		} else {
   11801 			*bank = *bank - 2;
   11802 			return 0;
   11803 		}
   11804 	case WM_T_ICH8:
   11805 	case WM_T_ICH9:
   11806 		eecd = CSR_READ(sc, WMREG_EECD);
   11807 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11808 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11809 			return 0;
   11810 		}
   11811 		/* FALLTHROUGH */
   11812 	default:
   11813 		/* Default to 0 */
   11814 		*bank = 0;
   11815 
   11816 		/* Check bank 0 */
   11817 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11818 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11819 			*bank = 0;
   11820 			return 0;
   11821 		}
   11822 
   11823 		/* Check bank 1 */
   11824 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11825 		    &sig_byte);
   11826 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11827 			*bank = 1;
   11828 			return 0;
   11829 		}
   11830 	}
   11831 
   11832 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11833 		device_xname(sc->sc_dev)));
   11834 	return -1;
   11835 }
   11836 
   11837 /******************************************************************************
   11838  * This function does initial flash setup so that a new read/write/erase cycle
   11839  * can be started.
   11840  *
   11841  * sc - The pointer to the hw structure
   11842  ****************************************************************************/
   11843 static int32_t
   11844 wm_ich8_cycle_init(struct wm_softc *sc)
   11845 {
   11846 	uint16_t hsfsts;
   11847 	int32_t error = 1;
   11848 	int32_t i     = 0;
   11849 
   11850 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11851 
    11852 	/* Check that the Flash Descriptor Valid bit is set in Hw status */
   11853 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11854 		return error;
   11855 	}
   11856 
    11857 	/* Clear FCERR and DAEL in Hw status by writing 1s */
   11859 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11860 
   11861 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11862 
    11863 	/*
    11864 	 * Either we should have a hardware SPI cycle-in-progress bit
    11865 	 * to check against before starting a new cycle, or the FDONE
    11866 	 * bit should be changed in the hardware so that it is 1 after
    11867 	 * hardware reset, which could then be used to tell whether a
    11868 	 * cycle is in progress or completed.  We should also have some
    11869 	 * software semaphore to guard FDONE or the cycle-in-progress
    11870 	 * bit so that accesses by two threads are serialized, or some
    11871 	 * other way to keep two threads from starting a cycle at once.
    11872 	 */
   11873 
   11874 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11875 		/*
   11876 		 * There is no cycle running at present, so we can start a
   11877 		 * cycle
   11878 		 */
   11879 
   11880 		/* Begin by setting Flash Cycle Done. */
   11881 		hsfsts |= HSFSTS_DONE;
   11882 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11883 		error = 0;
   11884 	} else {
   11885 		/*
    11886 		 * Otherwise poll for some time so the current cycle has a
    11887 		 * chance to end before giving up.
   11888 		 */
   11889 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11890 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11891 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11892 				error = 0;
   11893 				break;
   11894 			}
   11895 			delay(1);
   11896 		}
   11897 		if (error == 0) {
   11898 			/*
    11899 			 * The previous cycle ended within the timeout;
    11900 			 * now set the Flash Cycle Done.
   11901 			 */
   11902 			hsfsts |= HSFSTS_DONE;
   11903 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11904 		}
   11905 	}
   11906 	return error;
   11907 }
   11908 
   11909 /******************************************************************************
   11910  * This function starts a flash cycle and waits for its completion
   11911  *
   11912  * sc - The pointer to the hw structure
   11913  ****************************************************************************/
   11914 static int32_t
   11915 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11916 {
   11917 	uint16_t hsflctl;
   11918 	uint16_t hsfsts;
   11919 	int32_t error = 1;
   11920 	uint32_t i = 0;
   11921 
   11922 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11923 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11924 	hsflctl |= HSFCTL_GO;
   11925 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11926 
   11927 	/* Wait till FDONE bit is set to 1 */
   11928 	do {
   11929 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11930 		if (hsfsts & HSFSTS_DONE)
   11931 			break;
   11932 		delay(1);
   11933 		i++;
   11934 	} while (i < timeout);
    11935 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11936 		error = 0;
   11937 
   11938 	return error;
   11939 }
   11940 
   11941 /******************************************************************************
   11942  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11943  *
   11944  * sc - The pointer to the hw structure
   11945  * index - The index of the byte or word to read.
   11946  * size - Size of data to read, 1=byte 2=word, 4=dword
   11947  * data - Pointer to the word to store the value read.
   11948  *****************************************************************************/
   11949 static int32_t
   11950 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11951     uint32_t size, uint32_t *data)
   11952 {
   11953 	uint16_t hsfsts;
   11954 	uint16_t hsflctl;
   11955 	uint32_t flash_linear_address;
   11956 	uint32_t flash_data = 0;
   11957 	int32_t error = 1;
   11958 	int32_t count = 0;
   11959 
    11960 	if (size < 1 || size > 4 || data == NULL ||
   11961 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11962 		return error;
   11963 
   11964 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11965 	    sc->sc_ich8_flash_base;
   11966 
   11967 	do {
   11968 		delay(1);
   11969 		/* Steps */
   11970 		error = wm_ich8_cycle_init(sc);
   11971 		if (error)
   11972 			break;
   11973 
   11974 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11975 		/* The byte count field holds size - 1: 0 means 1 byte, 3 means 4 */
   11976 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11977 		    & HSFCTL_BCOUNT_MASK;
   11978 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11979 		if (sc->sc_type == WM_T_PCH_SPT) {
   11980 			/*
    11981 			 * In SPT, this register is in LAN memory space, not
    11982 			 * flash.  Therefore, only 32-bit access is supported.
   11983 			 */
   11984 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11985 			    (uint32_t)hsflctl);
   11986 		} else
   11987 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11988 
   11989 		/*
   11990 		 * Write the last 24 bits of index into Flash Linear address
   11991 		 * field in Flash Address
   11992 		 */
    11993 		/* TODO: maybe check the index against the size of the flash */
   11994 
   11995 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11996 
   11997 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11998 
    11999 		/*
    12000 		 * If FCERR is set, clear it and retry the whole sequence
    12001 		 * a few more times (up to ICH_FLASH_CYCLE_REPEAT_COUNT);
    12002 		 * otherwise read the data out of Flash Data0, least
    12003 		 * significant byte first.
    12004 		 */
   12005 		if (error == 0) {
   12006 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12007 			if (size == 1)
   12008 				*data = (uint8_t)(flash_data & 0x000000FF);
   12009 			else if (size == 2)
   12010 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12011 			else if (size == 4)
   12012 				*data = (uint32_t)flash_data;
   12013 			break;
   12014 		} else {
   12015 			/*
   12016 			 * If we've gotten here, then things are probably
   12017 			 * completely hosed, but if the error condition is
   12018 			 * detected, it won't hurt to give it another try...
   12019 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12020 			 */
   12021 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12022 			if (hsfsts & HSFSTS_ERR) {
   12023 				/* Repeat for some time before giving up. */
   12024 				continue;
   12025 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12026 				break;
   12027 		}
   12028 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12029 
   12030 	return error;
   12031 }
   12032 
   12033 /******************************************************************************
   12034  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12035  *
   12036  * sc - pointer to wm_hw structure
   12037  * index - The index of the byte to read.
   12038  * data - Pointer to a byte to store the value read.
   12039  *****************************************************************************/
   12040 static int32_t
   12041 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12042 {
   12043 	int32_t status;
   12044 	uint32_t word = 0;
   12045 
   12046 	status = wm_read_ich8_data(sc, index, 1, &word);
   12047 	if (status == 0)
   12048 		*data = (uint8_t)word;
   12049 	else
   12050 		*data = 0;
   12051 
   12052 	return status;
   12053 }
   12054 
   12055 /******************************************************************************
   12056  * Reads a word from the NVM using the ICH8 flash access registers.
   12057  *
   12058  * sc - pointer to wm_hw structure
   12059  * index - The starting byte index of the word to read.
   12060  * data - Pointer to a word to store the value read.
   12061  *****************************************************************************/
   12062 static int32_t
   12063 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12064 {
   12065 	int32_t status;
   12066 	uint32_t word = 0;
   12067 
   12068 	status = wm_read_ich8_data(sc, index, 2, &word);
   12069 	if (status == 0)
   12070 		*data = (uint16_t)word;
   12071 	else
   12072 		*data = 0;
   12073 
   12074 	return status;
   12075 }
   12076 
   12077 /******************************************************************************
   12078  * Reads a dword from the NVM using the ICH8 flash access registers.
   12079  *
   12080  * sc - pointer to wm_hw structure
   12081  * index - The starting byte index of the word to read.
   12082  * data - Pointer to a word to store the value read.
   12083  *****************************************************************************/
   12084 static int32_t
   12085 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12086 {
   12087 	int32_t status;
   12088 
   12089 	status = wm_read_ich8_data(sc, index, 4, data);
   12090 	return status;
   12091 }
   12092 
   12093 /******************************************************************************
   12094  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12095  * register.
   12096  *
   12097  * sc - Struct containing variables accessed by shared code
   12098  * offset - offset of word in the EEPROM to read
   12099  * data - word read from the EEPROM
   12100  * words - number of words to read
   12101  *****************************************************************************/
   12102 static int
   12103 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12104 {
   12105 	int32_t  rv = 0;
   12106 	uint32_t flash_bank = 0;
   12107 	uint32_t act_offset = 0;
   12108 	uint32_t bank_offset = 0;
   12109 	uint16_t word = 0;
   12110 	uint16_t i = 0;
   12111 
   12112 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12113 		device_xname(sc->sc_dev), __func__));
   12114 
   12115 	if (sc->nvm.acquire(sc) != 0)
   12116 		return -1;
   12117 
    12118 	/*
    12119 	 * We need to know which flash bank is valid.  In the event
    12120 	 * that we didn't allocate eeprom_shadow_ram, we may not be
    12121 	 * managing flash_bank, so it cannot be trusted and needs
    12122 	 * to be updated with each read.
    12123 	 */
   12124 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12125 	if (rv) {
   12126 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12127 			device_xname(sc->sc_dev)));
   12128 		flash_bank = 0;
   12129 	}
   12130 
   12131 	/*
   12132 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12133 	 * size
   12134 	 */
   12135 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12136 
   12137 	for (i = 0; i < words; i++) {
   12138 		/* The NVM part needs a byte offset, hence * 2 */
   12139 		act_offset = bank_offset + ((offset + i) * 2);
   12140 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12141 		if (rv) {
   12142 			aprint_error_dev(sc->sc_dev,
   12143 			    "%s: failed to read NVM\n", __func__);
   12144 			break;
   12145 		}
   12146 		data[i] = word;
   12147 	}
   12148 
   12149 	sc->nvm.release(sc);
   12150 	return rv;
   12151 }
   12152 
   12153 /******************************************************************************
   12154  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12155  * register.
   12156  *
   12157  * sc - Struct containing variables accessed by shared code
   12158  * offset - offset of word in the EEPROM to read
   12159  * data - word read from the EEPROM
   12160  * words - number of words to read
   12161  *****************************************************************************/
   12162 static int
   12163 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12164 {
   12165 	int32_t  rv = 0;
   12166 	uint32_t flash_bank = 0;
   12167 	uint32_t act_offset = 0;
   12168 	uint32_t bank_offset = 0;
   12169 	uint32_t dword = 0;
   12170 	uint16_t i = 0;
   12171 
   12172 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12173 		device_xname(sc->sc_dev), __func__));
   12174 
   12175 	if (sc->nvm.acquire(sc) != 0)
   12176 		return -1;
   12177 
    12178 	/*
    12179 	 * We need to know which flash bank is valid.  In the event
    12180 	 * that we didn't allocate eeprom_shadow_ram, we may not be
    12181 	 * managing flash_bank, so it cannot be trusted and needs
    12182 	 * to be updated with each read.
    12183 	 */
   12184 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12185 	if (rv) {
   12186 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12187 			device_xname(sc->sc_dev)));
   12188 		flash_bank = 0;
   12189 	}
   12190 
   12191 	/*
   12192 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12193 	 * size
   12194 	 */
   12195 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12196 
   12197 	for (i = 0; i < words; i++) {
   12198 		/* The NVM part needs a byte offset, hence * 2 */
   12199 		act_offset = bank_offset + ((offset + i) * 2);
   12200 		/* but we must read dword aligned, so mask ... */
   12201 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12202 		if (rv) {
   12203 			aprint_error_dev(sc->sc_dev,
   12204 			    "%s: failed to read NVM\n", __func__);
   12205 			break;
   12206 		}
   12207 		/* ... and pick out low or high word */
   12208 		if ((act_offset & 0x2) == 0)
   12209 			data[i] = (uint16_t)(dword & 0xFFFF);
   12210 		else
   12211 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12212 	}
   12213 
   12214 	sc->nvm.release(sc);
   12215 	return rv;
   12216 }
   12217 
   12218 /* iNVM */
   12219 
   12220 static int
   12221 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12222 {
   12223 	int32_t  rv = 0;
   12224 	uint32_t invm_dword;
   12225 	uint16_t i;
   12226 	uint8_t record_type, word_address;
   12227 
   12228 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12229 		device_xname(sc->sc_dev), __func__));
   12230 
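          	/*
          	 * iNVM is a flat array of dwords holding typed records.
          	 * Walk it, skipping over the CSR- and RSA-key autoload
          	 * structures, until we find a word autoload record for the
          	 * requested address or hit an uninitialized entry.
          	 */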
   12231 	for (i = 0; i < INVM_SIZE; i++) {
   12232 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12233 		/* Get record type */
   12234 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12235 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12236 			break;
   12237 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12238 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12239 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12240 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12241 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12242 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12243 			if (word_address == address) {
   12244 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12245 				rv = 0;
   12246 				break;
   12247 			}
   12248 		}
   12249 	}
   12250 
   12251 	return rv;
   12252 }
   12253 
   12254 static int
   12255 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12256 {
   12257 	int rv = 0;
   12258 	int i;
   12259 
   12260 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12261 		device_xname(sc->sc_dev), __func__));
   12262 
   12263 	if (sc->nvm.acquire(sc) != 0)
   12264 		return -1;
   12265 
   12266 	for (i = 0; i < words; i++) {
   12267 		switch (offset + i) {
   12268 		case NVM_OFF_MACADDR:
   12269 		case NVM_OFF_MACADDR1:
   12270 		case NVM_OFF_MACADDR2:
   12271 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12272 			if (rv != 0) {
   12273 				data[i] = 0xffff;
   12274 				rv = -1;
   12275 			}
   12276 			break;
   12277 		case NVM_OFF_CFG2:
   12278 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12279 			if (rv != 0) {
   12280 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12281 				rv = 0;
   12282 			}
   12283 			break;
   12284 		case NVM_OFF_CFG4:
   12285 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12286 			if (rv != 0) {
   12287 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12288 				rv = 0;
   12289 			}
   12290 			break;
   12291 		case NVM_OFF_LED_1_CFG:
   12292 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12293 			if (rv != 0) {
   12294 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12295 				rv = 0;
   12296 			}
   12297 			break;
   12298 		case NVM_OFF_LED_0_2_CFG:
   12299 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12300 			if (rv != 0) {
   12301 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12302 				rv = 0;
   12303 			}
   12304 			break;
   12305 		case NVM_OFF_ID_LED_SETTINGS:
   12306 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12307 			if (rv != 0) {
   12308 				*data = ID_LED_RESERVED_FFFF;
   12309 				rv = 0;
   12310 			}
   12311 			break;
   12312 		default:
   12313 			DPRINTF(WM_DEBUG_NVM,
   12314 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12315 			*data = NVM_RESERVED_WORD;
   12316 			break;
   12317 		}
   12318 	}
   12319 
   12320 	sc->nvm.release(sc);
   12321 	return rv;
   12322 }
   12323 
   12324 /* Lock, detecting NVM type, validate checksum, version and read */
   12325 
   12326 static int
   12327 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12328 {
   12329 	uint32_t eecd = 0;
   12330 
   12331 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12332 	    || sc->sc_type == WM_T_82583) {
   12333 		eecd = CSR_READ(sc, WMREG_EECD);
   12334 
   12335 		/* Isolate bits 15 & 16 */
   12336 		eecd = ((eecd >> 15) & 0x03);
   12337 
   12338 		/* If both bits are set, device is Flash type */
   12339 		if (eecd == 0x03)
   12340 			return 0;
   12341 	}
   12342 	return 1;
   12343 }
   12344 
   12345 static int
   12346 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12347 {
   12348 	uint32_t eec;
   12349 
   12350 	eec = CSR_READ(sc, WMREG_EEC);
   12351 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12352 		return 1;
   12353 
   12354 	return 0;
   12355 }
   12356 
   12357 /*
   12358  * wm_nvm_validate_checksum
   12359  *
   12360  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12361  */
   12362 static int
   12363 wm_nvm_validate_checksum(struct wm_softc *sc)
   12364 {
   12365 	uint16_t checksum;
   12366 	uint16_t eeprom_data;
   12367 #ifdef WM_DEBUG
   12368 	uint16_t csum_wordaddr, valid_checksum;
   12369 #endif
   12370 	int i;
   12371 
   12372 	checksum = 0;
   12373 
   12374 	/* Don't check for I211 */
   12375 	if (sc->sc_type == WM_T_I211)
   12376 		return 0;
   12377 
   12378 #ifdef WM_DEBUG
   12379 	if (sc->sc_type == WM_T_PCH_LPT) {
   12380 		csum_wordaddr = NVM_OFF_COMPAT;
   12381 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12382 	} else {
   12383 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12384 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12385 	}
   12386 
   12387 	/* Dump EEPROM image for debug */
   12388 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12389 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12390 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12391 		/* XXX PCH_SPT? */
   12392 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12393 		if ((eeprom_data & valid_checksum) == 0) {
   12394 			DPRINTF(WM_DEBUG_NVM,
   12395 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12396 				device_xname(sc->sc_dev), eeprom_data,
   12397 				    valid_checksum));
   12398 		}
   12399 	}
   12400 
   12401 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12402 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12403 		for (i = 0; i < NVM_SIZE; i++) {
   12404 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12405 				printf("XXXX ");
   12406 			else
   12407 				printf("%04hx ", eeprom_data);
   12408 			if (i % 8 == 7)
   12409 				printf("\n");
   12410 		}
   12411 	}
   12412 
   12413 #endif /* WM_DEBUG */
   12414 
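          	/*
          	 * Sum all of the first 64 words, including the checksum word
          	 * itself; the total should equal NVM_CHECKSUM (0xBABA, per
          	 * the usual Intel convention).
          	 */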
   12415 	for (i = 0; i < NVM_SIZE; i++) {
   12416 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12417 			return 1;
   12418 		checksum += eeprom_data;
   12419 	}
   12420 
   12421 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12422 #ifdef WM_DEBUG
   12423 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12424 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12425 #endif
   12426 	}
   12427 
   12428 	return 0;
   12429 }
   12430 
   12431 static void
   12432 wm_nvm_version_invm(struct wm_softc *sc)
   12433 {
   12434 	uint32_t dword;
   12435 
   12436 	/*
    12437 	 * Linux's code to decode the version is very strange, so we
    12438 	 * don't follow that algorithm and just use word 61 as the
    12439 	 * document describes.  Perhaps it's not perfect, though...
   12440 	 *
   12441 	 * Example:
   12442 	 *
   12443 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12444 	 */
   12445 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12446 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12447 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12448 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12449 }
   12450 
   12451 static void
   12452 wm_nvm_version(struct wm_softc *sc)
   12453 {
   12454 	uint16_t major, minor, build, patch;
   12455 	uint16_t uid0, uid1;
   12456 	uint16_t nvm_data;
   12457 	uint16_t off;
   12458 	bool check_version = false;
   12459 	bool check_optionrom = false;
   12460 	bool have_build = false;
   12461 	bool have_uid = true;
   12462 
   12463 	/*
   12464 	 * Version format:
   12465 	 *
   12466 	 * XYYZ
   12467 	 * X0YZ
   12468 	 * X0YY
   12469 	 *
   12470 	 * Example:
   12471 	 *
   12472 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12473 	 *	82571	0x50a6	5.10.6?
   12474 	 *	82572	0x506a	5.6.10?
   12475 	 *	82572EI	0x5069	5.6.9?
   12476 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12477 	 *		0x2013	2.1.3?
    12478 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12479 	 */
   12480 
   12481 	/*
   12482 	 * XXX
    12483 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12484 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12485 	 */
   12486 	if (sc->sc_nvm_wordsize >= NVM_OFF_IMAGE_UID1)
   12487 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12488 	else
   12489 		have_uid = false;
   12490 
   12491 	switch (sc->sc_type) {
   12492 	case WM_T_82571:
   12493 	case WM_T_82572:
   12494 	case WM_T_82574:
   12495 	case WM_T_82583:
   12496 		check_version = true;
   12497 		check_optionrom = true;
   12498 		have_build = true;
   12499 		break;
   12500 	case WM_T_82575:
   12501 	case WM_T_82576:
   12502 	case WM_T_82580:
   12503 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12504 			check_version = true;
   12505 		break;
   12506 	case WM_T_I211:
   12507 		wm_nvm_version_invm(sc);
   12508 		have_uid = false;
   12509 		goto printver;
   12510 	case WM_T_I210:
   12511 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12512 			wm_nvm_version_invm(sc);
   12513 			have_uid = false;
   12514 			goto printver;
   12515 		}
   12516 		/* FALLTHROUGH */
   12517 	case WM_T_I350:
   12518 	case WM_T_I354:
   12519 		check_version = true;
   12520 		check_optionrom = true;
   12521 		break;
   12522 	default:
   12523 		return;
   12524 	}
   12525 	if (check_version) {
   12526 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12527 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12528 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12529 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12530 			build = nvm_data & NVM_BUILD_MASK;
   12531 			have_build = true;
   12532 		} else
   12533 			minor = nvm_data & 0x00ff;
   12534 
    12535 		/* The minor number is stored in BCD; convert it to decimal */
   12536 		minor = (minor / 16) * 10 + (minor % 16);
   12537 		sc->sc_nvm_ver_major = major;
   12538 		sc->sc_nvm_ver_minor = minor;
   12539 
   12540 printver:
   12541 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12542 		    sc->sc_nvm_ver_minor);
   12543 		if (have_build) {
   12544 			sc->sc_nvm_ver_build = build;
   12545 			aprint_verbose(".%d", build);
   12546 		}
   12547 	}
   12548 
    12549 	/* Assume the Option ROM area is above NVM_SIZE */
   12550 	if ((sc->sc_nvm_wordsize >= NVM_SIZE) && check_optionrom) {
   12551 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12552 		/* Option ROM Version */
   12553 		if ((off != 0x0000) && (off != 0xffff)) {
   12554 			off += NVM_COMBO_VER_OFF;
   12555 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12556 			wm_nvm_read(sc, off, 1, &uid0);
   12557 			if ((uid0 != 0) && (uid0 != 0xffff)
   12558 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12559 				/* 16bits */
   12560 				major = uid0 >> 8;
   12561 				build = (uid0 << 8) | (uid1 >> 8);
   12562 				patch = uid1 & 0x00ff;
   12563 				aprint_verbose(", option ROM Version %d.%d.%d",
   12564 				    major, build, patch);
   12565 			}
   12566 		}
   12567 	}
   12568 
   12569 	if (have_uid) {
   12570 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12571 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12572 	}
   12573 }
   12574 
   12575 /*
   12576  * wm_nvm_read:
   12577  *
   12578  *	Read data from the serial EEPROM.
   12579  */
   12580 static int
   12581 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12582 {
   12583 	int rv;
   12584 
   12585 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12586 		device_xname(sc->sc_dev), __func__));
   12587 
   12588 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12589 		return -1;
   12590 
   12591 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12592 
   12593 	return rv;
   12594 }
   12595 
   12596 /*
   12597  * Hardware semaphores.
    12598  * Very complex...
   12599  */
   12600 
   12601 static int
   12602 wm_get_null(struct wm_softc *sc)
   12603 {
   12604 
   12605 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12606 		device_xname(sc->sc_dev), __func__));
   12607 	return 0;
   12608 }
   12609 
   12610 static void
   12611 wm_put_null(struct wm_softc *sc)
   12612 {
   12613 
   12614 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12615 		device_xname(sc->sc_dev), __func__));
   12616 	return;
   12617 }
   12618 
   12619 static int
   12620 wm_get_eecd(struct wm_softc *sc)
   12621 {
   12622 	uint32_t reg;
   12623 	int x;
   12624 
   12625 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12626 		device_xname(sc->sc_dev), __func__));
   12627 
   12628 	reg = CSR_READ(sc, WMREG_EECD);
   12629 
   12630 	/* Request EEPROM access. */
   12631 	reg |= EECD_EE_REQ;
   12632 	CSR_WRITE(sc, WMREG_EECD, reg);
   12633 
   12634 	/* ..and wait for it to be granted. */
   12635 	for (x = 0; x < 1000; x++) {
   12636 		reg = CSR_READ(sc, WMREG_EECD);
   12637 		if (reg & EECD_EE_GNT)
   12638 			break;
   12639 		delay(5);
   12640 	}
   12641 	if ((reg & EECD_EE_GNT) == 0) {
   12642 		aprint_error_dev(sc->sc_dev,
   12643 		    "could not acquire EEPROM GNT\n");
   12644 		reg &= ~EECD_EE_REQ;
   12645 		CSR_WRITE(sc, WMREG_EECD, reg);
   12646 		return -1;
   12647 	}
   12648 
   12649 	return 0;
   12650 }
   12651 
   12652 static void
   12653 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12654 {
   12655 
   12656 	*eecd |= EECD_SK;
   12657 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12658 	CSR_WRITE_FLUSH(sc);
   12659 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12660 		delay(1);
   12661 	else
   12662 		delay(50);
   12663 }
   12664 
   12665 static void
   12666 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12667 {
   12668 
   12669 	*eecd &= ~EECD_SK;
   12670 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12671 	CSR_WRITE_FLUSH(sc);
   12672 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12673 		delay(1);
   12674 	else
   12675 		delay(50);
   12676 }
   12677 
   12678 static void
   12679 wm_put_eecd(struct wm_softc *sc)
   12680 {
   12681 	uint32_t reg;
   12682 
   12683 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12684 		device_xname(sc->sc_dev), __func__));
   12685 
   12686 	/* Stop nvm */
   12687 	reg = CSR_READ(sc, WMREG_EECD);
   12688 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12689 		/* Pull CS high */
   12690 		reg |= EECD_CS;
   12691 		wm_nvm_eec_clock_lower(sc, &reg);
   12692 	} else {
   12693 		/* CS on Microwire is active-high */
   12694 		reg &= ~(EECD_CS | EECD_DI);
   12695 		CSR_WRITE(sc, WMREG_EECD, reg);
   12696 		wm_nvm_eec_clock_raise(sc, &reg);
   12697 		wm_nvm_eec_clock_lower(sc, &reg);
   12698 	}
   12699 
   12700 	reg = CSR_READ(sc, WMREG_EECD);
   12701 	reg &= ~EECD_EE_REQ;
   12702 	CSR_WRITE(sc, WMREG_EECD, reg);
   12703 
   12704 	return;
   12705 }
   12706 
   12707 /*
   12708  * Get hardware semaphore.
   12709  * Same as e1000_get_hw_semaphore_generic()
   12710  */
   12711 static int
   12712 wm_get_swsm_semaphore(struct wm_softc *sc)
   12713 {
   12714 	int32_t timeout;
   12715 	uint32_t swsm;
   12716 
   12717 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12718 		device_xname(sc->sc_dev), __func__));
   12719 	KASSERT(sc->sc_nvm_wordsize > 0);
   12720 
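          	/*
          	 * Acquisition is two-staged, as in e1000: first wait for the
          	 * hardware-arbitrated SMBI bit to clear, then claim the
          	 * software/firmware semaphore by setting SWESMBI and reading
          	 * it back to confirm we really got it.
          	 */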
   12721 retry:
   12722 	/* Get the SW semaphore. */
   12723 	timeout = sc->sc_nvm_wordsize + 1;
   12724 	while (timeout) {
   12725 		swsm = CSR_READ(sc, WMREG_SWSM);
   12726 
   12727 		if ((swsm & SWSM_SMBI) == 0)
   12728 			break;
   12729 
   12730 		delay(50);
   12731 		timeout--;
   12732 	}
   12733 
   12734 	if (timeout == 0) {
   12735 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12736 			/*
   12737 			 * In rare circumstances, the SW semaphore may already
   12738 			 * be held unintentionally. Clear the semaphore once
   12739 			 * before giving up.
   12740 			 */
   12741 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12742 			wm_put_swsm_semaphore(sc);
   12743 			goto retry;
   12744 		}
   12745 		aprint_error_dev(sc->sc_dev,
   12746 		    "could not acquire SWSM SMBI\n");
   12747 		return 1;
   12748 	}
   12749 
   12750 	/* Get the FW semaphore. */
   12751 	timeout = sc->sc_nvm_wordsize + 1;
   12752 	while (timeout) {
   12753 		swsm = CSR_READ(sc, WMREG_SWSM);
   12754 		swsm |= SWSM_SWESMBI;
   12755 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12756 		/* If we managed to set the bit we got the semaphore. */
   12757 		swsm = CSR_READ(sc, WMREG_SWSM);
   12758 		if (swsm & SWSM_SWESMBI)
   12759 			break;
   12760 
   12761 		delay(50);
   12762 		timeout--;
   12763 	}
   12764 
   12765 	if (timeout == 0) {
   12766 		aprint_error_dev(sc->sc_dev,
   12767 		    "could not acquire SWSM SWESMBI\n");
   12768 		/* Release semaphores */
   12769 		wm_put_swsm_semaphore(sc);
   12770 		return 1;
   12771 	}
   12772 	return 0;
   12773 }
   12774 
   12775 /*
   12776  * Put hardware semaphore.
   12777  * Same as e1000_put_hw_semaphore_generic()
   12778  */
   12779 static void
   12780 wm_put_swsm_semaphore(struct wm_softc *sc)
   12781 {
   12782 	uint32_t swsm;
   12783 
   12784 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12785 		device_xname(sc->sc_dev), __func__));
   12786 
   12787 	swsm = CSR_READ(sc, WMREG_SWSM);
   12788 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12789 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12790 }
   12791 
   12792 /*
   12793  * Get SW/FW semaphore.
   12794  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12795  */
   12796 static int
   12797 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12798 {
   12799 	uint32_t swfw_sync;
   12800 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12801 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12802 	int timeout;
   12803 
   12804 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12805 		device_xname(sc->sc_dev), __func__));
   12806 
   12807 	if (sc->sc_type == WM_T_80003)
   12808 		timeout = 50;
   12809 	else
   12810 		timeout = 200;
   12811 
    12812 	for (; timeout > 0; timeout--) {
   12813 		if (wm_get_swsm_semaphore(sc)) {
   12814 			aprint_error_dev(sc->sc_dev,
   12815 			    "%s: failed to get semaphore\n",
   12816 			    __func__);
   12817 			return 1;
   12818 		}
   12819 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12820 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12821 			swfw_sync |= swmask;
   12822 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12823 			wm_put_swsm_semaphore(sc);
   12824 			return 0;
   12825 		}
   12826 		wm_put_swsm_semaphore(sc);
   12827 		delay(5000);
   12828 	}
   12829 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12830 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12831 	return 1;
   12832 }
   12833 
   12834 static void
   12835 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12836 {
   12837 	uint32_t swfw_sync;
   12838 
   12839 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12840 		device_xname(sc->sc_dev), __func__));
   12841 
   12842 	while (wm_get_swsm_semaphore(sc) != 0)
   12843 		continue;
   12844 
   12845 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12846 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12847 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12848 
   12849 	wm_put_swsm_semaphore(sc);
   12850 }
   12851 
   12852 static int
   12853 wm_get_nvm_80003(struct wm_softc *sc)
   12854 {
   12855 	int rv;
   12856 
   12857 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12858 		device_xname(sc->sc_dev), __func__));
   12859 
   12860 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12861 		aprint_error_dev(sc->sc_dev,
   12862 		    "%s: failed to get semaphore(SWFW)\n",
   12863 		    __func__);
   12864 		return rv;
   12865 	}
   12866 
   12867 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12868 	    && (rv = wm_get_eecd(sc)) != 0) {
   12869 		aprint_error_dev(sc->sc_dev,
   12870 		    "%s: failed to get semaphore(EECD)\n",
   12871 		    __func__);
   12872 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12873 		return rv;
   12874 	}
   12875 
   12876 	return 0;
   12877 }
   12878 
   12879 static void
   12880 wm_put_nvm_80003(struct wm_softc *sc)
   12881 {
   12882 
   12883 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12884 		device_xname(sc->sc_dev), __func__));
   12885 
   12886 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12887 		wm_put_eecd(sc);
   12888 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12889 }
   12890 
   12891 static int
   12892 wm_get_nvm_82571(struct wm_softc *sc)
   12893 {
   12894 	int rv;
   12895 
   12896 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12897 		device_xname(sc->sc_dev), __func__));
   12898 
   12899 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12900 		return rv;
   12901 
   12902 	switch (sc->sc_type) {
   12903 	case WM_T_82573:
   12904 		break;
   12905 	default:
   12906 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12907 			rv = wm_get_eecd(sc);
   12908 		break;
   12909 	}
   12910 
   12911 	if (rv != 0) {
   12912 		aprint_error_dev(sc->sc_dev,
   12913 		    "%s: failed to get semaphore\n",
   12914 		    __func__);
   12915 		wm_put_swsm_semaphore(sc);
   12916 	}
   12917 
   12918 	return rv;
   12919 }
   12920 
   12921 static void
   12922 wm_put_nvm_82571(struct wm_softc *sc)
   12923 {
   12924 
   12925 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12926 		device_xname(sc->sc_dev), __func__));
   12927 
   12928 	switch (sc->sc_type) {
   12929 	case WM_T_82573:
   12930 		break;
   12931 	default:
   12932 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12933 			wm_put_eecd(sc);
   12934 		break;
   12935 	}
   12936 
   12937 	wm_put_swsm_semaphore(sc);
   12938 }
   12939 
   12940 static int
   12941 wm_get_phy_82575(struct wm_softc *sc)
   12942 {
   12943 
   12944 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12945 		device_xname(sc->sc_dev), __func__));
   12946 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12947 }
   12948 
   12949 static void
   12950 wm_put_phy_82575(struct wm_softc *sc)
   12951 {
   12952 
   12953 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12954 		device_xname(sc->sc_dev), __func__));
   12955 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12956 }
   12957 
   12958 static int
   12959 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12960 {
   12961 	uint32_t ext_ctrl;
    12962 	int timeout;
   12963 
   12964 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12965 		device_xname(sc->sc_dev), __func__));
   12966 
   12967 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
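          	/*
          	 * Claim the resource by setting the MDIO software ownership
          	 * bit and reading it back; if the bit sticks, the firmware
          	 * granted us ownership, otherwise retry for up to a second.
          	 */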
   12968 	for (timeout = 0; timeout < 200; timeout++) {
   12969 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12970 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12971 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12972 
   12973 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12974 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12975 			return 0;
   12976 		delay(5000);
   12977 	}
   12978 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12979 	    device_xname(sc->sc_dev), ext_ctrl);
   12980 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12981 	return 1;
   12982 }
   12983 
   12984 static void
   12985 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12986 {
   12987 	uint32_t ext_ctrl;
   12988 
   12989 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12990 		device_xname(sc->sc_dev), __func__));
   12991 
   12992 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12993 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12994 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12995 
   12996 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12997 }
   12998 
   12999 static int
   13000 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13001 {
   13002 	uint32_t ext_ctrl;
   13003 	int timeout;
   13004 
   13005 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13006 		device_xname(sc->sc_dev), __func__));
   13007 	mutex_enter(sc->sc_ich_phymtx);
   13008 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13009 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13010 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13011 			break;
   13012 		delay(1000);
   13013 	}
   13014 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13015 		printf("%s: SW has already locked the resource\n",
   13016 		    device_xname(sc->sc_dev));
   13017 		goto out;
   13018 	}
   13019 
   13020 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13021 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13022 	for (timeout = 0; timeout < 1000; timeout++) {
   13023 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13024 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13025 			break;
   13026 		delay(1000);
   13027 	}
   13028 	if (timeout >= 1000) {
   13029 		printf("%s: failed to acquire semaphore\n",
   13030 		    device_xname(sc->sc_dev));
   13031 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13032 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13033 		goto out;
   13034 	}
   13035 	return 0;
   13036 
   13037 out:
   13038 	mutex_exit(sc->sc_ich_phymtx);
   13039 	return 1;
   13040 }
   13041 
   13042 static void
   13043 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13044 {
   13045 	uint32_t ext_ctrl;
   13046 
   13047 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13048 		device_xname(sc->sc_dev), __func__));
   13049 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13050 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13051 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13052 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13053 	} else {
   13054 		printf("%s: Semaphore unexpectedly released\n",
   13055 		    device_xname(sc->sc_dev));
   13056 	}
   13057 
   13058 	mutex_exit(sc->sc_ich_phymtx);
   13059 }
   13060 
   13061 static int
   13062 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13063 {
   13064 
   13065 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13066 		device_xname(sc->sc_dev), __func__));
   13067 	mutex_enter(sc->sc_ich_nvmmtx);
   13068 
   13069 	return 0;
   13070 }
   13071 
   13072 static void
   13073 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13074 {
   13075 
   13076 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13077 		device_xname(sc->sc_dev), __func__));
   13078 	mutex_exit(sc->sc_ich_nvmmtx);
   13079 }
   13080 
   13081 static int
   13082 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13083 {
   13084 	int i = 0;
   13085 	uint32_t reg;
   13086 
   13087 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13088 		device_xname(sc->sc_dev), __func__));
   13089 
   13090 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13091 	do {
   13092 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13093 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13094 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13095 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13096 			break;
   13097 		delay(2*1000);
   13098 		i++;
   13099 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13100 
   13101 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13102 		wm_put_hw_semaphore_82573(sc);
   13103 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13104 		    device_xname(sc->sc_dev));
   13105 		return -1;
   13106 	}
   13107 
   13108 	return 0;
   13109 }
   13110 
   13111 static void
   13112 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13113 {
   13114 	uint32_t reg;
   13115 
   13116 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13117 		device_xname(sc->sc_dev), __func__));
   13118 
   13119 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13120 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13121 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13122 }
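
/*
 * All of the wm_get_*()/wm_put_*() helpers above follow the same
 * acquire/release discipline (they back the sc->phy.acquire/release
 * hooks used elsewhere in this file).  A minimal usage sketch, assuming
 * the caller holds no other PHY/NVM lock:
 *
 *	if (sc->phy.acquire(sc) != 0)
 *		return -1;	// firmware or the peer port owns it
 *	// ... access PHY or NVM registers here ...
 *	sc->phy.release(sc);
 */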
   13123 
   13124 /*
   13125  * Management mode and power management related subroutines.
   13126  * BMC, AMT, suspend/resume and EEE.
   13127  */
   13128 
   13129 #ifdef WM_WOL
   13130 static int
   13131 wm_check_mng_mode(struct wm_softc *sc)
   13132 {
   13133 	int rv;
   13134 
   13135 	switch (sc->sc_type) {
   13136 	case WM_T_ICH8:
   13137 	case WM_T_ICH9:
   13138 	case WM_T_ICH10:
   13139 	case WM_T_PCH:
   13140 	case WM_T_PCH2:
   13141 	case WM_T_PCH_LPT:
   13142 	case WM_T_PCH_SPT:
   13143 		rv = wm_check_mng_mode_ich8lan(sc);
   13144 		break;
   13145 	case WM_T_82574:
   13146 	case WM_T_82583:
   13147 		rv = wm_check_mng_mode_82574(sc);
   13148 		break;
   13149 	case WM_T_82571:
   13150 	case WM_T_82572:
   13151 	case WM_T_82573:
   13152 	case WM_T_80003:
   13153 		rv = wm_check_mng_mode_generic(sc);
   13154 		break;
   13155 	default:
		/* nothing to do */
   13157 		rv = 0;
   13158 		break;
   13159 	}
   13160 
   13161 	return rv;
   13162 }
   13163 
   13164 static int
   13165 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13166 {
   13167 	uint32_t fwsm;
   13168 
   13169 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13170 
   13171 	if (((fwsm & FWSM_FW_VALID) != 0)
   13172 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13173 		return 1;
   13174 
   13175 	return 0;
   13176 }
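
/*
 * Note: __SHIFTOUT() extracts a bit field and right-justifies it, e.g.
 * __SHIFTOUT(0x50, __BITS(4, 7)) yields 0x5, so the FWSM_MODE tests in
 * these functions compare the decoded mode value rather than the raw
 * register bits.
 */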
   13177 
   13178 static int
   13179 wm_check_mng_mode_82574(struct wm_softc *sc)
   13180 {
   13181 	uint16_t data;
   13182 
   13183 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13184 
   13185 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13186 		return 1;
   13187 
   13188 	return 0;
   13189 }
   13190 
   13191 static int
   13192 wm_check_mng_mode_generic(struct wm_softc *sc)
   13193 {
   13194 	uint32_t fwsm;
   13195 
   13196 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13197 
   13198 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13199 		return 1;
   13200 
   13201 	return 0;
   13202 }
   13203 #endif /* WM_WOL */
   13204 
   13205 static int
   13206 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13207 {
   13208 	uint32_t manc, fwsm, factps;
   13209 
   13210 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13211 		return 0;
   13212 
   13213 	manc = CSR_READ(sc, WMREG_MANC);
   13214 
   13215 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13216 		device_xname(sc->sc_dev), manc));
   13217 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13218 		return 0;
   13219 
   13220 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13221 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13222 		factps = CSR_READ(sc, WMREG_FACTPS);
   13223 		if (((factps & FACTPS_MNGCG) == 0)
   13224 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13225 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
   13227 		uint16_t data;
   13228 
   13229 		factps = CSR_READ(sc, WMREG_FACTPS);
   13230 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13231 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13232 			device_xname(sc->sc_dev), factps, data));
   13233 		if (((factps & FACTPS_MNGCG) == 0)
   13234 		    && ((data & NVM_CFG2_MNGM_MASK)
   13235 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13236 			return 1;
   13237 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13238 	    && ((manc & MANC_ASF_EN) == 0))
   13239 		return 1;
   13240 
   13241 	return 0;
   13242 }
   13243 
   13244 static bool
   13245 wm_phy_resetisblocked(struct wm_softc *sc)
   13246 {
   13247 	bool blocked = false;
   13248 	uint32_t reg;
   13249 	int i = 0;
   13250 
   13251 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13252 		device_xname(sc->sc_dev), __func__));
   13253 
   13254 	switch (sc->sc_type) {
   13255 	case WM_T_ICH8:
   13256 	case WM_T_ICH9:
   13257 	case WM_T_ICH10:
   13258 	case WM_T_PCH:
   13259 	case WM_T_PCH2:
   13260 	case WM_T_PCH_LPT:
   13261 	case WM_T_PCH_SPT:
   13262 		do {
   13263 			reg = CSR_READ(sc, WMREG_FWSM);
   13264 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13265 				blocked = true;
   13266 				delay(10*1000);
   13267 				continue;
   13268 			}
   13269 			blocked = false;
   13270 		} while (blocked && (i++ < 30));
		return blocked;
   13273 	case WM_T_82571:
   13274 	case WM_T_82572:
   13275 	case WM_T_82573:
   13276 	case WM_T_82574:
   13277 	case WM_T_82583:
   13278 	case WM_T_80003:
   13279 		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return true;
		else
			return false;
   13285 	default:
   13286 		/* no problem */
   13287 		break;
   13288 	}
   13289 
   13290 	return false;
   13291 }
   13292 
   13293 static void
   13294 wm_get_hw_control(struct wm_softc *sc)
   13295 {
   13296 	uint32_t reg;
   13297 
   13298 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13299 		device_xname(sc->sc_dev), __func__));
   13300 
   13301 	if (sc->sc_type == WM_T_82573) {
   13302 		reg = CSR_READ(sc, WMREG_SWSM);
   13303 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13304 	} else if (sc->sc_type >= WM_T_82571) {
   13305 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13306 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13307 	}
   13308 }
   13309 
   13310 static void
   13311 wm_release_hw_control(struct wm_softc *sc)
   13312 {
   13313 	uint32_t reg;
   13314 
   13315 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13316 		device_xname(sc->sc_dev), __func__));
   13317 
   13318 	if (sc->sc_type == WM_T_82573) {
   13319 		reg = CSR_READ(sc, WMREG_SWSM);
   13320 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13321 	} else if (sc->sc_type >= WM_T_82571) {
   13322 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13323 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13324 	}
   13325 }
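
/*
 * The DRV_LOAD bit tells the management firmware (BMC/ME) whether a
 * host driver owns the hardware.  Typical lifetime (a sketch, not a
 * contract): wm_get_hw_control() in the attach path, and
 * wm_release_hw_control() on detach/shutdown, so the firmware can take
 * the port back once no driver is loaded.
 */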
   13326 
   13327 static void
   13328 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13329 {
   13330 	uint32_t reg;
   13331 
   13332 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13333 		device_xname(sc->sc_dev), __func__));
   13334 
   13335 	if (sc->sc_type < WM_T_PCH2)
   13336 		return;
   13337 
   13338 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13339 
   13340 	if (gate)
   13341 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13342 	else
   13343 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13344 
   13345 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13346 }
   13347 
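/*
 * Switch the PHY access mode from SMBus (used by the ME firmware while
 * the host is powered down) back to normal MDIO over PCIe.
 */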
   13348 static void
   13349 wm_smbustopci(struct wm_softc *sc)
   13350 {
   13351 	uint32_t fwsm, reg;
   13352 	int rv = 0;
   13353 
   13354 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13355 		device_xname(sc->sc_dev), __func__));
   13356 
   13357 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13358 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13359 
   13360 	/* Disable ULP */
   13361 	wm_ulp_disable(sc);
   13362 
   13363 	/* Acquire PHY semaphore */
   13364 	sc->phy.acquire(sc);
   13365 
   13366 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13367 	switch (sc->sc_type) {
   13368 	case WM_T_PCH_LPT:
   13369 	case WM_T_PCH_SPT:
   13370 		if (wm_phy_is_accessible_pchlan(sc))
   13371 			break;
   13372 
   13373 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13374 		reg |= CTRL_EXT_FORCE_SMBUS;
   13375 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13376 #if 0
   13377 		/* XXX Isn't this required??? */
   13378 		CSR_WRITE_FLUSH(sc);
   13379 #endif
   13380 		delay(50 * 1000);
   13381 		/* FALLTHROUGH */
   13382 	case WM_T_PCH2:
   13383 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13384 			break;
   13385 		/* FALLTHROUGH */
   13386 	case WM_T_PCH:
		if ((sc->sc_type == WM_T_PCH) && ((fwsm & FWSM_FW_VALID) != 0))
			break;
   13390 
   13391 		if (wm_phy_resetisblocked(sc) == true) {
   13392 			printf("XXX reset is blocked(3)\n");
   13393 			break;
   13394 		}
   13395 
   13396 		wm_toggle_lanphypc_pch_lpt(sc);
   13397 
   13398 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13399 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13400 				break;
   13401 
   13402 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13403 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13404 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13405 
   13406 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13407 				break;
   13408 			rv = -1;
   13409 		}
   13410 		break;
   13411 	default:
   13412 		break;
   13413 	}
   13414 
   13415 	/* Release semaphore */
   13416 	sc->phy.release(sc);
   13417 
   13418 	if (rv == 0) {
   13419 		if (wm_phy_resetisblocked(sc)) {
   13420 			printf("XXX reset is blocked(4)\n");
   13421 			goto out;
   13422 		}
   13423 		wm_reset_phy(sc);
   13424 		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
   13426 	}
   13427 
   13428 out:
   13429 	/*
   13430 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13431 	 */
   13432 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13433 		delay(10*1000);
   13434 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13435 	}
   13436 }
   13437 
   13438 static void
   13439 wm_init_manageability(struct wm_softc *sc)
   13440 {
   13441 
   13442 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13443 		device_xname(sc->sc_dev), __func__));
   13444 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13445 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13446 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13447 
   13448 		/* Disable hardware interception of ARP */
   13449 		manc &= ~MANC_ARP_EN;
   13450 
   13451 		/* Enable receiving management packets to the host */
   13452 		if (sc->sc_type >= WM_T_82571) {
   13453 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13455 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13456 		}
   13457 
   13458 		CSR_WRITE(sc, WMREG_MANC, manc);
   13459 	}
   13460 }
   13461 
   13462 static void
   13463 wm_release_manageability(struct wm_softc *sc)
   13464 {
   13465 
   13466 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13467 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13468 
   13469 		manc |= MANC_ARP_EN;
   13470 		if (sc->sc_type >= WM_T_82571)
   13471 			manc &= ~MANC_EN_MNG2HOST;
   13472 
   13473 		CSR_WRITE(sc, WMREG_MANC, manc);
   13474 	}
   13475 }
   13476 
   13477 static void
   13478 wm_get_wakeup(struct wm_softc *sc)
   13479 {
   13480 
   13481 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13482 	switch (sc->sc_type) {
   13483 	case WM_T_82573:
   13484 	case WM_T_82583:
   13485 		sc->sc_flags |= WM_F_HAS_AMT;
   13486 		/* FALLTHROUGH */
   13487 	case WM_T_80003:
   13488 	case WM_T_82575:
   13489 	case WM_T_82576:
   13490 	case WM_T_82580:
   13491 	case WM_T_I350:
   13492 	case WM_T_I354:
   13493 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13494 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13495 		/* FALLTHROUGH */
   13496 	case WM_T_82541:
   13497 	case WM_T_82541_2:
   13498 	case WM_T_82547:
   13499 	case WM_T_82547_2:
   13500 	case WM_T_82571:
   13501 	case WM_T_82572:
   13502 	case WM_T_82574:
   13503 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13504 		break;
   13505 	case WM_T_ICH8:
   13506 	case WM_T_ICH9:
   13507 	case WM_T_ICH10:
   13508 	case WM_T_PCH:
   13509 	case WM_T_PCH2:
   13510 	case WM_T_PCH_LPT:
   13511 	case WM_T_PCH_SPT:
   13512 		sc->sc_flags |= WM_F_HAS_AMT;
   13513 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13514 		break;
   13515 	default:
   13516 		break;
   13517 	}
   13518 
   13519 	/* 1: HAS_MANAGE */
   13520 	if (wm_enable_mng_pass_thru(sc) != 0)
   13521 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13522 
	/*
	 * Note that the WOL flag is set only after the EEPROM has been
	 * reset.
	 */
   13527 }
   13528 
/*
 * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer; the early I217/I218 devices listed below
 * are excluded.
 */
   13533 static void
   13534 wm_ulp_disable(struct wm_softc *sc)
   13535 {
   13536 	uint32_t reg;
   13537 	int i = 0;
   13538 
   13539 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13540 		device_xname(sc->sc_dev), __func__));
   13541 	/* Exclude old devices */
   13542 	if ((sc->sc_type < WM_T_PCH_LPT)
   13543 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13544 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13545 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13546 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13547 		return;
   13548 
   13549 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13550 		/* Request ME un-configure ULP mode in the PHY */
   13551 		reg = CSR_READ(sc, WMREG_H2ME);
   13552 		reg &= ~H2ME_ULP;
   13553 		reg |= H2ME_ENFORCE_SETTINGS;
   13554 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13555 
   13556 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13557 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13558 			if (i++ == 30) {
				printf("%s: timed out\n", __func__);
   13560 				return;
   13561 			}
   13562 			delay(10 * 1000);
   13563 		}
   13564 		reg = CSR_READ(sc, WMREG_H2ME);
   13565 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13566 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13567 
   13568 		return;
   13569 	}
   13570 
   13571 	/* Acquire semaphore */
   13572 	sc->phy.acquire(sc);
   13573 
   13574 	/* Toggle LANPHYPC */
   13575 	wm_toggle_lanphypc_pch_lpt(sc);
   13576 
   13577 	/* Unforce SMBus mode in PHY */
   13578 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13579 	if (reg == 0x0000 || reg == 0xffff) {
   13580 		uint32_t reg2;
   13581 
   13582 		printf("%s: Force SMBus first.\n", __func__);
   13583 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13584 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13585 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13586 		delay(50 * 1000);
   13587 
   13588 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13589 	}
   13590 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13591 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13592 
   13593 	/* Unforce SMBus mode in MAC */
   13594 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13595 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13596 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13597 
   13598 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13599 	reg |= HV_PM_CTRL_K1_ENA;
   13600 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13601 
   13602 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13603 	reg &= ~(I218_ULP_CONFIG1_IND
   13604 	    | I218_ULP_CONFIG1_STICKY_ULP
   13605 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13606 	    | I218_ULP_CONFIG1_WOL_HOST
   13607 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13608 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13609 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13610 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13611 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13612 	reg |= I218_ULP_CONFIG1_START;
   13613 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13614 
   13615 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13616 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13617 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13618 
   13619 	/* Release semaphore */
   13620 	sc->phy.release(sc);
   13621 	wm_gmii_reset(sc);
   13622 	delay(50 * 1000);
   13623 }
   13624 
   13625 /* WOL in the newer chipset interfaces (pchlan) */
   13626 static void
   13627 wm_enable_phy_wakeup(struct wm_softc *sc)
   13628 {
   13629 #if 0
   13630 	uint16_t preg;
   13631 
   13632 	/* Copy MAC RARs to PHY RARs */
   13633 
   13634 	/* Copy MAC MTA to PHY MTA */
   13635 
   13636 	/* Configure PHY Rx Control register */
   13637 
   13638 	/* Enable PHY wakeup in MAC register */
   13639 
   13640 	/* Configure and enable PHY wakeup in PHY registers */
   13641 
   13642 	/* Activate PHY wakeup */
   13643 
   13644 	/* XXX */
   13645 #endif
   13646 }
   13647 
   13648 /* Power down workaround on D3 */
   13649 static void
   13650 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13651 {
   13652 	uint32_t reg;
   13653 	int i;
   13654 
   13655 	for (i = 0; i < 2; i++) {
   13656 		/* Disable link */
   13657 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13658 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13659 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13660 
   13661 		/*
   13662 		 * Call gig speed drop workaround on Gig disable before
   13663 		 * accessing any PHY registers
   13664 		 */
   13665 		if (sc->sc_type == WM_T_ICH8)
   13666 			wm_gig_downshift_workaround_ich8lan(sc);
   13667 
   13668 		/* Write VR power-down enable */
   13669 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13670 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13671 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13672 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13673 
   13674 		/* Read it back and test */
   13675 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13676 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13677 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13678 			break;
   13679 
   13680 		/* Issue PHY reset and repeat at most one more time */
   13681 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13682 	}
   13683 }
   13684 
   13685 static void
   13686 wm_enable_wakeup(struct wm_softc *sc)
   13687 {
   13688 	uint32_t reg, pmreg;
   13689 	pcireg_t pmode;
   13690 
   13691 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13692 		device_xname(sc->sc_dev), __func__));
   13693 
   13694 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13695 		&pmreg, NULL) == 0)
   13696 		return;
   13697 
   13698 	/* Advertise the wakeup capability */
   13699 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13700 	    | CTRL_SWDPIN(3));
   13701 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13702 
   13703 	/* ICH workaround */
   13704 	switch (sc->sc_type) {
   13705 	case WM_T_ICH8:
   13706 	case WM_T_ICH9:
   13707 	case WM_T_ICH10:
   13708 	case WM_T_PCH:
   13709 	case WM_T_PCH2:
   13710 	case WM_T_PCH_LPT:
   13711 	case WM_T_PCH_SPT:
   13712 		/* Disable gig during WOL */
   13713 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13714 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13715 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13716 		if (sc->sc_type == WM_T_PCH)
   13717 			wm_gmii_reset(sc);
   13718 
   13719 		/* Power down workaround */
   13720 		if (sc->sc_phytype == WMPHY_82577) {
   13721 			struct mii_softc *child;
   13722 
   13723 			/* Assume that the PHY is copper */
   13724 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13725 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13726 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13727 				    (768 << 5) | 25, 0x0444); /* magic num */
   13728 		}
   13729 		break;
   13730 	default:
   13731 		break;
   13732 	}
   13733 
   13734 	/* Keep the laser running on fiber adapters */
   13735 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13736 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13737 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13738 		reg |= CTRL_EXT_SWDPIN(3);
   13739 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13740 	}
   13741 
   13742 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13743 #if 0	/* for the multicast packet */
   13744 	reg |= WUFC_MC;
   13745 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13746 #endif
   13747 
   13748 	if (sc->sc_type >= WM_T_PCH)
   13749 		wm_enable_phy_wakeup(sc);
   13750 	else {
   13751 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13752 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13753 	}
   13754 
   13755 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13756 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13757 		|| (sc->sc_type == WM_T_PCH2))
   13758 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13759 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13760 
   13761 	/* Request PME */
   13762 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13763 #if 0
   13764 	/* Disable WOL */
   13765 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13766 #else
   13767 	/* For WOL */
   13768 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13769 #endif
   13770 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13771 }
   13772 
   13773 /* LPLU */
   13774 
   13775 static void
   13776 wm_lplu_d0_disable(struct wm_softc *sc)
   13777 {
   13778 	struct mii_data *mii = &sc->sc_mii;
   13779 	uint32_t reg;
   13780 
   13781 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13782 		device_xname(sc->sc_dev), __func__));
   13783 
   13784 	if (sc->sc_phytype == WMPHY_IFE)
   13785 		return;
   13786 
   13787 	switch (sc->sc_type) {
   13788 	case WM_T_82571:
   13789 	case WM_T_82572:
   13790 	case WM_T_82573:
   13791 	case WM_T_82575:
   13792 	case WM_T_82576:
   13793 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13794 		reg &= ~PMR_D0_LPLU;
   13795 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13796 		break;
   13797 	case WM_T_82580:
   13798 	case WM_T_I350:
   13799 	case WM_T_I210:
   13800 	case WM_T_I211:
   13801 		reg = CSR_READ(sc, WMREG_PHPM);
   13802 		reg &= ~PHPM_D0A_LPLU;
   13803 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13804 		break;
   13805 	case WM_T_82574:
   13806 	case WM_T_82583:
   13807 	case WM_T_ICH8:
   13808 	case WM_T_ICH9:
   13809 	case WM_T_ICH10:
   13810 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13811 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13812 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13813 		CSR_WRITE_FLUSH(sc);
   13814 		break;
   13815 	case WM_T_PCH:
   13816 	case WM_T_PCH2:
   13817 	case WM_T_PCH_LPT:
   13818 	case WM_T_PCH_SPT:
   13819 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13820 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13821 		if (wm_phy_resetisblocked(sc) == false)
   13822 			reg |= HV_OEM_BITS_ANEGNOW;
   13823 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13824 		break;
   13825 	default:
   13826 		break;
   13827 	}
   13828 }
   13829 
   13830 /* EEE */
   13831 
   13832 static void
   13833 wm_set_eee_i350(struct wm_softc *sc)
   13834 {
   13835 	uint32_t ipcnfg, eeer;
   13836 
   13837 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13838 	eeer = CSR_READ(sc, WMREG_EEER);
   13839 
   13840 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13841 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13842 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13843 		    | EEER_LPI_FC);
   13844 	} else {
   13845 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13846 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13847 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13848 		    | EEER_LPI_FC);
   13849 	}
   13850 
   13851 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13852 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13853 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13854 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13855 }
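
/*
 * A minimal usage sketch (with a hypothetical "enable" flag, e.g. from
 * an ifconfig/sysctl control path; WM_F_EEE is the only input the
 * helper consumes):
 *
 *	if (enable)
 *		sc->sc_flags |= WM_F_EEE;
 *	else
 *		sc->sc_flags &= ~WM_F_EEE;
 *	wm_set_eee_i350(sc);
 */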
   13856 
   13857 /*
   13858  * Workarounds (mainly PHY related).
   13859  * Basically, PHY's workarounds are in the PHY drivers.
   13860  */
   13861 
/* Workaround for 82566 Kumeran PCS lock loss */
   13863 static void
   13864 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13865 {
   13866 	struct mii_data *mii = &sc->sc_mii;
   13867 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13868 	int i;
   13869 	int reg;
   13870 
   13871 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13872 		device_xname(sc->sc_dev), __func__));
   13873 
   13874 	/* If the link is not up, do nothing */
   13875 	if ((status & STATUS_LU) == 0)
   13876 		return;
   13877 
	/* Nothing to do unless the link speed is 1Gbps */
   13879 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13880 		return;
   13881 
   13882 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13883 	for (i = 0; i < 10; i++) {
   13884 		/* read twice */
   13885 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13886 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13887 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13888 			goto out;	/* GOOD! */
   13889 
   13890 		/* Reset the PHY */
   13891 		wm_reset_phy(sc);
   13892 		delay(5*1000);
   13893 	}
   13894 
   13895 	/* Disable GigE link negotiation */
   13896 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13897 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13898 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13899 
   13900 	/*
   13901 	 * Call gig speed drop workaround on Gig disable before accessing
   13902 	 * any PHY registers.
   13903 	 */
   13904 	wm_gig_downshift_workaround_ich8lan(sc);
   13905 
   13906 out:
   13907 	return;
   13908 }
   13909 
   13910 /* WOL from S5 stops working */
   13911 static void
   13912 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13913 {
   13914 	uint16_t kmreg;
   13915 
   13916 	/* Only for igp3 */
   13917 	if (sc->sc_phytype == WMPHY_IGP_3) {
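		/*
		 * Pulse the Kumeran near-end loopback diagnostic bit:
		 * setting and then clearing KUMCTRLSTA_DIAG_NELPBK is the
		 * workaround sequence (mirroring Intel's ich8lan code).
		 */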
   13918 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13919 			return;
   13920 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13921 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13922 			return;
   13923 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13924 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13925 	}
   13926 }
   13927 
   13928 /*
   13929  * Workaround for pch's PHYs
   13930  * XXX should be moved to new PHY driver?
   13931  */
   13932 static void
   13933 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13934 {
   13935 
   13936 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13937 		device_xname(sc->sc_dev), __func__));
   13938 	KASSERT(sc->sc_type == WM_T_PCH);
   13939 
   13940 	if (sc->sc_phytype == WMPHY_82577)
   13941 		wm_set_mdio_slow_mode_hv(sc);
   13942 
   13943 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13944 
   13945 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13946 
   13947 	/* 82578 */
   13948 	if (sc->sc_phytype == WMPHY_82578) {
   13949 		struct mii_softc *child;
   13950 
   13951 		/*
   13952 		 * Return registers to default by doing a soft reset then
   13953 		 * writing 0x3140 to the control register
   13954 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13955 		 */
   13956 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13957 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13958 			PHY_RESET(child);
   13959 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13960 			    0x3140);
   13961 		}
   13962 	}
   13963 
   13964 	/* Select page 0 */
   13965 	sc->phy.acquire(sc);
   13966 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13967 	sc->phy.release(sc);
   13968 
   13969 	/*
   13970 	 * Configure the K1 Si workaround during phy reset assuming there is
   13971 	 * link so that it disables K1 if link is in 1Gbps.
   13972 	 */
   13973 	wm_k1_gig_workaround_hv(sc, 1);
   13974 }
   13975 
   13976 static void
   13977 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13978 {
   13979 
   13980 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13981 		device_xname(sc->sc_dev), __func__));
   13982 	KASSERT(sc->sc_type == WM_T_PCH2);
   13983 
   13984 	wm_set_mdio_slow_mode_hv(sc);
   13985 }
   13986 
   13987 static int
   13988 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13989 {
   13990 	int k1_enable = sc->sc_nvm_k1_enabled;
   13991 
   13992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13993 		device_xname(sc->sc_dev), __func__));
   13994 
   13995 	if (sc->phy.acquire(sc) != 0)
   13996 		return -1;
   13997 
   13998 	if (link) {
   13999 		k1_enable = 0;
   14000 
   14001 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
   14003 	} else {
   14004 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
   14006 	}
   14007 
   14008 	wm_configure_k1_ich8lan(sc, k1_enable);
   14009 	sc->phy.release(sc);
   14010 
   14011 	return 0;
   14012 }
   14013 
   14014 static void
   14015 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14016 {
   14017 	uint32_t reg;
   14018 
   14019 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14020 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14021 	    reg | HV_KMRN_MDIO_SLOW);
   14022 }
   14023 
   14024 static void
   14025 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14026 {
   14027 	uint32_t ctrl, ctrl_ext, tmp;
   14028 	uint16_t kmreg;
   14029 	int rv;
   14030 
   14031 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14032 	if (rv != 0)
   14033 		return;
   14034 
   14035 	if (k1_enable)
   14036 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14037 	else
   14038 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14039 
   14040 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14041 	if (rv != 0)
   14042 		return;
   14043 
   14044 	delay(20);
   14045 
   14046 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14047 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14048 
   14049 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14050 	tmp |= CTRL_FRCSPD;
   14051 
   14052 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14053 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14054 	CSR_WRITE_FLUSH(sc);
   14055 	delay(20);
   14056 
   14057 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14058 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14059 	CSR_WRITE_FLUSH(sc);
   14060 	delay(20);
   14063 }
   14064 
/* Special case: the 82575 needs to do manual init ... */
   14066 static void
   14067 wm_reset_init_script_82575(struct wm_softc *sc)
   14068 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * The setup is the same as the one mentioned in the FreeBSD driver
	 * for the i82575.
	 */
   14073 
   14074 	/* SerDes configuration via SERDESCTRL */
   14075 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14076 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14077 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14078 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14079 
   14080 	/* CCM configuration via CCMCTL register */
   14081 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14082 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14083 
   14084 	/* PCIe lanes configuration */
   14085 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14086 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14087 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14088 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14089 
   14090 	/* PCIe PLL Configuration */
   14091 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14092 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14093 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14094 }
   14095 
   14096 static void
   14097 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14098 {
   14099 	uint32_t reg;
   14100 	uint16_t nvmword;
   14101 	int rv;
   14102 
   14103 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14104 		return;
   14105 
   14106 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14107 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14108 	if (rv != 0) {
   14109 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14110 		    __func__);
   14111 		return;
   14112 	}
   14113 
   14114 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14115 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14116 		reg |= MDICNFG_DEST;
   14117 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14118 		reg |= MDICNFG_COM_MDIO;
   14119 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14120 }
   14121 
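/* An ID of all 0s or all 1s indicates a failed or floating MDIO read */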
   14122 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14123 
   14124 static bool
   14125 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14126 {
   14127 	int i;
   14128 	uint32_t reg;
   14129 	uint16_t id1, id2;
   14130 
   14131 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14132 		device_xname(sc->sc_dev), __func__));
   14133 	id1 = id2 = 0xffff;
   14134 	for (i = 0; i < 2; i++) {
   14135 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14136 		if (MII_INVALIDID(id1))
   14137 			continue;
   14138 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14139 		if (MII_INVALIDID(id2))
   14140 			continue;
   14141 		break;
   14142 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   14146 
   14147 	if (sc->sc_type < WM_T_PCH_LPT) {
   14148 		sc->phy.release(sc);
   14149 		wm_set_mdio_slow_mode_hv(sc);
   14150 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14151 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14152 		sc->phy.acquire(sc);
   14153 	}
   14154 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14155 		printf("XXX return with false\n");
   14156 		return false;
   14157 	}
   14158 out:
   14159 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14160 		/* Only unforce SMBus if ME is not active */
   14161 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14162 			/* Unforce SMBus mode in PHY */
   14163 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14164 			    CV_SMB_CTRL);
   14165 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14166 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14167 			    CV_SMB_CTRL, reg);
   14168 
   14169 			/* Unforce SMBus mode in MAC */
   14170 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14171 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14172 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14173 		}
   14174 	}
   14175 	return true;
   14176 }
   14177 
   14178 static void
   14179 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14180 {
   14181 	uint32_t reg;
   14182 	int i;
   14183 
   14184 	/* Set PHY Config Counter to 50msec */
   14185 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14186 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14187 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14188 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14189 
   14190 	/* Toggle LANPHYPC */
   14191 	reg = CSR_READ(sc, WMREG_CTRL);
   14192 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14193 	reg &= ~CTRL_LANPHYPC_VALUE;
   14194 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14195 	CSR_WRITE_FLUSH(sc);
   14196 	delay(1000);
   14197 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14198 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14199 	CSR_WRITE_FLUSH(sc);
   14200 
   14201 	if (sc->sc_type < WM_T_PCH_LPT)
   14202 		delay(50 * 1000);
   14203 	else {
   14204 		i = 20;
   14205 
   14206 		do {
   14207 			delay(5 * 1000);
   14208 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14209 		    && i--);
   14210 
   14211 		delay(30 * 1000);
   14212 	}
   14213 }
   14214 
   14215 static int
   14216 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14217 {
   14218 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14219 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14220 	uint32_t rxa;
   14221 	uint16_t scale = 0, lat_enc = 0;
   14222 	int32_t obff_hwm = 0;
   14223 	int64_t lat_ns, value;
   14224 
   14225 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14226 		device_xname(sc->sc_dev), __func__));
   14227 
   14228 	if (link) {
   14229 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14230 		uint32_t status;
   14231 		uint16_t speed;
   14232 		pcireg_t preg;
   14233 
   14234 		status = CSR_READ(sc, WMREG_STATUS);
   14235 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14236 		case STATUS_SPEED_10:
   14237 			speed = 10;
   14238 			break;
   14239 		case STATUS_SPEED_100:
   14240 			speed = 100;
   14241 			break;
   14242 		case STATUS_SPEED_1000:
   14243 			speed = 1000;
   14244 			break;
   14245 		default:
   14246 			device_printf(sc->sc_dev, "Unknown speed "
   14247 			    "(status = %08x)\n", status);
   14248 			return -1;
   14249 		}
   14250 
   14251 		/* Rx Packet Buffer Allocation size (KB) */
   14252 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14253 
   14254 		/*
   14255 		 * Determine the maximum latency tolerated by the device.
   14256 		 *
   14257 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14258 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14259 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14260 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14261 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14262 		 */
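		/*
		 * Worked example (assuming LTRV_SCALE covers the standard
		 * PCIe LTR scale bits 12:10): scale = 1 and value = 3
		 * encode as 0x0403, meaning 3 * 2^5 ns = 96 ns.
		 */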
   14263 		lat_ns = ((int64_t)rxa * 1024 -
   14264 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14265 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14266 		if (lat_ns < 0)
   14267 			lat_ns = 0;
   14268 		else
   14269 			lat_ns /= speed;
   14270 		value = lat_ns;
   14271 
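		/* Scale value down by 2^5 per step until it fits in 10 bits */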
   14272 		while (value > LTRV_VALUE) {
			scale++;
   14274 			value = howmany(value, __BIT(5));
   14275 		}
   14276 		if (scale > LTRV_SCALE_MAX) {
   14277 			printf("%s: Invalid LTR latency scale %d\n",
   14278 			    device_xname(sc->sc_dev), scale);
   14279 			return -1;
   14280 		}
   14281 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14282 
   14283 		/* Determine the maximum latency tolerated by the platform */
   14284 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14285 		    WM_PCI_LTR_CAP_LPT);
   14286 		max_snoop = preg & 0xffff;
   14287 		max_nosnoop = preg >> 16;
   14288 
   14289 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14290 
   14291 		if (lat_enc > max_ltr_enc) {
   14292 			lat_enc = max_ltr_enc;
   14293 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14294 			    * PCI_LTR_SCALETONS(
   14295 				    __SHIFTOUT(lat_enc,
   14296 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14297 		}
   14298 
   14299 		if (lat_ns) {
   14300 			lat_ns *= speed * 1000;
   14301 			lat_ns /= 8;
   14302 			lat_ns /= 1000000000;
   14303 			obff_hwm = (int32_t)(rxa - lat_ns);
   14304 		}
   14305 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   14308 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14309 			return -1;
   14310 		}
   14311 	}
	/* Set Snoop and No-Snoop latencies to the same value */
   14313 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14314 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14315 
   14316 	/* Set OBFF high water mark */
   14317 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14318 	reg |= obff_hwm;
   14319 	CSR_WRITE(sc, WMREG_SVT, reg);
   14320 
   14321 	/* Enable OBFF */
   14322 	reg = CSR_READ(sc, WMREG_SVCR);
   14323 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14324 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14325 
   14326 	return 0;
   14327 }
   14328 
   14329 /*
   14330  * I210 Errata 25 and I211 Errata 10
   14331  * Slow System Clock.
   14332  */
   14333 static void
   14334 wm_pll_workaround_i210(struct wm_softc *sc)
   14335 {
   14336 	uint32_t mdicnfg, wuc;
   14337 	uint32_t reg;
   14338 	pcireg_t pcireg;
   14339 	uint32_t pmreg;
   14340 	uint16_t nvmword, tmp_nvmword;
   14341 	int phyval;
   14342 	bool wa_done = false;
   14343 	int i;
   14344 
   14345 	/* Save WUC and MDICNFG registers */
   14346 	wuc = CSR_READ(sc, WMREG_WUC);
   14347 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14348 
   14349 	reg = mdicnfg & ~MDICNFG_DEST;
   14350 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14351 
   14352 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14353 		nvmword = INVM_DEFAULT_AL;
   14354 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14355 
   14356 	/* Get Power Management cap offset */
   14357 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14358 		&pmreg, NULL) == 0)
   14359 		return;
   14360 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14361 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14362 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14363 
   14364 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14365 			break; /* OK */
   14366 		}
   14367 
   14368 		wa_done = true;
   14369 		/* Directly reset the internal PHY */
   14370 		reg = CSR_READ(sc, WMREG_CTRL);
   14371 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14372 
   14373 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14374 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14375 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14376 
   14377 		CSR_WRITE(sc, WMREG_WUC, 0);
   14378 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14379 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14380 
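		/* Bounce through D3hot and back to D0 to restart the PLL */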
   14381 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14382 		    pmreg + PCI_PMCSR);
   14383 		pcireg |= PCI_PMCSR_STATE_D3;
   14384 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14385 		    pmreg + PCI_PMCSR, pcireg);
   14386 		delay(1000);
   14387 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14388 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14389 		    pmreg + PCI_PMCSR, pcireg);
   14390 
   14391 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14392 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14393 
   14394 		/* Restore WUC register */
   14395 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14396 	}
   14397 
   14398 	/* Restore MDICNFG setting */
   14399 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14400 	if (wa_done)
   14401 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14402 }
   14403 
   14404 static void
   14405 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14406 {
   14407 	uint32_t reg;
   14408 
   14409 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14410 		device_xname(sc->sc_dev), __func__));
   14411 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14412 
   14413 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14414 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14415 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14416 
   14417 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14418 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14419 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14420 }
   14421