      1 /*	$NetBSD: if_wm.c,v 1.549 2017/12/08 05:22:23 ozaki-r Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- Tx multiqueue improvement (refine queue selection logic)
      77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.549 2017/12/08 05:22:23 ozaki-r Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 #include <dev/mii/ihphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
     158 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
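
/*
 * Minimal usage sketch (not compiled): the format arguments to
 * DPRINTF() are wrapped in an extra set of parentheses so that the
 * variadic printf call survives macro expansion.  The debug class and
 * the message below are illustrative only.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif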
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
     171  * Maximum number of interrupts this device driver supports.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
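
/*
 * Because WM_NTXDESC(txq) and WM_TXQUEUELEN(txq) are powers of two,
 * advancing a ring index is a cheap mask operation instead of a
 * modulo.  Illustrative sketch (not compiled) of walking all
 * descriptors belonging to one transmit job:
 */
#if 0
	for (i = txs->txs_firstdesc; ; i = WM_NEXTTX(txq, i)) {
		/* ... examine descriptor i ... */
		if (i == txs->txs_lastdesc)
			break;
	}
#endif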
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for normal
    218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
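
/*
 * Both limits can be overridden from the kernel configuration (the
 * defines land in opt_if_wm.h); the values below are examples only:
 *
 *	options 	WM_RX_PROCESS_LIMIT_DEFAULT=200
 *	options 	WM_RX_INTR_PROCESS_LIMIT_DEFAULT=16
 */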
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
     297 	} while (/*CONSTCOND*/0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
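
/*
 * Illustrative sketch (not compiled) of how the three macros pair up:
 * WM_Q_EVCNT_DEFINE() declares the storage inside a queue structure,
 * WM_Q_EVCNT_ATTACH() registers the counter under a per-queue name
 * such as "txq00txdw", and WM_Q_EVCNT_DETACH() unregisters it again.
 */
#if 0
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* in struct wm_txqueue */
	WM_Q_EVCNT_ATTACH(txq, txdw, txq, qnum,	/* at attach time */
	    device_xname(sc->sc_dev), EVCNT_TYPE_INTR);
	WM_Q_EVCNT_DETACH(txq, txdw, txq, qnum);/* at detach time */
#endif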
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
     320 	size_t txq_descsize;		/* size of a Tx descriptor */
    321 	txdescs_t *txq_descs_u;
     322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     346 	 * CPUs. This queue mediates between them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
     351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     370 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
    376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
     395 	size_t rxq_descsize;		/* size of an Rx descriptor */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     399 	int rxq_desc_rseg;		/* real number of control segments */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
    428 	int wmq_intr_idx;		/* index of MSI-X tables */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
     436 	void *wmq_si;			/* softint cookie (wm_handle_queue) */
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 struct wm_nvmop {
    446 	int (*acquire)(struct wm_softc *);
    447 	void (*release)(struct wm_softc *);
    448 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    449 };
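
/*
 * The phy/nvm operation vectors let chip-specific attach code select
 * the right semaphore and access routines once, so common code stays
 * generic.  Illustrative sketch (not compiled) of the calling
 * convention; "offset", "nwords" and "data" are placeholders:
 */
#if 0
	if (sc->nvm.acquire(sc) == 0) {
		rv = sc->nvm.read(sc, offset, nwords, data);
		sc->nvm.release(sc);
	}
#endif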
    450 
    451 /*
    452  * Software state per device.
    453  */
    454 struct wm_softc {
    455 	device_t sc_dev;		/* generic device information */
    456 	bus_space_tag_t sc_st;		/* bus space tag */
    457 	bus_space_handle_t sc_sh;	/* bus space handle */
    458 	bus_size_t sc_ss;		/* bus space size */
    459 	bus_space_tag_t sc_iot;		/* I/O space tag */
    460 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    461 	bus_size_t sc_ios;		/* I/O space size */
    462 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    463 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    464 	bus_size_t sc_flashs;		/* flash registers space size */
    465 	off_t sc_flashreg_offset;	/*
    466 					 * offset to flash registers from
    467 					 * start of BAR
    468 					 */
    469 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    470 
    471 	struct ethercom sc_ethercom;	/* ethernet common data */
    472 	struct mii_data sc_mii;		/* MII/media information */
    473 
    474 	pci_chipset_tag_t sc_pc;
    475 	pcitag_t sc_pcitag;
    476 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    477 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    478 
    479 	uint16_t sc_pcidevid;		/* PCI device ID */
    480 	wm_chip_type sc_type;		/* MAC type */
    481 	int sc_rev;			/* MAC revision */
    482 	wm_phy_type sc_phytype;		/* PHY type */
    483 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    484 #define	WM_MEDIATYPE_UNKNOWN		0x00
    485 #define	WM_MEDIATYPE_FIBER		0x01
    486 #define	WM_MEDIATYPE_COPPER		0x02
    487 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    488 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    489 	int sc_flags;			/* flags; see below */
    490 	int sc_if_flags;		/* last if_flags */
    491 	int sc_flowflags;		/* 802.3x flow control flags */
    492 	int sc_align_tweak;
    493 
    494 	void *sc_ihs[WM_MAX_NINTR];	/*
    495 					 * interrupt cookie.
    496 					 * - legacy and msi use sc_ihs[0] only
    497 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    498 					 */
    499 	pci_intr_handle_t *sc_intrs;	/*
    500 					 * legacy and msi use sc_intrs[0] only
     501 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    502 					 */
    503 	int sc_nintrs;			/* number of interrupts */
    504 
    505 	int sc_link_intr_idx;		/* index of MSI-X tables */
    506 
    507 	callout_t sc_tick_ch;		/* tick callout */
    508 	bool sc_core_stopping;
    509 
    510 	int sc_nvm_ver_major;
    511 	int sc_nvm_ver_minor;
    512 	int sc_nvm_ver_build;
    513 	int sc_nvm_addrbits;		/* NVM address bits */
    514 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    515 	int sc_ich8_flash_base;
    516 	int sc_ich8_flash_bank_size;
    517 	int sc_nvm_k1_enabled;
    518 
    519 	int sc_nqueues;
    520 	struct wm_queue *sc_queue;
    521 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    522 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    523 
    524 	int sc_affinity_offset;
    525 
    526 #ifdef WM_EVENT_COUNTERS
    527 	/* Event counters. */
    528 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    529 
     530 	/* WM_T_82542_2_1 only */
    531 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    532 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    533 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    534 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    535 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    536 #endif /* WM_EVENT_COUNTERS */
    537 
     538 	/* This variable is used only on the 82547. */
    539 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    540 
    541 	uint32_t sc_ctrl;		/* prototype CTRL register */
    542 #if 0
    543 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    544 #endif
    545 	uint32_t sc_icr;		/* prototype interrupt bits */
    546 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    547 	uint32_t sc_tctl;		/* prototype TCTL register */
    548 	uint32_t sc_rctl;		/* prototype RCTL register */
    549 	uint32_t sc_txcw;		/* prototype TXCW register */
    550 	uint32_t sc_tipg;		/* prototype TIPG register */
    551 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    552 	uint32_t sc_pba;		/* prototype PBA register */
    553 
    554 	int sc_tbi_linkup;		/* TBI link status */
    555 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    556 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    557 
    558 	int sc_mchash_type;		/* multicast filter offset */
    559 
    560 	krndsource_t rnd_source;	/* random source */
    561 
    562 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    563 
    564 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    565 	kmutex_t *sc_ich_phymtx;	/*
    566 					 * 82574/82583/ICH/PCH specific PHY
    567 					 * mutex. For 82574/82583, the mutex
    568 					 * is used for both PHY and NVM.
    569 					 */
    570 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    571 
    572 	struct wm_phyop phy;
    573 	struct wm_nvmop nvm;
    574 };
    575 
    576 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    577 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    578 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
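
/*
 * WM_CORE_LOCK() is deliberately a no-op until sc_core_lock has been
 * created, which keeps it safe during early attach.  Illustrative
 * sketch (not compiled):
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify state covered by sc_core_lock ... */
	WM_CORE_UNLOCK(sc);
#endif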
    579 
    580 #define	WM_RXCHAIN_RESET(rxq)						\
    581 do {									\
    582 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    583 	*(rxq)->rxq_tailp = NULL;					\
    584 	(rxq)->rxq_len = 0;						\
    585 } while (/*CONSTCOND*/0)
    586 
    587 #define	WM_RXCHAIN_LINK(rxq, m)						\
    588 do {									\
    589 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    590 	(rxq)->rxq_tailp = &(m)->m_next;				\
    591 } while (/*CONSTCOND*/0)
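
/*
 * Illustrative sketch (not compiled) of reassembling a multi-buffer
 * packet with the Rx chain macros; "len" and "end_of_packet" stand in
 * for values taken from the Rx descriptor:
 */
#if 0
	WM_RXCHAIN_LINK(rxq, m);	/* append this buffer to the chain */
	rxq->rxq_len += len;
	if (end_of_packet) {
		m = rxq->rxq_head;
		WM_RXCHAIN_RESET(rxq);
		/* ... hand the completed chain to the stack ... */
	}
#endif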
    592 
    593 #ifdef WM_EVENT_COUNTERS
    594 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    595 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)			\
    598 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    600 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    601 #else /* !WM_EVENT_COUNTERS */
    602 #define	WM_EVCNT_INCR(ev)	/* nothing */
    603 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    604 
    605 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    606 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    607 #endif /* !WM_EVENT_COUNTERS */
    608 
    609 #define	CSR_READ(sc, reg)						\
    610 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    611 #define	CSR_WRITE(sc, reg, val)						\
    612 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    613 #define	CSR_WRITE_FLUSH(sc)						\
    614 	(void) CSR_READ((sc), WMREG_STATUS)
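
/*
 * Chip registers sit behind posted writes; CSR_WRITE_FLUSH() forces
 * them out by reading the STATUS register.  Illustrative sketch (not
 * compiled) of the common write-flush-delay pattern:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);		/* push the posted write out */
	delay(10000);
#endif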
    615 
    616 #define ICH8_FLASH_READ32(sc, reg)					\
    617 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    620 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define ICH8_FLASH_READ16(sc, reg)					\
    624 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    625 	    (reg) + sc->sc_flashreg_offset)
    626 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    627 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    628 	    (reg) + sc->sc_flashreg_offset, (data))
    629 
    630 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    631 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    632 
    633 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    634 #define	WM_CDTXADDR_HI(txq, x)						\
    635 	(sizeof(bus_addr_t) == 8 ?					\
    636 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    637 
    638 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    639 #define	WM_CDRXADDR_HI(rxq, x)						\
    640 	(sizeof(bus_addr_t) == 8 ?					\
    641 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
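
/*
 * Descriptor ring base addresses are programmed as two 32-bit halves;
 * the _HI macros collapse to 0 when bus_addr_t is 32 bits wide, so
 * the same code serves both address widths.  Illustrative sketch (not
 * compiled):
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);	/* low 32 bits of ring base */
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);	/* high 32 bits, or 0 */
	/* ... write lo/hi to the chip's Tx descriptor base registers ... */
#endif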
    642 
    643 /*
    644  * Register read/write functions.
    645  * Other than CSR_{READ|WRITE}().
    646  */
    647 #if 0
    648 static inline uint32_t wm_io_read(struct wm_softc *, int);
    649 #endif
    650 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    651 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    652 	uint32_t, uint32_t);
    653 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    654 
    655 /*
    656  * Descriptor sync/init functions.
    657  */
    658 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    659 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    660 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    661 
    662 /*
    663  * Device driver interface functions and commonly used functions.
    664  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    665  */
    666 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    667 static int	wm_match(device_t, cfdata_t, void *);
    668 static void	wm_attach(device_t, device_t, void *);
    669 static int	wm_detach(device_t, int);
    670 static bool	wm_suspend(device_t, const pmf_qual_t *);
    671 static bool	wm_resume(device_t, const pmf_qual_t *);
    672 static void	wm_watchdog(struct ifnet *);
    673 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    674 static void	wm_tick(void *);
    675 static int	wm_ifflags_cb(struct ethercom *);
    676 static int	wm_ioctl(struct ifnet *, u_long, void *);
    677 /* MAC address related */
    678 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    679 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    680 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    681 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    682 static void	wm_set_filter(struct wm_softc *);
    683 /* Reset and init related */
    684 static void	wm_set_vlan(struct wm_softc *);
    685 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    686 static void	wm_get_auto_rd_done(struct wm_softc *);
    687 static void	wm_lan_init_done(struct wm_softc *);
    688 static void	wm_get_cfg_done(struct wm_softc *);
    689 static void	wm_phy_post_reset(struct wm_softc *);
    690 static void	wm_write_smbus_addr(struct wm_softc *);
    691 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    692 static void	wm_initialize_hardware_bits(struct wm_softc *);
    693 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    694 static void	wm_reset_phy(struct wm_softc *);
    695 static void	wm_flush_desc_rings(struct wm_softc *);
    696 static void	wm_reset(struct wm_softc *);
    697 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    698 static void	wm_rxdrain(struct wm_rxqueue *);
    699 static void	wm_rss_getkey(uint8_t *);
    700 static void	wm_init_rss(struct wm_softc *);
    701 static void	wm_adjust_qnum(struct wm_softc *, int);
    702 static inline bool	wm_is_using_msix(struct wm_softc *);
    703 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    704 static int	wm_softint_establish(struct wm_softc *, int, int);
    705 static int	wm_setup_legacy(struct wm_softc *);
    706 static int	wm_setup_msix(struct wm_softc *);
    707 static int	wm_init(struct ifnet *);
    708 static int	wm_init_locked(struct ifnet *);
    709 static void	wm_unset_stopping_flags(struct wm_softc *);
    710 static void	wm_set_stopping_flags(struct wm_softc *);
    711 static void	wm_stop(struct ifnet *, int);
    712 static void	wm_stop_locked(struct ifnet *, int);
    713 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    714 static void	wm_82547_txfifo_stall(void *);
    715 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    716 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    717 /* DMA related */
    718 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    721 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    722     struct wm_txqueue *);
    723 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    725 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    728 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    729 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    730 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    731 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    732 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    733 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    734     struct wm_txqueue *);
    735 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    736     struct wm_rxqueue *);
    737 static int	wm_alloc_txrx_queues(struct wm_softc *);
    738 static void	wm_free_txrx_queues(struct wm_softc *);
    739 static int	wm_init_txrx_queues(struct wm_softc *);
    740 /* Start */
    741 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    742     struct wm_txsoft *, uint32_t *, uint8_t *);
    743 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    744 static void	wm_start(struct ifnet *);
    745 static void	wm_start_locked(struct ifnet *);
    746 static int	wm_transmit(struct ifnet *, struct mbuf *);
    747 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    748 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    749 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    750     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    751 static void	wm_nq_start(struct ifnet *);
    752 static void	wm_nq_start_locked(struct ifnet *);
    753 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    754 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    755 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    756 static void	wm_deferred_start_locked(struct wm_txqueue *);
    757 static void	wm_handle_queue(void *);
    758 /* Interrupt */
    759 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    761 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    762 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    763 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    764 static void	wm_linkintr(struct wm_softc *, uint32_t);
    765 static int	wm_intr_legacy(void *);
    766 static inline void	wm_txrxintr_disable(struct wm_queue *);
    767 static inline void	wm_txrxintr_enable(struct wm_queue *);
    768 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    769 static int	wm_txrxintr_msix(void *);
    770 static int	wm_linkintr_msix(void *);
    771 
    772 /*
    773  * Media related.
    774  * GMII, SGMII, TBI, SERDES and SFP.
    775  */
    776 /* Common */
    777 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    778 /* GMII related */
    779 static void	wm_gmii_reset(struct wm_softc *);
     780 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    781 static int	wm_get_phy_id_82575(struct wm_softc *);
    782 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    783 static int	wm_gmii_mediachange(struct ifnet *);
    784 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    785 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    786 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    787 static int	wm_gmii_i82543_readreg(device_t, int, int);
    788 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    789 static int	wm_gmii_mdic_readreg(device_t, int, int);
    790 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    791 static int	wm_gmii_i82544_readreg(device_t, int, int);
    792 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    793 static int	wm_gmii_i80003_readreg(device_t, int, int);
    794 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    795 static int	wm_gmii_bm_readreg(device_t, int, int);
    796 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    797 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    798 static int	wm_gmii_hv_readreg(device_t, int, int);
    799 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    800 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    801 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    802 static int	wm_gmii_82580_readreg(device_t, int, int);
    803 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    804 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    805 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    806 static void	wm_gmii_statchg(struct ifnet *);
    807 /*
     808  * Kumeran related (80003, ICH* and PCH*).
     809  * These functions are not for accessing MII registers but for accessing
     810  * Kumeran-specific registers.
    811  */
    812 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    813 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    814 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    815 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    816 /* SGMII */
    817 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    818 static int	wm_sgmii_readreg(device_t, int, int);
    819 static void	wm_sgmii_writereg(device_t, int, int, int);
    820 /* TBI related */
    821 static void	wm_tbi_mediainit(struct wm_softc *);
    822 static int	wm_tbi_mediachange(struct ifnet *);
    823 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    824 static int	wm_check_for_link(struct wm_softc *);
    825 static void	wm_tbi_tick(struct wm_softc *);
    826 /* SERDES related */
    827 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    828 static int	wm_serdes_mediachange(struct ifnet *);
    829 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    830 static void	wm_serdes_tick(struct wm_softc *);
    831 /* SFP related */
    832 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    833 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    834 
    835 /*
    836  * NVM related.
    837  * Microwire, SPI (w/wo EERD) and Flash.
    838  */
    839 /* Misc functions */
    840 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    841 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    842 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    843 /* Microwire */
    844 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    845 /* SPI */
    846 static int	wm_nvm_ready_spi(struct wm_softc *);
    847 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     848 /* For use with EERD */
    849 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    850 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    851 /* Flash */
    852 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    853     unsigned int *);
    854 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    855 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    856 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    857 	uint32_t *);
    858 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    859 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    860 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    861 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    862 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    863 /* iNVM */
    864 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    865 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    866 /* Lock, detecting NVM type, validate checksum and read */
    867 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    868 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    869 static int	wm_nvm_validate_checksum(struct wm_softc *);
    870 static void	wm_nvm_version_invm(struct wm_softc *);
    871 static void	wm_nvm_version(struct wm_softc *);
    872 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    873 
    874 /*
    875  * Hardware semaphores.
     876  * Very complex...
    877  */
    878 static int	wm_get_null(struct wm_softc *);
    879 static void	wm_put_null(struct wm_softc *);
    880 static int	wm_get_eecd(struct wm_softc *);
    881 static void	wm_put_eecd(struct wm_softc *);
    882 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    883 static void	wm_put_swsm_semaphore(struct wm_softc *);
    884 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    885 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    886 static int	wm_get_nvm_80003(struct wm_softc *);
    887 static void	wm_put_nvm_80003(struct wm_softc *);
    888 static int	wm_get_nvm_82571(struct wm_softc *);
    889 static void	wm_put_nvm_82571(struct wm_softc *);
    890 static int	wm_get_phy_82575(struct wm_softc *);
    891 static void	wm_put_phy_82575(struct wm_softc *);
    892 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    893 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    894 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    895 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    896 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    897 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    898 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    899 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    900 
    901 /*
    902  * Management mode and power management related subroutines.
    903  * BMC, AMT, suspend/resume and EEE.
    904  */
    905 #if 0
    906 static int	wm_check_mng_mode(struct wm_softc *);
    907 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    908 static int	wm_check_mng_mode_82574(struct wm_softc *);
    909 static int	wm_check_mng_mode_generic(struct wm_softc *);
    910 #endif
    911 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    912 static bool	wm_phy_resetisblocked(struct wm_softc *);
    913 static void	wm_get_hw_control(struct wm_softc *);
    914 static void	wm_release_hw_control(struct wm_softc *);
    915 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    916 static void	wm_smbustopci(struct wm_softc *);
    917 static void	wm_init_manageability(struct wm_softc *);
    918 static void	wm_release_manageability(struct wm_softc *);
    919 static void	wm_get_wakeup(struct wm_softc *);
    920 static void	wm_ulp_disable(struct wm_softc *);
    921 static void	wm_enable_phy_wakeup(struct wm_softc *);
    922 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_enable_wakeup(struct wm_softc *);
    924 /* LPLU (Low Power Link Up) */
    925 static void	wm_lplu_d0_disable(struct wm_softc *);
    926 /* EEE */
    927 static void	wm_set_eee_i350(struct wm_softc *);
    928 
    929 /*
    930  * Workarounds (mainly PHY related).
    931  * Basically, PHY's workarounds are in the PHY drivers.
    932  */
    933 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    934 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    935 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    936 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    937 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    938 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    939 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    940 static void	wm_reset_init_script_82575(struct wm_softc *);
    941 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    942 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    943 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    944 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    945 static void	wm_pll_workaround_i210(struct wm_softc *);
    946 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    947 
    948 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    949     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    950 
    951 /*
    952  * Devices supported by this driver.
    953  */
    954 static const struct wm_product {
    955 	pci_vendor_id_t		wmp_vendor;
    956 	pci_product_id_t	wmp_product;
    957 	const char		*wmp_name;
    958 	wm_chip_type		wmp_type;
    959 	uint32_t		wmp_flags;
    960 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    961 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    962 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    963 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    964 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    965 } wm_products[] = {
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    967 	  "Intel i82542 1000BASE-X Ethernet",
    968 	  WM_T_82542_2_1,	WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    971 	  "Intel i82543GC 1000BASE-X Ethernet",
    972 	  WM_T_82543,		WMP_F_FIBER },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    975 	  "Intel i82543GC 1000BASE-T Ethernet",
    976 	  WM_T_82543,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    979 	  "Intel i82544EI 1000BASE-T Ethernet",
    980 	  WM_T_82544,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    983 	  "Intel i82544EI 1000BASE-X Ethernet",
    984 	  WM_T_82544,		WMP_F_FIBER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    987 	  "Intel i82544GC 1000BASE-T Ethernet",
    988 	  WM_T_82544,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    991 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    992 	  WM_T_82544,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    995 	  "Intel i82540EM 1000BASE-T Ethernet",
    996 	  WM_T_82540,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    999 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1000 	  WM_T_82540,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1003 	  "Intel i82540EP 1000BASE-T Ethernet",
   1004 	  WM_T_82540,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1007 	  "Intel i82540EP 1000BASE-T Ethernet",
   1008 	  WM_T_82540,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1011 	  "Intel i82540EP 1000BASE-T Ethernet",
   1012 	  WM_T_82540,		WMP_F_COPPER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1015 	  "Intel i82545EM 1000BASE-T Ethernet",
   1016 	  WM_T_82545,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1019 	  "Intel i82545GM 1000BASE-T Ethernet",
   1020 	  WM_T_82545_3,		WMP_F_COPPER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1023 	  "Intel i82545GM 1000BASE-X Ethernet",
   1024 	  WM_T_82545_3,		WMP_F_FIBER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1027 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1028 	  WM_T_82545_3,		WMP_F_SERDES },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1031 	  "Intel i82546EB 1000BASE-T Ethernet",
   1032 	  WM_T_82546,		WMP_F_COPPER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1035 	  "Intel i82546EB 1000BASE-T Ethernet",
   1036 	  WM_T_82546,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1039 	  "Intel i82545EM 1000BASE-X Ethernet",
   1040 	  WM_T_82545,		WMP_F_FIBER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1043 	  "Intel i82546EB 1000BASE-X Ethernet",
   1044 	  WM_T_82546,		WMP_F_FIBER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1047 	  "Intel i82546GB 1000BASE-T Ethernet",
   1048 	  WM_T_82546_3,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1051 	  "Intel i82546GB 1000BASE-X Ethernet",
   1052 	  WM_T_82546_3,		WMP_F_FIBER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1055 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1056 	  WM_T_82546_3,		WMP_F_SERDES },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1059 	  "i82546GB quad-port Gigabit Ethernet",
   1060 	  WM_T_82546_3,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1063 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1064 	  WM_T_82546_3,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1067 	  "Intel PRO/1000MT (82546GB)",
   1068 	  WM_T_82546_3,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1071 	  "Intel i82541EI 1000BASE-T Ethernet",
   1072 	  WM_T_82541,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1075 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1076 	  WM_T_82541,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1079 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1080 	  WM_T_82541,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1083 	  "Intel i82541ER 1000BASE-T Ethernet",
   1084 	  WM_T_82541_2,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1087 	  "Intel i82541GI 1000BASE-T Ethernet",
   1088 	  WM_T_82541_2,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1091 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1092 	  WM_T_82541_2,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1095 	  "Intel i82541PI 1000BASE-T Ethernet",
   1096 	  WM_T_82541_2,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1099 	  "Intel i82547EI 1000BASE-T Ethernet",
   1100 	  WM_T_82547,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1103 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1104 	  WM_T_82547,		WMP_F_COPPER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1107 	  "Intel i82547GI 1000BASE-T Ethernet",
   1108 	  WM_T_82547_2,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1111 	  "Intel PRO/1000 PT (82571EB)",
   1112 	  WM_T_82571,		WMP_F_COPPER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1115 	  "Intel PRO/1000 PF (82571EB)",
   1116 	  WM_T_82571,		WMP_F_FIBER },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1119 	  "Intel PRO/1000 PB (82571EB)",
   1120 	  WM_T_82571,		WMP_F_SERDES },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1123 	  "Intel PRO/1000 QT (82571EB)",
   1124 	  WM_T_82571,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1127 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1128 	  WM_T_82571,		WMP_F_COPPER, },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1131 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1132 	  WM_T_82571,		WMP_F_COPPER, },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1135 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1136 	  WM_T_82571,		WMP_F_SERDES, },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1139 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1140 	  WM_T_82571,		WMP_F_SERDES, },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1143 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1144 	  WM_T_82571,		WMP_F_FIBER, },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1147 	  "Intel i82572EI 1000baseT Ethernet",
   1148 	  WM_T_82572,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1151 	  "Intel i82572EI 1000baseX Ethernet",
   1152 	  WM_T_82572,		WMP_F_FIBER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1155 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1156 	  WM_T_82572,		WMP_F_SERDES },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1159 	  "Intel i82572EI 1000baseT Ethernet",
   1160 	  WM_T_82572,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1163 	  "Intel i82573E",
   1164 	  WM_T_82573,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1167 	  "Intel i82573E IAMT",
   1168 	  WM_T_82573,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1171 	  "Intel i82573L Gigabit Ethernet",
   1172 	  WM_T_82573,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1175 	  "Intel i82574L",
   1176 	  WM_T_82574,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1179 	  "Intel i82574L",
   1180 	  WM_T_82574,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1183 	  "Intel i82583V",
   1184 	  WM_T_82583,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1187 	  "i80003 dual 1000baseT Ethernet",
   1188 	  WM_T_80003,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1191 	  "i80003 dual 1000baseX Ethernet",
   1192 	  WM_T_80003,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1195 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1196 	  WM_T_80003,		WMP_F_SERDES },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1199 	  "Intel i80003 1000baseT Ethernet",
   1200 	  WM_T_80003,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1203 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1204 	  WM_T_80003,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1207 	  "Intel i82801H (M_AMT) LAN Controller",
   1208 	  WM_T_ICH8,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1210 	  "Intel i82801H (AMT) LAN Controller",
   1211 	  WM_T_ICH8,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1213 	  "Intel i82801H LAN Controller",
   1214 	  WM_T_ICH8,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1216 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1217 	  WM_T_ICH8,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1219 	  "Intel i82801H (M) LAN Controller",
   1220 	  WM_T_ICH8,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1222 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1223 	  WM_T_ICH8,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1225 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1226 	  WM_T_ICH8,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1228 	  "82567V-3 LAN Controller",
   1229 	  WM_T_ICH8,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1231 	  "82801I (AMT) LAN Controller",
   1232 	  WM_T_ICH9,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1234 	  "82801I 10/100 LAN Controller",
   1235 	  WM_T_ICH9,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1237 	  "82801I (G) 10/100 LAN Controller",
   1238 	  WM_T_ICH9,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1240 	  "82801I (GT) 10/100 LAN Controller",
   1241 	  WM_T_ICH9,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1243 	  "82801I (C) LAN Controller",
   1244 	  WM_T_ICH9,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1246 	  "82801I mobile LAN Controller",
   1247 	  WM_T_ICH9,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1249 	  "82801I mobile (V) LAN Controller",
   1250 	  WM_T_ICH9,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1252 	  "82801I mobile (AMT) LAN Controller",
   1253 	  WM_T_ICH9,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1255 	  "82567LM-4 LAN Controller",
   1256 	  WM_T_ICH9,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1258 	  "82567LM-2 LAN Controller",
   1259 	  WM_T_ICH10,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1261 	  "82567LF-2 LAN Controller",
   1262 	  WM_T_ICH10,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1264 	  "82567LM-3 LAN Controller",
   1265 	  WM_T_ICH10,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1267 	  "82567LF-3 LAN Controller",
   1268 	  WM_T_ICH10,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1270 	  "82567V-2 LAN Controller",
   1271 	  WM_T_ICH10,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1273 	  "82567V-3? LAN Controller",
   1274 	  WM_T_ICH10,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1276 	  "HANKSVILLE LAN Controller",
   1277 	  WM_T_ICH10,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1279 	  "PCH LAN (82577LM) Controller",
   1280 	  WM_T_PCH,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1282 	  "PCH LAN (82577LC) Controller",
   1283 	  WM_T_PCH,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1285 	  "PCH LAN (82578DM) Controller",
   1286 	  WM_T_PCH,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1288 	  "PCH LAN (82578DC) Controller",
   1289 	  WM_T_PCH,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1291 	  "PCH2 LAN (82579LM) Controller",
   1292 	  WM_T_PCH2,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1294 	  "PCH2 LAN (82579V) Controller",
   1295 	  WM_T_PCH2,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1297 	  "82575EB dual-1000baseT Ethernet",
   1298 	  WM_T_82575,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1300 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1301 	  WM_T_82575,		WMP_F_SERDES },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1303 	  "82575GB quad-1000baseT Ethernet",
   1304 	  WM_T_82575,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1306 	  "82575GB quad-1000baseT Ethernet (PM)",
   1307 	  WM_T_82575,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1309 	  "82576 1000BaseT Ethernet",
   1310 	  WM_T_82576,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1312 	  "82576 1000BaseX Ethernet",
   1313 	  WM_T_82576,		WMP_F_FIBER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1316 	  "82576 gigabit Ethernet (SERDES)",
   1317 	  WM_T_82576,		WMP_F_SERDES },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1320 	  "82576 quad-1000BaseT Ethernet",
   1321 	  WM_T_82576,		WMP_F_COPPER },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1324 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1325 	  WM_T_82576,		WMP_F_COPPER },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1328 	  "82576 gigabit Ethernet",
   1329 	  WM_T_82576,		WMP_F_COPPER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
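	/* Sentinel entry: wm_lookup() stops at the NULL-name entry. */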
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
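/*
 * I/O-mapped register access works as an (address, data) pair: a
 * 4-byte write to I/O offset 0 selects the register and offset 4
 * carries the data, so each access below is two bus_space operations.
 */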
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
   1523 
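/*
 * Write an 8-bit value to an 82575 serdes control register through
 * the combined (data | address) word, busy-waiting (5us per poll, up
 * to SCTL_CTL_POLL_TIMEOUT polls) for the READY bit.
 */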
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
   1546 
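/*
 * Store a DMA address into the two little-endian 32-bit halves of a
 * wiseman_addr_t.  For example, v = 0x0000001234abcd00 yields
 * wa_low = htole32(0x34abcd00) and wa_high = htole32(0x00000012);
 * with a 32-bit bus_addr_t, wa_high is always 0.
 */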
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
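/*
 * Sync descriptors [start, start + num) of a Tx ring.  When the range
 * wraps past the end of the ring, the sync is done in two pieces;
 * e.g. in a 256-descriptor ring, start = 252 with num = 8 syncs
 * descriptors 252..255 and then 0..3.
 */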
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
	 *
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
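	/*
	 * Three Rx descriptor layouts are in use: extended descriptors
	 * on the 82574, "new queue" descriptors on WM_F_NEWQUEUE chips
	 * (82575 and later), and the legacy wiseman layout everywhere
	 * else.
	 */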
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
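	/* Hand the descriptor back to the hardware by advancing the tail. */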
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
/* Look up a device in the table of supported devices */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
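	/* Prefer the 64-bit DMA tag when the bus can do 64-bit DMA. */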
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
			 * That's not a problem, because the newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it had not
			 * been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	/*
	 * The 82583 has an MSI-X capability in its PCI configuration
	 * space, but it doesn't actually support MSI-X. At least, the
	 * documentation doesn't say anything about MSI-X.
   1860 	 */
   1861 	counts[PCI_INTR_TYPE_MSIX]
   1862 	    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1863 	counts[PCI_INTR_TYPE_MSI] = 1;
   1864 	counts[PCI_INTR_TYPE_INTX] = 1;
   1865 	/* overridden by disable flags */
   1866 	if (wm_disable_msi != 0) {
   1867 		counts[PCI_INTR_TYPE_MSI] = 0;
   1868 		if (wm_disable_msix != 0) {
   1869 			max_type = PCI_INTR_TYPE_INTX;
   1870 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1871 		}
   1872 	} else if (wm_disable_msix != 0) {
   1873 		max_type = PCI_INTR_TYPE_MSI;
   1874 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1875 	}
   1876 
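	/*
	 * Interrupt allocation falls back MSI-X -> MSI -> INTx: if
	 * wm_setup_msix() fails, the vectors are released and we retry
	 * with MSI; if wm_setup_legacy() fails on MSI, we retry with
	 * INTx.  A failure with INTx aborts the attach.
	 */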
   1877 alloc_retry:
   1878 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1879 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1880 		return;
   1881 	}
   1882 
   1883 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1884 		error = wm_setup_msix(sc);
   1885 		if (error) {
   1886 			pci_intr_release(pc, sc->sc_intrs,
   1887 			    counts[PCI_INTR_TYPE_MSIX]);
   1888 
   1889 			/* Setup for MSI: Disable MSI-X */
   1890 			max_type = PCI_INTR_TYPE_MSI;
   1891 			counts[PCI_INTR_TYPE_MSI] = 1;
   1892 			counts[PCI_INTR_TYPE_INTX] = 1;
   1893 			goto alloc_retry;
   1894 		}
	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1896 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1897 		error = wm_setup_legacy(sc);
   1898 		if (error) {
   1899 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1900 			    counts[PCI_INTR_TYPE_MSI]);
   1901 
   1902 			/* The next try is for INTx: Disable MSI */
   1903 			max_type = PCI_INTR_TYPE_INTX;
   1904 			counts[PCI_INTR_TYPE_INTX] = 1;
   1905 			goto alloc_retry;
   1906 		}
   1907 	} else {
   1908 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1909 		error = wm_setup_legacy(sc);
   1910 		if (error) {
   1911 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1912 			    counts[PCI_INTR_TYPE_INTX]);
   1913 			return;
   1914 		}
   1915 	}
   1916 
   1917 	/*
   1918 	 * Check the function ID (unit number of the chip).
   1919 	 */
   1920 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1921 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1922 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1923 	    || (sc->sc_type == WM_T_82580)
   1924 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1925 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1926 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1927 	else
   1928 		sc->sc_funcid = 0;
   1929 
   1930 	/*
   1931 	 * Determine a few things about the bus we're connected to.
   1932 	 */
   1933 	if (sc->sc_type < WM_T_82543) {
   1934 		/* We don't really know the bus characteristics here. */
   1935 		sc->sc_bus_speed = 33;
   1936 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1937 		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
   1940 		 */
   1941 		sc->sc_flags |= WM_F_CSA;
   1942 		sc->sc_bus_speed = 66;
   1943 		aprint_verbose_dev(sc->sc_dev,
   1944 		    "Communication Streaming Architecture\n");
   1945 		if (sc->sc_type == WM_T_82547) {
   1946 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1947 			callout_setfunc(&sc->sc_txfifo_ch,
   1948 					wm_82547_txfifo_stall, sc);
   1949 			aprint_verbose_dev(sc->sc_dev,
   1950 			    "using 82547 Tx FIFO stall work-around\n");
   1951 		}
   1952 	} else if (sc->sc_type >= WM_T_82571) {
   1953 		sc->sc_flags |= WM_F_PCIE;
   1954 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1955 		    && (sc->sc_type != WM_T_ICH10)
   1956 		    && (sc->sc_type != WM_T_PCH)
   1957 		    && (sc->sc_type != WM_T_PCH2)
   1958 		    && (sc->sc_type != WM_T_PCH_LPT)
   1959 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1960 			/* ICH* and PCH* have no PCIe capability registers */
   1961 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1962 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1963 				NULL) == 0)
   1964 				aprint_error_dev(sc->sc_dev,
   1965 				    "unable to find PCIe capability\n");
   1966 		}
   1967 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1968 	} else {
   1969 		reg = CSR_READ(sc, WMREG_STATUS);
   1970 		if (reg & STATUS_BUS64)
   1971 			sc->sc_flags |= WM_F_BUS64;
   1972 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1973 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1974 
   1975 			sc->sc_flags |= WM_F_PCIX;
   1976 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1977 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1978 				aprint_error_dev(sc->sc_dev,
   1979 				    "unable to find PCIX capability\n");
   1980 			else if (sc->sc_type != WM_T_82545_3 &&
   1981 				 sc->sc_type != WM_T_82546_3) {
   1982 				/*
   1983 				 * Work around a problem caused by the BIOS
   1984 				 * setting the max memory read byte count
   1985 				 * incorrectly.
   1986 				 */
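				/*
				 * Both fields encode 512 << n bytes,
				 * so e.g. bytecnt = 3 (4096) with
				 * maxb = 2 (2048) is clamped to 2048.
				 */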
   1987 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1988 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1989 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1990 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1991 
   1992 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1993 				    PCIX_CMD_BYTECNT_SHIFT;
   1994 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1995 				    PCIX_STATUS_MAXB_SHIFT;
   1996 				if (bytecnt > maxb) {
   1997 					aprint_verbose_dev(sc->sc_dev,
   1998 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1999 					    512 << bytecnt, 512 << maxb);
   2000 					pcix_cmd = (pcix_cmd &
   2001 					    ~PCIX_CMD_BYTECNT_MASK) |
   2002 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2003 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2004 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2005 					    pcix_cmd);
   2006 				}
   2007 			}
   2008 		}
   2009 		/*
   2010 		 * The quad port adapter is special; it has a PCIX-PCIX
   2011 		 * bridge on the board, and can run the secondary bus at
   2012 		 * a higher speed.
   2013 		 */
   2014 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2015 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2016 								      : 66;
   2017 		} else if (sc->sc_flags & WM_F_PCIX) {
   2018 			switch (reg & STATUS_PCIXSPD_MASK) {
   2019 			case STATUS_PCIXSPD_50_66:
   2020 				sc->sc_bus_speed = 66;
   2021 				break;
   2022 			case STATUS_PCIXSPD_66_100:
   2023 				sc->sc_bus_speed = 100;
   2024 				break;
   2025 			case STATUS_PCIXSPD_100_133:
   2026 				sc->sc_bus_speed = 133;
   2027 				break;
   2028 			default:
   2029 				aprint_error_dev(sc->sc_dev,
   2030 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2031 				    reg & STATUS_PCIXSPD_MASK);
   2032 				sc->sc_bus_speed = 66;
   2033 				break;
   2034 			}
   2035 		} else
   2036 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2037 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2038 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2039 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2040 	}
   2041 
   2042 	/* clear interesting stat counters */
   2043 	CSR_READ(sc, WMREG_COLC);
   2044 	CSR_READ(sc, WMREG_RXERRC);
   2045 
   2046 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2047 	    || (sc->sc_type >= WM_T_ICH8))
   2048 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2049 	if (sc->sc_type >= WM_T_ICH8)
   2050 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2051 
	/* Set up NVM access and the PHY/NVM locking functions per chip */
   2053 	switch (sc->sc_type) {
   2054 	case WM_T_82542_2_0:
   2055 	case WM_T_82542_2_1:
   2056 	case WM_T_82543:
   2057 	case WM_T_82544:
   2058 		/* Microwire */
   2059 		sc->nvm.read = wm_nvm_read_uwire;
   2060 		sc->sc_nvm_wordsize = 64;
   2061 		sc->sc_nvm_addrbits = 6;
   2062 		break;
   2063 	case WM_T_82540:
   2064 	case WM_T_82545:
   2065 	case WM_T_82545_3:
   2066 	case WM_T_82546:
   2067 	case WM_T_82546_3:
   2068 		/* Microwire */
   2069 		sc->nvm.read = wm_nvm_read_uwire;
   2070 		reg = CSR_READ(sc, WMREG_EECD);
   2071 		if (reg & EECD_EE_SIZE) {
   2072 			sc->sc_nvm_wordsize = 256;
   2073 			sc->sc_nvm_addrbits = 8;
   2074 		} else {
   2075 			sc->sc_nvm_wordsize = 64;
   2076 			sc->sc_nvm_addrbits = 6;
   2077 		}
   2078 		sc->sc_flags |= WM_F_LOCK_EECD;
   2079 		sc->nvm.acquire = wm_get_eecd;
   2080 		sc->nvm.release = wm_put_eecd;
   2081 		break;
   2082 	case WM_T_82541:
   2083 	case WM_T_82541_2:
   2084 	case WM_T_82547:
   2085 	case WM_T_82547_2:
   2086 		reg = CSR_READ(sc, WMREG_EECD);
   2087 		/*
		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI
		 * only on the 8254[17], so set the flags and functions
		 * before calling it.
   2090 		 */
   2091 		sc->sc_flags |= WM_F_LOCK_EECD;
   2092 		sc->nvm.acquire = wm_get_eecd;
   2093 		sc->nvm.release = wm_put_eecd;
   2094 		if (reg & EECD_EE_TYPE) {
   2095 			/* SPI */
   2096 			sc->nvm.read = wm_nvm_read_spi;
   2097 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2098 			wm_nvm_set_addrbits_size_eecd(sc);
   2099 		} else {
   2100 			/* Microwire */
   2101 			sc->nvm.read = wm_nvm_read_uwire;
   2102 			if ((reg & EECD_EE_ABITS) != 0) {
   2103 				sc->sc_nvm_wordsize = 256;
   2104 				sc->sc_nvm_addrbits = 8;
   2105 			} else {
   2106 				sc->sc_nvm_wordsize = 64;
   2107 				sc->sc_nvm_addrbits = 6;
   2108 			}
   2109 		}
   2110 		break;
   2111 	case WM_T_82571:
   2112 	case WM_T_82572:
   2113 		/* SPI */
   2114 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2116 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2117 		wm_nvm_set_addrbits_size_eecd(sc);
   2118 		sc->phy.acquire = wm_get_swsm_semaphore;
   2119 		sc->phy.release = wm_put_swsm_semaphore;
   2120 		sc->nvm.acquire = wm_get_nvm_82571;
   2121 		sc->nvm.release = wm_put_nvm_82571;
   2122 		break;
   2123 	case WM_T_82573:
   2124 	case WM_T_82574:
   2125 	case WM_T_82583:
   2126 		sc->nvm.read = wm_nvm_read_eerd;
		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2128 		if (sc->sc_type == WM_T_82573) {
   2129 			sc->phy.acquire = wm_get_swsm_semaphore;
   2130 			sc->phy.release = wm_put_swsm_semaphore;
   2131 			sc->nvm.acquire = wm_get_nvm_82571;
   2132 			sc->nvm.release = wm_put_nvm_82571;
   2133 		} else {
   2134 			/* Both PHY and NVM use the same semaphore. */
   2135 			sc->phy.acquire = sc->nvm.acquire
   2136 			    = wm_get_swfwhw_semaphore;
   2137 			sc->phy.release = sc->nvm.release
   2138 			    = wm_put_swfwhw_semaphore;
   2139 		}
   2140 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2141 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2142 			sc->sc_nvm_wordsize = 2048;
   2143 		} else {
   2144 			/* SPI */
   2145 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2146 			wm_nvm_set_addrbits_size_eecd(sc);
   2147 		}
   2148 		break;
   2149 	case WM_T_82575:
   2150 	case WM_T_82576:
   2151 	case WM_T_82580:
   2152 	case WM_T_I350:
   2153 	case WM_T_I354:
   2154 	case WM_T_80003:
   2155 		/* SPI */
   2156 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2157 		wm_nvm_set_addrbits_size_eecd(sc);
		if ((sc->sc_type == WM_T_80003)
   2159 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2160 			sc->nvm.read = wm_nvm_read_eerd;
   2161 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2162 		} else {
   2163 			sc->nvm.read = wm_nvm_read_spi;
   2164 			sc->sc_flags |= WM_F_LOCK_EECD;
   2165 		}
   2166 		sc->phy.acquire = wm_get_phy_82575;
   2167 		sc->phy.release = wm_put_phy_82575;
   2168 		sc->nvm.acquire = wm_get_nvm_80003;
   2169 		sc->nvm.release = wm_put_nvm_80003;
   2170 		break;
   2171 	case WM_T_ICH8:
   2172 	case WM_T_ICH9:
   2173 	case WM_T_ICH10:
   2174 	case WM_T_PCH:
   2175 	case WM_T_PCH2:
   2176 	case WM_T_PCH_LPT:
   2177 		sc->nvm.read = wm_nvm_read_ich8;
   2178 		/* FLASH */
   2179 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2180 		sc->sc_nvm_wordsize = 2048;
   2181 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2182 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2183 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2184 			aprint_error_dev(sc->sc_dev,
   2185 			    "can't map FLASH registers\n");
   2186 			goto out;
   2187 		}
   2188 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
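		/*
		 * The low and high 13-bit fields of GFPREG hold the first
		 * and last flash sectors of the NVM region; the arithmetic
		 * below turns (last + 1 - first) sectors into bytes, halves
		 * that for the two banks, and converts bytes to 16-bit
		 * words.
		 */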
   2189 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2190 		    ICH_FLASH_SECTOR_SIZE;
   2191 		sc->sc_ich8_flash_bank_size =
   2192 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2193 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2194 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2195 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2196 		sc->sc_flashreg_offset = 0;
   2197 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2198 		sc->phy.release = wm_put_swflag_ich8lan;
   2199 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2200 		sc->nvm.release = wm_put_nvm_ich8lan;
   2201 		break;
   2202 	case WM_T_PCH_SPT:
   2203 		sc->nvm.read = wm_nvm_read_spt;
   2204 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2205 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2206 		sc->sc_flasht = sc->sc_st;
   2207 		sc->sc_flashh = sc->sc_sh;
   2208 		sc->sc_ich8_flash_base = 0;
   2209 		sc->sc_nvm_wordsize =
   2210 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2211 			* NVM_SIZE_MULTIPLIER;
		/* It is the size in bytes; we want it in words */
   2213 		sc->sc_nvm_wordsize /= 2;
   2214 		/* assume 2 banks */
   2215 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2216 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2217 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2218 		sc->phy.release = wm_put_swflag_ich8lan;
   2219 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2220 		sc->nvm.release = wm_put_nvm_ich8lan;
   2221 		break;
   2222 	case WM_T_I210:
   2223 	case WM_T_I211:
		/* Allow a single clear of the SW semaphore on I210 and newer */
   2225 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2226 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2227 			sc->nvm.read = wm_nvm_read_eerd;
   2228 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2229 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2230 			wm_nvm_set_addrbits_size_eecd(sc);
   2231 		} else {
   2232 			sc->nvm.read = wm_nvm_read_invm;
   2233 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2234 			sc->sc_nvm_wordsize = INVM_SIZE;
   2235 		}
   2236 		sc->phy.acquire = wm_get_phy_82575;
   2237 		sc->phy.release = wm_put_phy_82575;
   2238 		sc->nvm.acquire = wm_get_nvm_80003;
   2239 		sc->nvm.release = wm_put_nvm_80003;
   2240 		break;
   2241 	default:
   2242 		break;
   2243 	}
   2244 
   2245 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2246 	switch (sc->sc_type) {
   2247 	case WM_T_82571:
   2248 	case WM_T_82572:
   2249 		reg = CSR_READ(sc, WMREG_SWSM2);
   2250 		if ((reg & SWSM2_LOCK) == 0) {
   2251 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2252 			force_clear_smbi = true;
   2253 		} else
   2254 			force_clear_smbi = false;
   2255 		break;
   2256 	case WM_T_82573:
   2257 	case WM_T_82574:
   2258 	case WM_T_82583:
   2259 		force_clear_smbi = true;
   2260 		break;
   2261 	default:
   2262 		force_clear_smbi = false;
   2263 		break;
   2264 	}
   2265 	if (force_clear_smbi) {
   2266 		reg = CSR_READ(sc, WMREG_SWSM);
   2267 		if ((reg & SWSM_SMBI) != 0)
   2268 			aprint_error_dev(sc->sc_dev,
   2269 			    "Please update the Bootagent\n");
   2270 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2271 	}
   2272 
   2273 	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
   2275 	 * This allows the EEPROM type to be printed correctly in the case
   2276 	 * that no EEPROM is attached.
   2277 	 */
   2278 	/*
   2279 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2280 	 * this for later, so we can fail future reads from the EEPROM.
   2281 	 */
   2282 	if (wm_nvm_validate_checksum(sc)) {
   2283 		/*
		 * Check it once more, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
   2286 		 */
   2287 		if (wm_nvm_validate_checksum(sc))
   2288 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2289 	}
   2290 
   2291 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2292 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2293 	else {
   2294 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2295 		    sc->sc_nvm_wordsize);
   2296 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2297 			aprint_verbose("iNVM");
   2298 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2299 			aprint_verbose("FLASH(HW)");
   2300 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2301 			aprint_verbose("FLASH");
   2302 		else {
   2303 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2304 				eetype = "SPI";
   2305 			else
   2306 				eetype = "MicroWire";
   2307 			aprint_verbose("(%d address bits) %s EEPROM",
   2308 			    sc->sc_nvm_addrbits, eetype);
   2309 		}
   2310 	}
   2311 	wm_nvm_version(sc);
   2312 	aprint_verbose("\n");
   2313 
   2314 	/*
   2315 	 * XXX The first call of wm_gmii_setup_phytype. The result might be
   2316 	 * incorrect.
   2317 	 */
   2318 	wm_gmii_setup_phytype(sc, 0, 0);
   2319 
   2320 	/* Reset the chip to a known state. */
   2321 	wm_reset(sc);
   2322 
   2323 	/* Check for I21[01] PLL workaround */
   2324 	if (sc->sc_type == WM_T_I210)
   2325 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2326 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2327 		/* NVM image release 3.25 has a workaround */
   2328 		if ((sc->sc_nvm_ver_major < 3)
   2329 		    || ((sc->sc_nvm_ver_major == 3)
   2330 			&& (sc->sc_nvm_ver_minor < 25))) {
   2331 			aprint_verbose_dev(sc->sc_dev,
   2332 			    "ROM image version %d.%d is older than 3.25\n",
   2333 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2334 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2335 		}
   2336 	}
   2337 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2338 		wm_pll_workaround_i210(sc);
   2339 
   2340 	wm_get_wakeup(sc);
   2341 
   2342 	/* Non-AMT based hardware can now take control from firmware */
   2343 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2344 		wm_get_hw_control(sc);
   2345 
   2346 	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
   2349 	 */
   2350 	ea = prop_dictionary_get(dict, "mac-address");
   2351 	if (ea != NULL) {
   2352 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2353 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2354 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2355 	} else {
   2356 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2357 			aprint_error_dev(sc->sc_dev,
   2358 			    "unable to read Ethernet address\n");
   2359 			goto out;
   2360 		}
   2361 	}
   2362 
   2363 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2364 	    ether_sprintf(enaddr));
   2365 
   2366 	/*
   2367 	 * Read the config info from the EEPROM, and set up various
   2368 	 * bits in the control registers based on their contents.
   2369 	 */
   2370 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2371 	if (pn != NULL) {
   2372 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2373 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2374 	} else {
   2375 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2376 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2377 			goto out;
   2378 		}
   2379 	}
   2380 
   2381 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2382 	if (pn != NULL) {
   2383 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2384 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2385 	} else {
   2386 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2387 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2388 			goto out;
   2389 		}
   2390 	}
   2391 
   2392 	/* check for WM_F_WOL */
   2393 	switch (sc->sc_type) {
   2394 	case WM_T_82542_2_0:
   2395 	case WM_T_82542_2_1:
   2396 	case WM_T_82543:
   2397 		/* dummy? */
   2398 		eeprom_data = 0;
   2399 		apme_mask = NVM_CFG3_APME;
   2400 		break;
   2401 	case WM_T_82544:
   2402 		apme_mask = NVM_CFG2_82544_APM_EN;
   2403 		eeprom_data = cfg2;
   2404 		break;
   2405 	case WM_T_82546:
   2406 	case WM_T_82546_3:
   2407 	case WM_T_82571:
   2408 	case WM_T_82572:
   2409 	case WM_T_82573:
   2410 	case WM_T_82574:
   2411 	case WM_T_82583:
   2412 	case WM_T_80003:
   2413 	default:
   2414 		apme_mask = NVM_CFG3_APME;
   2415 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2416 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2417 		break;
   2418 	case WM_T_82575:
   2419 	case WM_T_82576:
   2420 	case WM_T_82580:
   2421 	case WM_T_I350:
   2422 	case WM_T_I354: /* XXX ok? */
   2423 	case WM_T_ICH8:
   2424 	case WM_T_ICH9:
   2425 	case WM_T_ICH10:
   2426 	case WM_T_PCH:
   2427 	case WM_T_PCH2:
   2428 	case WM_T_PCH_LPT:
   2429 	case WM_T_PCH_SPT:
   2430 		/* XXX The funcid should be checked on some devices */
   2431 		apme_mask = WUC_APME;
   2432 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2433 		break;
   2434 	}
   2435 
   2436 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2437 	if ((eeprom_data & apme_mask) != 0)
   2438 		sc->sc_flags |= WM_F_WOL;
   2439 
   2440 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2441 		/* Check NVM for autonegotiation */
   2442 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2443 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2444 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2445 		}
   2446 	}
   2447 
   2448 	/*
	 * XXX Need special handling for some multi-port cards
	 * to disable a particular port.
   2451 	 */
   2452 
   2453 	if (sc->sc_type >= WM_T_82544) {
   2454 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2455 		if (pn != NULL) {
   2456 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2457 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2458 		} else {
   2459 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2460 				aprint_error_dev(sc->sc_dev,
   2461 				    "unable to read SWDPIN\n");
   2462 				goto out;
   2463 			}
   2464 		}
   2465 	}
   2466 
   2467 	if (cfg1 & NVM_CFG1_ILOS)
   2468 		sc->sc_ctrl |= CTRL_ILOS;
   2469 
   2470 	/*
   2471 	 * XXX
	 * This code isn't correct, because pins 2 and 3 are located at
	 * different positions on newer chips.  Check all the datasheets.
	 *
	 * Until that is resolved, apply this only to chips up to the 82580.
   2476 	 */
   2477 	if (sc->sc_type <= WM_T_82580) {
   2478 		if (sc->sc_type >= WM_T_82544) {
   2479 			sc->sc_ctrl |=
   2480 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2481 			    CTRL_SWDPIO_SHIFT;
   2482 			sc->sc_ctrl |=
   2483 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2484 			    CTRL_SWDPINS_SHIFT;
   2485 		} else {
   2486 			sc->sc_ctrl |=
   2487 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2488 			    CTRL_SWDPIO_SHIFT;
   2489 		}
   2490 	}
   2491 
   2492 	/* XXX For other than 82580? */
   2493 	if (sc->sc_type == WM_T_82580) {
   2494 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2495 		if (nvmword & __BIT(13))
   2496 			sc->sc_ctrl |= CTRL_ILOS;
   2497 	}
   2498 
   2499 #if 0
   2500 	if (sc->sc_type >= WM_T_82544) {
   2501 		if (cfg1 & NVM_CFG1_IPS0)
   2502 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2503 		if (cfg1 & NVM_CFG1_IPS1)
   2504 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2505 		sc->sc_ctrl_ext |=
   2506 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2507 		    CTRL_EXT_SWDPIO_SHIFT;
   2508 		sc->sc_ctrl_ext |=
   2509 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2510 		    CTRL_EXT_SWDPINS_SHIFT;
   2511 	} else {
   2512 		sc->sc_ctrl_ext |=
   2513 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2514 		    CTRL_EXT_SWDPIO_SHIFT;
   2515 	}
   2516 #endif
   2517 
   2518 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2519 #if 0
   2520 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2521 #endif
   2522 
   2523 	if (sc->sc_type == WM_T_PCH) {
   2524 		uint16_t val;
   2525 
   2526 		/* Save the NVM K1 bit setting */
   2527 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2528 
   2529 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2530 			sc->sc_nvm_k1_enabled = 1;
   2531 		else
   2532 			sc->sc_nvm_k1_enabled = 0;
   2533 	}
   2534 
	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2536 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2537 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2538 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2539 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2540 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2541 		/* Copper only */
	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211)) {
   2546 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2547 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2548 		switch (link_mode) {
   2549 		case CTRL_EXT_LINK_MODE_1000KX:
   2550 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2551 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2552 			break;
   2553 		case CTRL_EXT_LINK_MODE_SGMII:
   2554 			if (wm_sgmii_uses_mdio(sc)) {
   2555 				aprint_verbose_dev(sc->sc_dev,
   2556 				    "SGMII(MDIO)\n");
   2557 				sc->sc_flags |= WM_F_SGMII;
   2558 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2559 				break;
   2560 			}
   2561 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2562 			/*FALLTHROUGH*/
   2563 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2564 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2565 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2566 				if (link_mode
   2567 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2568 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2569 					sc->sc_flags |= WM_F_SGMII;
   2570 				} else {
   2571 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2572 					aprint_verbose_dev(sc->sc_dev,
   2573 					    "SERDES\n");
   2574 				}
   2575 				break;
   2576 			}
   2577 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2578 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2579 
   2580 			/* Change current link mode setting */
   2581 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2582 			switch (sc->sc_mediatype) {
   2583 			case WM_MEDIATYPE_COPPER:
   2584 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2585 				break;
   2586 			case WM_MEDIATYPE_SERDES:
   2587 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2588 				break;
   2589 			default:
   2590 				break;
   2591 			}
   2592 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2593 			break;
   2594 		case CTRL_EXT_LINK_MODE_GMII:
   2595 		default:
   2596 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2597 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2598 			break;
   2599 		}
   2600 
		if ((sc->sc_flags & WM_F_SGMII) != 0)
			reg |= CTRL_EXT_I2C_ENA;
		else
			reg &= ~CTRL_EXT_I2C_ENA;
   2606 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2607 	} else if (sc->sc_type < WM_T_82543 ||
   2608 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2609 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2610 			aprint_error_dev(sc->sc_dev,
   2611 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2612 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2613 		}
   2614 	} else {
   2615 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2616 			aprint_error_dev(sc->sc_dev,
   2617 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2618 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2619 		}
   2620 	}
   2621 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2622 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2623 
   2624 	/* Set device properties (macflags) */
   2625 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2626 
   2627 	/* Initialize the media structures accordingly. */
   2628 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2629 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2630 	else
   2631 		wm_tbi_mediainit(sc); /* All others */
   2632 
   2633 	ifp = &sc->sc_ethercom.ec_if;
   2634 	xname = device_xname(sc->sc_dev);
   2635 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2636 	ifp->if_softc = sc;
   2637 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2638 #ifdef WM_MPSAFE
   2639 	ifp->if_extflags = IFEF_MPSAFE;
   2640 #endif
   2641 	ifp->if_ioctl = wm_ioctl;
   2642 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2643 		ifp->if_start = wm_nq_start;
   2644 		/*
		 * When there is only one CPU and the controller can use
		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
		 * the other for link status changes.
		 * In that situation wm_nq_transmit() is at a disadvantage
		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2651 		 */
   2652 		if (wm_is_using_multiqueue(sc))
   2653 			ifp->if_transmit = wm_nq_transmit;
   2654 	} else {
   2655 		ifp->if_start = wm_start;
   2656 		/*
		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2658 		 */
   2659 		if (wm_is_using_multiqueue(sc))
   2660 			ifp->if_transmit = wm_transmit;
   2661 	}
   2662 	ifp->if_watchdog = wm_watchdog;
   2663 	ifp->if_init = wm_init;
   2664 	ifp->if_stop = wm_stop;
   2665 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2666 	IFQ_SET_READY(&ifp->if_snd);
   2667 
   2668 	/* Check for jumbo frame */
   2669 	switch (sc->sc_type) {
   2670 	case WM_T_82573:
   2671 		/* XXX limited to 9234 if ASPM is disabled */
   2672 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2673 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2674 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2675 		break;
   2676 	case WM_T_82571:
   2677 	case WM_T_82572:
   2678 	case WM_T_82574:
   2679 	case WM_T_82583:
   2680 	case WM_T_82575:
   2681 	case WM_T_82576:
   2682 	case WM_T_82580:
   2683 	case WM_T_I350:
   2684 	case WM_T_I354:
   2685 	case WM_T_I210:
   2686 	case WM_T_I211:
   2687 	case WM_T_80003:
   2688 	case WM_T_ICH9:
   2689 	case WM_T_ICH10:
   2690 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2691 	case WM_T_PCH_LPT:
   2692 	case WM_T_PCH_SPT:
   2693 		/* XXX limited to 9234 */
   2694 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2695 		break;
   2696 	case WM_T_PCH:
   2697 		/* XXX limited to 4096 */
   2698 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2699 		break;
   2700 	case WM_T_82542_2_0:
   2701 	case WM_T_82542_2_1:
   2702 	case WM_T_ICH8:
   2703 		/* No support for jumbo frame */
   2704 		break;
   2705 	default:
   2706 		/* ETHER_MAX_LEN_JUMBO */
   2707 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2708 		break;
   2709 	}
   2710 
	/* If we're an i82543 or greater, we can support VLANs. */
   2712 	if (sc->sc_type >= WM_T_82543)
   2713 		sc->sc_ethercom.ec_capabilities |=
   2714 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2715 
   2716 	/*
	 * We can perform TCPv4 and UDPv4 checksums.  Only
   2718 	 * on i82543 and later.
   2719 	 */
   2720 	if (sc->sc_type >= WM_T_82543) {
   2721 		ifp->if_capabilities |=
   2722 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2723 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2724 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2725 		    IFCAP_CSUM_TCPv6_Tx |
   2726 		    IFCAP_CSUM_UDPv6_Tx;
   2727 	}
   2728 
   2729 	/*
	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2731 	 *
   2732 	 *	82541GI (8086:1076) ... no
   2733 	 *	82572EI (8086:10b9) ... yes
   2734 	 */
   2735 	if (sc->sc_type >= WM_T_82571) {
   2736 		ifp->if_capabilities |=
   2737 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2738 	}
   2739 
   2740 	/*
	 * If we're an i82544 or greater (except the i82547), we can do
	 * TCP segmentation offload.
   2743 	 */
   2744 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2745 		ifp->if_capabilities |= IFCAP_TSOv4;
   2746 	}
   2747 
   2748 	if (sc->sc_type >= WM_T_82571) {
   2749 		ifp->if_capabilities |= IFCAP_TSOv6;
   2750 	}
   2751 
   2752 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2753 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2754 
   2755 #ifdef WM_MPSAFE
   2756 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2757 #else
   2758 	sc->sc_core_lock = NULL;
   2759 #endif
   2760 
   2761 	/* Attach the interface. */
   2762 	error = if_initialize(ifp);
   2763 	if (error != 0) {
   2764 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2765 		    error);
   2766 		return; /* Error */
   2767 	}
   2768 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2769 	ether_ifattach(ifp, enaddr);
   2770 	if_register(ifp);
   2771 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2772 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2773 			  RND_FLAG_DEFAULT);
   2774 
   2775 #ifdef WM_EVENT_COUNTERS
   2776 	/* Attach event counters. */
   2777 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2778 	    NULL, xname, "linkintr");
   2779 
   2780 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2781 	    NULL, xname, "tx_xoff");
   2782 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2783 	    NULL, xname, "tx_xon");
   2784 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2785 	    NULL, xname, "rx_xoff");
   2786 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2787 	    NULL, xname, "rx_xon");
   2788 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2789 	    NULL, xname, "rx_macctl");
   2790 #endif /* WM_EVENT_COUNTERS */
   2791 
   2792 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2793 		pmf_class_network_register(self, ifp);
   2794 	else
   2795 		aprint_error_dev(self, "couldn't establish power handler\n");
   2796 
   2797 	sc->sc_flags |= WM_F_ATTACHED;
   2798  out:
   2799 	return;
   2800 }
   2801 
   2802 /* The detach function (ca_detach) */
   2803 static int
   2804 wm_detach(device_t self, int flags __unused)
   2805 {
   2806 	struct wm_softc *sc = device_private(self);
   2807 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2808 	int i;
   2809 
   2810 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2811 		return 0;
   2812 
	/* Stop the interface.  Callouts are halted within wm_stop(). */
   2814 	wm_stop(ifp, 1);
   2815 
   2816 	pmf_device_deregister(self);
   2817 
   2818 #ifdef WM_EVENT_COUNTERS
   2819 	evcnt_detach(&sc->sc_ev_linkintr);
   2820 
   2821 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2822 	evcnt_detach(&sc->sc_ev_tx_xon);
   2823 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2824 	evcnt_detach(&sc->sc_ev_rx_xon);
   2825 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2826 #endif /* WM_EVENT_COUNTERS */
   2827 
   2828 	/* Tell the firmware about the release */
   2829 	WM_CORE_LOCK(sc);
   2830 	wm_release_manageability(sc);
   2831 	wm_release_hw_control(sc);
   2832 	wm_enable_wakeup(sc);
   2833 	WM_CORE_UNLOCK(sc);
   2834 
   2835 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2836 
   2837 	/* Delete all remaining media. */
   2838 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2839 
   2840 	ether_ifdetach(ifp);
   2841 	if_detach(ifp);
   2842 	if_percpuq_destroy(sc->sc_ipq);
   2843 
   2844 	/* Unload RX dmamaps and free mbufs */
   2845 	for (i = 0; i < sc->sc_nqueues; i++) {
   2846 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2847 		mutex_enter(rxq->rxq_lock);
   2848 		wm_rxdrain(rxq);
   2849 		mutex_exit(rxq->rxq_lock);
   2850 	}
	/* The rxq locks must be released before the queues are freed below */
   2852 
   2853 	/* Disestablish the interrupt handler */
   2854 	for (i = 0; i < sc->sc_nintrs; i++) {
   2855 		if (sc->sc_ihs[i] != NULL) {
   2856 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2857 			sc->sc_ihs[i] = NULL;
   2858 		}
   2859 	}
   2860 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2861 
   2862 	wm_free_txrx_queues(sc);
   2863 
   2864 	/* Unmap the registers */
   2865 	if (sc->sc_ss) {
   2866 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2867 		sc->sc_ss = 0;
   2868 	}
   2869 	if (sc->sc_ios) {
   2870 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2871 		sc->sc_ios = 0;
   2872 	}
   2873 	if (sc->sc_flashs) {
   2874 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2875 		sc->sc_flashs = 0;
   2876 	}
   2877 
   2878 	if (sc->sc_core_lock)
   2879 		mutex_obj_free(sc->sc_core_lock);
   2880 	if (sc->sc_ich_phymtx)
   2881 		mutex_obj_free(sc->sc_ich_phymtx);
   2882 	if (sc->sc_ich_nvmmtx)
   2883 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2884 
   2885 	return 0;
   2886 }
   2887 
   2888 static bool
   2889 wm_suspend(device_t self, const pmf_qual_t *qual)
   2890 {
   2891 	struct wm_softc *sc = device_private(self);
   2892 
   2893 	wm_release_manageability(sc);
   2894 	wm_release_hw_control(sc);
   2895 	wm_enable_wakeup(sc);
   2896 
   2897 	return true;
   2898 }
   2899 
   2900 static bool
   2901 wm_resume(device_t self, const pmf_qual_t *qual)
   2902 {
   2903 	struct wm_softc *sc = device_private(self);
   2904 
   2905 	wm_init_manageability(sc);
   2906 
   2907 	return true;
   2908 }
   2909 
   2910 /*
   2911  * wm_watchdog:		[ifnet interface function]
   2912  *
   2913  *	Watchdog timer handler.
   2914  */
   2915 static void
   2916 wm_watchdog(struct ifnet *ifp)
   2917 {
   2918 	int qid;
   2919 	struct wm_softc *sc = ifp->if_softc;
   2920 
   2921 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2922 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2923 
   2924 		wm_watchdog_txq(ifp, txq);
   2925 	}
   2926 
   2927 	/* Reset the interface. */
   2928 	(void) wm_init(ifp);
   2929 
   2930 	/*
	 * Some upper layer processing may still call ifp->if_start()
	 * directly, e.g. ALTQ or a single-CPU system.
   2933 	 */
   2934 	/* Try to get more packets going. */
   2935 	ifp->if_start(ifp);
   2936 }
   2937 
   2938 static void
   2939 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2940 {
   2941 	struct wm_softc *sc = ifp->if_softc;
   2942 
   2943 	/*
   2944 	 * Since we're using delayed interrupts, sweep up
   2945 	 * before we report an error.
   2946 	 */
   2947 	mutex_enter(txq->txq_lock);
   2948 	wm_txeof(sc, txq);
   2949 	mutex_exit(txq->txq_lock);
   2950 
   2951 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2952 #ifdef WM_DEBUG
   2953 		int i, j;
   2954 		struct wm_txsoft *txs;
   2955 #endif
   2956 		log(LOG_ERR,
   2957 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2958 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2959 		    txq->txq_next);
   2960 		ifp->if_oerrors++;
   2961 #ifdef WM_DEBUG
		for (i = txq->txq_sdirty; i != txq->txq_snext;
		    i = WM_NEXTTXS(txq, i)) {
			txs = &txq->txq_soft[i];
			printf("txs %d tx %d -> %d\n",
			    i, txs->txs_firstdesc, txs->txs_lastdesc);
			for (j = txs->txs_firstdesc; ;
			    j = WM_NEXTTX(txq, j)) {
				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
				printf("\t %#08x%08x\n",
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
				if (j == txs->txs_lastdesc)
					break;
			}
		}
   2978 #endif
   2979 	}
   2980 }
   2981 
   2982 /*
   2983  * wm_tick:
   2984  *
   2985  *	One second timer, used to check link status, sweep up
   2986  *	completed transmit jobs, etc.
   2987  */
   2988 static void
   2989 wm_tick(void *arg)
   2990 {
   2991 	struct wm_softc *sc = arg;
   2992 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2993 #ifndef WM_MPSAFE
   2994 	int s = splnet();
   2995 #endif
   2996 
   2997 	WM_CORE_LOCK(sc);
   2998 
   2999 	if (sc->sc_core_stopping)
   3000 		goto out;
   3001 
   3002 	if (sc->sc_type >= WM_T_82542_2_1) {
   3003 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3004 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3005 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3006 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3007 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3008 	}
   3009 
   3010 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3011 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3012 	    + CSR_READ(sc, WMREG_CRCERRS)
   3013 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3014 	    + CSR_READ(sc, WMREG_SYMERRC)
   3015 	    + CSR_READ(sc, WMREG_RXERRC)
   3016 	    + CSR_READ(sc, WMREG_SEC)
   3017 	    + CSR_READ(sc, WMREG_CEXTERR)
   3018 	    + CSR_READ(sc, WMREG_RLEC);
   3019 	/*
	 * WMREG_RNBC is incremented when there are no available buffers
	 * in host memory.  It is not a count of dropped packets, because
	 * the Ethernet controller can still receive packets in that case
	 * as long as there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC, use a dedicated EVCNT instead
	 * of if_iqdrops.
   3027 	 */
   3028 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
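	/*
	 * A dedicated counter could look like the sketch below (not
	 * enabled here; sc_ev_rnbc is a hypothetical softc member):
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC,
	 *	    NULL, xname, "rnbc");		(at attach time)
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */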
   3029 
   3030 	if (sc->sc_flags & WM_F_HAS_MII)
   3031 		mii_tick(&sc->sc_mii);
   3032 	else if ((sc->sc_type >= WM_T_82575)
   3033 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3034 		wm_serdes_tick(sc);
   3035 	else
   3036 		wm_tbi_tick(sc);
   3037 
   3038 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3039 out:
   3040 	WM_CORE_UNLOCK(sc);
   3041 #ifndef WM_MPSAFE
   3042 	splx(s);
   3043 #endif
   3044 }
   3045 
   3046 static int
   3047 wm_ifflags_cb(struct ethercom *ec)
   3048 {
   3049 	struct ifnet *ifp = &ec->ec_if;
   3050 	struct wm_softc *sc = ifp->if_softc;
   3051 	int rc = 0;
   3052 
   3053 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3054 		device_xname(sc->sc_dev), __func__));
   3055 
   3056 	WM_CORE_LOCK(sc);
   3057 
   3058 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3059 	sc->sc_if_flags = ifp->if_flags;
   3060 
   3061 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3062 		rc = ENETRESET;
   3063 		goto out;
   3064 	}
   3065 
   3066 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3067 		wm_set_filter(sc);
   3068 
   3069 	wm_set_vlan(sc);
   3070 
   3071 out:
   3072 	WM_CORE_UNLOCK(sc);
   3073 
   3074 	return rc;
   3075 }
   3076 
   3077 /*
   3078  * wm_ioctl:		[ifnet interface function]
   3079  *
   3080  *	Handle control requests from the operator.
   3081  */
   3082 static int
   3083 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3084 {
   3085 	struct wm_softc *sc = ifp->if_softc;
   3086 	struct ifreq *ifr = (struct ifreq *) data;
   3087 	struct ifaddr *ifa = (struct ifaddr *)data;
   3088 	struct sockaddr_dl *sdl;
   3089 	int s, error;
   3090 
   3091 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3092 		device_xname(sc->sc_dev), __func__));
   3093 
   3094 #ifndef WM_MPSAFE
   3095 	s = splnet();
   3096 #endif
   3097 	switch (cmd) {
   3098 	case SIOCSIFMEDIA:
   3099 	case SIOCGIFMEDIA:
   3100 		WM_CORE_LOCK(sc);
   3101 		/* Flow control requires full-duplex mode. */
   3102 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3103 		    (ifr->ifr_media & IFM_FDX) == 0)
   3104 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3105 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3106 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3107 				/* We can do both TXPAUSE and RXPAUSE. */
   3108 				ifr->ifr_media |=
   3109 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3110 			}
   3111 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3112 		}
   3113 		WM_CORE_UNLOCK(sc);
   3114 #ifdef WM_MPSAFE
   3115 		s = splnet();
   3116 #endif
   3117 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3118 #ifdef WM_MPSAFE
   3119 		splx(s);
   3120 #endif
   3121 		break;
   3122 	case SIOCINITIFADDR:
   3123 		WM_CORE_LOCK(sc);
   3124 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3125 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3126 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3127 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3128 			/* unicast address is first multicast entry */
   3129 			wm_set_filter(sc);
   3130 			error = 0;
   3131 			WM_CORE_UNLOCK(sc);
   3132 			break;
   3133 		}
   3134 		WM_CORE_UNLOCK(sc);
   3135 		/*FALLTHROUGH*/
   3136 	default:
   3137 #ifdef WM_MPSAFE
   3138 		s = splnet();
   3139 #endif
		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3141 		error = ether_ioctl(ifp, cmd, data);
   3142 #ifdef WM_MPSAFE
   3143 		splx(s);
   3144 #endif
   3145 		if (error != ENETRESET)
   3146 			break;
   3147 
   3148 		error = 0;
   3149 
   3150 		if (cmd == SIOCSIFCAP) {
   3151 			error = (*ifp->if_init)(ifp);
   3152 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3153 			;
   3154 		else if (ifp->if_flags & IFF_RUNNING) {
   3155 			/*
   3156 			 * Multicast list has changed; set the hardware filter
   3157 			 * accordingly.
   3158 			 */
   3159 			WM_CORE_LOCK(sc);
   3160 			wm_set_filter(sc);
   3161 			WM_CORE_UNLOCK(sc);
   3162 		}
   3163 		break;
   3164 	}
   3165 
   3166 #ifndef WM_MPSAFE
   3167 	splx(s);
   3168 #endif
   3169 	return error;
   3170 }
   3171 
   3172 /* MAC address related */
   3173 
   3174 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, offset 0 is used.
   3177  */
   3178 static uint16_t
   3179 wm_check_alt_mac_addr(struct wm_softc *sc)
   3180 {
   3181 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3182 	uint16_t offset = NVM_OFF_MACADDR;
   3183 
   3184 	/* Try to read alternative MAC address pointer */
   3185 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3186 		return 0;
   3187 
	/* Check whether the pointer is valid. */
   3189 	if ((offset == 0x0000) || (offset == 0xffff))
   3190 		return 0;
   3191 
   3192 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3193 	/*
	 * Check whether the alternative MAC address is valid.  Some cards
	 * have a non-0xffff pointer but don't actually use an alternative
	 * MAC address.
	 *
	 * Check whether the broadcast bit is set; it must be clear in a
	 * valid (unicast) MAC address.
   3199 	 */
   3200 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3201 		if (((myea[0] & 0xff) & 0x01) == 0)
   3202 			return offset; /* Found */
   3203 
   3204 	/* Not found */
   3205 	return 0;
   3206 }
   3207 
   3208 static int
   3209 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3210 {
   3211 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3212 	uint16_t offset = NVM_OFF_MACADDR;
   3213 	int do_invert = 0;
   3214 
   3215 	switch (sc->sc_type) {
   3216 	case WM_T_82580:
   3217 	case WM_T_I350:
   3218 	case WM_T_I354:
   3219 		/* EEPROM Top Level Partitioning */
   3220 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3221 		break;
   3222 	case WM_T_82571:
   3223 	case WM_T_82575:
   3224 	case WM_T_82576:
   3225 	case WM_T_80003:
   3226 	case WM_T_I210:
   3227 	case WM_T_I211:
   3228 		offset = wm_check_alt_mac_addr(sc);
   3229 		if (offset == 0)
   3230 			if ((sc->sc_funcid & 0x01) == 1)
   3231 				do_invert = 1;
   3232 		break;
   3233 	default:
   3234 		if ((sc->sc_funcid & 0x01) == 1)
   3235 			do_invert = 1;
   3236 		break;
   3237 	}
   3238 
   3239 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3240 		goto bad;
   3241 
   3242 	enaddr[0] = myea[0] & 0xff;
   3243 	enaddr[1] = myea[0] >> 8;
   3244 	enaddr[2] = myea[1] & 0xff;
   3245 	enaddr[3] = myea[1] >> 8;
   3246 	enaddr[4] = myea[2] & 0xff;
   3247 	enaddr[5] = myea[2] >> 8;
   3248 
   3249 	/*
   3250 	 * Toggle the LSB of the MAC address on the second port
   3251 	 * of some dual port cards.
   3252 	 */
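	/*
	 * (Hypothetical example: if function 0 reads 00:1b:21:00:00:00
	 * from the NVM, function 1 uses 00:1b:21:00:00:01.)
	 */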
   3253 	if (do_invert != 0)
   3254 		enaddr[5] ^= 1;
   3255 
   3256 	return 0;
   3257 
   3258  bad:
   3259 	return -1;
   3260 }
   3261 
   3262 /*
   3263  * wm_set_ral:
   3264  *
 *	Set an entry in the receive address list.
   3266  */
   3267 static void
   3268 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3269 {
   3270 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3271 	uint32_t wlock_mac;
   3272 	int rv;
   3273 
   3274 	if (enaddr != NULL) {
   3275 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3276 		    (enaddr[3] << 24);
   3277 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3278 		ral_hi |= RAL_AV;
   3279 	} else {
   3280 		ral_lo = 0;
   3281 		ral_hi = 0;
   3282 	}
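
	/*
	 * (Byte-order example: enaddr 00:01:02:03:04:05 packs into
	 * ral_lo = 0x03020100 and ral_hi = 0x00000504 | RAL_AV.)
	 */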
   3283 
   3284 	switch (sc->sc_type) {
   3285 	case WM_T_82542_2_0:
   3286 	case WM_T_82542_2_1:
   3287 	case WM_T_82543:
   3288 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3289 		CSR_WRITE_FLUSH(sc);
   3290 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3291 		CSR_WRITE_FLUSH(sc);
   3292 		break;
   3293 	case WM_T_PCH2:
   3294 	case WM_T_PCH_LPT:
   3295 	case WM_T_PCH_SPT:
   3296 		if (idx == 0) {
   3297 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3298 			CSR_WRITE_FLUSH(sc);
   3299 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3300 			CSR_WRITE_FLUSH(sc);
   3301 			return;
   3302 		}
   3303 		if (sc->sc_type != WM_T_PCH2) {
   3304 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3305 			    FWSM_WLOCK_MAC);
   3306 			addrl = WMREG_SHRAL(idx - 1);
   3307 			addrh = WMREG_SHRAH(idx - 1);
   3308 		} else {
   3309 			wlock_mac = 0;
   3310 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3311 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3312 		}
   3313 
   3314 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3315 			rv = wm_get_swflag_ich8lan(sc);
   3316 			if (rv != 0)
   3317 				return;
   3318 			CSR_WRITE(sc, addrl, ral_lo);
   3319 			CSR_WRITE_FLUSH(sc);
   3320 			CSR_WRITE(sc, addrh, ral_hi);
   3321 			CSR_WRITE_FLUSH(sc);
   3322 			wm_put_swflag_ich8lan(sc);
   3323 		}
   3324 
   3325 		break;
   3326 	default:
   3327 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3328 		CSR_WRITE_FLUSH(sc);
   3329 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3330 		CSR_WRITE_FLUSH(sc);
   3331 		break;
   3332 	}
   3333 }
   3334 
   3335 /*
   3336  * wm_mchash:
   3337  *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter (1024 bits on the ICH/PCH variants).
   3340  */
   3341 static uint32_t
   3342 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3343 {
   3344 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3345 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3346 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3347 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3348 	uint32_t hash;
   3349 
   3350 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3351 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3352 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3353 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3354 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3355 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3356 		return (hash & 0x3ff);
   3357 	}
   3358 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3359 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3360 
   3361 	return (hash & 0xfff);
   3362 }
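
/*
 * Worked example for wm_mchash() (illustration only): with
 * sc_mchash_type == 0 on a non-ICH/PCH chip, the multicast address
 * 01:00:5e:00:00:01 gives hash = (0x00 >> 4) | (0x01 << 4) = 0x010,
 * so wm_set_filter() below sets bit 16 (0x010 & 0x1f) of MTA word 0
 * (0x010 >> 5).
 */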
   3363 
   3364 /*
   3365  * wm_set_filter:
   3366  *
   3367  *	Set up the receive filter.
   3368  */
   3369 static void
   3370 wm_set_filter(struct wm_softc *sc)
   3371 {
   3372 	struct ethercom *ec = &sc->sc_ethercom;
   3373 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3374 	struct ether_multi *enm;
   3375 	struct ether_multistep step;
   3376 	bus_addr_t mta_reg;
   3377 	uint32_t hash, reg, bit;
   3378 	int i, size, ralmax;
   3379 
   3380 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3381 		device_xname(sc->sc_dev), __func__));
   3382 
   3383 	if (sc->sc_type >= WM_T_82544)
   3384 		mta_reg = WMREG_CORDOVA_MTA;
   3385 	else
   3386 		mta_reg = WMREG_MTA;
   3387 
   3388 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3389 
   3390 	if (ifp->if_flags & IFF_BROADCAST)
   3391 		sc->sc_rctl |= RCTL_BAM;
   3392 	if (ifp->if_flags & IFF_PROMISC) {
   3393 		sc->sc_rctl |= RCTL_UPE;
   3394 		goto allmulti;
   3395 	}
   3396 
   3397 	/*
   3398 	 * Set the station address in the first RAL slot, and
   3399 	 * clear the remaining slots.
   3400 	 */
   3401 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3403 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3404 	    || (sc->sc_type == WM_T_PCH))
   3405 		size = WM_RAL_TABSIZE_ICH8;
   3406 	else if (sc->sc_type == WM_T_PCH2)
   3407 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3409 		size = WM_RAL_TABSIZE_PCH_LPT;
   3410 	else if (sc->sc_type == WM_T_82575)
   3411 		size = WM_RAL_TABSIZE_82575;
   3412 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3413 		size = WM_RAL_TABSIZE_82576;
   3414 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3415 		size = WM_RAL_TABSIZE_I350;
   3416 	else
   3417 		size = WM_RAL_TABSIZE;
   3418 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3419 
   3420 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3421 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3422 		switch (i) {
   3423 		case 0:
   3424 			/* We can use all entries */
   3425 			ralmax = size;
   3426 			break;
   3427 		case 1:
   3428 			/* Only RAR[0] */
   3429 			ralmax = 1;
   3430 			break;
   3431 		default:
   3432 			/* available SHRA + RAR[0] */
   3433 			ralmax = i + 1;
   3434 		}
   3435 	} else
   3436 		ralmax = size;
   3437 	for (i = 1; i < size; i++) {
   3438 		if (i < ralmax)
   3439 			wm_set_ral(sc, NULL, i);
   3440 	}
   3441 
   3442 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3443 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3444 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3445 	    || (sc->sc_type == WM_T_PCH_SPT))
   3446 		size = WM_ICH8_MC_TABSIZE;
   3447 	else
   3448 		size = WM_MC_TABSIZE;
   3449 	/* Clear out the multicast table. */
   3450 	for (i = 0; i < size; i++) {
   3451 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3452 		CSR_WRITE_FLUSH(sc);
   3453 	}
   3454 
   3455 	ETHER_LOCK(ec);
   3456 	ETHER_FIRST_MULTI(step, ec, enm);
   3457 	while (enm != NULL) {
   3458 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3459 			ETHER_UNLOCK(ec);
   3460 			/*
   3461 			 * We must listen to a range of multicast addresses.
   3462 			 * For now, just accept all multicasts, rather than
   3463 			 * trying to set only those filter bits needed to match
   3464 			 * the range.  (At this time, the only use of address
   3465 			 * ranges is for IP multicast routing, for which the
   3466 			 * range is big enough to require all bits set.)
   3467 			 */
   3468 			goto allmulti;
   3469 		}
   3470 
   3471 		hash = wm_mchash(sc, enm->enm_addrlo);
   3472 
   3473 		reg = (hash >> 5);
   3474 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3475 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3476 		    || (sc->sc_type == WM_T_PCH2)
   3477 		    || (sc->sc_type == WM_T_PCH_LPT)
   3478 		    || (sc->sc_type == WM_T_PCH_SPT))
   3479 			reg &= 0x1f;
   3480 		else
   3481 			reg &= 0x7f;
   3482 		bit = hash & 0x1f;
   3483 
   3484 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3485 		hash |= 1U << bit;
   3486 
   3487 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3488 			/*
   3489 			 * 82544 Errata 9: Certain register cannot be written
   3490 			 * with particular alignments in PCI-X bus operation
   3491 			 * (FCAH, MTA and VFTA).
   3492 			 */
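			/*
			 * (Hence the sequence below: the even-indexed
			 * neighbor register is read first and written back
			 * after the target register.)
			 */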
   3493 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3494 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3495 			CSR_WRITE_FLUSH(sc);
   3496 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3497 			CSR_WRITE_FLUSH(sc);
   3498 		} else {
   3499 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3500 			CSR_WRITE_FLUSH(sc);
   3501 		}
   3502 
   3503 		ETHER_NEXT_MULTI(step, enm);
   3504 	}
   3505 	ETHER_UNLOCK(ec);
   3506 
   3507 	ifp->if_flags &= ~IFF_ALLMULTI;
   3508 	goto setit;
   3509 
   3510  allmulti:
   3511 	ifp->if_flags |= IFF_ALLMULTI;
   3512 	sc->sc_rctl |= RCTL_MPE;
   3513 
   3514  setit:
   3515 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3516 }
   3517 
   3518 /* Reset and init related */
   3519 
   3520 static void
   3521 wm_set_vlan(struct wm_softc *sc)
   3522 {
   3523 
   3524 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3525 		device_xname(sc->sc_dev), __func__));
   3526 
   3527 	/* Deal with VLAN enables. */
   3528 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3529 		sc->sc_ctrl |= CTRL_VME;
   3530 	else
   3531 		sc->sc_ctrl &= ~CTRL_VME;
   3532 
   3533 	/* Write the control registers. */
   3534 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3535 }
   3536 
   3537 static void
   3538 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3539 {
   3540 	uint32_t gcr;
   3541 	pcireg_t ctrl2;
   3542 
   3543 	gcr = CSR_READ(sc, WMREG_GCR);
   3544 
   3545 	/* Only take action if timeout value is defaulted to 0 */
   3546 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3547 		goto out;
   3548 
   3549 	if ((gcr & GCR_CAP_VER2) == 0) {
   3550 		gcr |= GCR_CMPL_TMOUT_10MS;
   3551 		goto out;
   3552 	}
   3553 
   3554 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3555 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3556 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3557 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3558 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3559 
   3560 out:
   3561 	/* Disable completion timeout resend */
   3562 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3563 
   3564 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3565 }
   3566 
   3567 void
   3568 wm_get_auto_rd_done(struct wm_softc *sc)
   3569 {
   3570 	int i;
   3571 
   3572 	/* wait for eeprom to reload */
   3573 	switch (sc->sc_type) {
   3574 	case WM_T_82571:
   3575 	case WM_T_82572:
   3576 	case WM_T_82573:
   3577 	case WM_T_82574:
   3578 	case WM_T_82583:
   3579 	case WM_T_82575:
   3580 	case WM_T_82576:
   3581 	case WM_T_82580:
   3582 	case WM_T_I350:
   3583 	case WM_T_I354:
   3584 	case WM_T_I210:
   3585 	case WM_T_I211:
   3586 	case WM_T_80003:
   3587 	case WM_T_ICH8:
   3588 	case WM_T_ICH9:
   3589 		for (i = 0; i < 10; i++) {
   3590 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3591 				break;
   3592 			delay(1000);
   3593 		}
   3594 		if (i == 10) {
   3595 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3596 			    "complete\n", device_xname(sc->sc_dev));
   3597 		}
   3598 		break;
   3599 	default:
   3600 		break;
   3601 	}
   3602 }
   3603 
   3604 void
   3605 wm_lan_init_done(struct wm_softc *sc)
   3606 {
   3607 	uint32_t reg = 0;
   3608 	int i;
   3609 
   3610 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3611 		device_xname(sc->sc_dev), __func__));
   3612 
   3613 	/* Wait for eeprom to reload */
   3614 	switch (sc->sc_type) {
   3615 	case WM_T_ICH10:
   3616 	case WM_T_PCH:
   3617 	case WM_T_PCH2:
   3618 	case WM_T_PCH_LPT:
   3619 	case WM_T_PCH_SPT:
   3620 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3621 			reg = CSR_READ(sc, WMREG_STATUS);
   3622 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3623 				break;
   3624 			delay(100);
   3625 		}
   3626 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3627 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3628 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3629 		}
   3630 		break;
   3631 	default:
   3632 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3633 		    __func__);
   3634 		break;
   3635 	}
   3636 
   3637 	reg &= ~STATUS_LAN_INIT_DONE;
   3638 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3639 }
   3640 
   3641 void
   3642 wm_get_cfg_done(struct wm_softc *sc)
   3643 {
   3644 	int mask;
   3645 	uint32_t reg;
   3646 	int i;
   3647 
   3648 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3649 		device_xname(sc->sc_dev), __func__));
   3650 
   3651 	/* Wait for eeprom to reload */
   3652 	switch (sc->sc_type) {
   3653 	case WM_T_82542_2_0:
   3654 	case WM_T_82542_2_1:
   3655 		/* null */
   3656 		break;
   3657 	case WM_T_82543:
   3658 	case WM_T_82544:
   3659 	case WM_T_82540:
   3660 	case WM_T_82545:
   3661 	case WM_T_82545_3:
   3662 	case WM_T_82546:
   3663 	case WM_T_82546_3:
   3664 	case WM_T_82541:
   3665 	case WM_T_82541_2:
   3666 	case WM_T_82547:
   3667 	case WM_T_82547_2:
   3668 	case WM_T_82573:
   3669 	case WM_T_82574:
   3670 	case WM_T_82583:
   3671 		/* generic */
   3672 		delay(10*1000);
   3673 		break;
   3674 	case WM_T_80003:
   3675 	case WM_T_82571:
   3676 	case WM_T_82572:
   3677 	case WM_T_82575:
   3678 	case WM_T_82576:
   3679 	case WM_T_82580:
   3680 	case WM_T_I350:
   3681 	case WM_T_I354:
   3682 	case WM_T_I210:
   3683 	case WM_T_I211:
   3684 		if (sc->sc_type == WM_T_82571) {
   3685 			/* Only 82571 shares port 0 */
   3686 			mask = EEMNGCTL_CFGDONE_0;
   3687 		} else
   3688 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3689 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3690 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3691 				break;
   3692 			delay(1000);
   3693 		}
   3694 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3695 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3696 				device_xname(sc->sc_dev), __func__));
   3697 		}
   3698 		break;
   3699 	case WM_T_ICH8:
   3700 	case WM_T_ICH9:
   3701 	case WM_T_ICH10:
   3702 	case WM_T_PCH:
   3703 	case WM_T_PCH2:
   3704 	case WM_T_PCH_LPT:
   3705 	case WM_T_PCH_SPT:
   3706 		delay(10*1000);
   3707 		if (sc->sc_type >= WM_T_ICH10)
   3708 			wm_lan_init_done(sc);
   3709 		else
   3710 			wm_get_auto_rd_done(sc);
   3711 
   3712 		reg = CSR_READ(sc, WMREG_STATUS);
   3713 		if ((reg & STATUS_PHYRA) != 0)
   3714 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3715 		break;
   3716 	default:
   3717 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3718 		    __func__);
   3719 		break;
   3720 	}
   3721 }
   3722 
   3723 void
   3724 wm_phy_post_reset(struct wm_softc *sc)
   3725 {
   3726 	uint32_t reg;
   3727 
   3728 	/* This function is only for ICH8 and newer. */
   3729 	if (sc->sc_type < WM_T_ICH8)
   3730 		return;
   3731 
   3732 	if (wm_phy_resetisblocked(sc)) {
   3733 		/* XXX */
   3734 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3735 		return;
   3736 	}
   3737 
   3738 	/* Allow time for h/w to get to quiescent state after reset */
   3739 	delay(10*1000);
   3740 
   3741 	/* Perform any necessary post-reset workarounds */
   3742 	if (sc->sc_type == WM_T_PCH)
   3743 		wm_hv_phy_workaround_ich8lan(sc);
   3744 	if (sc->sc_type == WM_T_PCH2)
   3745 		wm_lv_phy_workaround_ich8lan(sc);
   3746 
   3747 	/* Clear the host wakeup bit after lcd reset */
   3748 	if (sc->sc_type >= WM_T_PCH) {
   3749 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3750 		    BM_PORT_GEN_CFG);
   3751 		reg &= ~BM_WUC_HOST_WU_BIT;
   3752 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3753 		    BM_PORT_GEN_CFG, reg);
   3754 	}
   3755 
   3756 	/* Configure the LCD with the extended configuration region in NVM */
   3757 	wm_init_lcd_from_nvm(sc);
   3758 
   3759 	/* Configure the LCD with the OEM bits in NVM */
   3760 }
   3761 
   3762 /* Only for PCH and newer */
   3763 static void
   3764 wm_write_smbus_addr(struct wm_softc *sc)
   3765 {
   3766 	uint32_t strap, freq;
   3767 	uint32_t phy_data;
   3768 
   3769 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3770 		device_xname(sc->sc_dev), __func__));
   3771 
   3772 	strap = CSR_READ(sc, WMREG_STRAP);
   3773 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3774 
   3775 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3776 
   3777 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3778 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3779 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3780 
   3781 	if (sc->sc_phytype == WMPHY_I217) {
   3782 		/* Restore SMBus frequency */
		if (freq--) {
   3784 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3785 			    | HV_SMB_ADDR_FREQ_HIGH);
   3786 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3787 			    HV_SMB_ADDR_FREQ_LOW);
   3788 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3789 			    HV_SMB_ADDR_FREQ_HIGH);
   3790 		} else {
   3791 			DPRINTF(WM_DEBUG_INIT,
   3792 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3793 				device_xname(sc->sc_dev), __func__));
   3794 		}
   3795 	}
   3796 
   3797 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3798 }
   3799 
   3800 void
   3801 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3802 {
   3803 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3804 	uint16_t phy_page = 0;
   3805 
   3806 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3807 		device_xname(sc->sc_dev), __func__));
   3808 
   3809 	switch (sc->sc_type) {
   3810 	case WM_T_ICH8:
   3811 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3812 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3813 			return;
   3814 
   3815 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3816 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3817 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3818 			break;
   3819 		}
   3820 		/* FALLTHROUGH */
   3821 	case WM_T_PCH:
   3822 	case WM_T_PCH2:
   3823 	case WM_T_PCH_LPT:
   3824 	case WM_T_PCH_SPT:
   3825 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3826 		break;
   3827 	default:
   3828 		return;
   3829 	}
   3830 
   3831 	sc->phy.acquire(sc);
   3832 
   3833 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3834 	if ((reg & sw_cfg_mask) == 0)
   3835 		goto release;
   3836 
   3837 	/*
   3838 	 * Make sure HW does not configure LCD from PHY extended configuration
   3839 	 * before SW configuration
   3840 	 */
   3841 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3842 	if ((sc->sc_type < WM_T_PCH2)
   3843 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3844 		goto release;
   3845 
   3846 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3847 		device_xname(sc->sc_dev), __func__));
   3848 	/* word_addr is in DWORD */
   3849 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3850 
   3851 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3852 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3853 
   3854 	if (((sc->sc_type == WM_T_PCH)
   3855 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3856 	    || (sc->sc_type > WM_T_PCH)) {
   3857 		/*
   3858 		 * HW configures the SMBus address and LEDs when the OEM and
   3859 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3860 		 * are cleared, SW will configure them instead.
   3861 		 */
   3862 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3863 			device_xname(sc->sc_dev), __func__));
   3864 		wm_write_smbus_addr(sc);
   3865 
   3866 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3867 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3868 	}
   3869 
   3870 	/* Configure LCD from extended configuration region. */
   3871 	for (i = 0; i < cnf_size; i++) {
   3872 		uint16_t reg_data, reg_addr;
   3873 
   3874 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3875 			goto release;
   3876 
		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3878 			goto release;
   3879 
   3880 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3881 			phy_page = reg_data;
   3882 
   3883 		reg_addr &= IGPHY_MAXREGADDR;
   3884 		reg_addr |= phy_page;
   3885 
   3886 		sc->phy.release(sc); /* XXX */
   3887 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3888 		sc->phy.acquire(sc); /* XXX */
   3889 	}
   3890 
   3891 release:
   3892 	sc->phy.release(sc);
   3893 	return;
   3894 }
   3895 
   3896 
   3897 /* Init hardware bits */
   3898 void
   3899 wm_initialize_hardware_bits(struct wm_softc *sc)
   3900 {
   3901 	uint32_t tarc0, tarc1, reg;
   3902 
   3903 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3904 		device_xname(sc->sc_dev), __func__));
   3905 
   3906 	/* For 82571 variant, 80003 and ICHs */
   3907 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3908 	    || (sc->sc_type >= WM_T_80003)) {
   3909 
   3910 		/* Transmit Descriptor Control 0 */
   3911 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3912 		reg |= TXDCTL_COUNT_DESC;
   3913 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3914 
   3915 		/* Transmit Descriptor Control 1 */
   3916 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3917 		reg |= TXDCTL_COUNT_DESC;
   3918 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3919 
   3920 		/* TARC0 */
   3921 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3922 		switch (sc->sc_type) {
   3923 		case WM_T_82571:
   3924 		case WM_T_82572:
   3925 		case WM_T_82573:
   3926 		case WM_T_82574:
   3927 		case WM_T_82583:
   3928 		case WM_T_80003:
   3929 			/* Clear bits 30..27 */
   3930 			tarc0 &= ~__BITS(30, 27);
   3931 			break;
   3932 		default:
   3933 			break;
   3934 		}
   3935 
   3936 		switch (sc->sc_type) {
   3937 		case WM_T_82571:
   3938 		case WM_T_82572:
   3939 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3940 
   3941 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3942 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3943 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3944 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3946 
   3947 			/* TARC1 bit 28 */
   3948 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3949 				tarc1 &= ~__BIT(28);
   3950 			else
   3951 				tarc1 |= __BIT(28);
   3952 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3953 
   3954 			/*
   3955 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3957 			 */
   3958 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3959 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3960 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3961 			break;
   3962 		case WM_T_82573:
   3963 		case WM_T_82574:
   3964 		case WM_T_82583:
   3965 			if ((sc->sc_type == WM_T_82574)
   3966 			    || (sc->sc_type == WM_T_82583))
   3967 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3968 
   3969 			/* Extended Device Control */
   3970 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3971 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3972 			reg |= __BIT(22);	/* Set bit 22 */
   3973 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3974 
   3975 			/* Device Control */
   3976 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3977 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3978 
   3979 			/* PCIe Control Register */
   3980 			/*
   3981 			 * 82573 Errata (unknown).
   3982 			 *
   3983 			 * 82574 Errata 25 and 82583 Errata 12
   3984 			 * "Dropped Rx Packets":
			 *   NVM image versions 2.1.4 and newer do not have this bug.
   3986 			 */
   3987 			reg = CSR_READ(sc, WMREG_GCR);
   3988 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3989 			CSR_WRITE(sc, WMREG_GCR, reg);
   3990 
   3991 			if ((sc->sc_type == WM_T_82574)
   3992 			    || (sc->sc_type == WM_T_82583)) {
   3993 				/*
   3994 				 * Document says this bit must be set for
   3995 				 * proper operation.
   3996 				 */
   3997 				reg = CSR_READ(sc, WMREG_GCR);
   3998 				reg |= __BIT(22);
   3999 				CSR_WRITE(sc, WMREG_GCR, reg);
   4000 
   4001 				/*
				 * Apply a workaround for the hardware
				 * erratum documented in the errata docs.
				 * It fixes an issue where error-prone or
				 * unreliable PCIe completions can occur,
				 * particularly with ASPM enabled.  Without
				 * the fix, the issue can cause Tx timeouts.
   4008 				 */
   4009 				reg = CSR_READ(sc, WMREG_GCR2);
   4010 				reg |= __BIT(0);
   4011 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4012 			}
   4013 			break;
   4014 		case WM_T_80003:
   4015 			/* TARC0 */
   4016 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4017 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4019 
   4020 			/* TARC1 bit 28 */
   4021 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4022 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4023 				tarc1 &= ~__BIT(28);
   4024 			else
   4025 				tarc1 |= __BIT(28);
   4026 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4027 			break;
   4028 		case WM_T_ICH8:
   4029 		case WM_T_ICH9:
   4030 		case WM_T_ICH10:
   4031 		case WM_T_PCH:
   4032 		case WM_T_PCH2:
   4033 		case WM_T_PCH_LPT:
   4034 		case WM_T_PCH_SPT:
   4035 			/* TARC0 */
   4036 			if (sc->sc_type == WM_T_ICH8) {
   4037 				/* Set TARC0 bits 29 and 28 */
   4038 				tarc0 |= __BITS(29, 28);
   4039 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4040 				tarc0 |= __BIT(29);
   4041 				/*
				 * Drop bit 28.  From Linux; see the
				 * I218/I219 spec update, "5. Buffer Overrun
				 * While the I219 is Processing DMA
				 * Transactions".
   4046 				 */
   4047 				tarc0 &= ~__BIT(28);
   4048 			}
   4049 			/* Set TARC0 bits 23,24,26,27 */
   4050 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4051 
   4052 			/* CTRL_EXT */
   4053 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4054 			reg |= __BIT(22);	/* Set bit 22 */
   4055 			/*
   4056 			 * Enable PHY low-power state when MAC is at D3
   4057 			 * w/o WoL
   4058 			 */
   4059 			if (sc->sc_type >= WM_T_PCH)
   4060 				reg |= CTRL_EXT_PHYPDEN;
   4061 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4062 
   4063 			/* TARC1 */
   4064 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4065 			/* bit 28 */
   4066 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4067 				tarc1 &= ~__BIT(28);
   4068 			else
   4069 				tarc1 |= __BIT(28);
   4070 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4071 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4072 
   4073 			/* Device Status */
   4074 			if (sc->sc_type == WM_T_ICH8) {
   4075 				reg = CSR_READ(sc, WMREG_STATUS);
   4076 				reg &= ~__BIT(31);
   4077 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4078 
   4079 			}
   4080 
   4081 			/* IOSFPC */
   4082 			if (sc->sc_type == WM_T_PCH_SPT) {
   4083 				reg = CSR_READ(sc, WMREG_IOSFPC);
   4084 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
   4085 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4086 			}
   4087 			/*
			 * To work around a descriptor data corruption issue
			 * seen with NFSv2 UDP traffic, simply disable the
			 * NFS filtering capability.
   4091 			 */
   4092 			reg = CSR_READ(sc, WMREG_RFCTL);
   4093 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4094 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4095 			break;
   4096 		default:
   4097 			break;
   4098 		}
   4099 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4100 
   4101 		switch (sc->sc_type) {
   4102 		/*
   4103 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4104 		 * Avoid RSS Hash Value bug.
   4105 		 */
   4106 		case WM_T_82571:
   4107 		case WM_T_82572:
   4108 		case WM_T_82573:
   4109 		case WM_T_80003:
   4110 		case WM_T_ICH8:
   4111 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4113 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4114 			break;
   4115 		case WM_T_82574:
			/* Use extended Rx descriptors. */
   4117 			reg = CSR_READ(sc, WMREG_RFCTL);
   4118 			reg |= WMREG_RFCTL_EXSTEN;
   4119 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4120 			break;
   4121 		default:
   4122 			break;
   4123 		}
   4124 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4125 		/*
   4126 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4127 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4128 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4129 		 * Correctly by the Device"
   4130 		 *
   4131 		 * I354(C2000) Errata AVR53:
   4132 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4133 		 * Hang"
   4134 		 */
   4135 		reg = CSR_READ(sc, WMREG_RFCTL);
   4136 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4137 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4138 	}
   4139 }
   4140 
   4141 static uint32_t
   4142 wm_rxpbs_adjust_82580(uint32_t val)
   4143 {
   4144 	uint32_t rv = 0;
   4145 
   4146 	if (val < __arraycount(wm_82580_rxpbs_table))
   4147 		rv = wm_82580_rxpbs_table[val];
   4148 
   4149 	return rv;
   4150 }
   4151 
   4152 /*
   4153  * wm_reset_phy:
   4154  *
   4155  *	generic PHY reset function.
   4156  *	Same as e1000_phy_hw_reset_generic()
   4157  */
   4158 static void
   4159 wm_reset_phy(struct wm_softc *sc)
   4160 {
   4161 	uint32_t reg;
   4162 
   4163 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4164 		device_xname(sc->sc_dev), __func__));
   4165 	if (wm_phy_resetisblocked(sc))
   4166 		return;
   4167 
   4168 	sc->phy.acquire(sc);
   4169 
   4170 	reg = CSR_READ(sc, WMREG_CTRL);
   4171 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4172 	CSR_WRITE_FLUSH(sc);
   4173 
   4174 	delay(sc->phy.reset_delay_us);
   4175 
   4176 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4177 	CSR_WRITE_FLUSH(sc);
   4178 
   4179 	delay(150);
   4180 
   4181 	sc->phy.release(sc);
   4182 
   4183 	wm_get_cfg_done(sc);
   4184 	wm_phy_post_reset(sc);
   4185 }
   4186 
   4187 static void
   4188 wm_flush_desc_rings(struct wm_softc *sc)
   4189 {
   4190 	pcireg_t preg;
   4191 	uint32_t reg;
   4192 	struct wm_txqueue *txq;
   4193 	wiseman_txdesc_t *txd;
   4194 	int nexttx;
   4195 	uint32_t rctl;
   4196 
   4197 	/* First, disable MULR fix in FEXTNVM11 */
   4198 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4199 	reg |= FEXTNVM11_DIS_MULRFIX;
   4200 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4201 
   4202 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4203 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4204 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4205 		return;
   4206 
   4207 	/* TX */
   4208 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4209 	    device_xname(sc->sc_dev), preg, reg);
   4210 	reg = CSR_READ(sc, WMREG_TCTL);
   4211 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4212 
   4213 	txq = &sc->sc_queue[0].wmq_txq;
   4214 	nexttx = txq->txq_next;
   4215 	txd = &txq->txq_descs[nexttx];
   4216 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4218 	txd->wtx_fields.wtxu_status = 0;
   4219 	txd->wtx_fields.wtxu_options = 0;
   4220 	txd->wtx_fields.wtxu_vlan = 0;
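
	/*
	 * (The descriptor above describes a single 512-byte dummy frame
	 * with IFCS set; it is queued only so the hardware advances the
	 * Tx ring and the requested flush can complete.)
	 */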
   4221 
   4222 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4223 	    BUS_SPACE_BARRIER_WRITE);
   4224 
   4225 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4226 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4227 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4228 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4229 	delay(250);
   4230 
   4231 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4232 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4233 		return;
   4234 
   4235 	/* RX */
   4236 	printf("%s: Need RX flush (reg = %08x)\n",
   4237 	    device_xname(sc->sc_dev), preg);
   4238 	rctl = CSR_READ(sc, WMREG_RCTL);
   4239 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4240 	CSR_WRITE_FLUSH(sc);
   4241 	delay(150);
   4242 
   4243 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4244 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4245 	reg &= 0xffffc000;
   4246 	/*
   4247 	 * update thresholds: prefetch threshold to 31, host threshold
   4248 	 * to 1 and make sure the granularity is "descriptors" and not
   4249 	 * "cache lines"
   4250 	 */
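	/*
	 * (PTHRESH occupies the low bits and HTHRESH starts at bit 8, so
	 * 0x1f and (1 << 8) below encode thresholds of 31 and 1.)
	 */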
   4251 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4252 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4253 
   4254 	/*
   4255 	 * momentarily enable the RX ring for the changes to take
   4256 	 * effect
   4257 	 */
   4258 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4259 	CSR_WRITE_FLUSH(sc);
   4260 	delay(150);
   4261 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4262 }
   4263 
   4264 /*
   4265  * wm_reset:
   4266  *
   4267  *	Reset the i82542 chip.
   4268  */
   4269 static void
   4270 wm_reset(struct wm_softc *sc)
   4271 {
   4272 	int phy_reset = 0;
   4273 	int i, error = 0;
   4274 	uint32_t reg;
   4275 	uint16_t kmreg;
   4276 	int rv;
   4277 
   4278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4279 		device_xname(sc->sc_dev), __func__));
   4280 	KASSERT(sc->sc_type != 0);
   4281 
   4282 	/*
   4283 	 * Allocate on-chip memory according to the MTU size.
   4284 	 * The Packet Buffer Allocation register must be written
   4285 	 * before the chip is reset.
   4286 	 */
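	/*
	 * (For example, on the 82547 below an MTU above 8192 selects
	 * PBA_22K, leaving 40K - 22K = 18KB of FIFO for Tx rather than
	 * 10KB.)
	 */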
   4287 	switch (sc->sc_type) {
   4288 	case WM_T_82547:
   4289 	case WM_T_82547_2:
   4290 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4291 		    PBA_22K : PBA_30K;
   4292 		for (i = 0; i < sc->sc_nqueues; i++) {
   4293 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4294 			txq->txq_fifo_head = 0;
   4295 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4296 			txq->txq_fifo_size =
   4297 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4298 			txq->txq_fifo_stall = 0;
   4299 		}
   4300 		break;
   4301 	case WM_T_82571:
   4302 	case WM_T_82572:
   4303 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4304 	case WM_T_80003:
   4305 		sc->sc_pba = PBA_32K;
   4306 		break;
   4307 	case WM_T_82573:
   4308 		sc->sc_pba = PBA_12K;
   4309 		break;
   4310 	case WM_T_82574:
   4311 	case WM_T_82583:
   4312 		sc->sc_pba = PBA_20K;
   4313 		break;
   4314 	case WM_T_82576:
   4315 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4316 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4317 		break;
   4318 	case WM_T_82580:
   4319 	case WM_T_I350:
   4320 	case WM_T_I354:
   4321 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4322 		break;
   4323 	case WM_T_I210:
   4324 	case WM_T_I211:
   4325 		sc->sc_pba = PBA_34K;
   4326 		break;
   4327 	case WM_T_ICH8:
   4328 		/* Workaround for a bit corruption issue in FIFO memory */
   4329 		sc->sc_pba = PBA_8K;
   4330 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4331 		break;
   4332 	case WM_T_ICH9:
   4333 	case WM_T_ICH10:
   4334 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4335 		    PBA_14K : PBA_10K;
   4336 		break;
   4337 	case WM_T_PCH:
   4338 	case WM_T_PCH2:
   4339 	case WM_T_PCH_LPT:
   4340 	case WM_T_PCH_SPT:
   4341 		sc->sc_pba = PBA_26K;
   4342 		break;
   4343 	default:
   4344 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4345 		    PBA_40K : PBA_48K;
   4346 		break;
   4347 	}
   4348 	/*
   4349 	 * Only old or non-multiqueue devices have the PBA register
   4350 	 * XXX Need special handling for 82575.
   4351 	 */
   4352 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4353 	    || (sc->sc_type == WM_T_82575))
   4354 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4355 
   4356 	/* Prevent the PCI-E bus from sticking */
   4357 	if (sc->sc_flags & WM_F_PCIE) {
   4358 		int timeout = 800;
   4359 
   4360 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4361 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4362 
   4363 		while (timeout--) {
   4364 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4365 			    == 0)
   4366 				break;
   4367 			delay(100);
   4368 		}
   4369 		if (timeout == 0)
   4370 			device_printf(sc->sc_dev,
   4371 			    "failed to disable busmastering\n");
   4372 	}
   4373 
   4374 	/* Set the completion timeout for interface */
   4375 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4376 	    || (sc->sc_type == WM_T_82580)
   4377 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4378 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4379 		wm_set_pcie_completion_timeout(sc);
   4380 
   4381 	/* Clear interrupt */
   4382 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4383 	if (wm_is_using_msix(sc)) {
   4384 		if (sc->sc_type != WM_T_82574) {
   4385 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4386 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4387 		} else {
   4388 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4389 		}
   4390 	}
   4391 
   4392 	/* Stop the transmit and receive processes. */
   4393 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4394 	sc->sc_rctl &= ~RCTL_EN;
   4395 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4396 	CSR_WRITE_FLUSH(sc);
   4397 
   4398 	/* XXX set_tbi_sbp_82543() */
   4399 
   4400 	delay(10*1000);
   4401 
   4402 	/* Must acquire the MDIO ownership before MAC reset */
   4403 	switch (sc->sc_type) {
   4404 	case WM_T_82573:
   4405 	case WM_T_82574:
   4406 	case WM_T_82583:
   4407 		error = wm_get_hw_semaphore_82573(sc);
   4408 		break;
   4409 	default:
   4410 		break;
   4411 	}
   4412 
   4413 	/*
   4414 	 * 82541 Errata 29? & 82547 Errata 28?
   4415 	 * See also the description about PHY_RST bit in CTRL register
   4416 	 * in 8254x_GBe_SDM.pdf.
   4417 	 */
   4418 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4419 		CSR_WRITE(sc, WMREG_CTRL,
   4420 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4421 		CSR_WRITE_FLUSH(sc);
   4422 		delay(5000);
   4423 	}
   4424 
   4425 	switch (sc->sc_type) {
   4426 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4427 	case WM_T_82541:
   4428 	case WM_T_82541_2:
   4429 	case WM_T_82547:
   4430 	case WM_T_82547_2:
   4431 		/*
   4432 		 * On some chipsets, a reset through a memory-mapped write
   4433 		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes a major headache that can be
   4435 		 * avoided by issuing the reset via indirect register writes
   4436 		 * through I/O space.
   4437 		 *
   4438 		 * So, if we successfully mapped the I/O BAR at attach time,
   4439 		 * use that.  Otherwise, try our luck with a memory-mapped
   4440 		 * reset.
   4441 		 */
   4442 		if (sc->sc_flags & WM_F_IOH_VALID)
   4443 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4444 		else
   4445 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4446 		break;
   4447 	case WM_T_82545_3:
   4448 	case WM_T_82546_3:
   4449 		/* Use the shadow control register on these chips. */
   4450 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4451 		break;
   4452 	case WM_T_80003:
   4453 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4454 		sc->phy.acquire(sc);
   4455 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4456 		sc->phy.release(sc);
   4457 		break;
   4458 	case WM_T_ICH8:
   4459 	case WM_T_ICH9:
   4460 	case WM_T_ICH10:
   4461 	case WM_T_PCH:
   4462 	case WM_T_PCH2:
   4463 	case WM_T_PCH_LPT:
   4464 	case WM_T_PCH_SPT:
   4465 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4466 		if (wm_phy_resetisblocked(sc) == false) {
   4467 			/*
   4468 			 * Gate automatic PHY configuration by hardware on
   4469 			 * non-managed 82579
   4470 			 */
   4471 			if ((sc->sc_type == WM_T_PCH2)
   4472 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4473 				== 0))
   4474 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4475 
   4476 			reg |= CTRL_PHY_RESET;
   4477 			phy_reset = 1;
   4478 		} else
   4479 			printf("XXX reset is blocked!!!\n");
   4480 		sc->phy.acquire(sc);
   4481 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier while resetting */
   4483 		delay(20*1000);
   4484 		mutex_exit(sc->sc_ich_phymtx);
   4485 		break;
   4486 	case WM_T_82580:
   4487 	case WM_T_I350:
   4488 	case WM_T_I354:
   4489 	case WM_T_I210:
   4490 	case WM_T_I211:
   4491 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4492 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4493 			CSR_WRITE_FLUSH(sc);
   4494 		delay(5000);
   4495 		break;
   4496 	case WM_T_82542_2_0:
   4497 	case WM_T_82542_2_1:
   4498 	case WM_T_82543:
   4499 	case WM_T_82540:
   4500 	case WM_T_82545:
   4501 	case WM_T_82546:
   4502 	case WM_T_82571:
   4503 	case WM_T_82572:
   4504 	case WM_T_82573:
   4505 	case WM_T_82574:
   4506 	case WM_T_82575:
   4507 	case WM_T_82576:
   4508 	case WM_T_82583:
   4509 	default:
   4510 		/* Everything else can safely use the documented method. */
   4511 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4512 		break;
   4513 	}
   4514 
   4515 	/* Must release the MDIO ownership after MAC reset */
   4516 	switch (sc->sc_type) {
   4517 	case WM_T_82573:
   4518 	case WM_T_82574:
   4519 	case WM_T_82583:
   4520 		if (error == 0)
   4521 			wm_put_hw_semaphore_82573(sc);
   4522 		break;
   4523 	default:
   4524 		break;
   4525 	}
   4526 
   4527 	if (phy_reset != 0)
   4528 		wm_get_cfg_done(sc);
   4529 
   4530 	/* reload EEPROM */
   4531 	switch (sc->sc_type) {
   4532 	case WM_T_82542_2_0:
   4533 	case WM_T_82542_2_1:
   4534 	case WM_T_82543:
   4535 	case WM_T_82544:
   4536 		delay(10);
   4537 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4538 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4539 		CSR_WRITE_FLUSH(sc);
   4540 		delay(2000);
   4541 		break;
   4542 	case WM_T_82540:
   4543 	case WM_T_82545:
   4544 	case WM_T_82545_3:
   4545 	case WM_T_82546:
   4546 	case WM_T_82546_3:
   4547 		delay(5*1000);
   4548 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4549 		break;
   4550 	case WM_T_82541:
   4551 	case WM_T_82541_2:
   4552 	case WM_T_82547:
   4553 	case WM_T_82547_2:
   4554 		delay(20000);
   4555 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4556 		break;
   4557 	case WM_T_82571:
   4558 	case WM_T_82572:
   4559 	case WM_T_82573:
   4560 	case WM_T_82574:
   4561 	case WM_T_82583:
   4562 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4563 			delay(10);
   4564 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4565 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4566 			CSR_WRITE_FLUSH(sc);
   4567 		}
   4568 		/* check EECD_EE_AUTORD */
   4569 		wm_get_auto_rd_done(sc);
   4570 		/*
    4571 		 * PHY configuration from the NVM starts only after
    4572 		 * EECD_AUTO_RD is set.
   4573 		 */
   4574 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4575 		    || (sc->sc_type == WM_T_82583))
   4576 			delay(25*1000);
   4577 		break;
   4578 	case WM_T_82575:
   4579 	case WM_T_82576:
   4580 	case WM_T_82580:
   4581 	case WM_T_I350:
   4582 	case WM_T_I354:
   4583 	case WM_T_I210:
   4584 	case WM_T_I211:
   4585 	case WM_T_80003:
   4586 		/* check EECD_EE_AUTORD */
   4587 		wm_get_auto_rd_done(sc);
   4588 		break;
   4589 	case WM_T_ICH8:
   4590 	case WM_T_ICH9:
   4591 	case WM_T_ICH10:
   4592 	case WM_T_PCH:
   4593 	case WM_T_PCH2:
   4594 	case WM_T_PCH_LPT:
   4595 	case WM_T_PCH_SPT:
   4596 		break;
   4597 	default:
   4598 		panic("%s: unknown type\n", __func__);
   4599 	}
   4600 
   4601 	/* Check whether EEPROM is present or not */
   4602 	switch (sc->sc_type) {
   4603 	case WM_T_82575:
   4604 	case WM_T_82576:
   4605 	case WM_T_82580:
   4606 	case WM_T_I350:
   4607 	case WM_T_I354:
   4608 	case WM_T_ICH8:
   4609 	case WM_T_ICH9:
   4610 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4611 			/* Not found */
   4612 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4613 			if (sc->sc_type == WM_T_82575)
   4614 				wm_reset_init_script_82575(sc);
   4615 		}
   4616 		break;
   4617 	default:
   4618 		break;
   4619 	}
   4620 
   4621 	if (phy_reset != 0)
   4622 		wm_phy_post_reset(sc);
   4623 
   4624 	if ((sc->sc_type == WM_T_82580)
   4625 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4626 		/* clear global device reset status bit */
   4627 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4628 	}
   4629 
   4630 	/* Clear any pending interrupt events. */
   4631 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4632 	reg = CSR_READ(sc, WMREG_ICR);
   4633 	if (wm_is_using_msix(sc)) {
   4634 		if (sc->sc_type != WM_T_82574) {
   4635 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4636 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4637 		} else
   4638 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4639 	}
   4640 
   4641 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4642 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4643 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4644 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4645 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4646 		reg |= KABGTXD_BGSQLBIAS;
   4647 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4648 	}
   4649 
   4650 	/* reload sc_ctrl */
   4651 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4652 
   4653 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4654 		wm_set_eee_i350(sc);
   4655 
   4656 	/*
   4657 	 * For PCH, this write will make sure that any noise will be detected
   4658 	 * as a CRC error and be dropped rather than show up as a bad packet
   4659 	 * to the DMA engine
    4660 	 * to the DMA engine.
   4661 	if (sc->sc_type == WM_T_PCH)
   4662 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4663 
   4664 	if (sc->sc_type >= WM_T_82544)
   4665 		CSR_WRITE(sc, WMREG_WUC, 0);
   4666 
   4667 	wm_reset_mdicnfg_82580(sc);
   4668 
   4669 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4670 		wm_pll_workaround_i210(sc);
   4671 
   4672 	if (sc->sc_type == WM_T_80003) {
   4673 		/* default to TRUE to enable the MDIC W/A */
   4674 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4675 
   4676 		rv = wm_kmrn_readreg(sc,
   4677 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4678 		if (rv == 0) {
   4679 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4680 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4681 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4682 			else
   4683 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4684 		}
   4685 	}
   4686 }
   4687 
   4688 /*
   4689  * wm_add_rxbuf:
   4690  *
    4691  *	Add a receive buffer to the indicated descriptor.
   4692  */
   4693 static int
   4694 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4695 {
   4696 	struct wm_softc *sc = rxq->rxq_sc;
   4697 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4698 	struct mbuf *m;
   4699 	int error;
   4700 
   4701 	KASSERT(mutex_owned(rxq->rxq_lock));
   4702 
   4703 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4704 	if (m == NULL)
   4705 		return ENOBUFS;
   4706 
   4707 	MCLGET(m, M_DONTWAIT);
   4708 	if ((m->m_flags & M_EXT) == 0) {
   4709 		m_freem(m);
   4710 		return ENOBUFS;
   4711 	}
   4712 
   4713 	if (rxs->rxs_mbuf != NULL)
   4714 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4715 
   4716 	rxs->rxs_mbuf = m;
   4717 
   4718 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4719 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4720 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4721 	if (error) {
   4722 		/* XXX XXX XXX */
   4723 		aprint_error_dev(sc->sc_dev,
   4724 		    "unable to load rx DMA map %d, error = %d\n",
   4725 		    idx, error);
   4726 		panic("wm_add_rxbuf");
   4727 	}
   4728 
   4729 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4730 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4731 
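	/*
	 * On NEWQUEUE chips the descriptor may be handed to the hardware
	 * only after RCTL.EN has been set (see wm_init_locked()), so skip
	 * wm_init_rxdesc() here while the receiver is still disabled.
	 */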
   4732 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4733 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4734 			wm_init_rxdesc(rxq, idx);
   4735 	} else
   4736 		wm_init_rxdesc(rxq, idx);
   4737 
   4738 	return 0;
   4739 }
   4740 
   4741 /*
   4742  * wm_rxdrain:
   4743  *
   4744  *	Drain the receive queue.
   4745  */
   4746 static void
   4747 wm_rxdrain(struct wm_rxqueue *rxq)
   4748 {
   4749 	struct wm_softc *sc = rxq->rxq_sc;
   4750 	struct wm_rxsoft *rxs;
   4751 	int i;
   4752 
   4753 	KASSERT(mutex_owned(rxq->rxq_lock));
   4754 
   4755 	for (i = 0; i < WM_NRXDESC; i++) {
   4756 		rxs = &rxq->rxq_soft[i];
   4757 		if (rxs->rxs_mbuf != NULL) {
   4758 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4759 			m_freem(rxs->rxs_mbuf);
   4760 			rxs->rxs_mbuf = NULL;
   4761 		}
   4762 	}
   4763 }
   4764 
   4765 
   4766 /*
    4767  * XXX Copied from FreeBSD's sys/net/rss_config.c.
   4768  */
   4769 /*
   4770  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4771  * effectiveness may be limited by algorithm choice and available entropy
   4772  * during the boot.
   4773  *
   4774  * XXXRW: And that we don't randomize it yet!
   4775  *
   4776  * This is the default Microsoft RSS specification key which is also
   4777  * the Chelsio T5 firmware default key.
   4778  */
   4779 #define RSS_KEYSIZE 40
   4780 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4781 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4782 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4783 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4784 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4785 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4786 };
   4787 
   4788 /*
    4789  * Caller must pass an array of size sizeof(wm_rss_key).
    4790  *
    4791  * XXX
    4792  * Since if_ixgbe may also use this function, it should not be
    4793  * an if_wm specific function.
   4794  */
   4795 static void
   4796 wm_rss_getkey(uint8_t *key)
   4797 {
   4798 
   4799 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4800 }
   4801 
   4802 /*
   4803  * Setup registers for RSS.
   4804  *
 * XXX VMDq is not yet supported.
   4806  */
   4807 static void
   4808 wm_init_rss(struct wm_softc *sc)
   4809 {
   4810 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4811 	int i;
   4812 
   4813 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4814 
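	/*
	 * Distribute the redirection-table entries round-robin across
	 * the RX queues; the low bits of a received packet's RSS hash
	 * select an entry, which in turn selects the queue.
	 */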
   4815 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4816 		int qid, reta_ent;
   4817 
   4818 		qid  = i % sc->sc_nqueues;
    4819 		switch (sc->sc_type) {
   4820 		case WM_T_82574:
   4821 			reta_ent = __SHIFTIN(qid,
   4822 			    RETA_ENT_QINDEX_MASK_82574);
   4823 			break;
   4824 		case WM_T_82575:
   4825 			reta_ent = __SHIFTIN(qid,
   4826 			    RETA_ENT_QINDEX1_MASK_82575);
   4827 			break;
   4828 		default:
   4829 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4830 			break;
   4831 		}
   4832 
   4833 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4834 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4835 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4836 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4837 	}
   4838 
   4839 	wm_rss_getkey((uint8_t *)rss_key);
   4840 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4841 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4842 
   4843 	if (sc->sc_type == WM_T_82574)
   4844 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4845 	else
   4846 		mrqc = MRQC_ENABLE_RSS_MQ;
   4847 
   4848 	/*
    4849 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   4850 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4851 	 */
   4852 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4853 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4854 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4855 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4856 
   4857 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4858 }
   4859 
   4860 /*
    4861  * Adjust the TX and RX queue numbers which the system actually uses.
    4862  *
    4863  * The numbers are affected by the following parameters:
    4864  *     - The number of hardware queues
   4865  *     - The number of MSI-X vectors (= "nvectors" argument)
   4866  *     - ncpu
   4867  */
   4868 static void
   4869 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4870 {
   4871 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4872 
   4873 	if (nvectors < 2) {
   4874 		sc->sc_nqueues = 1;
   4875 		return;
   4876 	}
   4877 
    4878 	switch (sc->sc_type) {
   4879 	case WM_T_82572:
   4880 		hw_ntxqueues = 2;
   4881 		hw_nrxqueues = 2;
   4882 		break;
   4883 	case WM_T_82574:
   4884 		hw_ntxqueues = 2;
   4885 		hw_nrxqueues = 2;
   4886 		break;
   4887 	case WM_T_82575:
   4888 		hw_ntxqueues = 4;
   4889 		hw_nrxqueues = 4;
   4890 		break;
   4891 	case WM_T_82576:
   4892 		hw_ntxqueues = 16;
   4893 		hw_nrxqueues = 16;
   4894 		break;
   4895 	case WM_T_82580:
   4896 	case WM_T_I350:
   4897 	case WM_T_I354:
   4898 		hw_ntxqueues = 8;
   4899 		hw_nrxqueues = 8;
   4900 		break;
   4901 	case WM_T_I210:
   4902 		hw_ntxqueues = 4;
   4903 		hw_nrxqueues = 4;
   4904 		break;
   4905 	case WM_T_I211:
   4906 		hw_ntxqueues = 2;
   4907 		hw_nrxqueues = 2;
   4908 		break;
   4909 		/*
    4910 		 * As the following Ethernet controllers do not support
    4911 		 * MSI-X, this driver does not use multiqueue on them:
   4912 		 *     - WM_T_80003
   4913 		 *     - WM_T_ICH8
   4914 		 *     - WM_T_ICH9
   4915 		 *     - WM_T_ICH10
   4916 		 *     - WM_T_PCH
   4917 		 *     - WM_T_PCH2
   4918 		 *     - WM_T_PCH_LPT
   4919 		 */
   4920 	default:
   4921 		hw_ntxqueues = 1;
   4922 		hw_nrxqueues = 1;
   4923 		break;
   4924 	}
   4925 
   4926 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4927 
   4928 	/*
    4929 	 * As using more queues than MSI-X vectors cannot improve scaling,
    4930 	 * we limit the number of queues actually used.
   4931 	 */
   4932 	if (nvectors < hw_nqueues + 1) {
   4933 		sc->sc_nqueues = nvectors - 1;
   4934 	} else {
   4935 		sc->sc_nqueues = hw_nqueues;
   4936 	}
   4937 
   4938 	/*
    4939 	 * As using more queues than CPUs cannot improve scaling, we limit
    4940 	 * the number of queues actually used.
   4941 	 */
   4942 	if (ncpu < sc->sc_nqueues)
   4943 		sc->sc_nqueues = ncpu;
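
	/*
	 * Example (hypothetical numbers): an 82576 exposes 16 hardware
	 * queues; with nvectors = 5 and ncpu = 8 this yields
	 * min(16, 5 - 1, 8) = 4 queues.
	 */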
   4944 }
   4945 
   4946 static inline bool
   4947 wm_is_using_msix(struct wm_softc *sc)
   4948 {
   4949 
   4950 	return (sc->sc_nintrs > 1);
   4951 }
   4952 
   4953 static inline bool
   4954 wm_is_using_multiqueue(struct wm_softc *sc)
   4955 {
   4956 
   4957 	return (sc->sc_nqueues > 1);
   4958 }
   4959 
   4960 static int
   4961 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4962 {
   4963 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4964 	wmq->wmq_id = qidx;
   4965 	wmq->wmq_intr_idx = intr_idx;
   4966 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4967 #ifdef WM_MPSAFE
   4968 	    | SOFTINT_MPSAFE
   4969 #endif
   4970 	    , wm_handle_queue, wmq);
   4971 	if (wmq->wmq_si != NULL)
   4972 		return 0;
   4973 
   4974 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4975 	    wmq->wmq_id);
   4976 
   4977 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4978 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4979 	return ENOMEM;
   4980 }
   4981 
   4982 /*
   4983  * Both single interrupt MSI and INTx can use this function.
   4984  */
   4985 static int
   4986 wm_setup_legacy(struct wm_softc *sc)
   4987 {
   4988 	pci_chipset_tag_t pc = sc->sc_pc;
   4989 	const char *intrstr = NULL;
   4990 	char intrbuf[PCI_INTRSTR_LEN];
   4991 	int error;
   4992 
   4993 	error = wm_alloc_txrx_queues(sc);
   4994 	if (error) {
   4995 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4996 		    error);
   4997 		return ENOMEM;
   4998 	}
   4999 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5000 	    sizeof(intrbuf));
   5001 #ifdef WM_MPSAFE
   5002 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5003 #endif
   5004 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5005 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5006 	if (sc->sc_ihs[0] == NULL) {
   5007 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5008 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5009 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5010 		return ENOMEM;
   5011 	}
   5012 
   5013 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5014 	sc->sc_nintrs = 1;
   5015 
   5016 	return wm_softint_establish(sc, 0, 0);
   5017 }
   5018 
   5019 static int
   5020 wm_setup_msix(struct wm_softc *sc)
   5021 {
   5022 	void *vih;
   5023 	kcpuset_t *affinity;
   5024 	int qidx, error, intr_idx, txrx_established;
   5025 	pci_chipset_tag_t pc = sc->sc_pc;
   5026 	const char *intrstr = NULL;
   5027 	char intrbuf[PCI_INTRSTR_LEN];
   5028 	char intr_xname[INTRDEVNAMEBUF];
   5029 
   5030 	if (sc->sc_nqueues < ncpu) {
   5031 		/*
    5032 		 * To avoid interference with other devices' interrupts, the
    5033 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   5034 		 */
   5035 		sc->sc_affinity_offset = 1;
   5036 	} else {
   5037 		/*
    5038 		 * In this case, this device uses all CPUs, so we unify the
    5039 		 * affinity cpu_index with the MSI-X vector number for readability.
   5040 		 */
   5041 		sc->sc_affinity_offset = 0;
   5042 	}
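
	/*
	 * E.g. with 4 queues on an 8-CPU system, the loop below binds
	 * TXRX0..TXRX3 to CPU#1..CPU#4 and the LINK interrupt keeps its
	 * default affinity.
	 */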
   5043 
   5044 	error = wm_alloc_txrx_queues(sc);
   5045 	if (error) {
   5046 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5047 		    error);
   5048 		return ENOMEM;
   5049 	}
   5050 
   5051 	kcpuset_create(&affinity, false);
   5052 	intr_idx = 0;
   5053 
   5054 	/*
   5055 	 * TX and RX
   5056 	 */
   5057 	txrx_established = 0;
   5058 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5059 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5060 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5061 
   5062 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5063 		    sizeof(intrbuf));
   5064 #ifdef WM_MPSAFE
   5065 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5066 		    PCI_INTR_MPSAFE, true);
   5067 #endif
   5068 		memset(intr_xname, 0, sizeof(intr_xname));
   5069 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5070 		    device_xname(sc->sc_dev), qidx);
   5071 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5072 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5073 		if (vih == NULL) {
   5074 			aprint_error_dev(sc->sc_dev,
   5075 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5076 			    intrstr ? " at " : "",
   5077 			    intrstr ? intrstr : "");
   5078 
   5079 			goto fail;
   5080 		}
   5081 		kcpuset_zero(affinity);
   5082 		/* Round-robin affinity */
   5083 		kcpuset_set(affinity, affinity_to);
   5084 		error = interrupt_distribute(vih, affinity, NULL);
   5085 		if (error == 0) {
   5086 			aprint_normal_dev(sc->sc_dev,
   5087 			    "for TX and RX interrupting at %s affinity to %u\n",
   5088 			    intrstr, affinity_to);
   5089 		} else {
   5090 			aprint_normal_dev(sc->sc_dev,
   5091 			    "for TX and RX interrupting at %s\n", intrstr);
   5092 		}
   5093 		sc->sc_ihs[intr_idx] = vih;
   5094 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5095 			goto fail;
   5096 		txrx_established++;
   5097 		intr_idx++;
   5098 	}
   5099 
   5100 	/*
   5101 	 * LINK
   5102 	 */
   5103 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5104 	    sizeof(intrbuf));
   5105 #ifdef WM_MPSAFE
   5106 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5107 #endif
   5108 	memset(intr_xname, 0, sizeof(intr_xname));
   5109 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5110 	    device_xname(sc->sc_dev));
   5111 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5112 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5113 	if (vih == NULL) {
   5114 		aprint_error_dev(sc->sc_dev,
   5115 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5116 		    intrstr ? " at " : "",
   5117 		    intrstr ? intrstr : "");
   5118 
   5119 		goto fail;
   5120 	}
    5121 	/* Keep the default affinity for the LINK interrupt */
   5122 	aprint_normal_dev(sc->sc_dev,
   5123 	    "for LINK interrupting at %s\n", intrstr);
   5124 	sc->sc_ihs[intr_idx] = vih;
   5125 	sc->sc_link_intr_idx = intr_idx;
   5126 
   5127 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5128 	kcpuset_destroy(affinity);
   5129 	return 0;
   5130 
   5131  fail:
   5132 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5133 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5134 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5135 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5136 	}
   5137 
   5138 	kcpuset_destroy(affinity);
   5139 	return ENOMEM;
   5140 }
   5141 
   5142 static void
   5143 wm_unset_stopping_flags(struct wm_softc *sc)
   5144 {
   5145 	int i;
   5146 
   5147 	KASSERT(WM_CORE_LOCKED(sc));
   5148 
    5149 	/* Must unset stopping flags in ascending order. */
    5152 	for (i = 0; i < sc->sc_nqueues; i++) {
   5153 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5154 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5155 
   5156 		mutex_enter(txq->txq_lock);
   5157 		txq->txq_stopping = false;
   5158 		mutex_exit(txq->txq_lock);
   5159 
   5160 		mutex_enter(rxq->rxq_lock);
   5161 		rxq->rxq_stopping = false;
   5162 		mutex_exit(rxq->rxq_lock);
   5163 	}
   5164 
   5165 	sc->sc_core_stopping = false;
   5166 }
   5167 
   5168 static void
   5169 wm_set_stopping_flags(struct wm_softc *sc)
   5170 {
   5171 	int i;
   5172 
   5173 	KASSERT(WM_CORE_LOCKED(sc));
   5174 
   5175 	sc->sc_core_stopping = true;
   5176 
    5177 	/* Must set stopping flags in ascending order. */
    5180 	for (i = 0; i < sc->sc_nqueues; i++) {
   5181 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5182 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5183 
   5184 		mutex_enter(rxq->rxq_lock);
   5185 		rxq->rxq_stopping = true;
   5186 		mutex_exit(rxq->rxq_lock);
   5187 
   5188 		mutex_enter(txq->txq_lock);
   5189 		txq->txq_stopping = true;
   5190 		mutex_exit(txq->txq_lock);
   5191 	}
   5192 }
   5193 
   5194 /*
    5195  * Write the interrupt interval value to the ITR or EITR register.
   5196  */
   5197 static void
   5198 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5199 {
   5200 
   5201 	if (!wmq->wmq_set_itr)
   5202 		return;
   5203 
   5204 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5205 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5206 
   5207 		/*
    5208 		 * The 82575 doesn't have the CNT_INGR field, so
    5209 		 * overwrite the counter field in software.
   5210 		 */
   5211 		if (sc->sc_type == WM_T_82575)
   5212 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5213 		else
   5214 			eitr |= EITR_CNT_INGR;
   5215 
   5216 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5217 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5218 		/*
    5219 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5220 		 * the multiqueue function with MSI-X.
   5221 		 */
   5222 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5223 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5224 	} else {
   5225 		KASSERT(wmq->wmq_id == 0);
   5226 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5227 	}
   5228 
   5229 	wmq->wmq_set_itr = false;
   5230 }
   5231 
   5232 /*
   5233  * TODO
    5234  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5235  * however, it does not fit wm(4) well, so AIM is disabled until we
    5236  * find an appropriate ITR calculation.
   5237  */
   5238 /*
    5239  * Calculate the interrupt interval value to be written by
    5240  * wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5241  */
   5242 static void
   5243 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5244 {
   5245 #ifdef NOTYET
   5246 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5247 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5248 	uint32_t avg_size = 0;
   5249 	uint32_t new_itr;
   5250 
   5251 	if (rxq->rxq_packets)
   5252 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5253 	if (txq->txq_packets)
   5254 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5255 
   5256 	if (avg_size == 0) {
   5257 		new_itr = 450; /* restore default value */
   5258 		goto out;
   5259 	}
   5260 
   5261 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5262 	avg_size += 24;
   5263 
   5264 	/* Don't starve jumbo frames */
   5265 	avg_size = min(avg_size, 3000);
   5266 
   5267 	/* Give a little boost to mid-size frames */
   5268 	if ((avg_size > 300) && (avg_size < 1200))
   5269 		new_itr = avg_size / 3;
   5270 	else
   5271 		new_itr = avg_size / 2;
   5272 
   5273 out:
   5274 	/*
    5275 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5276 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5277 	 */
   5278 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5279 		new_itr *= 4;
   5280 
   5281 	if (new_itr != wmq->wmq_itr) {
   5282 		wmq->wmq_itr = new_itr;
   5283 		wmq->wmq_set_itr = true;
   5284 	} else
   5285 		wmq->wmq_set_itr = false;
   5286 
   5287 	rxq->rxq_packets = 0;
   5288 	rxq->rxq_bytes = 0;
   5289 	txq->txq_packets = 0;
   5290 	txq->txq_bytes = 0;
   5291 #endif
   5292 }
   5293 
   5294 /*
   5295  * wm_init:		[ifnet interface function]
   5296  *
   5297  *	Initialize the interface.
   5298  */
   5299 static int
   5300 wm_init(struct ifnet *ifp)
   5301 {
   5302 	struct wm_softc *sc = ifp->if_softc;
   5303 	int ret;
   5304 
   5305 	WM_CORE_LOCK(sc);
   5306 	ret = wm_init_locked(ifp);
   5307 	WM_CORE_UNLOCK(sc);
   5308 
   5309 	return ret;
   5310 }
   5311 
   5312 static int
   5313 wm_init_locked(struct ifnet *ifp)
   5314 {
   5315 	struct wm_softc *sc = ifp->if_softc;
   5316 	int i, j, trynum, error = 0;
   5317 	uint32_t reg;
   5318 
   5319 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5320 		device_xname(sc->sc_dev), __func__));
   5321 	KASSERT(WM_CORE_LOCKED(sc));
   5322 
   5323 	/*
    5324 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5325 	 * There is a small but measurable benefit to avoiding the adjustment
   5326 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5327 	 * on such platforms.  One possibility is that the DMA itself is
   5328 	 * slightly more efficient if the front of the entire packet (instead
   5329 	 * of the front of the headers) is aligned.
   5330 	 *
   5331 	 * Note we must always set align_tweak to 0 if we are using
   5332 	 * jumbo frames.
   5333 	 */
   5334 #ifdef __NO_STRICT_ALIGNMENT
   5335 	sc->sc_align_tweak = 0;
   5336 #else
   5337 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5338 		sc->sc_align_tweak = 0;
   5339 	else
   5340 		sc->sc_align_tweak = 2;
   5341 #endif /* __NO_STRICT_ALIGNMENT */
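
	/*
	 * (The 2-byte tweak offsets the 14-byte Ethernet header so that
	 * the IP header starts on a 4-byte boundary: 2 + 14 = 16.)
	 */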
   5342 
   5343 	/* Cancel any pending I/O. */
   5344 	wm_stop_locked(ifp, 0);
   5345 
   5346 	/* update statistics before reset */
   5347 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5348 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5349 
   5350 	/* PCH_SPT hardware workaround */
   5351 	if (sc->sc_type == WM_T_PCH_SPT)
   5352 		wm_flush_desc_rings(sc);
   5353 
   5354 	/* Reset the chip to a known state. */
   5355 	wm_reset(sc);
   5356 
   5357 	/*
   5358 	 * AMT based hardware can now take control from firmware
   5359 	 * Do this after reset.
   5360 	 */
   5361 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5362 		wm_get_hw_control(sc);
   5363 
   5364 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5365 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5366 		wm_legacy_irq_quirk_spt(sc);
   5367 
   5368 	/* Init hardware bits */
   5369 	wm_initialize_hardware_bits(sc);
   5370 
   5371 	/* Reset the PHY. */
   5372 	if (sc->sc_flags & WM_F_HAS_MII)
   5373 		wm_gmii_reset(sc);
   5374 
   5375 	/* Calculate (E)ITR value */
   5376 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5377 		/*
    5378 		 * For NEWQUEUE's EITR (except for the 82575).
    5379 		 * The 82575's EITR should be set to the same throttling value as
    5380 		 * other old controllers' ITR because the interrupt/sec
    5381 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5382 		 *
    5383 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5384 		 *
    5385 		 * For N interrupts/sec, set this value to:
    5386 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5387 		 */
   5388 		sc->sc_itr_init = 450;
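		/*
		 * 450 corresponds to N = 1,000,000 / 450, i.e. roughly
		 * 2222 interrupts/sec.
		 */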
   5389 	} else if (sc->sc_type >= WM_T_82543) {
   5390 		/*
   5391 		 * Set up the interrupt throttling register (units of 256ns)
   5392 		 * Note that a footnote in Intel's documentation says this
   5393 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    5394 		 * or 10Mbit mode.  Empirically, this appears to be true also
    5395 		 * for the 1024ns units of the other
   5396 		 * interrupt-related timer registers -- so, really, we ought
   5397 		 * to divide this value by 4 when the link speed is low.
   5398 		 *
   5399 		 * XXX implement this division at link speed change!
   5400 		 */
   5401 
   5402 		/*
   5403 		 * For N interrupts/sec, set this value to:
   5404 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5405 		 * absolute and packet timer values to this value
   5406 		 * divided by 4 to get "simple timer" behavior.
   5407 		 */
   5408 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
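		/* I.e. 1,000,000,000 / (1500 * 256) ~= 2604. */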
   5409 	}
   5410 
   5411 	error = wm_init_txrx_queues(sc);
   5412 	if (error)
   5413 		goto out;
   5414 
   5415 	/*
   5416 	 * Clear out the VLAN table -- we don't use it (yet).
   5417 	 */
   5418 	CSR_WRITE(sc, WMREG_VET, 0);
   5419 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5420 		trynum = 10; /* Due to hw errata */
   5421 	else
   5422 		trynum = 1;
   5423 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5424 		for (j = 0; j < trynum; j++)
   5425 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5426 
   5427 	/*
   5428 	 * Set up flow-control parameters.
   5429 	 *
   5430 	 * XXX Values could probably stand some tuning.
   5431 	 */
   5432 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5433 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5434 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5435 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5436 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5437 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5438 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5439 	}
   5440 
   5441 	sc->sc_fcrtl = FCRTL_DFLT;
   5442 	if (sc->sc_type < WM_T_82543) {
   5443 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5444 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5445 	} else {
   5446 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5447 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5448 	}
   5449 
   5450 	if (sc->sc_type == WM_T_80003)
   5451 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5452 	else
   5453 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5454 
   5455 	/* Writes the control register. */
   5456 	wm_set_vlan(sc);
   5457 
   5458 	if (sc->sc_flags & WM_F_HAS_MII) {
   5459 		uint16_t kmreg;
   5460 
   5461 		switch (sc->sc_type) {
   5462 		case WM_T_80003:
   5463 		case WM_T_ICH8:
   5464 		case WM_T_ICH9:
   5465 		case WM_T_ICH10:
   5466 		case WM_T_PCH:
   5467 		case WM_T_PCH2:
   5468 		case WM_T_PCH_LPT:
   5469 		case WM_T_PCH_SPT:
   5470 			/*
   5471 			 * Set the mac to wait the maximum time between each
   5472 			 * iteration and increase the max iterations when
   5473 			 * polling the phy; this fixes erroneous timeouts at
   5474 			 * 10Mbps.
   5475 			 */
   5476 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5477 			    0xFFFF);
   5478 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5479 			    &kmreg);
   5480 			kmreg |= 0x3F;
   5481 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5482 			    kmreg);
   5483 			break;
   5484 		default:
   5485 			break;
   5486 		}
   5487 
   5488 		if (sc->sc_type == WM_T_80003) {
   5489 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5490 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5491 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5492 
   5493 			/* Bypass RX and TX FIFO's */
   5494 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5495 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5496 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5497 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5498 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5499 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5500 		}
   5501 	}
   5502 #if 0
   5503 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5504 #endif
   5505 
   5506 	/* Set up checksum offload parameters. */
   5507 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5508 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5509 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5510 		reg |= RXCSUM_IPOFL;
   5511 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5512 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5513 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5514 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5515 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5516 
   5517 	/* Set registers about MSI-X */
   5518 	if (wm_is_using_msix(sc)) {
   5519 		uint32_t ivar;
   5520 		struct wm_queue *wmq;
   5521 		int qid, qintr_idx;
   5522 
   5523 		if (sc->sc_type == WM_T_82575) {
   5524 			/* Interrupt control */
   5525 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5526 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5527 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5528 
   5529 			/* TX and RX */
   5530 			for (i = 0; i < sc->sc_nqueues; i++) {
   5531 				wmq = &sc->sc_queue[i];
   5532 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5533 				    EITR_TX_QUEUE(wmq->wmq_id)
   5534 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5535 			}
   5536 			/* Link status */
   5537 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5538 			    EITR_OTHER);
   5539 		} else if (sc->sc_type == WM_T_82574) {
   5540 			/* Interrupt control */
   5541 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5542 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5543 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5544 
   5545 			/*
    5546 			 * Work around an issue with spurious interrupts
    5547 			 * in MSI-X mode.
    5548 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5549 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5550 			 */
   5551 			reg = CSR_READ(sc, WMREG_RFCTL);
   5552 			reg |= WMREG_RFCTL_ACKDIS;
   5553 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5554 
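			/*
			 * The 82574's IVAR packs, for each TX/RX queue cause
			 * and for the "other" cause, a vector number plus a
			 * valid bit; build the whole word here and write it
			 * once below.
			 */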
   5555 			ivar = 0;
   5556 			/* TX and RX */
   5557 			for (i = 0; i < sc->sc_nqueues; i++) {
   5558 				wmq = &sc->sc_queue[i];
   5559 				qid = wmq->wmq_id;
   5560 				qintr_idx = wmq->wmq_intr_idx;
   5561 
   5562 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5563 				    IVAR_TX_MASK_Q_82574(qid));
   5564 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5565 				    IVAR_RX_MASK_Q_82574(qid));
   5566 			}
   5567 			/* Link status */
   5568 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5569 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5570 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5571 		} else {
   5572 			/* Interrupt control */
   5573 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5574 			    | GPIE_EIAME | GPIE_PBA);
   5575 
   5576 			switch (sc->sc_type) {
   5577 			case WM_T_82580:
   5578 			case WM_T_I350:
   5579 			case WM_T_I354:
   5580 			case WM_T_I210:
   5581 			case WM_T_I211:
   5582 				/* TX and RX */
   5583 				for (i = 0; i < sc->sc_nqueues; i++) {
   5584 					wmq = &sc->sc_queue[i];
   5585 					qid = wmq->wmq_id;
   5586 					qintr_idx = wmq->wmq_intr_idx;
   5587 
   5588 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5589 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5590 					ivar |= __SHIFTIN((qintr_idx
   5591 						| IVAR_VALID),
   5592 					    IVAR_TX_MASK_Q(qid));
   5593 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5594 					ivar |= __SHIFTIN((qintr_idx
   5595 						| IVAR_VALID),
   5596 					    IVAR_RX_MASK_Q(qid));
   5597 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5598 				}
   5599 				break;
   5600 			case WM_T_82576:
   5601 				/* TX and RX */
   5602 				for (i = 0; i < sc->sc_nqueues; i++) {
   5603 					wmq = &sc->sc_queue[i];
   5604 					qid = wmq->wmq_id;
   5605 					qintr_idx = wmq->wmq_intr_idx;
   5606 
   5607 					ivar = CSR_READ(sc,
   5608 					    WMREG_IVAR_Q_82576(qid));
   5609 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5610 					ivar |= __SHIFTIN((qintr_idx
   5611 						| IVAR_VALID),
   5612 					    IVAR_TX_MASK_Q_82576(qid));
   5613 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5614 					ivar |= __SHIFTIN((qintr_idx
   5615 						| IVAR_VALID),
   5616 					    IVAR_RX_MASK_Q_82576(qid));
   5617 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5618 					    ivar);
   5619 				}
   5620 				break;
   5621 			default:
   5622 				break;
   5623 			}
   5624 
   5625 			/* Link status */
   5626 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5627 			    IVAR_MISC_OTHER);
   5628 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5629 		}
   5630 
   5631 		if (wm_is_using_multiqueue(sc)) {
   5632 			wm_init_rss(sc);
   5633 
    5634 			/*
    5635 			 * NOTE: Receive Full-Packet Checksum Offload
    5636 			 * is mutually exclusive with Multiqueue. However,
    5637 			 * this is not the same as TCP/IP checksum offload,
    5638 			 * which still works.
    5639 			 */
   5640 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5641 			reg |= RXCSUM_PCSD;
   5642 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5643 		}
   5644 	}
   5645 
   5646 	/* Set up the interrupt registers. */
   5647 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5648 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5649 	    ICR_RXO | ICR_RXT0;
   5650 	if (wm_is_using_msix(sc)) {
   5651 		uint32_t mask;
   5652 		struct wm_queue *wmq;
   5653 
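		/*
		 * Build the auto-clear/auto-mask bitmap: one bit per TX/RX
		 * queue cause on the 82574/82575, or one bit per MSI-X
		 * vector on newer NEWQUEUE controllers, plus the link/other
		 * cause.
		 */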
   5654 		switch (sc->sc_type) {
   5655 		case WM_T_82574:
   5656 			mask = 0;
   5657 			for (i = 0; i < sc->sc_nqueues; i++) {
   5658 				wmq = &sc->sc_queue[i];
   5659 				mask |= ICR_TXQ(wmq->wmq_id);
   5660 				mask |= ICR_RXQ(wmq->wmq_id);
   5661 			}
   5662 			mask |= ICR_OTHER;
   5663 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5664 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5665 			break;
   5666 		default:
   5667 			if (sc->sc_type == WM_T_82575) {
   5668 				mask = 0;
   5669 				for (i = 0; i < sc->sc_nqueues; i++) {
   5670 					wmq = &sc->sc_queue[i];
   5671 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5672 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5673 				}
   5674 				mask |= EITR_OTHER;
   5675 			} else {
   5676 				mask = 0;
   5677 				for (i = 0; i < sc->sc_nqueues; i++) {
   5678 					wmq = &sc->sc_queue[i];
   5679 					mask |= 1 << wmq->wmq_intr_idx;
   5680 				}
   5681 				mask |= 1 << sc->sc_link_intr_idx;
   5682 			}
   5683 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5684 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5685 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5686 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5687 			break;
   5688 		}
   5689 	} else
   5690 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5691 
   5692 	/* Set up the inter-packet gap. */
   5693 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5694 
   5695 	if (sc->sc_type >= WM_T_82543) {
   5696 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5697 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5698 			wm_itrs_writereg(sc, wmq);
   5699 		}
   5700 		/*
    5701 		 * Link interrupts occur much less frequently than TX
    5702 		 * and RX interrupts, so we don't tune the
    5703 		 * EITR(WM_MSIX_LINKINTR_IDX) value as FreeBSD's
    5704 		 * if_igb does.
   5705 		 */
   5706 	}
   5707 
   5708 	/* Set the VLAN ethernetype. */
   5709 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5710 
   5711 	/*
   5712 	 * Set up the transmit control register; we start out with
    5713 	 * a collision distance suitable for FDX, but update it when
   5714 	 * we resolve the media type.
   5715 	 */
   5716 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5717 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5718 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5719 	if (sc->sc_type >= WM_T_82571)
   5720 		sc->sc_tctl |= TCTL_MULR;
   5721 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5722 
   5723 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5724 		/* Write TDT after TCTL.EN is set. See the document. */
   5725 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5726 	}
   5727 
   5728 	if (sc->sc_type == WM_T_80003) {
   5729 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5730 		reg &= ~TCTL_EXT_GCEX_MASK;
   5731 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5732 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5733 	}
   5734 
   5735 	/* Set the media. */
   5736 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5737 		goto out;
   5738 
   5739 	/* Configure for OS presence */
   5740 	wm_init_manageability(sc);
   5741 
   5742 	/*
   5743 	 * Set up the receive control register; we actually program
   5744 	 * the register when we set the receive filter.  Use multicast
   5745 	 * address offset type 0.
   5746 	 *
   5747 	 * Only the i82544 has the ability to strip the incoming
   5748 	 * CRC, so we don't enable that feature.
   5749 	 */
   5750 	sc->sc_mchash_type = 0;
   5751 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5752 	    | RCTL_MO(sc->sc_mchash_type);
   5753 
   5754 	/*
    5755 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5756 	 */
   5757 	if (sc->sc_type == WM_T_82574)
   5758 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5759 
   5760 	/*
   5761 	 * The I350 has a bug where it always strips the CRC whether
    5762 	 * asked to or not, so ask for the stripped CRC here and cope in rxeof.
   5763 	 */
   5764 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5765 	    || (sc->sc_type == WM_T_I210))
   5766 		sc->sc_rctl |= RCTL_SECRC;
   5767 
   5768 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5769 	    && (ifp->if_mtu > ETHERMTU)) {
   5770 		sc->sc_rctl |= RCTL_LPE;
   5771 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5772 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5773 	}
   5774 
   5775 	if (MCLBYTES == 2048) {
   5776 		sc->sc_rctl |= RCTL_2k;
   5777 	} else {
   5778 		if (sc->sc_type >= WM_T_82543) {
   5779 			switch (MCLBYTES) {
   5780 			case 4096:
   5781 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5782 				break;
   5783 			case 8192:
   5784 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5785 				break;
   5786 			case 16384:
   5787 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5788 				break;
   5789 			default:
   5790 				panic("wm_init: MCLBYTES %d unsupported",
   5791 				    MCLBYTES);
   5792 				break;
   5793 			}
    5794 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5795 	}
   5796 
   5797 	/* Enable ECC */
   5798 	switch (sc->sc_type) {
   5799 	case WM_T_82571:
   5800 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5801 		reg |= PBA_ECC_CORR_EN;
   5802 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5803 		break;
   5804 	case WM_T_PCH_LPT:
   5805 	case WM_T_PCH_SPT:
   5806 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5807 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5808 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5809 
   5810 		sc->sc_ctrl |= CTRL_MEHE;
   5811 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5812 		break;
   5813 	default:
   5814 		break;
   5815 	}
   5816 
   5817 	/*
   5818 	 * Set the receive filter.
   5819 	 *
   5820 	 * For 82575 and 82576, the RX descriptors must be initialized after
   5821 	 * the setting of RCTL.EN in wm_set_filter()
   5822 	 */
   5823 	wm_set_filter(sc);
   5824 
    5825 	/* On the 82575 and later, set RDT only if RX is enabled */
   5826 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5827 		int qidx;
   5828 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5829 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5830 			for (i = 0; i < WM_NRXDESC; i++) {
   5831 				mutex_enter(rxq->rxq_lock);
   5832 				wm_init_rxdesc(rxq, i);
   5833 				mutex_exit(rxq->rxq_lock);
   5835 			}
   5836 		}
   5837 	}
   5838 
   5839 	wm_unset_stopping_flags(sc);
   5840 
   5841 	/* Start the one second link check clock. */
   5842 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5843 
   5844 	/* ...all done! */
   5845 	ifp->if_flags |= IFF_RUNNING;
   5846 	ifp->if_flags &= ~IFF_OACTIVE;
   5847 
   5848  out:
   5849 	sc->sc_if_flags = ifp->if_flags;
   5850 	if (error)
   5851 		log(LOG_ERR, "%s: interface not running\n",
   5852 		    device_xname(sc->sc_dev));
   5853 	return error;
   5854 }
   5855 
   5856 /*
   5857  * wm_stop:		[ifnet interface function]
   5858  *
   5859  *	Stop transmission on the interface.
   5860  */
   5861 static void
   5862 wm_stop(struct ifnet *ifp, int disable)
   5863 {
   5864 	struct wm_softc *sc = ifp->if_softc;
   5865 
   5866 	WM_CORE_LOCK(sc);
   5867 	wm_stop_locked(ifp, disable);
   5868 	WM_CORE_UNLOCK(sc);
   5869 }
   5870 
   5871 static void
   5872 wm_stop_locked(struct ifnet *ifp, int disable)
   5873 {
   5874 	struct wm_softc *sc = ifp->if_softc;
   5875 	struct wm_txsoft *txs;
   5876 	int i, qidx;
   5877 
   5878 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5879 		device_xname(sc->sc_dev), __func__));
   5880 	KASSERT(WM_CORE_LOCKED(sc));
   5881 
   5882 	wm_set_stopping_flags(sc);
   5883 
   5884 	/* Stop the one second clock. */
   5885 	callout_stop(&sc->sc_tick_ch);
   5886 
   5887 	/* Stop the 82547 Tx FIFO stall check timer. */
   5888 	if (sc->sc_type == WM_T_82547)
   5889 		callout_stop(&sc->sc_txfifo_ch);
   5890 
   5891 	if (sc->sc_flags & WM_F_HAS_MII) {
   5892 		/* Down the MII. */
   5893 		mii_down(&sc->sc_mii);
   5894 	} else {
   5895 #if 0
   5896 		/* Should we clear PHY's status properly? */
   5897 		wm_reset(sc);
   5898 #endif
   5899 	}
   5900 
   5901 	/* Stop the transmit and receive processes. */
   5902 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5903 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5904 	sc->sc_rctl &= ~RCTL_EN;
   5905 
   5906 	/*
   5907 	 * Clear the interrupt mask to ensure the device cannot assert its
   5908 	 * interrupt line.
   5909 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5910 	 * service any currently pending or shared interrupt.
   5911 	 */
   5912 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5913 	sc->sc_icr = 0;
   5914 	if (wm_is_using_msix(sc)) {
   5915 		if (sc->sc_type != WM_T_82574) {
   5916 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5917 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5918 		} else
   5919 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5920 	}
   5921 
   5922 	/* Release any queued transmit buffers. */
   5923 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5924 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5925 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5926 		mutex_enter(txq->txq_lock);
   5927 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5928 			txs = &txq->txq_soft[i];
   5929 			if (txs->txs_mbuf != NULL) {
   5930 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5931 				m_freem(txs->txs_mbuf);
   5932 				txs->txs_mbuf = NULL;
   5933 			}
   5934 		}
   5935 		mutex_exit(txq->txq_lock);
   5936 	}
   5937 
   5938 	/* Mark the interface as down and cancel the watchdog timer. */
   5939 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5940 	ifp->if_timer = 0;
   5941 
   5942 	if (disable) {
   5943 		for (i = 0; i < sc->sc_nqueues; i++) {
   5944 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5945 			mutex_enter(rxq->rxq_lock);
   5946 			wm_rxdrain(rxq);
   5947 			mutex_exit(rxq->rxq_lock);
   5948 		}
   5949 	}
   5950 
   5951 #if 0 /* notyet */
   5952 	if (sc->sc_type >= WM_T_82544)
   5953 		CSR_WRITE(sc, WMREG_WUC, 0);
   5954 #endif
   5955 }
   5956 
   5957 static void
   5958 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5959 {
   5960 	struct mbuf *m;
   5961 	int i;
   5962 
   5963 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5964 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5965 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5966 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5967 		    m->m_data, m->m_len, m->m_flags);
   5968 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5969 	    i, i == 1 ? "" : "s");
   5970 }
   5971 
   5972 /*
   5973  * wm_82547_txfifo_stall:
   5974  *
   5975  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5976  *	reset the FIFO pointers, and restart packet transmission.
   5977  */
   5978 static void
   5979 wm_82547_txfifo_stall(void *arg)
   5980 {
   5981 	struct wm_softc *sc = arg;
   5982 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5983 
   5984 	mutex_enter(txq->txq_lock);
   5985 
   5986 	if (txq->txq_stopping)
   5987 		goto out;
   5988 
   5989 	if (txq->txq_fifo_stall) {
   5990 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5991 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5992 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5993 			/*
   5994 			 * Packets have drained.  Stop transmitter, reset
   5995 			 * FIFO pointers, restart transmitter, and kick
   5996 			 * the packet queue.
   5997 			 */
   5998 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5999 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6000 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6001 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6002 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6003 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6004 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6005 			CSR_WRITE_FLUSH(sc);
   6006 
   6007 			txq->txq_fifo_head = 0;
   6008 			txq->txq_fifo_stall = 0;
   6009 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6010 		} else {
   6011 			/*
   6012 			 * Still waiting for packets to drain; try again in
   6013 			 * another tick.
   6014 			 */
   6015 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6016 		}
   6017 	}
   6018 
   6019 out:
   6020 	mutex_exit(txq->txq_lock);
   6021 }
   6022 
   6023 /*
   6024  * wm_82547_txfifo_bugchk:
   6025  *
   6026  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6027  *	prevent enqueueing a packet that would wrap around the end
    6028  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   6029  *
   6030  *	We do this by checking the amount of space before the end
   6031  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6032  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6033  *	the internal FIFO pointers to the beginning, and restart
   6034  *	transmission on the interface.
   6035  */
   6036 #define	WM_FIFO_HDR		0x10
   6037 #define	WM_82547_PAD_LEN	0x3e0
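/*
 * Worked example (hypothetical sizes): with txq_fifo_size 0x2000 and
 * txq_fifo_head 0x1f00, space is 0x100.  A 256-byte packet rounds up to
 * len 0x110; since 0x110 < WM_82547_PAD_LEN + 0x100, it is sent, and
 * txq_fifo_head wraps from 0x2010 to 0x0010.
 */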
   6038 static int
   6039 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6040 {
   6041 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6042 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6043 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6044 
   6045 	/* Just return if already stalled. */
   6046 	if (txq->txq_fifo_stall)
   6047 		return 1;
   6048 
   6049 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6050 		/* Stall only occurs in half-duplex mode. */
   6051 		goto send_packet;
   6052 	}
   6053 
   6054 	if (len >= WM_82547_PAD_LEN + space) {
   6055 		txq->txq_fifo_stall = 1;
   6056 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6057 		return 1;
   6058 	}
   6059 
   6060  send_packet:
   6061 	txq->txq_fifo_head += len;
   6062 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6063 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6064 
   6065 	return 0;
   6066 }
   6067 
   6068 static int
   6069 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6070 {
   6071 	int error;
   6072 
   6073 	/*
   6074 	 * Allocate the control data structures, and create and load the
   6075 	 * DMA map for it.
   6076 	 *
   6077 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6078 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6079 	 * both sets within the same 4G segment.
   6080 	 */
   6081 	if (sc->sc_type < WM_T_82544)
   6082 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6083 	else
   6084 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6085 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6086 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6087 	else
   6088 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6089 
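	/*
	 * The (bus_size_t)0x100000000ULL boundary argument below is what
	 * enforces the "same 4G segment" requirement noted above.
	 */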
   6090 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6091 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6092 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6093 		aprint_error_dev(sc->sc_dev,
   6094 		    "unable to allocate TX control data, error = %d\n",
   6095 		    error);
   6096 		goto fail_0;
   6097 	}
   6098 
   6099 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6100 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6101 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6102 		aprint_error_dev(sc->sc_dev,
   6103 		    "unable to map TX control data, error = %d\n", error);
   6104 		goto fail_1;
   6105 	}
   6106 
   6107 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6108 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6109 		aprint_error_dev(sc->sc_dev,
   6110 		    "unable to create TX control data DMA map, error = %d\n",
   6111 		    error);
   6112 		goto fail_2;
   6113 	}
   6114 
   6115 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6116 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6117 		aprint_error_dev(sc->sc_dev,
   6118 		    "unable to load TX control data DMA map, error = %d\n",
   6119 		    error);
   6120 		goto fail_3;
   6121 	}
   6122 
   6123 	return 0;
   6124 
   6125  fail_3:
   6126 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6127  fail_2:
   6128 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6129 	    WM_TXDESCS_SIZE(txq));
   6130  fail_1:
   6131 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6132  fail_0:
   6133 	return error;
   6134 }
   6135 
   6136 static void
   6137 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6138 {
   6139 
   6140 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6141 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6142 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6143 	    WM_TXDESCS_SIZE(txq));
   6144 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6145 }
   6146 
   6147 static int
   6148 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6149 {
   6150 	int error;
   6151 	size_t rxq_descs_size;
   6152 
   6153 	/*
   6154 	 * Allocate the control data structures, and create and load the
   6155 	 * DMA map for it.
   6156 	 *
   6157 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6158 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6159 	 * both sets within the same 4G segment.
   6160 	 */
   6161 	rxq->rxq_ndesc = WM_NRXDESC;
   6162 	if (sc->sc_type == WM_T_82574)
   6163 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6164 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6165 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6166 	else
   6167 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6168 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6169 
   6170 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6171 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6172 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6173 		aprint_error_dev(sc->sc_dev,
   6174 		    "unable to allocate RX control data, error = %d\n",
   6175 		    error);
   6176 		goto fail_0;
   6177 	}
   6178 
   6179 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6180 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6181 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6182 		aprint_error_dev(sc->sc_dev,
   6183 		    "unable to map RX control data, error = %d\n", error);
   6184 		goto fail_1;
   6185 	}
   6186 
   6187 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6188 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6189 		aprint_error_dev(sc->sc_dev,
   6190 		    "unable to create RX control data DMA map, error = %d\n",
   6191 		    error);
   6192 		goto fail_2;
   6193 	}
   6194 
   6195 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6196 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6197 		aprint_error_dev(sc->sc_dev,
   6198 		    "unable to load RX control data DMA map, error = %d\n",
   6199 		    error);
   6200 		goto fail_3;
   6201 	}
   6202 
   6203 	return 0;
   6204 
   6205  fail_3:
   6206 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6207  fail_2:
   6208 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6209 	    rxq_descs_size);
   6210  fail_1:
   6211 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6212  fail_0:
   6213 	return error;
   6214 }
   6215 
   6216 static void
   6217 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6218 {
   6219 
   6220 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6221 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6222 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6223 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6224 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6225 }
   6226 
   6227 
   6228 static int
   6229 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6230 {
   6231 	int i, error;
   6232 
   6233 	/* Create the transmit buffer DMA maps. */
   6234 	WM_TXQUEUELEN(txq) =
   6235 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6236 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6237 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6238 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6239 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6240 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6241 			aprint_error_dev(sc->sc_dev,
   6242 			    "unable to create Tx DMA map %d, error = %d\n",
   6243 			    i, error);
   6244 			goto fail;
   6245 		}
   6246 	}
   6247 
   6248 	return 0;
   6249 
   6250  fail:
   6251 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6252 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6253 			bus_dmamap_destroy(sc->sc_dmat,
   6254 			    txq->txq_soft[i].txs_dmamap);
   6255 	}
   6256 	return error;
   6257 }
   6258 
   6259 static void
   6260 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6261 {
   6262 	int i;
   6263 
   6264 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6265 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6266 			bus_dmamap_destroy(sc->sc_dmat,
   6267 			    txq->txq_soft[i].txs_dmamap);
   6268 	}
   6269 }
   6270 
   6271 static int
   6272 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6273 {
   6274 	int i, error;
   6275 
   6276 	/* Create the receive buffer DMA maps. */
   6277 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6278 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6279 			    MCLBYTES, 0, 0,
   6280 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6281 			aprint_error_dev(sc->sc_dev,
    6282 			    "unable to create Rx DMA map %d, error = %d\n",
   6283 			    i, error);
   6284 			goto fail;
   6285 		}
   6286 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6287 	}
   6288 
   6289 	return 0;
   6290 
   6291  fail:
   6292 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6293 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6294 			bus_dmamap_destroy(sc->sc_dmat,
   6295 			    rxq->rxq_soft[i].rxs_dmamap);
   6296 	}
   6297 	return error;
   6298 }
   6299 
   6300 static void
   6301 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6302 {
   6303 	int i;
   6304 
   6305 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6306 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6307 			bus_dmamap_destroy(sc->sc_dmat,
   6308 			    rxq->rxq_soft[i].rxs_dmamap);
   6309 	}
   6310 }
   6311 
   6312 /*
    6313  * wm_alloc_txrx_queues:
    6314  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6315  */
   6316 static int
   6317 wm_alloc_txrx_queues(struct wm_softc *sc)
   6318 {
   6319 	int i, error, tx_done, rx_done;
   6320 
   6321 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6322 	    KM_SLEEP);
   6323 	if (sc->sc_queue == NULL) {
    6324 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6325 		error = ENOMEM;
   6326 		goto fail_0;
   6327 	}
   6328 
   6329 	/*
   6330 	 * For transmission
   6331 	 */
   6332 	error = 0;
   6333 	tx_done = 0;
   6334 	for (i = 0; i < sc->sc_nqueues; i++) {
   6335 #ifdef WM_EVENT_COUNTERS
   6336 		int j;
   6337 		const char *xname;
   6338 #endif
   6339 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6340 		txq->txq_sc = sc;
   6341 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6342 
   6343 		error = wm_alloc_tx_descs(sc, txq);
   6344 		if (error)
   6345 			break;
   6346 		error = wm_alloc_tx_buffer(sc, txq);
   6347 		if (error) {
   6348 			wm_free_tx_descs(sc, txq);
   6349 			break;
   6350 		}
   6351 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6352 		if (txq->txq_interq == NULL) {
   6353 			wm_free_tx_descs(sc, txq);
   6354 			wm_free_tx_buffer(sc, txq);
   6355 			error = ENOMEM;
   6356 			break;
   6357 		}
   6358 
   6359 #ifdef WM_EVENT_COUNTERS
   6360 		xname = device_xname(sc->sc_dev);
   6361 
   6362 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6363 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6364 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6365 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6366 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6367 
   6368 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6369 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6370 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6371 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6372 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6373 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6374 
   6375 		for (j = 0; j < WM_NTXSEGS; j++) {
   6376 			snprintf(txq->txq_txseg_evcnt_names[j],
   6377 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6378 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6379 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6380 		}
   6381 
   6382 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6383 
   6384 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6385 #endif /* WM_EVENT_COUNTERS */
   6386 
   6387 		tx_done++;
   6388 	}
   6389 	if (error)
   6390 		goto fail_1;
   6391 
   6392 	/*
    6393 	 * For receive
   6394 	 */
   6395 	error = 0;
   6396 	rx_done = 0;
   6397 	for (i = 0; i < sc->sc_nqueues; i++) {
   6398 #ifdef WM_EVENT_COUNTERS
   6399 		const char *xname;
   6400 #endif
   6401 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6402 		rxq->rxq_sc = sc;
   6403 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6404 
   6405 		error = wm_alloc_rx_descs(sc, rxq);
   6406 		if (error)
   6407 			break;
   6408 
   6409 		error = wm_alloc_rx_buffer(sc, rxq);
   6410 		if (error) {
   6411 			wm_free_rx_descs(sc, rxq);
   6412 			break;
   6413 		}
   6414 
   6415 #ifdef WM_EVENT_COUNTERS
   6416 		xname = device_xname(sc->sc_dev);
   6417 
   6418 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6419 
   6420 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6421 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6422 #endif /* WM_EVENT_COUNTERS */
   6423 
   6424 		rx_done++;
   6425 	}
   6426 	if (error)
   6427 		goto fail_2;
   6428 
   6429 	return 0;
   6430 
   6431  fail_2:
   6432 	for (i = 0; i < rx_done; i++) {
   6433 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6434 		wm_free_rx_buffer(sc, rxq);
   6435 		wm_free_rx_descs(sc, rxq);
   6436 		if (rxq->rxq_lock)
   6437 			mutex_obj_free(rxq->rxq_lock);
   6438 	}
   6439  fail_1:
   6440 	for (i = 0; i < tx_done; i++) {
   6441 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6442 		pcq_destroy(txq->txq_interq);
   6443 		wm_free_tx_buffer(sc, txq);
   6444 		wm_free_tx_descs(sc, txq);
   6445 		if (txq->txq_lock)
   6446 			mutex_obj_free(txq->txq_lock);
   6447 	}
   6448 
   6449 	kmem_free(sc->sc_queue,
   6450 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6451  fail_0:
   6452 	return error;
   6453 }
   6454 
   6455 /*
    6456  * wm_free_txrx_queues:
    6457  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6458  */
   6459 static void
   6460 wm_free_txrx_queues(struct wm_softc *sc)
   6461 {
   6462 	int i;
   6463 
   6464 	for (i = 0; i < sc->sc_nqueues; i++) {
   6465 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6466 
   6467 #ifdef WM_EVENT_COUNTERS
   6468 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6469 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6470 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6471 #endif /* WM_EVENT_COUNTERS */
   6472 
   6473 		wm_free_rx_buffer(sc, rxq);
   6474 		wm_free_rx_descs(sc, rxq);
   6475 		if (rxq->rxq_lock)
   6476 			mutex_obj_free(rxq->rxq_lock);
   6477 	}
   6478 
   6479 	for (i = 0; i < sc->sc_nqueues; i++) {
   6480 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6481 		struct mbuf *m;
   6482 #ifdef WM_EVENT_COUNTERS
   6483 		int j;
   6484 
   6485 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6486 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6487 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6488 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6489 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6490 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6491 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6492 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6493 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6494 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6495 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6496 
   6497 		for (j = 0; j < WM_NTXSEGS; j++)
   6498 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6499 
   6500 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6501 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6502 #endif /* WM_EVENT_COUNTERS */
   6503 
   6504 		/* drain txq_interq */
   6505 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6506 			m_freem(m);
   6507 		pcq_destroy(txq->txq_interq);
   6508 
   6509 		wm_free_tx_buffer(sc, txq);
   6510 		wm_free_tx_descs(sc, txq);
   6511 		if (txq->txq_lock)
   6512 			mutex_obj_free(txq->txq_lock);
   6513 	}
   6514 
   6515 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6516 }
   6517 
   6518 static void
   6519 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6520 {
   6521 
   6522 	KASSERT(mutex_owned(txq->txq_lock));
   6523 
   6524 	/* Initialize the transmit descriptor ring. */
   6525 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6526 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6527 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6528 	txq->txq_free = WM_NTXDESC(txq);
   6529 	txq->txq_next = 0;
   6530 }
   6531 
   6532 static void
   6533 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6534     struct wm_txqueue *txq)
   6535 {
   6536 
   6537 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6538 		device_xname(sc->sc_dev), __func__));
   6539 	KASSERT(mutex_owned(txq->txq_lock));
   6540 
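         	/*
         	 * Program the Tx descriptor ring: base address (TDBAH/TDBAL),
         	 * ring size in bytes (TDLEN) and the head/tail pointers
         	 * (TDH/TDT), using the pre-82543 register layout where needed.
         	 */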
   6541 	if (sc->sc_type < WM_T_82543) {
   6542 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6543 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6544 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6545 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6546 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6547 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6548 	} else {
   6549 		int qid = wmq->wmq_id;
   6550 
   6551 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6552 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6553 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6554 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6555 
   6556 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6557 			/*
   6558 			 * Don't write TDT before TCTL.EN is set.
    6559 			 * See the documentation.
   6560 			 */
   6561 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6562 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6563 			    | TXDCTL_WTHRESH(0));
   6564 		else {
   6565 			/* XXX should update with AIM? */
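         			/*
         			 * XXX wmq_itr is kept in ITR units (256 ns);
         			 * TIDV/TADV count in 1.024 us units, hence
         			 * the division by four.
         			 */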
   6566 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6567 			if (sc->sc_type >= WM_T_82540) {
   6568 				/* should be same */
   6569 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6570 			}
   6571 
   6572 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6573 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6574 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6575 		}
   6576 	}
   6577 }
   6578 
   6579 static void
   6580 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6581 {
   6582 	int i;
   6583 
   6584 	KASSERT(mutex_owned(txq->txq_lock));
   6585 
   6586 	/* Initialize the transmit job descriptors. */
   6587 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6588 		txq->txq_soft[i].txs_mbuf = NULL;
   6589 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6590 	txq->txq_snext = 0;
   6591 	txq->txq_sdirty = 0;
   6592 }
   6593 
   6594 static void
   6595 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6596     struct wm_txqueue *txq)
   6597 {
   6598 
   6599 	KASSERT(mutex_owned(txq->txq_lock));
   6600 
   6601 	/*
   6602 	 * Set up some register offsets that are different between
   6603 	 * the i82542 and the i82543 and later chips.
   6604 	 */
   6605 	if (sc->sc_type < WM_T_82543)
   6606 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6607 	else
   6608 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6609 
   6610 	wm_init_tx_descs(sc, txq);
   6611 	wm_init_tx_regs(sc, wmq, txq);
   6612 	wm_init_tx_buffer(sc, txq);
   6613 }
   6614 
   6615 static void
   6616 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6617     struct wm_rxqueue *rxq)
   6618 {
   6619 
   6620 	KASSERT(mutex_owned(rxq->rxq_lock));
   6621 
   6622 	/*
   6623 	 * Initialize the receive descriptor and receive job
   6624 	 * descriptor rings.
   6625 	 */
   6626 	if (sc->sc_type < WM_T_82543) {
   6627 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6628 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6629 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6630 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6631 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6632 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6633 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6634 
   6635 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6636 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6637 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6638 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6639 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6640 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6641 	} else {
   6642 		int qid = wmq->wmq_id;
   6643 
   6644 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6645 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6646 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6647 
   6648 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
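         			/*
         			 * SRRCTL's buffer size field counts in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
         			 * must be a multiple of that unit.
         			 */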
   6649 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6650 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6651 
   6652 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6653 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6654 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6655 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6656 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6657 			    | RXDCTL_WTHRESH(1));
   6658 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6659 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6660 		} else {
   6661 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6662 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6663 			/* XXX should update with AIM? */
   6664 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6665 			/* MUST be same */
   6666 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6667 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6668 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6669 		}
   6670 	}
   6671 }
   6672 
   6673 static int
   6674 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6675 {
   6676 	struct wm_rxsoft *rxs;
   6677 	int error, i;
   6678 
   6679 	KASSERT(mutex_owned(rxq->rxq_lock));
   6680 
   6681 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6682 		rxs = &rxq->rxq_soft[i];
   6683 		if (rxs->rxs_mbuf == NULL) {
   6684 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6685 				log(LOG_ERR, "%s: unable to allocate or map "
   6686 				    "rx buffer %d, error = %d\n",
   6687 				    device_xname(sc->sc_dev), i, error);
   6688 				/*
   6689 				 * XXX Should attempt to run with fewer receive
   6690 				 * XXX buffers instead of just failing.
   6691 				 */
   6692 				wm_rxdrain(rxq);
   6693 				return ENOMEM;
   6694 			}
   6695 		} else {
   6696 			/*
   6697 			 * For 82575 and 82576, the RX descriptors must be
   6698 			 * initialized after the setting of RCTL.EN in
   6699 			 * wm_set_filter()
   6700 			 */
   6701 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6702 				wm_init_rxdesc(rxq, i);
   6703 		}
   6704 	}
   6705 	rxq->rxq_ptr = 0;
   6706 	rxq->rxq_discard = 0;
   6707 	WM_RXCHAIN_RESET(rxq);
   6708 
   6709 	return 0;
   6710 }
   6711 
   6712 static int
   6713 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6714     struct wm_rxqueue *rxq)
   6715 {
   6716 
   6717 	KASSERT(mutex_owned(rxq->rxq_lock));
   6718 
   6719 	/*
   6720 	 * Set up some register offsets that are different between
   6721 	 * the i82542 and the i82543 and later chips.
   6722 	 */
   6723 	if (sc->sc_type < WM_T_82543)
   6724 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6725 	else
   6726 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6727 
   6728 	wm_init_rx_regs(sc, wmq, rxq);
   6729 	return wm_init_rx_buffer(sc, rxq);
   6730 }
   6731 
   6732 /*
    6733  * wm_init_txrx_queues:
    6734  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6735  */
   6736 static int
   6737 wm_init_txrx_queues(struct wm_softc *sc)
   6738 {
   6739 	int i, error = 0;
   6740 
   6741 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6742 		device_xname(sc->sc_dev), __func__));
   6743 
   6744 	for (i = 0; i < sc->sc_nqueues; i++) {
   6745 		struct wm_queue *wmq = &sc->sc_queue[i];
   6746 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6747 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6748 
   6749 		/*
   6750 		 * TODO
    6751 		 * Currently, we use a constant value instead of AIM
    6752 		 * (adaptive interrupt moderation).  Furthermore, the interrupt
    6753 		 * interval used for multiqueue (polling mode) is shorter than
    6754 		 * the default value.  More tuning and AIM support are required.
   6755 		 */
   6756 		if (wm_is_using_multiqueue(sc))
   6757 			wmq->wmq_itr = 50;
   6758 		else
   6759 			wmq->wmq_itr = sc->sc_itr_init;
   6760 		wmq->wmq_set_itr = true;
   6761 
   6762 		mutex_enter(txq->txq_lock);
   6763 		wm_init_tx_queue(sc, wmq, txq);
   6764 		mutex_exit(txq->txq_lock);
   6765 
   6766 		mutex_enter(rxq->rxq_lock);
   6767 		error = wm_init_rx_queue(sc, wmq, rxq);
   6768 		mutex_exit(rxq->rxq_lock);
   6769 		if (error)
   6770 			break;
   6771 	}
   6772 
   6773 	return error;
   6774 }
   6775 
   6776 /*
   6777  * wm_tx_offload:
   6778  *
   6779  *	Set up TCP/IP checksumming parameters for the
   6780  *	specified packet.
   6781  */
   6782 static int
   6783 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6784     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6785 {
   6786 	struct mbuf *m0 = txs->txs_mbuf;
   6787 	struct livengood_tcpip_ctxdesc *t;
   6788 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6789 	uint32_t ipcse;
   6790 	struct ether_header *eh;
   6791 	int offset, iphl;
   6792 	uint8_t fields;
   6793 
   6794 	/*
   6795 	 * XXX It would be nice if the mbuf pkthdr had offset
   6796 	 * fields for the protocol headers.
   6797 	 */
   6798 
   6799 	eh = mtod(m0, struct ether_header *);
    6800 	switch (ntohs(eh->ether_type)) {
   6801 	case ETHERTYPE_IP:
   6802 	case ETHERTYPE_IPV6:
   6803 		offset = ETHER_HDR_LEN;
   6804 		break;
   6805 
   6806 	case ETHERTYPE_VLAN:
   6807 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6808 		break;
   6809 
   6810 	default:
   6811 		/*
   6812 		 * Don't support this protocol or encapsulation.
   6813 		 */
   6814 		*fieldsp = 0;
   6815 		*cmdp = 0;
   6816 		return 0;
   6817 	}
   6818 
   6819 	if ((m0->m_pkthdr.csum_flags &
   6820 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6821 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6822 	} else {
   6823 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6824 	}
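         	/* IPCSE is the offset of the last byte of the IP header. */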
   6825 	ipcse = offset + iphl - 1;
   6826 
   6827 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6828 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6829 	seg = 0;
   6830 	fields = 0;
   6831 
   6832 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6833 		int hlen = offset + iphl;
   6834 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6835 
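         		/*
         		 * For TSO, ip_len/ip6_plen must be cleared and th_sum
         		 * seeded with a pseudo-header checksum that excludes the
         		 * length; the hardware then fills both in for each
         		 * segment it generates.
         		 */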
   6836 		if (__predict_false(m0->m_len <
   6837 				    (hlen + sizeof(struct tcphdr)))) {
   6838 			/*
   6839 			 * TCP/IP headers are not in the first mbuf; we need
   6840 			 * to do this the slow and painful way.  Let's just
   6841 			 * hope this doesn't happen very often.
   6842 			 */
   6843 			struct tcphdr th;
   6844 
   6845 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6846 
   6847 			m_copydata(m0, hlen, sizeof(th), &th);
   6848 			if (v4) {
   6849 				struct ip ip;
   6850 
   6851 				m_copydata(m0, offset, sizeof(ip), &ip);
   6852 				ip.ip_len = 0;
   6853 				m_copyback(m0,
   6854 				    offset + offsetof(struct ip, ip_len),
   6855 				    sizeof(ip.ip_len), &ip.ip_len);
   6856 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6857 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6858 			} else {
   6859 				struct ip6_hdr ip6;
   6860 
   6861 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6862 				ip6.ip6_plen = 0;
   6863 				m_copyback(m0,
   6864 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6865 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6866 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6867 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6868 			}
   6869 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6870 			    sizeof(th.th_sum), &th.th_sum);
   6871 
   6872 			hlen += th.th_off << 2;
   6873 		} else {
   6874 			/*
   6875 			 * TCP/IP headers are in the first mbuf; we can do
   6876 			 * this the easy way.
   6877 			 */
   6878 			struct tcphdr *th;
   6879 
   6880 			if (v4) {
   6881 				struct ip *ip =
   6882 				    (void *)(mtod(m0, char *) + offset);
   6883 				th = (void *)(mtod(m0, char *) + hlen);
   6884 
   6885 				ip->ip_len = 0;
   6886 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6887 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6888 			} else {
   6889 				struct ip6_hdr *ip6 =
   6890 				    (void *)(mtod(m0, char *) + offset);
   6891 				th = (void *)(mtod(m0, char *) + hlen);
   6892 
   6893 				ip6->ip6_plen = 0;
   6894 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6895 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6896 			}
   6897 			hlen += th->th_off << 2;
   6898 		}
   6899 
   6900 		if (v4) {
   6901 			WM_Q_EVCNT_INCR(txq, txtso);
   6902 			cmdlen |= WTX_TCPIP_CMD_IP;
   6903 		} else {
   6904 			WM_Q_EVCNT_INCR(txq, txtso6);
   6905 			ipcse = 0;
   6906 		}
   6907 		cmd |= WTX_TCPIP_CMD_TSE;
   6908 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6909 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6910 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6911 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6912 	}
   6913 
   6914 	/*
   6915 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6916 	 * offload feature, if we load the context descriptor, we
   6917 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6918 	 */
   6919 
   6920 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6921 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6922 	    WTX_TCPIP_IPCSE(ipcse);
   6923 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6924 		WM_Q_EVCNT_INCR(txq, txipsum);
   6925 		fields |= WTX_IXSM;
   6926 	}
   6927 
   6928 	offset += iphl;
   6929 
   6930 	if (m0->m_pkthdr.csum_flags &
   6931 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6932 		WM_Q_EVCNT_INCR(txq, txtusum);
   6933 		fields |= WTX_TXSM;
   6934 		tucs = WTX_TCPIP_TUCSS(offset) |
   6935 		    WTX_TCPIP_TUCSO(offset +
   6936 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6937 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6938 	} else if ((m0->m_pkthdr.csum_flags &
   6939 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6940 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6941 		fields |= WTX_TXSM;
   6942 		tucs = WTX_TCPIP_TUCSS(offset) |
   6943 		    WTX_TCPIP_TUCSO(offset +
   6944 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6945 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6946 	} else {
   6947 		/* Just initialize it to a valid TCP context. */
   6948 		tucs = WTX_TCPIP_TUCSS(offset) |
   6949 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6950 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6951 	}
   6952 
   6953 	/*
    6954 	 * We don't have to write a context descriptor for every packet,
    6955 	 * except on the 82574: the 82574 requires a context descriptor for
    6956 	 * every packet when two descriptor queues are in use.
    6957 	 * Writing a context descriptor for every packet adds overhead,
    6958 	 * but it does not cause problems.
   6959 	 */
   6960 	/* Fill in the context descriptor. */
   6961 	t = (struct livengood_tcpip_ctxdesc *)
   6962 	    &txq->txq_descs[txq->txq_next];
   6963 	t->tcpip_ipcs = htole32(ipcs);
   6964 	t->tcpip_tucs = htole32(tucs);
   6965 	t->tcpip_cmdlen = htole32(cmdlen);
   6966 	t->tcpip_seg = htole32(seg);
   6967 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6968 
   6969 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6970 	txs->txs_ndesc++;
   6971 
   6972 	*cmdp = cmd;
   6973 	*fieldsp = fields;
   6974 
   6975 	return 0;
   6976 }
   6977 
   6978 static inline int
   6979 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6980 {
   6981 	struct wm_softc *sc = ifp->if_softc;
   6982 	u_int cpuid = cpu_index(curcpu());
   6983 
   6984 	/*
    6985 	 * Currently, a simple distribution strategy: offset the CPU
    6986 	 * index so a CPU tends to send on the queue whose interrupt
    6987 	 * it handles.  TODO: distribute by flowid (RSS hash value).
    6988 	 */
    6989 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6990 }
   6991 
   6992 /*
   6993  * wm_start:		[ifnet interface function]
   6994  *
   6995  *	Start packet transmission on the interface.
   6996  */
   6997 static void
   6998 wm_start(struct ifnet *ifp)
   6999 {
   7000 	struct wm_softc *sc = ifp->if_softc;
   7001 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7002 
   7003 #ifdef WM_MPSAFE
   7004 	KASSERT(if_is_mpsafe(ifp));
   7005 #endif
   7006 	/*
   7007 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7008 	 */
   7009 
   7010 	mutex_enter(txq->txq_lock);
   7011 	if (!txq->txq_stopping)
   7012 		wm_start_locked(ifp);
   7013 	mutex_exit(txq->txq_lock);
   7014 }
   7015 
   7016 static void
   7017 wm_start_locked(struct ifnet *ifp)
   7018 {
   7019 	struct wm_softc *sc = ifp->if_softc;
   7020 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7021 
   7022 	wm_send_common_locked(ifp, txq, false);
   7023 }
   7024 
   7025 static int
   7026 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7027 {
   7028 	int qid;
   7029 	struct wm_softc *sc = ifp->if_softc;
   7030 	struct wm_txqueue *txq;
   7031 
   7032 	qid = wm_select_txqueue(ifp, m);
   7033 	txq = &sc->sc_queue[qid].wmq_txq;
   7034 
   7035 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7036 		m_freem(m);
   7037 		WM_Q_EVCNT_INCR(txq, txdrop);
   7038 		return ENOBUFS;
   7039 	}
   7040 
   7041 	/*
   7042 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7043 	 */
   7044 	ifp->if_obytes += m->m_pkthdr.len;
   7045 	if (m->m_flags & M_MCAST)
   7046 		ifp->if_omcasts++;
   7047 
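         	/*
         	 * If mutex_tryenter() fails, the packet just enqueued will
         	 * still be picked up by wm_deferred_start_locked(); see the
         	 * analogous comment in wm_nq_transmit().
         	 */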
   7048 	if (mutex_tryenter(txq->txq_lock)) {
   7049 		if (!txq->txq_stopping)
   7050 			wm_transmit_locked(ifp, txq);
   7051 		mutex_exit(txq->txq_lock);
   7052 	}
   7053 
   7054 	return 0;
   7055 }
   7056 
   7057 static void
   7058 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7059 {
   7060 
   7061 	wm_send_common_locked(ifp, txq, true);
   7062 }
   7063 
   7064 static void
   7065 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7066     bool is_transmit)
   7067 {
   7068 	struct wm_softc *sc = ifp->if_softc;
   7069 	struct mbuf *m0;
   7070 	struct wm_txsoft *txs;
   7071 	bus_dmamap_t dmamap;
   7072 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7073 	bus_addr_t curaddr;
   7074 	bus_size_t seglen, curlen;
   7075 	uint32_t cksumcmd;
   7076 	uint8_t cksumfields;
   7077 
   7078 	KASSERT(mutex_owned(txq->txq_lock));
   7079 
   7080 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7081 		return;
   7082 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7083 		return;
   7084 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7085 		return;
   7086 
   7087 	/* Remember the previous number of free descriptors. */
   7088 	ofree = txq->txq_free;
   7089 
   7090 	/*
   7091 	 * Loop through the send queue, setting up transmit descriptors
   7092 	 * until we drain the queue, or use up all available transmit
   7093 	 * descriptors.
   7094 	 */
   7095 	for (;;) {
   7096 		m0 = NULL;
   7097 
   7098 		/* Get a work queue entry. */
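         		/*
         		 * If free job slots are running low, reap completed
         		 * transmissions (wm_txeof()) before giving up.
         		 */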
   7099 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7100 			wm_txeof(sc, txq);
   7101 			if (txq->txq_sfree == 0) {
   7102 				DPRINTF(WM_DEBUG_TX,
   7103 				    ("%s: TX: no free job descriptors\n",
   7104 					device_xname(sc->sc_dev)));
   7105 				WM_Q_EVCNT_INCR(txq, txsstall);
   7106 				break;
   7107 			}
   7108 		}
   7109 
   7110 		/* Grab a packet off the queue. */
   7111 		if (is_transmit)
   7112 			m0 = pcq_get(txq->txq_interq);
   7113 		else
   7114 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7115 		if (m0 == NULL)
   7116 			break;
   7117 
   7118 		DPRINTF(WM_DEBUG_TX,
   7119 		    ("%s: TX: have packet to transmit: %p\n",
   7120 		    device_xname(sc->sc_dev), m0));
   7121 
   7122 		txs = &txq->txq_soft[txq->txq_snext];
   7123 		dmamap = txs->txs_dmamap;
   7124 
   7125 		use_tso = (m0->m_pkthdr.csum_flags &
   7126 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7127 
   7128 		/*
   7129 		 * So says the Linux driver:
   7130 		 * The controller does a simple calculation to make sure
   7131 		 * there is enough room in the FIFO before initiating the
   7132 		 * DMA for each buffer.  The calc is:
   7133 		 *	4 = ceil(buffer len / MSS)
   7134 		 * To make sure we don't overrun the FIFO, adjust the max
   7135 		 * buffer len if the MSS drops.
   7136 		 */
   7137 		dmamap->dm_maxsegsz =
   7138 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7139 		    ? m0->m_pkthdr.segsz << 2
   7140 		    : WTX_MAX_LEN;
   7141 
   7142 		/*
   7143 		 * Load the DMA map.  If this fails, the packet either
   7144 		 * didn't fit in the allotted number of segments, or we
   7145 		 * were short on resources.  For the too-many-segments
   7146 		 * case, we simply report an error and drop the packet,
   7147 		 * since we can't sanely copy a jumbo packet to a single
   7148 		 * buffer.
   7149 		 */
   7150 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7151 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7152 		if (error) {
   7153 			if (error == EFBIG) {
   7154 				WM_Q_EVCNT_INCR(txq, txdrop);
   7155 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7156 				    "DMA segments, dropping...\n",
   7157 				    device_xname(sc->sc_dev));
   7158 				wm_dump_mbuf_chain(sc, m0);
   7159 				m_freem(m0);
   7160 				continue;
   7161 			}
    7162 			/* Short on resources, just stop for now. */
   7163 			DPRINTF(WM_DEBUG_TX,
   7164 			    ("%s: TX: dmamap load failed: %d\n",
   7165 			    device_xname(sc->sc_dev), error));
   7166 			break;
   7167 		}
   7168 
   7169 		segs_needed = dmamap->dm_nsegs;
   7170 		if (use_tso) {
   7171 			/* For sentinel descriptor; see below. */
   7172 			segs_needed++;
   7173 		}
   7174 
   7175 		/*
   7176 		 * Ensure we have enough descriptors free to describe
   7177 		 * the packet.  Note, we always reserve one descriptor
   7178 		 * at the end of the ring due to the semantics of the
   7179 		 * TDT register, plus one more in the event we need
   7180 		 * to load offload context.
   7181 		 */
   7182 		if (segs_needed > txq->txq_free - 2) {
   7183 			/*
   7184 			 * Not enough free descriptors to transmit this
   7185 			 * packet.  We haven't committed anything yet,
   7186 			 * so just unload the DMA map, put the packet
    7187 			 * back on the queue, and punt.  Notify the upper
   7188 			 * layer that there are no more slots left.
   7189 			 */
   7190 			DPRINTF(WM_DEBUG_TX,
   7191 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7192 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7193 			    segs_needed, txq->txq_free - 1));
   7194 			if (!is_transmit)
   7195 				ifp->if_flags |= IFF_OACTIVE;
   7196 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7197 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7198 			WM_Q_EVCNT_INCR(txq, txdstall);
   7199 			break;
   7200 		}
   7201 
   7202 		/*
   7203 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7204 		 * once we know we can transmit the packet, since we
   7205 		 * do some internal FIFO space accounting here.
   7206 		 */
   7207 		if (sc->sc_type == WM_T_82547 &&
   7208 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7209 			DPRINTF(WM_DEBUG_TX,
   7210 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7211 			    device_xname(sc->sc_dev)));
   7212 			if (!is_transmit)
   7213 				ifp->if_flags |= IFF_OACTIVE;
   7214 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7215 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7216 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7217 			break;
   7218 		}
   7219 
   7220 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7221 
   7222 		DPRINTF(WM_DEBUG_TX,
   7223 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7224 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7225 
   7226 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7227 
   7228 		/*
   7229 		 * Store a pointer to the packet so that we can free it
   7230 		 * later.
   7231 		 *
    7232 		 * Initially, we consider the number of descriptors the packet
    7233 		 * uses to be the number of DMA segments.  This may be
   7234 		 * incremented by 1 if we do checksum offload (a descriptor
   7235 		 * is used to set the checksum context).
   7236 		 */
   7237 		txs->txs_mbuf = m0;
   7238 		txs->txs_firstdesc = txq->txq_next;
   7239 		txs->txs_ndesc = segs_needed;
   7240 
   7241 		/* Set up offload parameters for this packet. */
   7242 		if (m0->m_pkthdr.csum_flags &
   7243 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7244 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7245 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7246 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7247 					  &cksumfields) != 0) {
   7248 				/* Error message already displayed. */
   7249 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7250 				continue;
   7251 			}
   7252 		} else {
   7253 			cksumcmd = 0;
   7254 			cksumfields = 0;
   7255 		}
   7256 
   7257 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7258 
   7259 		/* Sync the DMA map. */
   7260 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7261 		    BUS_DMASYNC_PREWRITE);
   7262 
   7263 		/* Initialize the transmit descriptor. */
   7264 		for (nexttx = txq->txq_next, seg = 0;
   7265 		     seg < dmamap->dm_nsegs; seg++) {
   7266 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7267 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7268 			     seglen != 0;
   7269 			     curaddr += curlen, seglen -= curlen,
   7270 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7271 				curlen = seglen;
   7272 
   7273 				/*
   7274 				 * So says the Linux driver:
   7275 				 * Work around for premature descriptor
   7276 				 * write-backs in TSO mode.  Append a
   7277 				 * 4-byte sentinel descriptor.
   7278 				 */
   7279 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7280 				    curlen > 8)
   7281 					curlen -= 4;
   7282 
   7283 				wm_set_dma_addr(
   7284 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7285 				txq->txq_descs[nexttx].wtx_cmdlen
   7286 				    = htole32(cksumcmd | curlen);
   7287 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7288 				    = 0;
   7289 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7290 				    = cksumfields;
    7291 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7292 				lasttx = nexttx;
   7293 
   7294 				DPRINTF(WM_DEBUG_TX,
   7295 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7296 				     "len %#04zx\n",
   7297 				    device_xname(sc->sc_dev), nexttx,
   7298 				    (uint64_t)curaddr, curlen));
   7299 			}
   7300 		}
   7301 
   7302 		KASSERT(lasttx != -1);
   7303 
   7304 		/*
   7305 		 * Set up the command byte on the last descriptor of
   7306 		 * the packet.  If we're in the interrupt delay window,
   7307 		 * delay the interrupt.
   7308 		 */
   7309 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7310 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7311 
   7312 		/*
   7313 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7314 		 * up the descriptor to encapsulate the packet for us.
   7315 		 *
   7316 		 * This is only valid on the last descriptor of the packet.
   7317 		 */
   7318 		if (vlan_has_tag(m0)) {
   7319 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7320 			    htole32(WTX_CMD_VLE);
   7321 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7322 			    = htole16(vlan_get_tag(m0));
   7323 		}
   7324 
   7325 		txs->txs_lastdesc = lasttx;
   7326 
   7327 		DPRINTF(WM_DEBUG_TX,
   7328 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7329 		    device_xname(sc->sc_dev),
   7330 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7331 
   7332 		/* Sync the descriptors we're using. */
   7333 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7334 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7335 
   7336 		/* Give the packet to the chip. */
   7337 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7338 
   7339 		DPRINTF(WM_DEBUG_TX,
   7340 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7341 
   7342 		DPRINTF(WM_DEBUG_TX,
   7343 		    ("%s: TX: finished transmitting packet, job %d\n",
   7344 		    device_xname(sc->sc_dev), txq->txq_snext));
   7345 
   7346 		/* Advance the tx pointer. */
   7347 		txq->txq_free -= txs->txs_ndesc;
   7348 		txq->txq_next = nexttx;
   7349 
   7350 		txq->txq_sfree--;
   7351 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7352 
   7353 		/* Pass the packet to any BPF listeners. */
   7354 		bpf_mtap(ifp, m0);
   7355 	}
   7356 
   7357 	if (m0 != NULL) {
   7358 		if (!is_transmit)
   7359 			ifp->if_flags |= IFF_OACTIVE;
   7360 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7361 		WM_Q_EVCNT_INCR(txq, txdrop);
   7362 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7363 			__func__));
   7364 		m_freem(m0);
   7365 	}
   7366 
   7367 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7368 		/* No more slots; notify upper layer. */
   7369 		if (!is_transmit)
   7370 			ifp->if_flags |= IFF_OACTIVE;
   7371 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7372 	}
   7373 
   7374 	if (txq->txq_free != ofree) {
   7375 		/* Set a watchdog timer in case the chip flakes out. */
   7376 		ifp->if_timer = 5;
   7377 	}
   7378 }
   7379 
   7380 /*
   7381  * wm_nq_tx_offload:
   7382  *
   7383  *	Set up TCP/IP checksumming parameters for the
   7384  *	specified packet, for NEWQUEUE devices
   7385  */
   7386 static int
   7387 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7388     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7389 {
   7390 	struct mbuf *m0 = txs->txs_mbuf;
   7391 	uint32_t vl_len, mssidx, cmdc;
   7392 	struct ether_header *eh;
   7393 	int offset, iphl;
   7394 
   7395 	/*
   7396 	 * XXX It would be nice if the mbuf pkthdr had offset
   7397 	 * fields for the protocol headers.
   7398 	 */
   7399 	*cmdlenp = 0;
   7400 	*fieldsp = 0;
   7401 
   7402 	eh = mtod(m0, struct ether_header *);
    7403 	switch (ntohs(eh->ether_type)) {
   7404 	case ETHERTYPE_IP:
   7405 	case ETHERTYPE_IPV6:
   7406 		offset = ETHER_HDR_LEN;
   7407 		break;
   7408 
   7409 	case ETHERTYPE_VLAN:
   7410 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7411 		break;
   7412 
   7413 	default:
   7414 		/* Don't support this protocol or encapsulation. */
   7415 		*do_csum = false;
   7416 		return 0;
   7417 	}
   7418 	*do_csum = true;
   7419 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7420 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7421 
   7422 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7423 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7424 
   7425 	if ((m0->m_pkthdr.csum_flags &
   7426 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7427 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7428 	} else {
   7429 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7430 	}
   7431 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7432 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7433 
   7434 	if (vlan_has_tag(m0)) {
   7435 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7436 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7437 		*cmdlenp |= NQTX_CMD_VLE;
   7438 	}
   7439 
   7440 	mssidx = 0;
   7441 
   7442 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7443 		int hlen = offset + iphl;
   7444 		int tcp_hlen;
   7445 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7446 
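         		/*
         		 * As in wm_tx_offload(): clear the length field and seed
         		 * th_sum with a length-less pseudo-header checksum so the
         		 * hardware can fix up each generated segment.
         		 */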
   7447 		if (__predict_false(m0->m_len <
   7448 				    (hlen + sizeof(struct tcphdr)))) {
   7449 			/*
   7450 			 * TCP/IP headers are not in the first mbuf; we need
   7451 			 * to do this the slow and painful way.  Let's just
   7452 			 * hope this doesn't happen very often.
   7453 			 */
   7454 			struct tcphdr th;
   7455 
   7456 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7457 
   7458 			m_copydata(m0, hlen, sizeof(th), &th);
   7459 			if (v4) {
   7460 				struct ip ip;
   7461 
   7462 				m_copydata(m0, offset, sizeof(ip), &ip);
   7463 				ip.ip_len = 0;
   7464 				m_copyback(m0,
   7465 				    offset + offsetof(struct ip, ip_len),
   7466 				    sizeof(ip.ip_len), &ip.ip_len);
   7467 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7468 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7469 			} else {
   7470 				struct ip6_hdr ip6;
   7471 
   7472 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7473 				ip6.ip6_plen = 0;
   7474 				m_copyback(m0,
   7475 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7476 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7477 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7478 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7479 			}
   7480 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7481 			    sizeof(th.th_sum), &th.th_sum);
   7482 
   7483 			tcp_hlen = th.th_off << 2;
   7484 		} else {
   7485 			/*
   7486 			 * TCP/IP headers are in the first mbuf; we can do
   7487 			 * this the easy way.
   7488 			 */
   7489 			struct tcphdr *th;
   7490 
   7491 			if (v4) {
   7492 				struct ip *ip =
   7493 				    (void *)(mtod(m0, char *) + offset);
   7494 				th = (void *)(mtod(m0, char *) + hlen);
   7495 
   7496 				ip->ip_len = 0;
   7497 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7498 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7499 			} else {
   7500 				struct ip6_hdr *ip6 =
   7501 				    (void *)(mtod(m0, char *) + offset);
   7502 				th = (void *)(mtod(m0, char *) + hlen);
   7503 
   7504 				ip6->ip6_plen = 0;
   7505 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7506 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7507 			}
   7508 			tcp_hlen = th->th_off << 2;
   7509 		}
   7510 		hlen += tcp_hlen;
   7511 		*cmdlenp |= NQTX_CMD_TSE;
   7512 
   7513 		if (v4) {
   7514 			WM_Q_EVCNT_INCR(txq, txtso);
   7515 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7516 		} else {
   7517 			WM_Q_EVCNT_INCR(txq, txtso6);
   7518 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7519 		}
   7520 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7521 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7522 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7523 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7524 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7525 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7526 	} else {
   7527 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7528 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7529 	}
   7530 
   7531 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7532 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7533 		cmdc |= NQTXC_CMD_IP4;
   7534 	}
   7535 
   7536 	if (m0->m_pkthdr.csum_flags &
   7537 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7538 		WM_Q_EVCNT_INCR(txq, txtusum);
   7539 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7540 			cmdc |= NQTXC_CMD_TCP;
   7541 		} else {
   7542 			cmdc |= NQTXC_CMD_UDP;
   7543 		}
   7544 		cmdc |= NQTXC_CMD_IP4;
   7545 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7546 	}
   7547 	if (m0->m_pkthdr.csum_flags &
   7548 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7549 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7550 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7551 			cmdc |= NQTXC_CMD_TCP;
   7552 		} else {
   7553 			cmdc |= NQTXC_CMD_UDP;
   7554 		}
   7555 		cmdc |= NQTXC_CMD_IP6;
   7556 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7557 	}
   7558 
   7559 	/*
    7560 	 * We don't have to write a context descriptor for every packet to
    7561 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7562 	 * I210 and I211.  For these controllers, writing once per Tx queue
    7563 	 * is enough.
    7564 	 * Writing a context descriptor for every packet adds overhead,
    7565 	 * but it does not cause problems.
   7566 	 */
   7567 	/* Fill in the context descriptor. */
   7568 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7569 	    htole32(vl_len);
   7570 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7571 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7572 	    htole32(cmdc);
   7573 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7574 	    htole32(mssidx);
   7575 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7576 	DPRINTF(WM_DEBUG_TX,
   7577 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7578 	    txq->txq_next, 0, vl_len));
   7579 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7580 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7581 	txs->txs_ndesc++;
   7582 	return 0;
   7583 }
   7584 
   7585 /*
   7586  * wm_nq_start:		[ifnet interface function]
   7587  *
   7588  *	Start packet transmission on the interface for NEWQUEUE devices
   7589  */
   7590 static void
   7591 wm_nq_start(struct ifnet *ifp)
   7592 {
   7593 	struct wm_softc *sc = ifp->if_softc;
   7594 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7595 
   7596 #ifdef WM_MPSAFE
   7597 	KASSERT(if_is_mpsafe(ifp));
   7598 #endif
   7599 	/*
   7600 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7601 	 */
   7602 
   7603 	mutex_enter(txq->txq_lock);
   7604 	if (!txq->txq_stopping)
   7605 		wm_nq_start_locked(ifp);
   7606 	mutex_exit(txq->txq_lock);
   7607 }
   7608 
   7609 static void
   7610 wm_nq_start_locked(struct ifnet *ifp)
   7611 {
   7612 	struct wm_softc *sc = ifp->if_softc;
   7613 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7614 
   7615 	wm_nq_send_common_locked(ifp, txq, false);
   7616 }
   7617 
   7618 static int
   7619 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7620 {
   7621 	int qid;
   7622 	struct wm_softc *sc = ifp->if_softc;
   7623 	struct wm_txqueue *txq;
   7624 
   7625 	qid = wm_select_txqueue(ifp, m);
   7626 	txq = &sc->sc_queue[qid].wmq_txq;
   7627 
   7628 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7629 		m_freem(m);
   7630 		WM_Q_EVCNT_INCR(txq, txdrop);
   7631 		return ENOBUFS;
   7632 	}
   7633 
   7634 	/*
   7635 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7636 	 */
   7637 	ifp->if_obytes += m->m_pkthdr.len;
   7638 	if (m->m_flags & M_MCAST)
   7639 		ifp->if_omcasts++;
   7640 
   7641 	/*
    7642 	 * There are two situations in which this mutex_tryenter() can
    7643 	 * fail at run time:
    7644 	 *     (1) contention with the interrupt handler
    7645 	 *         (wm_txrxintr_msix())
    7646 	 *     (2) contention with the deferred if_start softint
    7647 	 *         (wm_handle_queue())
    7648 	 * In both cases, the last packet enqueued to txq->txq_interq is
    7649 	 * dequeued later by wm_deferred_start_locked(), so it does not get stuck.
   7650 	 */
   7651 	if (mutex_tryenter(txq->txq_lock)) {
   7652 		if (!txq->txq_stopping)
   7653 			wm_nq_transmit_locked(ifp, txq);
   7654 		mutex_exit(txq->txq_lock);
   7655 	}
   7656 
   7657 	return 0;
   7658 }
   7659 
   7660 static void
   7661 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7662 {
   7663 
   7664 	wm_nq_send_common_locked(ifp, txq, true);
   7665 }
   7666 
   7667 static void
   7668 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7669     bool is_transmit)
   7670 {
   7671 	struct wm_softc *sc = ifp->if_softc;
   7672 	struct mbuf *m0;
   7673 	struct wm_txsoft *txs;
   7674 	bus_dmamap_t dmamap;
   7675 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7676 	bool do_csum, sent;
   7677 
   7678 	KASSERT(mutex_owned(txq->txq_lock));
   7679 
   7680 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7681 		return;
   7682 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7683 		return;
   7684 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7685 		return;
   7686 
   7687 	sent = false;
   7688 
   7689 	/*
   7690 	 * Loop through the send queue, setting up transmit descriptors
   7691 	 * until we drain the queue, or use up all available transmit
   7692 	 * descriptors.
   7693 	 */
   7694 	for (;;) {
   7695 		m0 = NULL;
   7696 
   7697 		/* Get a work queue entry. */
   7698 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7699 			wm_txeof(sc, txq);
   7700 			if (txq->txq_sfree == 0) {
   7701 				DPRINTF(WM_DEBUG_TX,
   7702 				    ("%s: TX: no free job descriptors\n",
   7703 					device_xname(sc->sc_dev)));
   7704 				WM_Q_EVCNT_INCR(txq, txsstall);
   7705 				break;
   7706 			}
   7707 		}
   7708 
   7709 		/* Grab a packet off the queue. */
   7710 		if (is_transmit)
   7711 			m0 = pcq_get(txq->txq_interq);
   7712 		else
   7713 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7714 		if (m0 == NULL)
   7715 			break;
   7716 
   7717 		DPRINTF(WM_DEBUG_TX,
   7718 		    ("%s: TX: have packet to transmit: %p\n",
   7719 		    device_xname(sc->sc_dev), m0));
   7720 
   7721 		txs = &txq->txq_soft[txq->txq_snext];
   7722 		dmamap = txs->txs_dmamap;
   7723 
   7724 		/*
   7725 		 * Load the DMA map.  If this fails, the packet either
   7726 		 * didn't fit in the allotted number of segments, or we
   7727 		 * were short on resources.  For the too-many-segments
   7728 		 * case, we simply report an error and drop the packet,
   7729 		 * since we can't sanely copy a jumbo packet to a single
   7730 		 * buffer.
   7731 		 */
   7732 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7733 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7734 		if (error) {
   7735 			if (error == EFBIG) {
   7736 				WM_Q_EVCNT_INCR(txq, txdrop);
   7737 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7738 				    "DMA segments, dropping...\n",
   7739 				    device_xname(sc->sc_dev));
   7740 				wm_dump_mbuf_chain(sc, m0);
   7741 				m_freem(m0);
   7742 				continue;
   7743 			}
   7744 			/* Short on resources, just stop for now. */
   7745 			DPRINTF(WM_DEBUG_TX,
   7746 			    ("%s: TX: dmamap load failed: %d\n",
   7747 			    device_xname(sc->sc_dev), error));
   7748 			break;
   7749 		}
   7750 
   7751 		segs_needed = dmamap->dm_nsegs;
   7752 
   7753 		/*
   7754 		 * Ensure we have enough descriptors free to describe
   7755 		 * the packet.  Note, we always reserve one descriptor
   7756 		 * at the end of the ring due to the semantics of the
   7757 		 * TDT register, plus one more in the event we need
   7758 		 * to load offload context.
   7759 		 */
   7760 		if (segs_needed > txq->txq_free - 2) {
   7761 			/*
   7762 			 * Not enough free descriptors to transmit this
   7763 			 * packet.  We haven't committed anything yet,
   7764 			 * so just unload the DMA map, put the packet
    7765 			 * back on the queue, and punt.  Notify the upper
   7766 			 * layer that there are no more slots left.
   7767 			 */
   7768 			DPRINTF(WM_DEBUG_TX,
   7769 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7770 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7771 			    segs_needed, txq->txq_free - 1));
   7772 			if (!is_transmit)
   7773 				ifp->if_flags |= IFF_OACTIVE;
   7774 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7775 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7776 			WM_Q_EVCNT_INCR(txq, txdstall);
   7777 			break;
   7778 		}
   7779 
   7780 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7781 
   7782 		DPRINTF(WM_DEBUG_TX,
   7783 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7784 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7785 
   7786 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7787 
   7788 		/*
   7789 		 * Store a pointer to the packet so that we can free it
   7790 		 * later.
   7791 		 *
    7792 		 * Initially, we consider the number of descriptors the packet
    7793 		 * uses to be the number of DMA segments.  This may be
   7794 		 * incremented by 1 if we do checksum offload (a descriptor
   7795 		 * is used to set the checksum context).
   7796 		 */
   7797 		txs->txs_mbuf = m0;
   7798 		txs->txs_firstdesc = txq->txq_next;
   7799 		txs->txs_ndesc = segs_needed;
   7800 
   7801 		/* Set up offload parameters for this packet. */
   7802 		uint32_t cmdlen, fields, dcmdlen;
   7803 		if (m0->m_pkthdr.csum_flags &
   7804 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7805 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7806 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7807 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7808 			    &do_csum) != 0) {
   7809 				/* Error message already displayed. */
   7810 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7811 				continue;
   7812 			}
   7813 		} else {
   7814 			do_csum = false;
   7815 			cmdlen = 0;
   7816 			fields = 0;
   7817 		}
   7818 
   7819 		/* Sync the DMA map. */
   7820 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7821 		    BUS_DMASYNC_PREWRITE);
   7822 
   7823 		/* Initialize the first transmit descriptor. */
   7824 		nexttx = txq->txq_next;
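         		/*
         		 * With no offload work to do we can use a legacy
         		 * descriptor; otherwise the advanced (DEXT) data
         		 * descriptor format is used, paired with the context
         		 * descriptor written by wm_nq_tx_offload().
         		 */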
   7825 		if (!do_csum) {
   7826 			/* setup a legacy descriptor */
   7827 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7828 			    dmamap->dm_segs[0].ds_addr);
   7829 			txq->txq_descs[nexttx].wtx_cmdlen =
   7830 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7831 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7832 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7833 			if (vlan_has_tag(m0)) {
   7834 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7835 				    htole32(WTX_CMD_VLE);
   7836 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7837 				    htole16(vlan_get_tag(m0));
   7838 			} else {
    7839 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7840 			}
   7841 			dcmdlen = 0;
   7842 		} else {
   7843 			/* setup an advanced data descriptor */
   7844 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7845 			    htole64(dmamap->dm_segs[0].ds_addr);
   7846 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7847 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7848 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7849 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7850 			    htole32(fields);
   7851 			DPRINTF(WM_DEBUG_TX,
   7852 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7853 			    device_xname(sc->sc_dev), nexttx,
   7854 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7855 			DPRINTF(WM_DEBUG_TX,
   7856 			    ("\t 0x%08x%08x\n", fields,
   7857 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7858 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7859 		}
   7860 
   7861 		lasttx = nexttx;
   7862 		nexttx = WM_NEXTTX(txq, nexttx);
   7863 		/*
    7864 		 * Fill in the next descriptors.  The legacy and advanced
    7865 		 * formats are the same from here on.
   7866 		 */
   7867 		for (seg = 1; seg < dmamap->dm_nsegs;
   7868 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7869 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7870 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7871 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7872 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7873 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7874 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7875 			lasttx = nexttx;
   7876 
   7877 			DPRINTF(WM_DEBUG_TX,
   7878 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7879 			     "len %#04zx\n",
   7880 			    device_xname(sc->sc_dev), nexttx,
   7881 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7882 			    dmamap->dm_segs[seg].ds_len));
   7883 		}
   7884 
   7885 		KASSERT(lasttx != -1);
   7886 
   7887 		/*
   7888 		 * Set up the command byte on the last descriptor of
   7889 		 * the packet.  If we're in the interrupt delay window,
   7890 		 * delay the interrupt.
   7891 		 */
   7892 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7893 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7894 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7895 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7896 
   7897 		txs->txs_lastdesc = lasttx;
   7898 
   7899 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7900 		    device_xname(sc->sc_dev),
   7901 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7902 
   7903 		/* Sync the descriptors we're using. */
   7904 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7905 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7906 
   7907 		/* Give the packet to the chip. */
   7908 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7909 		sent = true;
   7910 
   7911 		DPRINTF(WM_DEBUG_TX,
   7912 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7913 
   7914 		DPRINTF(WM_DEBUG_TX,
   7915 		    ("%s: TX: finished transmitting packet, job %d\n",
   7916 		    device_xname(sc->sc_dev), txq->txq_snext));
   7917 
   7918 		/* Advance the tx pointer. */
   7919 		txq->txq_free -= txs->txs_ndesc;
   7920 		txq->txq_next = nexttx;
   7921 
   7922 		txq->txq_sfree--;
   7923 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7924 
   7925 		/* Pass the packet to any BPF listeners. */
   7926 		bpf_mtap(ifp, m0);
   7927 	}
   7928 
   7929 	if (m0 != NULL) {
   7930 		if (!is_transmit)
   7931 			ifp->if_flags |= IFF_OACTIVE;
   7932 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7933 		WM_Q_EVCNT_INCR(txq, txdrop);
   7934 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7935 			__func__));
   7936 		m_freem(m0);
   7937 	}
   7938 
   7939 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7940 		/* No more slots; notify upper layer. */
   7941 		if (!is_transmit)
   7942 			ifp->if_flags |= IFF_OACTIVE;
   7943 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7944 	}
   7945 
   7946 	if (sent) {
   7947 		/* Set a watchdog timer in case the chip flakes out. */
   7948 		ifp->if_timer = 5;
   7949 	}
   7950 }
   7951 
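         /*
          * wm_deferred_start_locked:
          *
          *	Restart packet transmission from softint context, after
          *	wm_txeof() has freed up descriptors.  Called with txq_lock
          *	held.
          */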
   7952 static void
   7953 wm_deferred_start_locked(struct wm_txqueue *txq)
   7954 {
   7955 	struct wm_softc *sc = txq->txq_sc;
   7956 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7957 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7958 	int qid = wmq->wmq_id;
   7959 
   7960 	KASSERT(mutex_owned(txq->txq_lock));
   7961 
    7962 	if (txq->txq_stopping)
    7963 		return;
   7966 
   7967 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7968 		/* XXX needed for ALTQ or single-CPU systems */
   7969 		if (qid == 0)
   7970 			wm_nq_start_locked(ifp);
   7971 		wm_nq_transmit_locked(ifp, txq);
   7972 	} else {
    7973 		/* XXX needed for ALTQ or single-CPU systems */
   7974 		if (qid == 0)
   7975 			wm_start_locked(ifp);
   7976 		wm_transmit_locked(ifp, txq);
   7977 	}
   7978 }
   7979 
   7980 /* Interrupt */
   7981 
   7982 /*
   7983  * wm_txeof:
   7984  *
   7985  *	Helper; handle transmit interrupts.
   7986  */
   7987 static int
   7988 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7989 {
   7990 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7991 	struct wm_txsoft *txs;
   7992 	bool processed = false;
   7993 	int count = 0;
   7994 	int i;
   7995 	uint8_t status;
   7996 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7997 
   7998 	KASSERT(mutex_owned(txq->txq_lock));
   7999 
   8000 	if (txq->txq_stopping)
   8001 		return 0;
   8002 
   8003 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8004 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8005 	if (wmq->wmq_id == 0)
   8006 		ifp->if_flags &= ~IFF_OACTIVE;
   8007 
   8008 	/*
   8009 	 * Go through the Tx list and free mbufs for those
   8010 	 * frames which have been transmitted.
   8011 	 */
   8012 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8013 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8014 		txs = &txq->txq_soft[i];
   8015 
   8016 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8017 			device_xname(sc->sc_dev), i));
   8018 
   8019 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8020 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8021 
   8022 		status =
   8023 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
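         		/*
         		 * If the descriptor done (DD) bit isn't set yet, resync
         		 * the descriptor for the next poll and stop scanning;
         		 * jobs complete in ring order.
         		 */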
   8024 		if ((status & WTX_ST_DD) == 0) {
   8025 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8026 			    BUS_DMASYNC_PREREAD);
   8027 			break;
   8028 		}
   8029 
   8030 		processed = true;
   8031 		count++;
   8032 		DPRINTF(WM_DEBUG_TX,
   8033 		    ("%s: TX: job %d done: descs %d..%d\n",
   8034 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8035 		    txs->txs_lastdesc));
   8036 
   8037 		/*
   8038 		 * XXX We should probably be using the statistics
   8039 		 * XXX registers, but I don't know if they exist
   8040 		 * XXX on chips before the i82544.
   8041 		 */
   8042 
   8043 #ifdef WM_EVENT_COUNTERS
   8044 		if (status & WTX_ST_TU)
   8045 			WM_Q_EVCNT_INCR(txq, tu);
   8046 #endif /* WM_EVENT_COUNTERS */
   8047 
   8048 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8049 			ifp->if_oerrors++;
   8050 			if (status & WTX_ST_LC)
   8051 				log(LOG_WARNING, "%s: late collision\n",
   8052 				    device_xname(sc->sc_dev));
   8053 			else if (status & WTX_ST_EC) {
   8054 				ifp->if_collisions += 16;
   8055 				log(LOG_WARNING, "%s: excessive collisions\n",
   8056 				    device_xname(sc->sc_dev));
   8057 			}
   8058 		} else
   8059 			ifp->if_opackets++;
   8060 
   8061 		txq->txq_packets++;
   8062 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8063 
   8064 		txq->txq_free += txs->txs_ndesc;
   8065 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8066 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8067 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8068 		m_freem(txs->txs_mbuf);
   8069 		txs->txs_mbuf = NULL;
   8070 	}
   8071 
   8072 	/* Update the dirty transmit buffer pointer. */
   8073 	txq->txq_sdirty = i;
   8074 	DPRINTF(WM_DEBUG_TX,
   8075 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8076 
   8077 	if (count != 0)
   8078 		rnd_add_uint32(&sc->rnd_source, count);
   8079 
   8080 	/*
   8081 	 * If there are no more pending transmissions, cancel the watchdog
   8082 	 * timer.
   8083 	 */
   8084 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8085 		ifp->if_timer = 0;
   8086 
   8087 	return processed;
   8088 }
   8089 
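         /*
          * The following wm_rxdesc_* inline functions hide the differences
          * between the three receive descriptor formats (legacy, 82574
          * extended and the "newqueue" advanced format) behind one accessor
          * interface, selected by sc_type and the WM_F_NEWQUEUE flag.
          */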
   8090 static inline uint32_t
   8091 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8092 {
   8093 	struct wm_softc *sc = rxq->rxq_sc;
   8094 
   8095 	if (sc->sc_type == WM_T_82574)
   8096 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8097 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8098 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8099 	else
   8100 		return rxq->rxq_descs[idx].wrx_status;
   8101 }
   8102 
   8103 static inline uint32_t
   8104 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8105 {
   8106 	struct wm_softc *sc = rxq->rxq_sc;
   8107 
   8108 	if (sc->sc_type == WM_T_82574)
   8109 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8110 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8111 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8112 	else
   8113 		return rxq->rxq_descs[idx].wrx_errors;
   8114 }
   8115 
   8116 static inline uint16_t
   8117 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8118 {
   8119 	struct wm_softc *sc = rxq->rxq_sc;
   8120 
   8121 	if (sc->sc_type == WM_T_82574)
   8122 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8123 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8124 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8125 	else
   8126 		return rxq->rxq_descs[idx].wrx_special;
   8127 }
   8128 
   8129 static inline int
   8130 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8131 {
   8132 	struct wm_softc *sc = rxq->rxq_sc;
   8133 
   8134 	if (sc->sc_type == WM_T_82574)
   8135 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8136 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8137 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8138 	else
   8139 		return rxq->rxq_descs[idx].wrx_len;
   8140 }
   8141 
   8142 #ifdef WM_DEBUG
   8143 static inline uint32_t
   8144 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8145 {
   8146 	struct wm_softc *sc = rxq->rxq_sc;
   8147 
   8148 	if (sc->sc_type == WM_T_82574)
   8149 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8150 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8151 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8152 	else
   8153 		return 0;
   8154 }
   8155 
   8156 static inline uint8_t
   8157 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8158 {
   8159 	struct wm_softc *sc = rxq->rxq_sc;
   8160 
   8161 	if (sc->sc_type == WM_T_82574)
   8162 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8163 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8164 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8165 	else
   8166 		return 0;
   8167 }
   8168 #endif /* WM_DEBUG */
   8169 
   8170 static inline bool
   8171 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8172     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8173 {
   8174 
   8175 	if (sc->sc_type == WM_T_82574)
   8176 		return (status & ext_bit) != 0;
   8177 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8178 		return (status & nq_bit) != 0;
   8179 	else
   8180 		return (status & legacy_bit) != 0;
   8181 }
   8182 
   8183 static inline bool
   8184 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8185     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8186 {
   8187 
   8188 	if (sc->sc_type == WM_T_82574)
   8189 		return (error & ext_bit) != 0;
   8190 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8191 		return (error & nq_bit) != 0;
   8192 	else
   8193 		return (error & legacy_bit) != 0;
   8194 }
   8195 
   8196 static inline bool
   8197 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8198 {
   8199 
   8200 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8201 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8202 		return true;
   8203 	else
   8204 		return false;
   8205 }
   8206 
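         /*
          * Check the per-packet error bits, logging the noteworthy ones.
          * Returns true if the frame has errors and should be dropped.
          */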
   8207 static inline bool
   8208 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8209 {
   8210 	struct wm_softc *sc = rxq->rxq_sc;
   8211 
   8212 	/* XXXX missing error bit for newqueue? */
   8213 	if (wm_rxdesc_is_set_error(sc, errors,
   8214 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8215 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8216 		NQRXC_ERROR_RXE)) {
   8217 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8218 			log(LOG_WARNING, "%s: symbol error\n",
   8219 			    device_xname(sc->sc_dev));
   8220 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8221 			log(LOG_WARNING, "%s: receive sequence error\n",
   8222 			    device_xname(sc->sc_dev));
   8223 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8224 			log(LOG_WARNING, "%s: CRC error\n",
   8225 			    device_xname(sc->sc_dev));
   8226 		return true;
   8227 	}
   8228 
   8229 	return false;
   8230 }
   8231 
   8232 static inline bool
   8233 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8234 {
   8235 	struct wm_softc *sc = rxq->rxq_sc;
   8236 
   8237 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8238 		NQRXC_STATUS_DD)) {
   8239 		/* We have processed all of the receive descriptors. */
   8240 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8241 		return false;
   8242 	}
   8243 
   8244 	return true;
   8245 }
   8246 
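         /*
          * If the chip stripped a VLAN tag from the frame (VP bit set),
          * attach the tag to the mbuf.
          */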
   8247 static inline bool
   8248 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8249     struct mbuf *m)
   8250 {
   8251 
   8252 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8253 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8254 		vlan_set_tag(m, le16toh(vlantag));
   8255 	}
   8256 
   8257 	return true;
   8258 }
   8259 
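         /*
          * Translate the hardware checksum status/error bits into mbuf
          * csum_flags, unless the chip didn't check this packet (IXSM).
          */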
   8260 static inline void
   8261 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8262     uint32_t errors, struct mbuf *m)
   8263 {
   8264 	struct wm_softc *sc = rxq->rxq_sc;
   8265 
   8266 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8267 		if (wm_rxdesc_is_set_status(sc, status,
   8268 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8269 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8270 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8271 			if (wm_rxdesc_is_set_error(sc, errors,
   8272 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8273 				m->m_pkthdr.csum_flags |=
   8274 					M_CSUM_IPv4_BAD;
   8275 		}
   8276 		if (wm_rxdesc_is_set_status(sc, status,
   8277 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8278 			/*
   8279 			 * Note: we don't know if this was TCP or UDP,
   8280 			 * so we just set both bits, and expect the
   8281 			 * upper layers to deal.
   8282 			 */
   8283 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8284 			m->m_pkthdr.csum_flags |=
   8285 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8286 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8287 			if (wm_rxdesc_is_set_error(sc, errors,
   8288 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8289 				m->m_pkthdr.csum_flags |=
   8290 					M_CSUM_TCP_UDP_BAD;
   8291 		}
   8292 	}
   8293 }
   8294 
   8295 /*
   8296  * wm_rxeof:
   8297  *
   8298  *	Helper; handle receive interrupts.
   8299  */
   8300 static void
   8301 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8302 {
   8303 	struct wm_softc *sc = rxq->rxq_sc;
   8304 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8305 	struct wm_rxsoft *rxs;
   8306 	struct mbuf *m;
   8307 	int i, len;
   8308 	int count = 0;
   8309 	uint32_t status, errors;
   8310 	uint16_t vlantag;
   8311 
   8312 	KASSERT(mutex_owned(rxq->rxq_lock));
   8313 
   8314 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8315 		if (limit-- == 0) {
   8316 			rxq->rxq_ptr = i;
   8317 			break;
   8318 		}
   8319 
   8320 		rxs = &rxq->rxq_soft[i];
   8321 
   8322 		DPRINTF(WM_DEBUG_RX,
   8323 		    ("%s: RX: checking descriptor %d\n",
   8324 		    device_xname(sc->sc_dev), i));
    8325 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8326 
   8327 		status = wm_rxdesc_get_status(rxq, i);
   8328 		errors = wm_rxdesc_get_errors(rxq, i);
   8329 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8330 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8331 #ifdef WM_DEBUG
   8332 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8333 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8334 #endif
   8335 
   8336 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8337 			/*
    8338 			 * Update the receive pointer while still holding
    8339 			 * rxq_lock, consistent with the counters.
   8340 			 */
   8341 			rxq->rxq_ptr = i;
   8342 			break;
   8343 		}
   8344 
   8345 		count++;
   8346 		if (__predict_false(rxq->rxq_discard)) {
   8347 			DPRINTF(WM_DEBUG_RX,
   8348 			    ("%s: RX: discarding contents of descriptor %d\n",
   8349 			    device_xname(sc->sc_dev), i));
   8350 			wm_init_rxdesc(rxq, i);
   8351 			if (wm_rxdesc_is_eop(rxq, status)) {
   8352 				/* Reset our state. */
   8353 				DPRINTF(WM_DEBUG_RX,
   8354 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8355 				    device_xname(sc->sc_dev)));
   8356 				rxq->rxq_discard = 0;
   8357 			}
   8358 			continue;
   8359 		}
   8360 
   8361 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8362 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8363 
   8364 		m = rxs->rxs_mbuf;
   8365 
   8366 		/*
   8367 		 * Add a new receive buffer to the ring, unless of
   8368 		 * course the length is zero. Treat the latter as a
   8369 		 * failed mapping.
   8370 		 */
   8371 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8372 			/*
   8373 			 * Failed, throw away what we've done so
   8374 			 * far, and discard the rest of the packet.
   8375 			 */
   8376 			ifp->if_ierrors++;
   8377 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8378 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8379 			wm_init_rxdesc(rxq, i);
   8380 			if (!wm_rxdesc_is_eop(rxq, status))
   8381 				rxq->rxq_discard = 1;
   8382 			if (rxq->rxq_head != NULL)
   8383 				m_freem(rxq->rxq_head);
   8384 			WM_RXCHAIN_RESET(rxq);
   8385 			DPRINTF(WM_DEBUG_RX,
   8386 			    ("%s: RX: Rx buffer allocation failed, "
   8387 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8388 			    rxq->rxq_discard ? " (discard)" : ""));
   8389 			continue;
   8390 		}
   8391 
   8392 		m->m_len = len;
   8393 		rxq->rxq_len += len;
   8394 		DPRINTF(WM_DEBUG_RX,
   8395 		    ("%s: RX: buffer at %p len %d\n",
   8396 		    device_xname(sc->sc_dev), m->m_data, len));
   8397 
   8398 		/* If this is not the end of the packet, keep looking. */
   8399 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8400 			WM_RXCHAIN_LINK(rxq, m);
   8401 			DPRINTF(WM_DEBUG_RX,
   8402 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8403 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8404 			continue;
   8405 		}
   8406 
    8407 		/*
    8408 		 * Okay, we have the entire packet now.  The chip is
    8409 		 * configured to include the FCS except on I350, I354 and
    8410 		 * I21[01] (not all chips can be configured to strip it),
    8411 		 * so we need to trim it.
    8412 		 * We may also need to adjust the length of the previous
    8413 		 * mbuf in the chain if the current mbuf is too short.
    8414 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8415 		 * register is always set on I350, so we don't trim there.
    8416 		 */
   8417 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8418 		    && (sc->sc_type != WM_T_I210)
   8419 		    && (sc->sc_type != WM_T_I211)) {
   8420 			if (m->m_len < ETHER_CRC_LEN) {
   8421 				rxq->rxq_tail->m_len
   8422 				    -= (ETHER_CRC_LEN - m->m_len);
   8423 				m->m_len = 0;
   8424 			} else
   8425 				m->m_len -= ETHER_CRC_LEN;
   8426 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8427 		} else
   8428 			len = rxq->rxq_len;
   8429 
   8430 		WM_RXCHAIN_LINK(rxq, m);
   8431 
   8432 		*rxq->rxq_tailp = NULL;
   8433 		m = rxq->rxq_head;
   8434 
   8435 		WM_RXCHAIN_RESET(rxq);
   8436 
   8437 		DPRINTF(WM_DEBUG_RX,
   8438 		    ("%s: RX: have entire packet, len -> %d\n",
   8439 		    device_xname(sc->sc_dev), len));
   8440 
   8441 		/* If an error occurred, update stats and drop the packet. */
   8442 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8443 			m_freem(m);
   8444 			continue;
   8445 		}
   8446 
   8447 		/* No errors.  Receive the packet. */
   8448 		m_set_rcvif(m, ifp);
   8449 		m->m_pkthdr.len = len;
   8450 		/*
    8451 		 * TODO:
    8452 		 * We should save the rsshash and rsstype in this mbuf.
   8453 		 */
   8454 		DPRINTF(WM_DEBUG_RX,
   8455 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8456 			device_xname(sc->sc_dev), rsstype, rsshash));
   8457 
   8458 		/*
   8459 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8460 		 * for us.  Associate the tag with the packet.
   8461 		 */
   8462 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8463 			continue;
   8464 
   8465 		/* Set up checksum info for this packet. */
   8466 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8467 		/*
    8468 		 * Update the receive pointer while still holding rxq_lock,
    8469 		 * consistent with the packet and byte counters.
   8470 		 */
   8471 		rxq->rxq_ptr = i;
   8472 		rxq->rxq_packets++;
   8473 		rxq->rxq_bytes += len;
   8474 		mutex_exit(rxq->rxq_lock);
   8475 
   8476 		/* Pass it on. */
   8477 		if_percpuq_enqueue(sc->sc_ipq, m);
   8478 
   8479 		mutex_enter(rxq->rxq_lock);
   8480 
   8481 		if (rxq->rxq_stopping)
   8482 			break;
   8483 	}
   8484 
   8485 	if (count != 0)
   8486 		rnd_add_uint32(&sc->rnd_source, count);
   8487 
   8488 	DPRINTF(WM_DEBUG_RX,
   8489 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8490 }
   8491 
   8492 /*
   8493  * wm_linkintr_gmii:
   8494  *
   8495  *	Helper; handle link interrupts for GMII.
   8496  */
   8497 static void
   8498 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8499 {
   8500 
   8501 	KASSERT(WM_CORE_LOCKED(sc));
   8502 
   8503 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8504 		__func__));
   8505 
   8506 	if (icr & ICR_LSC) {
   8507 		uint32_t reg;
   8508 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8509 
   8510 		if ((status & STATUS_LU) != 0) {
   8511 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8512 				device_xname(sc->sc_dev),
   8513 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8514 		} else {
   8515 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8516 				device_xname(sc->sc_dev)));
   8517 		}
   8518 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8519 			wm_gig_downshift_workaround_ich8lan(sc);
   8520 
   8521 		if ((sc->sc_type == WM_T_ICH8)
   8522 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8523 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8524 		}
   8525 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8526 			device_xname(sc->sc_dev)));
   8527 		mii_pollstat(&sc->sc_mii);
   8528 		if (sc->sc_type == WM_T_82543) {
   8529 			int miistatus, active;
   8530 
   8531 			/*
   8532 			 * With 82543, we need to force speed and
   8533 			 * duplex on the MAC equal to what the PHY
   8534 			 * speed and duplex configuration is.
   8535 			 */
   8536 			miistatus = sc->sc_mii.mii_media_status;
   8537 
   8538 			if (miistatus & IFM_ACTIVE) {
   8539 				active = sc->sc_mii.mii_media_active;
   8540 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8541 				switch (IFM_SUBTYPE(active)) {
   8542 				case IFM_10_T:
   8543 					sc->sc_ctrl |= CTRL_SPEED_10;
   8544 					break;
   8545 				case IFM_100_TX:
   8546 					sc->sc_ctrl |= CTRL_SPEED_100;
   8547 					break;
   8548 				case IFM_1000_T:
   8549 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8550 					break;
   8551 				default:
   8552 					/*
   8553 					 * fiber?
    8554 					 * Should not enter here.
   8555 					 */
   8556 					printf("unknown media (%x)\n", active);
   8557 					break;
   8558 				}
   8559 				if (active & IFM_FDX)
   8560 					sc->sc_ctrl |= CTRL_FD;
   8561 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8562 			}
   8563 		} else if (sc->sc_type == WM_T_PCH) {
   8564 			wm_k1_gig_workaround_hv(sc,
   8565 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8566 		}
   8567 
   8568 		if ((sc->sc_phytype == WMPHY_82578)
   8569 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8570 			== IFM_1000_T)) {
   8571 
   8572 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8573 				delay(200*1000); /* XXX too big */
   8574 
   8575 				/* Link stall fix for link up */
   8576 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8577 				    HV_MUX_DATA_CTRL,
   8578 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8579 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8580 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8581 				    HV_MUX_DATA_CTRL,
   8582 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8583 			}
   8584 		}
   8585 		/*
   8586 		 * I217 Packet Loss issue:
   8587 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8588 		 * on power up.
   8589 		 * Set the Beacon Duration for I217 to 8 usec
   8590 		 */
   8591 		if ((sc->sc_type == WM_T_PCH_LPT)
   8592 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8593 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8594 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8595 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8596 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8597 		}
   8598 
   8599 		/* XXX Work-around I218 hang issue */
   8600 		/* e1000_k1_workaround_lpt_lp() */
   8601 
   8602 		if ((sc->sc_type == WM_T_PCH_LPT)
   8603 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8604 			/*
   8605 			 * Set platform power management values for Latency
   8606 			 * Tolerance Reporting (LTR)
   8607 			 */
   8608 			wm_platform_pm_pch_lpt(sc,
   8609 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8610 				    != 0));
   8611 		}
   8612 
   8613 		/* FEXTNVM6 K1-off workaround */
   8614 		if (sc->sc_type == WM_T_PCH_SPT) {
   8615 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8616 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8617 			    & FEXTNVM6_K1_OFF_ENABLE)
   8618 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8619 			else
   8620 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8621 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8622 		}
   8623 	} else if (icr & ICR_RXSEQ) {
    8624 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8625 			device_xname(sc->sc_dev)));
   8626 	}
   8627 }
   8628 
   8629 /*
   8630  * wm_linkintr_tbi:
   8631  *
   8632  *	Helper; handle link interrupts for TBI mode.
   8633  */
   8634 static void
   8635 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8636 {
   8637 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8638 	uint32_t status;
   8639 
   8640 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8641 		__func__));
   8642 
   8643 	status = CSR_READ(sc, WMREG_STATUS);
   8644 	if (icr & ICR_LSC) {
   8645 		if (status & STATUS_LU) {
   8646 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8647 			    device_xname(sc->sc_dev),
   8648 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8649 			/*
   8650 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8651 			 * so we should update sc->sc_ctrl
   8652 			 */
   8653 
   8654 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8655 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8656 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8657 			if (status & STATUS_FD)
   8658 				sc->sc_tctl |=
   8659 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8660 			else
   8661 				sc->sc_tctl |=
   8662 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8663 			if (sc->sc_ctrl & CTRL_TFCE)
   8664 				sc->sc_fcrtl |= FCRTL_XONE;
   8665 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8666 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8667 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8668 				      sc->sc_fcrtl);
   8669 			sc->sc_tbi_linkup = 1;
   8670 			if_link_state_change(ifp, LINK_STATE_UP);
   8671 		} else {
   8672 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8673 			    device_xname(sc->sc_dev)));
   8674 			sc->sc_tbi_linkup = 0;
   8675 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8676 		}
   8677 		/* Update LED */
   8678 		wm_tbi_serdes_set_linkled(sc);
   8679 	} else if (icr & ICR_RXSEQ) {
   8680 		DPRINTF(WM_DEBUG_LINK,
   8681 		    ("%s: LINK: Receive sequence error\n",
   8682 		    device_xname(sc->sc_dev)));
   8683 	}
   8684 }
   8685 
   8686 /*
   8687  * wm_linkintr_serdes:
   8688  *
    8689  *	Helper; handle link interrupts for SERDES mode.
   8690  */
   8691 static void
   8692 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8693 {
   8694 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8695 	struct mii_data *mii = &sc->sc_mii;
   8696 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8697 	uint32_t pcs_adv, pcs_lpab, reg;
   8698 
   8699 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8700 		__func__));
   8701 
   8702 	if (icr & ICR_LSC) {
   8703 		/* Check PCS */
   8704 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8705 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8706 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8707 				device_xname(sc->sc_dev)));
   8708 			mii->mii_media_status |= IFM_ACTIVE;
   8709 			sc->sc_tbi_linkup = 1;
   8710 			if_link_state_change(ifp, LINK_STATE_UP);
   8711 		} else {
   8712 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8713 				device_xname(sc->sc_dev)));
    8714 			mii->mii_media_active |= IFM_NONE;
   8715 			sc->sc_tbi_linkup = 0;
   8716 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8717 			wm_tbi_serdes_set_linkled(sc);
   8718 			return;
   8719 		}
   8720 		mii->mii_media_active |= IFM_1000_SX;
   8721 		if ((reg & PCS_LSTS_FDX) != 0)
   8722 			mii->mii_media_active |= IFM_FDX;
   8723 		else
   8724 			mii->mii_media_active |= IFM_HDX;
   8725 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8726 			/* Check flow */
   8727 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8728 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8729 				DPRINTF(WM_DEBUG_LINK,
   8730 				    ("XXX LINKOK but not ACOMP\n"));
   8731 				return;
   8732 			}
   8733 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8734 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8735 			DPRINTF(WM_DEBUG_LINK,
   8736 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
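         			/*
         			 * Resolve flow control from the symmetric and
         			 * asymmetric pause bits advertised by the two
         			 * link partners (IEEE 802.3 annex 28B pause
         			 * resolution):
         			 *	both sym		-> TX and RX pause
         			 *	asym only here,
         			 *	sym+asym there	-> TX pause only
         			 *	sym+asym here,
         			 *	asym only there	-> RX pause only
         			 */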
   8737 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8738 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8739 				mii->mii_media_active |= IFM_FLOW
   8740 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8741 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8742 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8743 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8744 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8745 				mii->mii_media_active |= IFM_FLOW
   8746 				    | IFM_ETH_TXPAUSE;
   8747 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8748 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8749 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8750 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8751 				mii->mii_media_active |= IFM_FLOW
   8752 				    | IFM_ETH_RXPAUSE;
   8753 		}
   8754 		/* Update LED */
   8755 		wm_tbi_serdes_set_linkled(sc);
   8756 	} else {
   8757 		DPRINTF(WM_DEBUG_LINK,
   8758 		    ("%s: LINK: Receive sequence error\n",
   8759 		    device_xname(sc->sc_dev)));
   8760 	}
   8761 }
   8762 
   8763 /*
   8764  * wm_linkintr:
   8765  *
   8766  *	Helper; handle link interrupts.
   8767  */
   8768 static void
   8769 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8770 {
   8771 
   8772 	KASSERT(WM_CORE_LOCKED(sc));
   8773 
   8774 	if (sc->sc_flags & WM_F_HAS_MII)
   8775 		wm_linkintr_gmii(sc, icr);
   8776 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8777 	    && (sc->sc_type >= WM_T_82575))
   8778 		wm_linkintr_serdes(sc, icr);
   8779 	else
   8780 		wm_linkintr_tbi(sc, icr);
   8781 }
   8782 
   8783 /*
   8784  * wm_intr_legacy:
   8785  *
   8786  *	Interrupt service routine for INTx and MSI.
   8787  */
   8788 static int
   8789 wm_intr_legacy(void *arg)
   8790 {
   8791 	struct wm_softc *sc = arg;
   8792 	struct wm_queue *wmq = &sc->sc_queue[0];
   8793 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8794 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8795 	uint32_t icr, rndval = 0;
   8796 	int handled = 0;
   8797 
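         	/*
         	 * Reading ICR also acknowledges (clears) the asserted
         	 * interrupt causes, so keep looping until none of the causes
         	 * we care about remain set.
         	 */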
   8798 	while (1 /* CONSTCOND */) {
   8799 		icr = CSR_READ(sc, WMREG_ICR);
   8800 		if ((icr & sc->sc_icr) == 0)
   8801 			break;
   8802 		if (handled == 0) {
   8803 			DPRINTF(WM_DEBUG_TX,
    8804 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8805 		}
   8806 		if (rndval == 0)
   8807 			rndval = icr;
   8808 
   8809 		mutex_enter(rxq->rxq_lock);
   8810 
   8811 		if (rxq->rxq_stopping) {
   8812 			mutex_exit(rxq->rxq_lock);
   8813 			break;
   8814 		}
   8815 
   8816 		handled = 1;
   8817 
   8818 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8819 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8820 			DPRINTF(WM_DEBUG_RX,
   8821 			    ("%s: RX: got Rx intr 0x%08x\n",
   8822 			    device_xname(sc->sc_dev),
   8823 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8824 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8825 		}
   8826 #endif
   8827 		/*
   8828 		 * wm_rxeof() does *not* call upper layer functions directly,
    8829 		 * as if_percpuq_enqueue() just calls softint_schedule().
   8830 		 * So, we can call wm_rxeof() in interrupt context.
   8831 		 */
   8832 		wm_rxeof(rxq, UINT_MAX);
   8833 
   8834 		mutex_exit(rxq->rxq_lock);
   8835 		mutex_enter(txq->txq_lock);
   8836 
   8837 		if (txq->txq_stopping) {
   8838 			mutex_exit(txq->txq_lock);
   8839 			break;
   8840 		}
   8841 
   8842 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8843 		if (icr & ICR_TXDW) {
   8844 			DPRINTF(WM_DEBUG_TX,
   8845 			    ("%s: TX: got TXDW interrupt\n",
   8846 			    device_xname(sc->sc_dev)));
   8847 			WM_Q_EVCNT_INCR(txq, txdw);
   8848 		}
   8849 #endif
   8850 		wm_txeof(sc, txq);
   8851 
   8852 		mutex_exit(txq->txq_lock);
   8853 		WM_CORE_LOCK(sc);
   8854 
   8855 		if (sc->sc_core_stopping) {
   8856 			WM_CORE_UNLOCK(sc);
   8857 			break;
   8858 		}
   8859 
   8860 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8861 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8862 			wm_linkintr(sc, icr);
   8863 		}
   8864 
   8865 		WM_CORE_UNLOCK(sc);
   8866 
   8867 		if (icr & ICR_RXO) {
   8868 #if defined(WM_DEBUG)
   8869 			log(LOG_WARNING, "%s: Receive overrun\n",
   8870 			    device_xname(sc->sc_dev));
   8871 #endif /* defined(WM_DEBUG) */
   8872 		}
   8873 	}
   8874 
   8875 	rnd_add_uint32(&sc->rnd_source, rndval);
   8876 
   8877 	if (handled) {
   8878 		/* Try to get more packets going. */
   8879 		softint_schedule(wmq->wmq_si);
   8880 	}
   8881 
   8882 	return handled;
   8883 }
   8884 
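         /*
          * wm_txrxintr_disable / wm_txrxintr_enable:
          *
          *	Mask and unmask the Tx/Rx interrupts of one queue pair.
          *	82574 uses per-queue bits in IMC/IMS, 82575 uses the EITR
          *	queue bits in EIMC/EIMS, and later MSI-X devices use one
          *	EIMC/EIMS bit per interrupt vector.
          */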
   8885 static inline void
   8886 wm_txrxintr_disable(struct wm_queue *wmq)
   8887 {
   8888 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8889 
   8890 	if (sc->sc_type == WM_T_82574)
   8891 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8892 	else if (sc->sc_type == WM_T_82575)
   8893 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8894 	else
   8895 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8896 }
   8897 
   8898 static inline void
   8899 wm_txrxintr_enable(struct wm_queue *wmq)
   8900 {
   8901 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8902 
   8903 	wm_itrs_calculate(sc, wmq);
   8904 
   8905 	if (sc->sc_type == WM_T_82574)
   8906 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8907 	else if (sc->sc_type == WM_T_82575)
   8908 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8909 	else
   8910 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8911 }
   8912 
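         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for Tx/Rx queues for MSI-X.
          */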
   8913 static int
   8914 wm_txrxintr_msix(void *arg)
   8915 {
   8916 	struct wm_queue *wmq = arg;
   8917 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8918 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8919 	struct wm_softc *sc = txq->txq_sc;
   8920 	u_int limit = sc->sc_rx_intr_process_limit;
   8921 
   8922 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8923 
   8924 	DPRINTF(WM_DEBUG_TX,
   8925 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8926 
   8927 	wm_txrxintr_disable(wmq);
   8928 
   8929 	mutex_enter(txq->txq_lock);
   8930 
   8931 	if (txq->txq_stopping) {
   8932 		mutex_exit(txq->txq_lock);
   8933 		return 0;
   8934 	}
   8935 
   8936 	WM_Q_EVCNT_INCR(txq, txdw);
   8937 	wm_txeof(sc, txq);
   8938 	/* wm_deferred start() is done in wm_handle_queue(). */
   8939 	mutex_exit(txq->txq_lock);
   8940 
   8941 	DPRINTF(WM_DEBUG_RX,
   8942 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8943 	mutex_enter(rxq->rxq_lock);
   8944 
   8945 	if (rxq->rxq_stopping) {
   8946 		mutex_exit(rxq->rxq_lock);
   8947 		return 0;
   8948 	}
   8949 
   8950 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8951 	wm_rxeof(rxq, limit);
   8952 	mutex_exit(rxq->rxq_lock);
   8953 
   8954 	wm_itrs_writereg(sc, wmq);
   8955 
   8956 	softint_schedule(wmq->wmq_si);
   8957 
   8958 	return 1;
   8959 }
   8960 
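         /*
          * wm_handle_queue:
          *
          *	Softint handler for one queue pair: complete transmissions,
          *	restart the transmit path, receive up to the process limit
          *	and then re-enable the queue interrupt.
          */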
   8961 static void
   8962 wm_handle_queue(void *arg)
   8963 {
   8964 	struct wm_queue *wmq = arg;
   8965 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8966 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8967 	struct wm_softc *sc = txq->txq_sc;
   8968 	u_int limit = sc->sc_rx_process_limit;
   8969 
   8970 	mutex_enter(txq->txq_lock);
   8971 	if (txq->txq_stopping) {
   8972 		mutex_exit(txq->txq_lock);
   8973 		return;
   8974 	}
   8975 	wm_txeof(sc, txq);
   8976 	wm_deferred_start_locked(txq);
   8977 	mutex_exit(txq->txq_lock);
   8978 
   8979 	mutex_enter(rxq->rxq_lock);
   8980 	if (rxq->rxq_stopping) {
   8981 		mutex_exit(rxq->rxq_lock);
   8982 		return;
   8983 	}
   8984 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8985 	wm_rxeof(rxq, limit);
   8986 	mutex_exit(rxq->rxq_lock);
   8987 
   8988 	wm_txrxintr_enable(wmq);
   8989 }
   8990 
   8991 /*
   8992  * wm_linkintr_msix:
   8993  *
   8994  *	Interrupt service routine for link status change for MSI-X.
   8995  */
   8996 static int
   8997 wm_linkintr_msix(void *arg)
   8998 {
   8999 	struct wm_softc *sc = arg;
   9000 	uint32_t reg;
   9001 
   9002 	DPRINTF(WM_DEBUG_LINK,
   9003 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9004 
   9005 	reg = CSR_READ(sc, WMREG_ICR);
   9006 	WM_CORE_LOCK(sc);
   9007 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   9008 		goto out;
   9009 
   9010 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9011 	wm_linkintr(sc, ICR_LSC);
   9012 
   9013 out:
   9014 	WM_CORE_UNLOCK(sc);
   9015 
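         	/* Re-enable the link interrupt; the register varies by MAC. */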
   9016 	if (sc->sc_type == WM_T_82574)
   9017 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9018 	else if (sc->sc_type == WM_T_82575)
   9019 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9020 	else
   9021 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9022 
   9023 	return 1;
   9024 }
   9025 
   9026 /*
   9027  * Media related.
   9028  * GMII, SGMII, TBI (and SERDES)
   9029  */
   9030 
   9031 /* Common */
   9032 
   9033 /*
   9034  * wm_tbi_serdes_set_linkled:
   9035  *
   9036  *	Update the link LED on TBI and SERDES devices.
   9037  */
   9038 static void
   9039 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9040 {
   9041 
   9042 	if (sc->sc_tbi_linkup)
   9043 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9044 	else
   9045 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9046 
   9047 	/* 82540 or newer devices are active low */
   9048 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9049 
   9050 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9051 }
   9052 
   9053 /* GMII related */
   9054 
   9055 /*
   9056  * wm_gmii_reset:
   9057  *
   9058  *	Reset the PHY.
   9059  */
   9060 static void
   9061 wm_gmii_reset(struct wm_softc *sc)
   9062 {
   9063 	uint32_t reg;
   9064 	int rv;
   9065 
   9066 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9067 		device_xname(sc->sc_dev), __func__));
   9068 
   9069 	rv = sc->phy.acquire(sc);
   9070 	if (rv != 0) {
   9071 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9072 		    __func__);
   9073 		return;
   9074 	}
   9075 
   9076 	switch (sc->sc_type) {
   9077 	case WM_T_82542_2_0:
   9078 	case WM_T_82542_2_1:
   9079 		/* null */
   9080 		break;
   9081 	case WM_T_82543:
   9082 		/*
   9083 		 * With 82543, we need to force speed and duplex on the MAC
   9084 		 * equal to what the PHY speed and duplex configuration is.
   9085 		 * In addition, we need to perform a hardware reset on the PHY
   9086 		 * to take it out of reset.
   9087 		 */
   9088 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9089 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9090 
   9091 		/* The PHY reset pin is active-low. */
   9092 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9093 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9094 		    CTRL_EXT_SWDPIN(4));
   9095 		reg |= CTRL_EXT_SWDPIO(4);
   9096 
   9097 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9098 		CSR_WRITE_FLUSH(sc);
   9099 		delay(10*1000);
   9100 
   9101 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9102 		CSR_WRITE_FLUSH(sc);
   9103 		delay(150);
   9104 #if 0
   9105 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9106 #endif
   9107 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9108 		break;
   9109 	case WM_T_82544:	/* reset 10000us */
   9110 	case WM_T_82540:
   9111 	case WM_T_82545:
   9112 	case WM_T_82545_3:
   9113 	case WM_T_82546:
   9114 	case WM_T_82546_3:
   9115 	case WM_T_82541:
   9116 	case WM_T_82541_2:
   9117 	case WM_T_82547:
   9118 	case WM_T_82547_2:
   9119 	case WM_T_82571:	/* reset 100us */
   9120 	case WM_T_82572:
   9121 	case WM_T_82573:
   9122 	case WM_T_82574:
   9123 	case WM_T_82575:
   9124 	case WM_T_82576:
   9125 	case WM_T_82580:
   9126 	case WM_T_I350:
   9127 	case WM_T_I354:
   9128 	case WM_T_I210:
   9129 	case WM_T_I211:
   9130 	case WM_T_82583:
   9131 	case WM_T_80003:
   9132 		/* generic reset */
   9133 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9134 		CSR_WRITE_FLUSH(sc);
   9135 		delay(20000);
   9136 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9137 		CSR_WRITE_FLUSH(sc);
   9138 		delay(20000);
   9139 
   9140 		if ((sc->sc_type == WM_T_82541)
   9141 		    || (sc->sc_type == WM_T_82541_2)
   9142 		    || (sc->sc_type == WM_T_82547)
   9143 		    || (sc->sc_type == WM_T_82547_2)) {
    9144 			/* Workarounds for IGP are done in igp_reset() */
   9145 			/* XXX add code to set LED after phy reset */
   9146 		}
   9147 		break;
   9148 	case WM_T_ICH8:
   9149 	case WM_T_ICH9:
   9150 	case WM_T_ICH10:
   9151 	case WM_T_PCH:
   9152 	case WM_T_PCH2:
   9153 	case WM_T_PCH_LPT:
   9154 	case WM_T_PCH_SPT:
   9155 		/* generic reset */
   9156 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9157 		CSR_WRITE_FLUSH(sc);
   9158 		delay(100);
   9159 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9160 		CSR_WRITE_FLUSH(sc);
   9161 		delay(150);
   9162 		break;
   9163 	default:
   9164 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9165 		    __func__);
   9166 		break;
   9167 	}
   9168 
   9169 	sc->phy.release(sc);
   9170 
   9171 	/* get_cfg_done */
   9172 	wm_get_cfg_done(sc);
   9173 
   9174 	/* extra setup */
   9175 	switch (sc->sc_type) {
   9176 	case WM_T_82542_2_0:
   9177 	case WM_T_82542_2_1:
   9178 	case WM_T_82543:
   9179 	case WM_T_82544:
   9180 	case WM_T_82540:
   9181 	case WM_T_82545:
   9182 	case WM_T_82545_3:
   9183 	case WM_T_82546:
   9184 	case WM_T_82546_3:
   9185 	case WM_T_82541_2:
   9186 	case WM_T_82547_2:
   9187 	case WM_T_82571:
   9188 	case WM_T_82572:
   9189 	case WM_T_82573:
   9190 	case WM_T_82574:
   9191 	case WM_T_82583:
   9192 	case WM_T_82575:
   9193 	case WM_T_82576:
   9194 	case WM_T_82580:
   9195 	case WM_T_I350:
   9196 	case WM_T_I354:
   9197 	case WM_T_I210:
   9198 	case WM_T_I211:
   9199 	case WM_T_80003:
   9200 		/* null */
   9201 		break;
   9202 	case WM_T_82541:
   9203 	case WM_T_82547:
    9204 		/* XXX Actively configure the LED after PHY reset */
   9205 		break;
   9206 	case WM_T_ICH8:
   9207 	case WM_T_ICH9:
   9208 	case WM_T_ICH10:
   9209 	case WM_T_PCH:
   9210 	case WM_T_PCH2:
   9211 	case WM_T_PCH_LPT:
   9212 	case WM_T_PCH_SPT:
   9213 		wm_phy_post_reset(sc);
   9214 		break;
   9215 	default:
   9216 		panic("%s: unknown type\n", __func__);
   9217 		break;
   9218 	}
   9219 }
   9220 
    9221 /*
    9222  * Set up sc_phytype and mii_{read|write}reg.
    9223  *
    9224  *  To identify the PHY type, the correct read/write functions must be
    9225  * selected.  To select them, the PCI ID or MAC type is required, since
    9226  * the PHY registers cannot be accessed yet.
    9227  *
    9228  *  On the first call of this function, the PHY ID is not known yet, so
    9229  * check the PCI ID or MAC type.  The list of PCI IDs may not be perfect,
    9230  * so the result might be incorrect.
    9231  *
    9232  *  On the second call, the PHY OUI and model are used to identify the
    9233  * PHY type.  This might still not be perfect due to missing entries in
    9234  * the comparison, but it should be better than the first call.
    9235  *
    9236  *  If the newly detected result differs from the previous assumption,
    9237  * a diagnostic message is printed.
    9238  */
   9239 static void
   9240 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9241     uint16_t phy_model)
   9242 {
   9243 	device_t dev = sc->sc_dev;
   9244 	struct mii_data *mii = &sc->sc_mii;
   9245 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9246 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9247 	mii_readreg_t new_readreg;
   9248 	mii_writereg_t new_writereg;
   9249 
   9250 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9251 		device_xname(sc->sc_dev), __func__));
   9252 
   9253 	if (mii->mii_readreg == NULL) {
   9254 		/*
   9255 		 *  This is the first call of this function. For ICH and PCH
   9256 		 * variants, it's difficult to determine the PHY access method
   9257 		 * by sc_type, so use the PCI product ID for some devices.
   9258 		 */
   9259 
   9260 		switch (sc->sc_pcidevid) {
   9261 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9262 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9263 			/* 82577 */
   9264 			new_phytype = WMPHY_82577;
   9265 			break;
   9266 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9267 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9268 			/* 82578 */
   9269 			new_phytype = WMPHY_82578;
   9270 			break;
   9271 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9272 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9273 			/* 82579 */
   9274 			new_phytype = WMPHY_82579;
   9275 			break;
   9276 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9277 		case PCI_PRODUCT_INTEL_82801I_BM:
   9278 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9279 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9280 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9281 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9282 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9283 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9284 			/* ICH8, 9, 10 with 82567 */
   9285 			new_phytype = WMPHY_BM;
   9286 			break;
   9287 		default:
   9288 			break;
   9289 		}
   9290 	} else {
   9291 		/* It's not the first call. Use PHY OUI and model */
   9292 		switch (phy_oui) {
   9293 		case MII_OUI_ATHEROS: /* XXX ??? */
   9294 			switch (phy_model) {
   9295 			case 0x0004: /* XXX */
   9296 				new_phytype = WMPHY_82578;
   9297 				break;
   9298 			default:
   9299 				break;
   9300 			}
   9301 			break;
   9302 		case MII_OUI_xxMARVELL:
   9303 			switch (phy_model) {
   9304 			case MII_MODEL_xxMARVELL_I210:
   9305 				new_phytype = WMPHY_I210;
   9306 				break;
   9307 			case MII_MODEL_xxMARVELL_E1011:
   9308 			case MII_MODEL_xxMARVELL_E1000_3:
   9309 			case MII_MODEL_xxMARVELL_E1000_5:
   9310 			case MII_MODEL_xxMARVELL_E1112:
   9311 				new_phytype = WMPHY_M88;
   9312 				break;
   9313 			case MII_MODEL_xxMARVELL_E1149:
   9314 				new_phytype = WMPHY_BM;
   9315 				break;
   9316 			case MII_MODEL_xxMARVELL_E1111:
   9317 			case MII_MODEL_xxMARVELL_I347:
   9318 			case MII_MODEL_xxMARVELL_E1512:
   9319 			case MII_MODEL_xxMARVELL_E1340M:
   9320 			case MII_MODEL_xxMARVELL_E1543:
   9321 				new_phytype = WMPHY_M88;
   9322 				break;
   9323 			case MII_MODEL_xxMARVELL_I82563:
   9324 				new_phytype = WMPHY_GG82563;
   9325 				break;
   9326 			default:
   9327 				break;
   9328 			}
   9329 			break;
   9330 		case MII_OUI_INTEL:
   9331 			switch (phy_model) {
   9332 			case MII_MODEL_INTEL_I82577:
   9333 				new_phytype = WMPHY_82577;
   9334 				break;
   9335 			case MII_MODEL_INTEL_I82579:
   9336 				new_phytype = WMPHY_82579;
   9337 				break;
   9338 			case MII_MODEL_INTEL_I217:
   9339 				new_phytype = WMPHY_I217;
   9340 				break;
   9341 			case MII_MODEL_INTEL_I82580:
   9342 			case MII_MODEL_INTEL_I350:
   9343 				new_phytype = WMPHY_82580;
   9344 				break;
   9345 			default:
   9346 				break;
   9347 			}
   9348 			break;
   9349 		case MII_OUI_yyINTEL:
   9350 			switch (phy_model) {
   9351 			case MII_MODEL_yyINTEL_I82562G:
   9352 			case MII_MODEL_yyINTEL_I82562EM:
   9353 			case MII_MODEL_yyINTEL_I82562ET:
   9354 				new_phytype = WMPHY_IFE;
   9355 				break;
   9356 			case MII_MODEL_yyINTEL_IGP01E1000:
   9357 				new_phytype = WMPHY_IGP;
   9358 				break;
   9359 			case MII_MODEL_yyINTEL_I82566:
   9360 				new_phytype = WMPHY_IGP_3;
   9361 				break;
   9362 			default:
   9363 				break;
   9364 			}
   9365 			break;
   9366 		default:
   9367 			break;
   9368 		}
   9369 		if (new_phytype == WMPHY_UNKNOWN)
   9370 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9371 			    __func__);
   9372 
   9373 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9374 		    && (sc->sc_phytype != new_phytype)) {
    9375 			aprint_error_dev(dev, "Previously assumed PHY type "
    9376 			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
   9377 			    sc->sc_phytype, new_phytype);
   9378 		}
   9379 	}
   9380 
   9381 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9382 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9383 		/* SGMII */
   9384 		new_readreg = wm_sgmii_readreg;
   9385 		new_writereg = wm_sgmii_writereg;
    9386 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9387 		/* BM2 (phyaddr == 1) */
   9388 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9389 		    && (new_phytype != WMPHY_BM)
   9390 		    && (new_phytype != WMPHY_UNKNOWN))
   9391 			doubt_phytype = new_phytype;
   9392 		new_phytype = WMPHY_BM;
   9393 		new_readreg = wm_gmii_bm_readreg;
   9394 		new_writereg = wm_gmii_bm_writereg;
   9395 	} else if (sc->sc_type >= WM_T_PCH) {
   9396 		/* All PCH* use _hv_ */
   9397 		new_readreg = wm_gmii_hv_readreg;
   9398 		new_writereg = wm_gmii_hv_writereg;
   9399 	} else if (sc->sc_type >= WM_T_ICH8) {
   9400 		/* non-82567 ICH8, 9 and 10 */
   9401 		new_readreg = wm_gmii_i82544_readreg;
   9402 		new_writereg = wm_gmii_i82544_writereg;
   9403 	} else if (sc->sc_type >= WM_T_80003) {
   9404 		/* 80003 */
   9405 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9406 		    && (new_phytype != WMPHY_GG82563)
   9407 		    && (new_phytype != WMPHY_UNKNOWN))
   9408 			doubt_phytype = new_phytype;
   9409 		new_phytype = WMPHY_GG82563;
   9410 		new_readreg = wm_gmii_i80003_readreg;
   9411 		new_writereg = wm_gmii_i80003_writereg;
   9412 	} else if (sc->sc_type >= WM_T_I210) {
   9413 		/* I210 and I211 */
   9414 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9415 		    && (new_phytype != WMPHY_I210)
   9416 		    && (new_phytype != WMPHY_UNKNOWN))
   9417 			doubt_phytype = new_phytype;
   9418 		new_phytype = WMPHY_I210;
   9419 		new_readreg = wm_gmii_gs40g_readreg;
   9420 		new_writereg = wm_gmii_gs40g_writereg;
   9421 	} else if (sc->sc_type >= WM_T_82580) {
   9422 		/* 82580, I350 and I354 */
   9423 		new_readreg = wm_gmii_82580_readreg;
   9424 		new_writereg = wm_gmii_82580_writereg;
   9425 	} else if (sc->sc_type >= WM_T_82544) {
    9426 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9427 		new_readreg = wm_gmii_i82544_readreg;
   9428 		new_writereg = wm_gmii_i82544_writereg;
   9429 	} else {
   9430 		new_readreg = wm_gmii_i82543_readreg;
   9431 		new_writereg = wm_gmii_i82543_writereg;
   9432 	}
   9433 
   9434 	if (new_phytype == WMPHY_BM) {
   9435 		/* All BM use _bm_ */
   9436 		new_readreg = wm_gmii_bm_readreg;
   9437 		new_writereg = wm_gmii_bm_writereg;
   9438 	}
   9439 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9440 		/* All PCH* use _hv_ */
   9441 		new_readreg = wm_gmii_hv_readreg;
   9442 		new_writereg = wm_gmii_hv_writereg;
   9443 	}
   9444 
   9445 	/* Diag output */
   9446 	if (doubt_phytype != WMPHY_UNKNOWN)
   9447 		aprint_error_dev(dev, "Assumed new PHY type was "
   9448 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9449 		    new_phytype);
   9450 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9451 	    && (sc->sc_phytype != new_phytype))
    9452 		aprint_error_dev(dev, "Previously assumed PHY type "
    9453 		    "(%u) was incorrect. New PHY type = %u\n",
   9454 		    sc->sc_phytype, new_phytype);
   9455 
   9456 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9457 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9458 
   9459 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9460 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9461 		    "function was incorrect.\n");
   9462 
   9463 	/* Update now */
   9464 	sc->sc_phytype = new_phytype;
   9465 	mii->mii_readreg = new_readreg;
   9466 	mii->mii_writereg = new_writereg;
   9467 }
   9468 
   9469 /*
   9470  * wm_get_phy_id_82575:
   9471  *
   9472  * Return PHY ID. Return -1 if it failed.
   9473  */
   9474 static int
   9475 wm_get_phy_id_82575(struct wm_softc *sc)
   9476 {
   9477 	uint32_t reg;
   9478 	int phyid = -1;
   9479 
   9480 	/* XXX */
   9481 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9482 		return -1;
   9483 
   9484 	if (wm_sgmii_uses_mdio(sc)) {
   9485 		switch (sc->sc_type) {
   9486 		case WM_T_82575:
   9487 		case WM_T_82576:
   9488 			reg = CSR_READ(sc, WMREG_MDIC);
   9489 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9490 			break;
   9491 		case WM_T_82580:
   9492 		case WM_T_I350:
   9493 		case WM_T_I354:
   9494 		case WM_T_I210:
   9495 		case WM_T_I211:
   9496 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9497 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9498 			break;
   9499 		default:
   9500 			return -1;
   9501 		}
   9502 	}
   9503 
   9504 	return phyid;
   9505 }
   9506 
   9507 
   9508 /*
   9509  * wm_gmii_mediainit:
   9510  *
   9511  *	Initialize media for use on 1000BASE-T devices.
   9512  */
   9513 static void
   9514 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9515 {
   9516 	device_t dev = sc->sc_dev;
   9517 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9518 	struct mii_data *mii = &sc->sc_mii;
   9519 	uint32_t reg;
   9520 
   9521 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9522 		device_xname(sc->sc_dev), __func__));
   9523 
   9524 	/* We have GMII. */
   9525 	sc->sc_flags |= WM_F_HAS_MII;
   9526 
   9527 	if (sc->sc_type == WM_T_80003)
    9528 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9529 	else
   9530 		sc->sc_tipg = TIPG_1000T_DFLT;
   9531 
   9532 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9533 	if ((sc->sc_type == WM_T_82580)
   9534 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9535 	    || (sc->sc_type == WM_T_I211)) {
   9536 		reg = CSR_READ(sc, WMREG_PHPM);
   9537 		reg &= ~PHPM_GO_LINK_D;
   9538 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9539 	}
   9540 
   9541 	/*
   9542 	 * Let the chip set speed/duplex on its own based on
   9543 	 * signals from the PHY.
   9544 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9545 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9546 	 */
   9547 	sc->sc_ctrl |= CTRL_SLU;
   9548 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9549 
   9550 	/* Initialize our media structures and probe the GMII. */
   9551 	mii->mii_ifp = ifp;
   9552 
   9553 	mii->mii_statchg = wm_gmii_statchg;
   9554 
   9555 	/* get PHY control from SMBus to PCIe */
   9556 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9557 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9558 		wm_smbustopci(sc);
   9559 
   9560 	wm_gmii_reset(sc);
   9561 
   9562 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9563 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9564 	    wm_gmii_mediastatus);
   9565 
   9566 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9567 	    || (sc->sc_type == WM_T_82580)
   9568 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9569 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9570 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9571 			/* Attach only one port */
   9572 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9573 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9574 		} else {
   9575 			int i, id;
   9576 			uint32_t ctrl_ext;
   9577 
   9578 			id = wm_get_phy_id_82575(sc);
   9579 			if (id != -1) {
   9580 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9581 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9582 			}
   9583 			if ((id == -1)
   9584 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   9586 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9587 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9588 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9589 				CSR_WRITE_FLUSH(sc);
   9590 				delay(300*1000); /* XXX too long */
   9591 
				/* Try PHY addresses 1 through 7 */
   9593 				for (i = 1; i < 8; i++)
   9594 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9595 					    0xffffffff, i, MII_OFFSET_ANY,
   9596 					    MIIF_DOPAUSE);
   9597 
				/* Restore the previous SFP cage power state */
   9599 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9600 			}
   9601 		}
   9602 	} else {
   9603 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9604 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9605 	}
   9606 
   9607 	/*
	 * If the MAC is PCH2 or PCH_LPT and it failed to detect the MII PHY,
	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   9610 	 */
   9611 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9612 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9613 		wm_set_mdio_slow_mode_hv(sc);
   9614 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9615 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9616 	}
   9617 
   9618 	/*
   9619 	 * (For ICH8 variants)
	 * If PHY detection failed, use the BM access functions and retry.
   9621 	 */
   9622 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9623 		/* if failed, retry with *_bm_* */
   9624 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9625 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9626 		    sc->sc_phytype);
   9627 		sc->sc_phytype = WMPHY_BM;
   9628 		mii->mii_readreg = wm_gmii_bm_readreg;
   9629 		mii->mii_writereg = wm_gmii_bm_writereg;
   9630 
   9631 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9632 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9633 	}
   9634 
   9635 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9637 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9638 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9639 		sc->sc_phytype = WMPHY_NONE;
   9640 	} else {
   9641 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9642 
   9643 		/*
		 * PHY found! Check the PHY type again with a second call to
		 * wm_gmii_setup_phytype().
   9646 		 */
   9647 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9648 		    child->mii_mpd_model);
   9649 
   9650 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9651 	}
   9652 }
   9653 
   9654 /*
   9655  * wm_gmii_mediachange:	[ifmedia interface function]
   9656  *
   9657  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9658  */
   9659 static int
   9660 wm_gmii_mediachange(struct ifnet *ifp)
   9661 {
   9662 	struct wm_softc *sc = ifp->if_softc;
   9663 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9664 	int rc;
   9665 
   9666 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9667 		device_xname(sc->sc_dev), __func__));
   9668 	if ((ifp->if_flags & IFF_UP) == 0)
   9669 		return 0;
   9670 
   9671 	/* Disable D0 LPLU. */
   9672 	wm_lplu_d0_disable(sc);
   9673 
   9674 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9675 	sc->sc_ctrl |= CTRL_SLU;
   9676 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9677 	    || (sc->sc_type > WM_T_82543)) {
   9678 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9679 	} else {
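		/*
		 * A fixed media type was selected on an older chip: disable
		 * auto speed detection and force the speed and duplex from
		 * the selected media.
		 */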
   9680 		sc->sc_ctrl &= ~CTRL_ASDE;
   9681 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9682 		if (ife->ifm_media & IFM_FDX)
   9683 			sc->sc_ctrl |= CTRL_FD;
   9684 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9685 		case IFM_10_T:
   9686 			sc->sc_ctrl |= CTRL_SPEED_10;
   9687 			break;
   9688 		case IFM_100_TX:
   9689 			sc->sc_ctrl |= CTRL_SPEED_100;
   9690 			break;
   9691 		case IFM_1000_T:
   9692 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9693 			break;
   9694 		default:
   9695 			panic("wm_gmii_mediachange: bad media 0x%x",
   9696 			    ife->ifm_media);
   9697 		}
   9698 	}
   9699 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9700 	CSR_WRITE_FLUSH(sc);
   9701 	if (sc->sc_type <= WM_T_82543)
   9702 		wm_gmii_reset(sc);
   9703 
   9704 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9705 		return 0;
   9706 	return rc;
   9707 }
   9708 
   9709 /*
   9710  * wm_gmii_mediastatus:	[ifmedia interface function]
   9711  *
   9712  *	Get the current interface media status on a 1000BASE-T device.
   9713  */
   9714 static void
   9715 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9716 {
   9717 	struct wm_softc *sc = ifp->if_softc;
   9718 
   9719 	ether_mediastatus(ifp, ifmr);
   9720 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9721 	    | sc->sc_flowflags;
   9722 }
   9723 
   9724 #define	MDI_IO		CTRL_SWDPIN(2)
   9725 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9726 #define	MDI_CLK		CTRL_SWDPIN(3)
   9727 
   9728 static void
   9729 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9730 {
   9731 	uint32_t i, v;
   9732 
   9733 	v = CSR_READ(sc, WMREG_CTRL);
   9734 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9735 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9736 
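	/*
	 * Shift the bits out MSB-first on MDI_IO, pulsing MDI_CLK
	 * low-high-low for each bit; the PHY samples MDIO on the rising
	 * edge of the clock.
	 */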
   9737 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9738 		if (data & i)
   9739 			v |= MDI_IO;
   9740 		else
   9741 			v &= ~MDI_IO;
   9742 		CSR_WRITE(sc, WMREG_CTRL, v);
   9743 		CSR_WRITE_FLUSH(sc);
   9744 		delay(10);
   9745 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9746 		CSR_WRITE_FLUSH(sc);
   9747 		delay(10);
   9748 		CSR_WRITE(sc, WMREG_CTRL, v);
   9749 		CSR_WRITE_FLUSH(sc);
   9750 		delay(10);
   9751 	}
   9752 }
   9753 
   9754 static uint32_t
   9755 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9756 {
   9757 	uint32_t v, i, data = 0;
   9758 
   9759 	v = CSR_READ(sc, WMREG_CTRL);
   9760 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9761 	v |= CTRL_SWDPIO(3);
   9762 
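	/*
	 * Clock out the turnaround cycle with MDI_IO set to input, then
	 * sample 16 data bits MSB-first while MDI_CLK is high.
	 */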
   9763 	CSR_WRITE(sc, WMREG_CTRL, v);
   9764 	CSR_WRITE_FLUSH(sc);
   9765 	delay(10);
   9766 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9767 	CSR_WRITE_FLUSH(sc);
   9768 	delay(10);
   9769 	CSR_WRITE(sc, WMREG_CTRL, v);
   9770 	CSR_WRITE_FLUSH(sc);
   9771 	delay(10);
   9772 
   9773 	for (i = 0; i < 16; i++) {
   9774 		data <<= 1;
   9775 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9776 		CSR_WRITE_FLUSH(sc);
   9777 		delay(10);
   9778 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9779 			data |= 1;
   9780 		CSR_WRITE(sc, WMREG_CTRL, v);
   9781 		CSR_WRITE_FLUSH(sc);
   9782 		delay(10);
   9783 	}
   9784 
   9785 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9786 	CSR_WRITE_FLUSH(sc);
   9787 	delay(10);
   9788 	CSR_WRITE(sc, WMREG_CTRL, v);
   9789 	CSR_WRITE_FLUSH(sc);
   9790 	delay(10);
   9791 
   9792 	return data;
   9793 }
   9794 
   9795 #undef MDI_IO
   9796 #undef MDI_DIR
   9797 #undef MDI_CLK
   9798 
   9799 /*
   9800  * wm_gmii_i82543_readreg:	[mii interface function]
   9801  *
   9802  *	Read a PHY register on the GMII (i82543 version).
   9803  */
   9804 static int
   9805 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9806 {
   9807 	struct wm_softc *sc = device_private(dev);
   9808 	int rv;
   9809 
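	/*
	 * Send a 32-bit preamble, then the 14-bit read frame: start
	 * delimiter, read opcode, PHY address and register number.
	 */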
   9810 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9811 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9812 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9813 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9814 
   9815 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9816 	    device_xname(dev), phy, reg, rv));
   9817 
   9818 	return rv;
   9819 }
   9820 
   9821 /*
   9822  * wm_gmii_i82543_writereg:	[mii interface function]
   9823  *
   9824  *	Write a PHY register on the GMII (i82543 version).
   9825  */
   9826 static void
   9827 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9828 {
   9829 	struct wm_softc *sc = device_private(dev);
   9830 
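	/*
	 * Send a 32-bit preamble, then the 32-bit write frame: start
	 * delimiter, write opcode, PHY address, register number,
	 * turnaround (ACK) and 16 data bits.
	 */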
   9831 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9832 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9833 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9834 	    (MII_COMMAND_START << 30), 32);
   9835 }
   9836 
   9837 /*
   9838  * wm_gmii_mdic_readreg:	[mii interface function]
   9839  *
   9840  *	Read a PHY register on the GMII.
   9841  */
   9842 static int
   9843 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9844 {
   9845 	struct wm_softc *sc = device_private(dev);
   9846 	uint32_t mdic = 0;
   9847 	int i, rv;
   9848 
   9849 	if (reg > MII_ADDRMASK) {
   9850 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9851 		    __func__, sc->sc_phytype, reg);
   9852 		reg &= MII_ADDRMASK;
   9853 	}
   9854 
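	/* Kick off the read, then poll the ready bit (50us per try). */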
   9855 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9856 	    MDIC_REGADD(reg));
   9857 
   9858 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9859 		mdic = CSR_READ(sc, WMREG_MDIC);
   9860 		if (mdic & MDIC_READY)
   9861 			break;
   9862 		delay(50);
   9863 	}
   9864 
   9865 	if ((mdic & MDIC_READY) == 0) {
   9866 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9867 		    device_xname(dev), phy, reg);
   9868 		rv = 0;
   9869 	} else if (mdic & MDIC_E) {
   9870 #if 0 /* This is normal if no PHY is present. */
   9871 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9872 		    device_xname(dev), phy, reg);
   9873 #endif
   9874 		rv = 0;
   9875 	} else {
   9876 		rv = MDIC_DATA(mdic);
   9877 		if (rv == 0xffff)
   9878 			rv = 0;
   9879 	}
   9880 
   9881 	return rv;
   9882 }
   9883 
   9884 /*
   9885  * wm_gmii_mdic_writereg:	[mii interface function]
   9886  *
   9887  *	Write a PHY register on the GMII.
   9888  */
   9889 static void
   9890 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9891 {
   9892 	struct wm_softc *sc = device_private(dev);
   9893 	uint32_t mdic = 0;
   9894 	int i;
   9895 
   9896 	if (reg > MII_ADDRMASK) {
   9897 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9898 		    __func__, sc->sc_phytype, reg);
   9899 		reg &= MII_ADDRMASK;
   9900 	}
   9901 
   9902 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9903 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9904 
   9905 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9906 		mdic = CSR_READ(sc, WMREG_MDIC);
   9907 		if (mdic & MDIC_READY)
   9908 			break;
   9909 		delay(50);
   9910 	}
   9911 
   9912 	if ((mdic & MDIC_READY) == 0)
   9913 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9914 		    device_xname(dev), phy, reg);
   9915 	else if (mdic & MDIC_E)
   9916 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9917 		    device_xname(dev), phy, reg);
   9918 }
   9919 
   9920 /*
   9921  * wm_gmii_i82544_readreg:	[mii interface function]
   9922  *
   9923  *	Read a PHY register on the GMII.
   9924  */
   9925 static int
   9926 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9927 {
   9928 	struct wm_softc *sc = device_private(dev);
   9929 	int rv;
   9930 
   9931 	if (sc->phy.acquire(sc)) {
   9932 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9933 		return 0;
   9934 	}
   9935 
   9936 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9937 		switch (sc->sc_phytype) {
   9938 		case WMPHY_IGP:
   9939 		case WMPHY_IGP_2:
   9940 		case WMPHY_IGP_3:
   9941 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9942 			break;
   9943 		default:
   9944 #ifdef WM_DEBUG
   9945 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9946 			    __func__, sc->sc_phytype, reg);
   9947 #endif
   9948 			break;
   9949 		}
   9950 	}
   9951 
   9952 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9953 	sc->phy.release(sc);
   9954 
   9955 	return rv;
   9956 }
   9957 
   9958 /*
   9959  * wm_gmii_i82544_writereg:	[mii interface function]
   9960  *
   9961  *	Write a PHY register on the GMII.
   9962  */
   9963 static void
   9964 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9965 {
   9966 	struct wm_softc *sc = device_private(dev);
   9967 
   9968 	if (sc->phy.acquire(sc)) {
   9969 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9970 		return;
   9971 	}
   9972 
   9973 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9974 		switch (sc->sc_phytype) {
   9975 		case WMPHY_IGP:
   9976 		case WMPHY_IGP_2:
   9977 		case WMPHY_IGP_3:
   9978 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9979 			break;
   9980 		default:
   9981 #ifdef WM_DEBUG
			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9983 			    __func__, sc->sc_phytype, reg);
   9984 #endif
   9985 			break;
   9986 		}
   9987 	}
   9988 
   9989 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9990 	sc->phy.release(sc);
   9991 }
   9992 
   9993 /*
   9994  * wm_gmii_i80003_readreg:	[mii interface function]
   9995  *
 *	Read a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9999  */
   10000 static int
   10001 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10002 {
   10003 	struct wm_softc *sc = device_private(dev);
   10004 	int page_select, temp;
   10005 	int rv;
   10006 
   10007 	if (phy != 1) /* only one PHY on kumeran bus */
   10008 		return 0;
   10009 
   10010 	if (sc->phy.acquire(sc)) {
   10011 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10012 		return 0;
   10013 	}
   10014 
   10015 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10016 		page_select = GG82563_PHY_PAGE_SELECT;
   10017 	else {
   10018 		/*
   10019 		 * Use Alternative Page Select register to access registers
   10020 		 * 30 and 31.
   10021 		 */
   10022 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10023 	}
   10024 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10025 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10026 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10027 		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
   10030 		 */
   10031 		delay(200);
   10032 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10033 			device_printf(dev, "%s failed\n", __func__);
   10034 			rv = 0; /* XXX */
   10035 			goto out;
   10036 		}
   10037 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10038 		delay(200);
   10039 	} else
   10040 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10041 
   10042 out:
   10043 	sc->phy.release(sc);
   10044 	return rv;
   10045 }
   10046 
   10047 /*
   10048  * wm_gmii_i80003_writereg:	[mii interface function]
   10049  *
 *	Write a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10053  */
   10054 static void
   10055 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10056 {
   10057 	struct wm_softc *sc = device_private(dev);
   10058 	int page_select, temp;
   10059 
   10060 	if (phy != 1) /* only one PHY on kumeran bus */
   10061 		return;
   10062 
   10063 	if (sc->phy.acquire(sc)) {
   10064 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10065 		return;
   10066 	}
   10067 
   10068 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10069 		page_select = GG82563_PHY_PAGE_SELECT;
   10070 	else {
   10071 		/*
   10072 		 * Use Alternative Page Select register to access registers
   10073 		 * 30 and 31.
   10074 		 */
   10075 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10076 	}
   10077 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10078 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10079 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10080 		/*
		 * Wait another 200us to work around a bug with the ready
		 * bit in the MDIC register.
   10083 		 */
   10084 		delay(200);
   10085 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10086 			device_printf(dev, "%s failed\n", __func__);
   10087 			goto out;
   10088 		}
   10089 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10090 		delay(200);
   10091 	} else
   10092 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10093 
   10094 out:
   10095 	sc->phy.release(sc);
   10096 }
   10097 
   10098 /*
   10099  * wm_gmii_bm_readreg:	[mii interface function]
   10100  *
 *	Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10104  */
   10105 static int
   10106 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10107 {
   10108 	struct wm_softc *sc = device_private(dev);
   10109 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10110 	uint16_t val;
   10111 	int rv;
   10112 
   10113 	if (sc->phy.acquire(sc)) {
   10114 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10115 		return 0;
   10116 	}
   10117 
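	/*
	 * On BM PHYs (but not on the 82574 and 82583), registers on
	 * pages >= 768, page 0 register 25 and register 31 are only
	 * reachable at PHY address 1.
	 */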
   10118 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10119 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10120 		    || (reg == 31)) ? 1 : phy;
   10121 	/* Page 800 works differently than the rest so it has its own func */
   10122 	if (page == BM_WUC_PAGE) {
   10123 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10124 		rv = val;
   10125 		goto release;
   10126 	}
   10127 
   10128 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10129 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10130 		    && (sc->sc_type != WM_T_82583))
   10131 			wm_gmii_mdic_writereg(dev, phy,
   10132 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10133 		else
   10134 			wm_gmii_mdic_writereg(dev, phy,
   10135 			    BME1000_PHY_PAGE_SELECT, page);
   10136 	}
   10137 
   10138 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10139 
   10140 release:
   10141 	sc->phy.release(sc);
   10142 	return rv;
   10143 }
   10144 
   10145 /*
   10146  * wm_gmii_bm_writereg:	[mii interface function]
   10147  *
 *	Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10151  */
   10152 static void
   10153 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10154 {
   10155 	struct wm_softc *sc = device_private(dev);
   10156 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10157 
   10158 	if (sc->phy.acquire(sc)) {
   10159 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10160 		return;
   10161 	}
   10162 
   10163 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10164 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10165 		    || (reg == 31)) ? 1 : phy;
   10166 	/* Page 800 works differently than the rest so it has its own func */
   10167 	if (page == BM_WUC_PAGE) {
   10168 		uint16_t tmp;
   10169 
   10170 		tmp = val;
   10171 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10172 		goto release;
   10173 	}
   10174 
   10175 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10176 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10177 		    && (sc->sc_type != WM_T_82583))
   10178 			wm_gmii_mdic_writereg(dev, phy,
   10179 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10180 		else
   10181 			wm_gmii_mdic_writereg(dev, phy,
   10182 			    BME1000_PHY_PAGE_SELECT, page);
   10183 	}
   10184 
   10185 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10186 
   10187 release:
   10188 	sc->phy.release(sc);
   10189 }
   10190 
   10191 static void
   10192 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10193 {
   10194 	struct wm_softc *sc = device_private(dev);
   10195 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10196 	uint16_t wuce, reg;
   10197 
   10198 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10199 		device_xname(dev), __func__));
   10200 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10201 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   10203 	}
   10204 
   10205 	/*
   10206 	 * 1) Enable PHY wakeup register first.
   10207 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10208 	 */
   10209 
   10210 	/* Set page 769 */
   10211 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10212 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10213 
   10214 	/* Read WUCE and save it */
   10215 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10216 
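	/* Enable wakeup register access, with ME and host wakeup disabled */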
   10217 	reg = wuce | BM_WUC_ENABLE_BIT;
   10218 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10219 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10220 
   10221 	/* Select page 800 */
   10222 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10223 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10224 
   10225 	/*
   10226 	 * 2) Access PHY wakeup register.
   10227 	 * See e1000_access_phy_wakeup_reg_bm.
   10228 	 */
   10229 
	/* Write the wakeup register address using the address opcode */
   10231 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10232 
   10233 	if (rd)
   10234 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10235 	else
   10236 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10237 
   10238 	/*
   10239 	 * 3) Disable PHY wakeup register.
   10240 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10241 	 */
   10242 	/* Set page 769 */
   10243 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10244 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10245 
   10246 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10247 }
   10248 
   10249 /*
   10250  * wm_gmii_hv_readreg:	[mii interface function]
   10251  *
 *	Read a PHY register on the HV PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10255  */
   10256 static int
   10257 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10258 {
   10259 	struct wm_softc *sc = device_private(dev);
   10260 	int rv;
   10261 
   10262 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10263 		device_xname(dev), __func__));
   10264 	if (sc->phy.acquire(sc)) {
   10265 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10266 		return 0;
   10267 	}
   10268 
   10269 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10270 	sc->phy.release(sc);
   10271 	return rv;
   10272 }
   10273 
   10274 static int
   10275 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10276 {
   10277 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10278 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10279 	uint16_t val;
   10280 	int rv;
   10281 
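	/* Registers on pages >= 768 are only reachable at PHY address 1 */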
   10282 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10283 
   10284 	/* Page 800 works differently than the rest so it has its own func */
   10285 	if (page == BM_WUC_PAGE) {
   10286 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10287 		return val;
   10288 	}
   10289 
   10290 	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; that is not implemented here.
   10293 	 */
   10294 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10295 		printf("gmii_hv_readreg!!!\n");
   10296 		return 0;
   10297 	}
   10298 
   10299 	/*
   10300 	 * XXX I21[789] documents say that the SMBus Address register is at
   10301 	 * PHY address 01, Page 0 (not 768), Register 26.
   10302 	 */
   10303 	if (page == HV_INTC_FC_PAGE_START)
   10304 		page = 0;
   10305 
   10306 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10307 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10308 		    page << BME1000_PAGE_SHIFT);
   10309 	}
   10310 
   10311 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10312 	return rv;
   10313 }
   10314 
   10315 /*
   10316  * wm_gmii_hv_writereg:	[mii interface function]
   10317  *
 *	Write a PHY register on the HV PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10321  */
   10322 static void
   10323 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10324 {
   10325 	struct wm_softc *sc = device_private(dev);
   10326 
   10327 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10328 		device_xname(dev), __func__));
   10329 
   10330 	if (sc->phy.acquire(sc)) {
   10331 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10332 		return;
   10333 	}
   10334 
   10335 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10336 	sc->phy.release(sc);
   10337 }
   10338 
   10339 static void
   10340 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10341 {
   10342 	struct wm_softc *sc = device_private(dev);
   10343 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10344 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10345 
   10346 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10347 
   10348 	/* Page 800 works differently than the rest so it has its own func */
   10349 	if (page == BM_WUC_PAGE) {
   10350 		uint16_t tmp;
   10351 
   10352 		tmp = val;
   10353 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10354 		return;
   10355 	}
   10356 
   10357 	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function; that is not implemented here.
   10360 	 */
   10361 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10362 		printf("gmii_hv_writereg!!!\n");
   10363 		return;
   10364 	}
   10365 
   10366 	{
   10367 		/*
   10368 		 * XXX I21[789] documents say that the SMBus Address register
   10369 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10370 		 */
   10371 		if (page == HV_INTC_FC_PAGE_START)
   10372 			page = 0;
   10373 
   10374 		/*
   10375 		 * XXX Workaround MDIO accesses being disabled after entering
   10376 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10377 		 * register is set)
   10378 		 */
   10379 		if (sc->sc_phytype == WMPHY_82578) {
   10380 			struct mii_softc *child;
   10381 
   10382 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10383 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10384 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10385 			    && ((val & (1 << 11)) != 0)) {
   10386 				printf("XXX need workaround\n");
   10387 			}
   10388 		}
   10389 
   10390 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10391 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10392 			    page << BME1000_PAGE_SHIFT);
   10393 		}
   10394 	}
   10395 
   10396 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10397 }
   10398 
   10399 /*
   10400  * wm_gmii_82580_readreg:	[mii interface function]
   10401  *
   10402  *	Read a PHY register on the 82580 and I350.
   10403  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10405  */
   10406 static int
   10407 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10408 {
   10409 	struct wm_softc *sc = device_private(dev);
   10410 	int rv;
   10411 
   10412 	if (sc->phy.acquire(sc) != 0) {
   10413 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10414 		return 0;
   10415 	}
   10416 
   10417 #ifdef DIAGNOSTIC
   10418 	if (reg > MII_ADDRMASK) {
   10419 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10420 		    __func__, sc->sc_phytype, reg);
   10421 		reg &= MII_ADDRMASK;
   10422 	}
   10423 #endif
   10424 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10425 
   10426 	sc->phy.release(sc);
   10427 	return rv;
   10428 }
   10429 
   10430 /*
   10431  * wm_gmii_82580_writereg:	[mii interface function]
   10432  *
   10433  *	Write a PHY register on the 82580 and I350.
   10434  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10436  */
   10437 static void
   10438 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10439 {
   10440 	struct wm_softc *sc = device_private(dev);
   10441 
   10442 	if (sc->phy.acquire(sc) != 0) {
   10443 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10444 		return;
   10445 	}
   10446 
   10447 #ifdef DIAGNOSTIC
   10448 	if (reg > MII_ADDRMASK) {
   10449 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10450 		    __func__, sc->sc_phytype, reg);
   10451 		reg &= MII_ADDRMASK;
   10452 	}
   10453 #endif
   10454 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10455 
   10456 	sc->phy.release(sc);
   10457 }
   10458 
   10459 /*
   10460  * wm_gmii_gs40g_readreg:	[mii interface function]
   10461  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10465  */
   10466 static int
   10467 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10468 {
   10469 	struct wm_softc *sc = device_private(dev);
   10470 	int page, offset;
   10471 	int rv;
   10472 
   10473 	/* Acquire semaphore */
   10474 	if (sc->phy.acquire(sc)) {
   10475 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10476 		return 0;
   10477 	}
   10478 
   10479 	/* Page select */
   10480 	page = reg >> GS40G_PAGE_SHIFT;
   10481 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10482 
   10483 	/* Read reg */
   10484 	offset = reg & GS40G_OFFSET_MASK;
   10485 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10486 
   10487 	sc->phy.release(sc);
   10488 	return rv;
   10489 }
   10490 
   10491 /*
   10492  * wm_gmii_gs40g_writereg:	[mii interface function]
   10493  *
   10494  *	Write a PHY register on the I210 and I211.
   10495  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10497  */
   10498 static void
   10499 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10500 {
   10501 	struct wm_softc *sc = device_private(dev);
   10502 	int page, offset;
   10503 
   10504 	/* Acquire semaphore */
   10505 	if (sc->phy.acquire(sc)) {
   10506 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10507 		return;
   10508 	}
   10509 
   10510 	/* Page select */
   10511 	page = reg >> GS40G_PAGE_SHIFT;
   10512 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10513 
   10514 	/* Write reg */
   10515 	offset = reg & GS40G_OFFSET_MASK;
   10516 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10517 
   10518 	/* Release semaphore */
   10519 	sc->phy.release(sc);
   10520 }
   10521 
   10522 /*
   10523  * wm_gmii_statchg:	[mii interface function]
   10524  *
   10525  *	Callback from MII layer when media changes.
   10526  */
   10527 static void
   10528 wm_gmii_statchg(struct ifnet *ifp)
   10529 {
   10530 	struct wm_softc *sc = ifp->if_softc;
   10531 	struct mii_data *mii = &sc->sc_mii;
   10532 
   10533 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10534 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10535 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10536 
   10537 	/*
   10538 	 * Get flow control negotiation result.
   10539 	 */
   10540 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10541 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10542 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10543 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10544 	}
   10545 
   10546 	if (sc->sc_flowflags & IFM_FLOW) {
   10547 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10548 			sc->sc_ctrl |= CTRL_TFCE;
   10549 			sc->sc_fcrtl |= FCRTL_XONE;
   10550 		}
   10551 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10552 			sc->sc_ctrl |= CTRL_RFCE;
   10553 	}
   10554 
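	/* Set the collision distance according to the resolved duplex */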
   10555 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10556 		DPRINTF(WM_DEBUG_LINK,
   10557 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10558 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10559 	} else {
   10560 		DPRINTF(WM_DEBUG_LINK,
   10561 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10562 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10563 	}
   10564 
   10565 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10566 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10567 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10568 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10569 	if (sc->sc_type == WM_T_80003) {
   10570 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10571 		case IFM_1000_T:
   10572 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10573 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10574 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10575 			break;
   10576 		default:
   10577 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10578 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10579 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10580 			break;
   10581 		}
   10582 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10583 	}
   10584 }
   10585 
   10586 /* kumeran related (80003, ICH* and PCH*) */
   10587 
   10588 /*
   10589  * wm_kmrn_readreg:
   10590  *
   10591  *	Read a kumeran register
   10592  */
   10593 static int
   10594 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10595 {
   10596 	int rv;
   10597 
   10598 	if (sc->sc_type == WM_T_80003)
   10599 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10600 	else
   10601 		rv = sc->phy.acquire(sc);
   10602 	if (rv != 0) {
   10603 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10604 		    __func__);
   10605 		return rv;
   10606 	}
   10607 
   10608 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10609 
   10610 	if (sc->sc_type == WM_T_80003)
   10611 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10612 	else
   10613 		sc->phy.release(sc);
   10614 
   10615 	return rv;
   10616 }
   10617 
   10618 static int
   10619 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10620 {
   10621 
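	/*
	 * Write the register offset with the read-enable bit set, wait
	 * 2us, then read the 16-bit result from the same register.
	 */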
   10622 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10623 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10624 	    KUMCTRLSTA_REN);
   10625 	CSR_WRITE_FLUSH(sc);
   10626 	delay(2);
   10627 
   10628 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10629 
   10630 	return 0;
   10631 }
   10632 
   10633 /*
   10634  * wm_kmrn_writereg:
   10635  *
   10636  *	Write a kumeran register
   10637  */
   10638 static int
   10639 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10640 {
   10641 	int rv;
   10642 
   10643 	if (sc->sc_type == WM_T_80003)
   10644 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10645 	else
   10646 		rv = sc->phy.acquire(sc);
   10647 	if (rv != 0) {
   10648 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10649 		    __func__);
   10650 		return rv;
   10651 	}
   10652 
   10653 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10654 
   10655 	if (sc->sc_type == WM_T_80003)
   10656 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10657 	else
   10658 		sc->phy.release(sc);
   10659 
   10660 	return rv;
   10661 }
   10662 
   10663 static int
   10664 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10665 {
   10666 
   10667 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10668 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10669 
   10670 	return 0;
   10671 }
   10672 
   10673 /* SGMII related */
   10674 
   10675 /*
 * wm_sgmii_uses_mdio:
   10677  *
   10678  * Check whether the transaction is to the internal PHY or the external
   10679  * MDIO interface. Return true if it's MDIO.
   10680  */
   10681 static bool
   10682 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10683 {
   10684 	uint32_t reg;
   10685 	bool ismdio = false;
   10686 
   10687 	switch (sc->sc_type) {
   10688 	case WM_T_82575:
   10689 	case WM_T_82576:
   10690 		reg = CSR_READ(sc, WMREG_MDIC);
   10691 		ismdio = ((reg & MDIC_DEST) != 0);
   10692 		break;
   10693 	case WM_T_82580:
   10694 	case WM_T_I350:
   10695 	case WM_T_I354:
   10696 	case WM_T_I210:
   10697 	case WM_T_I211:
   10698 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10699 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10700 		break;
   10701 	default:
   10702 		break;
   10703 	}
   10704 
   10705 	return ismdio;
   10706 }
   10707 
   10708 /*
   10709  * wm_sgmii_readreg:	[mii interface function]
   10710  *
   10711  *	Read a PHY register on the SGMII
   10712  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10714  */
   10715 static int
   10716 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10717 {
   10718 	struct wm_softc *sc = device_private(dev);
   10719 	uint32_t i2ccmd;
   10720 	int i, rv;
   10721 
   10722 	if (sc->phy.acquire(sc)) {
   10723 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10724 		return 0;
   10725 	}
   10726 
   10727 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10728 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10729 	    | I2CCMD_OPCODE_READ;
   10730 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10731 
   10732 	/* Poll the ready bit */
   10733 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10734 		delay(50);
   10735 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10736 		if (i2ccmd & I2CCMD_READY)
   10737 			break;
   10738 	}
   10739 	if ((i2ccmd & I2CCMD_READY) == 0)
   10740 		device_printf(dev, "I2CCMD Read did not complete\n");
   10741 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10742 		device_printf(dev, "I2CCMD Error bit set\n");
   10743 
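	/* Swap the data bytes returned by the I2C interface into MII order */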
   10744 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10745 
   10746 	sc->phy.release(sc);
   10747 	return rv;
   10748 }
   10749 
   10750 /*
   10751  * wm_sgmii_writereg:	[mii interface function]
   10752  *
   10753  *	Write a PHY register on the SGMII.
   10754  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10756  */
   10757 static void
   10758 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10759 {
   10760 	struct wm_softc *sc = device_private(dev);
   10761 	uint32_t i2ccmd;
   10762 	int i;
   10763 	int val_swapped;
   10764 
   10765 	if (sc->phy.acquire(sc) != 0) {
   10766 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10767 		return;
   10768 	}
   10769 	/* Swap the data bytes for the I2C interface */
   10770 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10771 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10772 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10773 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10774 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10775 
   10776 	/* Poll the ready bit */
   10777 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10778 		delay(50);
   10779 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10780 		if (i2ccmd & I2CCMD_READY)
   10781 			break;
   10782 	}
   10783 	if ((i2ccmd & I2CCMD_READY) == 0)
   10784 		device_printf(dev, "I2CCMD Write did not complete\n");
   10785 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10786 		device_printf(dev, "I2CCMD Error bit set\n");
   10787 
   10788 	sc->phy.release(sc);
   10789 }
   10790 
   10791 /* TBI related */
   10792 
   10793 /*
   10794  * wm_tbi_mediainit:
   10795  *
   10796  *	Initialize media for use on 1000BASE-X devices.
   10797  */
   10798 static void
   10799 wm_tbi_mediainit(struct wm_softc *sc)
   10800 {
   10801 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10802 	const char *sep = "";
   10803 
   10804 	if (sc->sc_type < WM_T_82543)
   10805 		sc->sc_tipg = TIPG_WM_DFLT;
   10806 	else
   10807 		sc->sc_tipg = TIPG_LG_DFLT;
   10808 
   10809 	sc->sc_tbi_serdes_anegticks = 5;
   10810 
   10811 	/* Initialize our media structures */
   10812 	sc->sc_mii.mii_ifp = ifp;
   10813 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10814 
   10815 	if ((sc->sc_type >= WM_T_82575)
   10816 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10817 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10818 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10819 	else
   10820 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10821 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10822 
   10823 	/*
   10824 	 * SWD Pins:
   10825 	 *
   10826 	 *	0 = Link LED (output)
   10827 	 *	1 = Loss Of Signal (input)
   10828 	 */
   10829 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10830 
   10831 	/* XXX Perhaps this is only for TBI */
   10832 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10833 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10834 
   10835 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10836 		sc->sc_ctrl &= ~CTRL_LRST;
   10837 
   10838 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10839 
   10840 #define	ADD(ss, mm, dd)							\
   10841 do {									\
   10842 	aprint_normal("%s%s", sep, ss);					\
   10843 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10844 	sep = ", ";							\
   10845 } while (/*CONSTCOND*/0)
   10846 
   10847 	aprint_normal_dev(sc->sc_dev, "");
   10848 
   10849 	if (sc->sc_type == WM_T_I354) {
   10850 		uint32_t status;
   10851 
   10852 		status = CSR_READ(sc, WMREG_STATUS);
   10853 		if (((status & STATUS_2P5_SKU) != 0)
   10854 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10855 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10856 		} else
   10857 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10858 	} else if (sc->sc_type == WM_T_82545) {
   10859 		/* Only 82545 is LX (XXX except SFP) */
   10860 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10861 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10862 	} else {
   10863 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10864 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10865 	}
   10866 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10867 	aprint_normal("\n");
   10868 
   10869 #undef ADD
   10870 
   10871 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10872 }
   10873 
   10874 /*
   10875  * wm_tbi_mediachange:	[ifmedia interface function]
   10876  *
   10877  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10878  */
   10879 static int
   10880 wm_tbi_mediachange(struct ifnet *ifp)
   10881 {
   10882 	struct wm_softc *sc = ifp->if_softc;
   10883 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10884 	uint32_t status;
   10885 	int i;
   10886 
   10887 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10888 		/* XXX need some work for >= 82571 and < 82575 */
   10889 		if (sc->sc_type < WM_T_82575)
   10890 			return 0;
   10891 	}
   10892 
   10893 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10894 	    || (sc->sc_type >= WM_T_82575))
   10895 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10896 
   10897 	sc->sc_ctrl &= ~CTRL_LRST;
   10898 	sc->sc_txcw = TXCW_ANE;
   10899 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10900 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10901 	else if (ife->ifm_media & IFM_FDX)
   10902 		sc->sc_txcw |= TXCW_FD;
   10903 	else
   10904 		sc->sc_txcw |= TXCW_HD;
   10905 
   10906 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10907 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10908 
   10909 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10910 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10911 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10912 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10913 	CSR_WRITE_FLUSH(sc);
   10914 	delay(1000);
   10915 
   10916 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10917 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10918 
   10919 	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal, and 0 if they don't.
   10922 	 */
   10923 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10924 		/* Have signal; wait for the link to come up. */
   10925 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10926 			delay(10000);
   10927 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10928 				break;
   10929 		}
   10930 
   10931 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10932 			    device_xname(sc->sc_dev),i));
   10933 
   10934 		status = CSR_READ(sc, WMREG_STATUS);
   10935 		DPRINTF(WM_DEBUG_LINK,
   10936 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10937 			device_xname(sc->sc_dev),status, STATUS_LU));
   10938 		if (status & STATUS_LU) {
   10939 			/* Link is up. */
   10940 			DPRINTF(WM_DEBUG_LINK,
   10941 			    ("%s: LINK: set media -> link up %s\n",
   10942 			    device_xname(sc->sc_dev),
   10943 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10944 
   10945 			/*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should update sc->sc_ctrl.
   10948 			 */
   10949 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10950 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10951 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10952 			if (status & STATUS_FD)
   10953 				sc->sc_tctl |=
   10954 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10955 			else
   10956 				sc->sc_tctl |=
   10957 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10958 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10959 				sc->sc_fcrtl |= FCRTL_XONE;
   10960 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10961 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10962 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10963 				      sc->sc_fcrtl);
   10964 			sc->sc_tbi_linkup = 1;
   10965 		} else {
   10966 			if (i == WM_LINKUP_TIMEOUT)
   10967 				wm_check_for_link(sc);
   10968 			/* Link is down. */
   10969 			DPRINTF(WM_DEBUG_LINK,
   10970 			    ("%s: LINK: set media -> link down\n",
   10971 			    device_xname(sc->sc_dev)));
   10972 			sc->sc_tbi_linkup = 0;
   10973 		}
   10974 	} else {
   10975 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10976 		    device_xname(sc->sc_dev)));
   10977 		sc->sc_tbi_linkup = 0;
   10978 	}
   10979 
   10980 	wm_tbi_serdes_set_linkled(sc);
   10981 
   10982 	return 0;
   10983 }
   10984 
   10985 /*
   10986  * wm_tbi_mediastatus:	[ifmedia interface function]
   10987  *
   10988  *	Get the current interface media status on a 1000BASE-X device.
   10989  */
   10990 static void
   10991 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10992 {
   10993 	struct wm_softc *sc = ifp->if_softc;
   10994 	uint32_t ctrl, status;
   10995 
   10996 	ifmr->ifm_status = IFM_AVALID;
   10997 	ifmr->ifm_active = IFM_ETHER;
   10998 
   10999 	status = CSR_READ(sc, WMREG_STATUS);
   11000 	if ((status & STATUS_LU) == 0) {
   11001 		ifmr->ifm_active |= IFM_NONE;
   11002 		return;
   11003 	}
   11004 
   11005 	ifmr->ifm_status |= IFM_ACTIVE;
   11006 	/* Only 82545 is LX */
   11007 	if (sc->sc_type == WM_T_82545)
   11008 		ifmr->ifm_active |= IFM_1000_LX;
   11009 	else
   11010 		ifmr->ifm_active |= IFM_1000_SX;
   11011 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11012 		ifmr->ifm_active |= IFM_FDX;
   11013 	else
   11014 		ifmr->ifm_active |= IFM_HDX;
   11015 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11016 	if (ctrl & CTRL_RFCE)
   11017 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11018 	if (ctrl & CTRL_TFCE)
   11019 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11020 }
   11021 
   11022 /* XXX TBI only */
   11023 static int
   11024 wm_check_for_link(struct wm_softc *sc)
   11025 {
   11026 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11027 	uint32_t rxcw;
   11028 	uint32_t ctrl;
   11029 	uint32_t status;
   11030 	uint32_t sig;
   11031 
   11032 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11033 		/* XXX need some work for >= 82571 */
   11034 		if (sc->sc_type >= WM_T_82571) {
   11035 			sc->sc_tbi_linkup = 1;
   11036 			return 0;
   11037 		}
   11038 	}
   11039 
   11040 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11041 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11042 	status = CSR_READ(sc, WMREG_STATUS);
   11043 
   11044 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11045 
   11046 	DPRINTF(WM_DEBUG_LINK,
   11047 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11048 		device_xname(sc->sc_dev), __func__,
   11049 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11050 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11051 
   11052 	/*
   11053 	 * SWDPIN   LU RXCW
   11054 	 *      0    0    0
   11055 	 *      0    0    1	(should not happen)
   11056 	 *      0    1    0	(should not happen)
   11057 	 *      0    1    1	(should not happen)
   11058 	 *      1    0    0	Disable autonego and force linkup
   11059 	 *      1    0    1	got /C/ but not linkup yet
   11060 	 *      1    1    0	(linkup)
   11061 	 *      1    1    1	If IFM_AUTO, back to autonego
   11062 	 *
   11063 	 */
   11064 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11065 	    && ((status & STATUS_LU) == 0)
   11066 	    && ((rxcw & RXCW_C) == 0)) {
   11067 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11068 			__func__));
   11069 		sc->sc_tbi_linkup = 0;
   11070 		/* Disable auto-negotiation in the TXCW register */
   11071 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11072 
   11073 		/*
   11074 		 * Force link-up and also force full-duplex.
   11075 		 *
		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
		 * so we should update sc->sc_ctrl from the value just read.
   11078 		 */
   11079 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11080 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11081 	} else if (((status & STATUS_LU) != 0)
   11082 	    && ((rxcw & RXCW_C) != 0)
   11083 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11084 		sc->sc_tbi_linkup = 1;
   11085 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11086 			__func__));
   11087 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11088 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11089 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11090 	    && ((rxcw & RXCW_C) != 0)) {
   11091 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11092 	} else {
   11093 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11094 			status));
   11095 	}
   11096 
   11097 	return 0;
   11098 }
   11099 
   11100 /*
   11101  * wm_tbi_tick:
   11102  *
   11103  *	Check the link on TBI devices.
   11104  *	This function acts as mii_tick().
   11105  */
   11106 static void
   11107 wm_tbi_tick(struct wm_softc *sc)
   11108 {
   11109 	struct mii_data *mii = &sc->sc_mii;
   11110 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11111 	uint32_t status;
   11112 
   11113 	KASSERT(WM_CORE_LOCKED(sc));
   11114 
   11115 	status = CSR_READ(sc, WMREG_STATUS);
   11116 
   11117 	/* XXX is this needed? */
   11118 	(void)CSR_READ(sc, WMREG_RXCW);
   11119 	(void)CSR_READ(sc, WMREG_CTRL);
   11120 
   11121 	/* set link status */
   11122 	if ((status & STATUS_LU) == 0) {
   11123 		DPRINTF(WM_DEBUG_LINK,
   11124 		    ("%s: LINK: checklink -> down\n",
   11125 			device_xname(sc->sc_dev)));
   11126 		sc->sc_tbi_linkup = 0;
   11127 	} else if (sc->sc_tbi_linkup == 0) {
   11128 		DPRINTF(WM_DEBUG_LINK,
   11129 		    ("%s: LINK: checklink -> up %s\n",
   11130 			device_xname(sc->sc_dev),
   11131 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11132 		sc->sc_tbi_linkup = 1;
   11133 		sc->sc_tbi_serdes_ticks = 0;
   11134 	}
   11135 
   11136 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11137 		goto setled;
   11138 
   11139 	if ((status & STATUS_LU) == 0) {
   11140 		sc->sc_tbi_linkup = 0;
   11141 		/* If the timer expired, retry autonegotiation */
   11142 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11143 		    && (++sc->sc_tbi_serdes_ticks
   11144 			>= sc->sc_tbi_serdes_anegticks)) {
   11145 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11146 			sc->sc_tbi_serdes_ticks = 0;
   11147 			/*
   11148 			 * Reset the link, and let autonegotiation do
   11149 			 * its thing
   11150 			 */
   11151 			sc->sc_ctrl |= CTRL_LRST;
   11152 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11153 			CSR_WRITE_FLUSH(sc);
   11154 			delay(1000);
   11155 			sc->sc_ctrl &= ~CTRL_LRST;
   11156 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11157 			CSR_WRITE_FLUSH(sc);
   11158 			delay(1000);
   11159 			CSR_WRITE(sc, WMREG_TXCW,
   11160 			    sc->sc_txcw & ~TXCW_ANE);
   11161 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11162 		}
   11163 	}
   11164 
   11165 setled:
   11166 	wm_tbi_serdes_set_linkled(sc);
   11167 }
   11168 
   11169 /* SERDES related */
   11170 static void
   11171 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11172 {
   11173 	uint32_t reg;
   11174 
   11175 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11176 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11177 		return;
   11178 
   11179 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11180 	reg |= PCS_CFG_PCS_EN;
   11181 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11182 
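	/* Drive SWDPIN 3 low to power up the SGMII PHY / SFP cage */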
   11183 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11184 	reg &= ~CTRL_EXT_SWDPIN(3);
   11185 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11186 	CSR_WRITE_FLUSH(sc);
   11187 }
   11188 
   11189 static int
   11190 wm_serdes_mediachange(struct ifnet *ifp)
   11191 {
   11192 	struct wm_softc *sc = ifp->if_softc;
   11193 	bool pcs_autoneg = true; /* XXX */
   11194 	uint32_t ctrl_ext, pcs_lctl, reg;
   11195 
   11196 	/* XXX Currently, this function is not called on 8257[12] */
   11197 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11198 	    || (sc->sc_type >= WM_T_82575))
   11199 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11200 
   11201 	wm_serdes_power_up_link_82575(sc);
   11202 
   11203 	sc->sc_ctrl |= CTRL_SLU;
   11204 
   11205 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11206 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11207 
   11208 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11209 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11210 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11211 	case CTRL_EXT_LINK_MODE_SGMII:
   11212 		pcs_autoneg = true;
   11213 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11214 		break;
   11215 	case CTRL_EXT_LINK_MODE_1000KX:
   11216 		pcs_autoneg = false;
   11217 		/* FALLTHROUGH */
   11218 	default:
   11219 		if ((sc->sc_type == WM_T_82575)
   11220 		    || (sc->sc_type == WM_T_82576)) {
   11221 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11222 				pcs_autoneg = false;
   11223 		}
   11224 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11225 		    | CTRL_FRCFDX;
   11226 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11227 	}
   11228 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11229 
   11230 	if (pcs_autoneg) {
   11231 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11232 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11233 
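		/* Advertise both symmetric and asymmetric PAUSE */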
   11234 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11235 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11236 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11237 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11238 	} else
   11239 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11240 
   11241 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11242 
   11244 	return 0;
   11245 }
   11246 
   11247 static void
   11248 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11249 {
   11250 	struct wm_softc *sc = ifp->if_softc;
   11251 	struct mii_data *mii = &sc->sc_mii;
   11252 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11253 	uint32_t pcs_adv, pcs_lpab, reg;
   11254 
   11255 	ifmr->ifm_status = IFM_AVALID;
   11256 	ifmr->ifm_active = IFM_ETHER;
   11257 
   11258 	/* Check PCS */
   11259 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11260 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11261 		ifmr->ifm_active |= IFM_NONE;
   11262 		sc->sc_tbi_linkup = 0;
   11263 		goto setled;
   11264 	}
   11265 
   11266 	sc->sc_tbi_linkup = 1;
   11267 	ifmr->ifm_status |= IFM_ACTIVE;
   11268 	if (sc->sc_type == WM_T_I354) {
   11269 		uint32_t status;
   11270 
   11271 		status = CSR_READ(sc, WMREG_STATUS);
   11272 		if (((status & STATUS_2P5_SKU) != 0)
   11273 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11274 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11275 		} else
   11276 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11277 	} else {
   11278 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11279 		case PCS_LSTS_SPEED_10:
   11280 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11281 			break;
   11282 		case PCS_LSTS_SPEED_100:
   11283 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11284 			break;
   11285 		case PCS_LSTS_SPEED_1000:
   11286 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11287 			break;
   11288 		default:
   11289 			device_printf(sc->sc_dev, "Unknown speed\n");
   11290 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11291 			break;
   11292 		}
   11293 	}
   11294 	if ((reg & PCS_LSTS_FDX) != 0)
   11295 		ifmr->ifm_active |= IFM_FDX;
   11296 	else
   11297 		ifmr->ifm_active |= IFM_HDX;
   11298 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11299 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11300 		/* Check flow */
   11301 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11302 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11303 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11304 			goto setled;
   11305 		}
   11306 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11307 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11308 		DPRINTF(WM_DEBUG_LINK,
   11309 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
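          		/*
          		 * Resolve the pause abilities per IEEE 802.3 Annex
          		 * 28B priority resolution: symmetric pause on both
          		 * sides enables flow control in both directions,
          		 * while a matching asymmetric pair enables TX-only
          		 * or RX-only pause.
          		 */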
   11310 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11311 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11312 			mii->mii_media_active |= IFM_FLOW
   11313 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11314 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11315 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11316 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11317 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11318 			mii->mii_media_active |= IFM_FLOW
   11319 			    | IFM_ETH_TXPAUSE;
   11320 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11321 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11322 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11323 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11324 			mii->mii_media_active |= IFM_FLOW
   11325 			    | IFM_ETH_RXPAUSE;
   11326 		}
   11327 	}
   11328 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11329 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11330 setled:
   11331 	wm_tbi_serdes_set_linkled(sc);
   11332 }
   11333 
   11334 /*
   11335  * wm_serdes_tick:
   11336  *
   11337  *	Check the link on serdes devices.
   11338  */
   11339 static void
   11340 wm_serdes_tick(struct wm_softc *sc)
   11341 {
   11342 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11343 	struct mii_data *mii = &sc->sc_mii;
   11344 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11345 	uint32_t reg;
   11346 
   11347 	KASSERT(WM_CORE_LOCKED(sc));
   11348 
   11349 	mii->mii_media_status = IFM_AVALID;
   11350 	mii->mii_media_active = IFM_ETHER;
   11351 
   11352 	/* Check PCS */
   11353 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11354 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11355 		mii->mii_media_status |= IFM_ACTIVE;
   11356 		sc->sc_tbi_linkup = 1;
   11357 		sc->sc_tbi_serdes_ticks = 0;
   11358 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11359 		if ((reg & PCS_LSTS_FDX) != 0)
   11360 			mii->mii_media_active |= IFM_FDX;
   11361 		else
   11362 			mii->mii_media_active |= IFM_HDX;
   11363 	} else {
    11364 		mii->mii_media_active |= IFM_NONE;
   11365 		sc->sc_tbi_linkup = 0;
   11366 		/* If the timer expired, retry autonegotiation */
   11367 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11368 		    && (++sc->sc_tbi_serdes_ticks
   11369 			>= sc->sc_tbi_serdes_anegticks)) {
   11370 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11371 			sc->sc_tbi_serdes_ticks = 0;
   11372 			/* XXX */
   11373 			wm_serdes_mediachange(ifp);
   11374 		}
   11375 	}
   11376 
   11377 	wm_tbi_serdes_set_linkled(sc);
   11378 }
   11379 
   11380 /* SFP related */
   11381 
   11382 static int
   11383 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11384 {
   11385 	uint32_t i2ccmd;
   11386 	int i;
   11387 
   11388 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11389 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11390 
   11391 	/* Poll the ready bit */
   11392 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11393 		delay(50);
   11394 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11395 		if (i2ccmd & I2CCMD_READY)
   11396 			break;
   11397 	}
   11398 	if ((i2ccmd & I2CCMD_READY) == 0)
   11399 		return -1;
   11400 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11401 		return -1;
   11402 
   11403 	*data = i2ccmd & 0x00ff;
   11404 
   11405 	return 0;
   11406 }
   11407 
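          /*
           * Identify the SFP module from its SFF EEPROM: the ID byte
           * distinguishes a soldered module from a pluggable SFP, and the
           * Ethernet compliance flags select SERDES (1000BASE-SX/LX) or
           * SGMII (1000BASE-T/100BASE-FX) operation.
           */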
   11408 static uint32_t
   11409 wm_sfp_get_media_type(struct wm_softc *sc)
   11410 {
   11411 	uint32_t ctrl_ext;
   11412 	uint8_t val = 0;
   11413 	int timeout = 3;
   11414 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11415 	int rv = -1;
   11416 
   11417 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11418 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11419 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11420 	CSR_WRITE_FLUSH(sc);
   11421 
   11422 	/* Read SFP module data */
   11423 	while (timeout) {
   11424 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11425 		if (rv == 0)
   11426 			break;
   11427 		delay(100*1000); /* XXX too big */
   11428 		timeout--;
   11429 	}
   11430 	if (rv != 0)
   11431 		goto out;
   11432 	switch (val) {
   11433 	case SFF_SFP_ID_SFF:
   11434 		aprint_normal_dev(sc->sc_dev,
   11435 		    "Module/Connector soldered to board\n");
   11436 		break;
   11437 	case SFF_SFP_ID_SFP:
   11438 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11439 		break;
   11440 	case SFF_SFP_ID_UNKNOWN:
   11441 		goto out;
   11442 	default:
   11443 		break;
   11444 	}
   11445 
   11446 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11447 	if (rv != 0) {
   11448 		goto out;
   11449 	}
   11450 
   11451 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11452 		mediatype = WM_MEDIATYPE_SERDES;
    11453 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    11454 		sc->sc_flags |= WM_F_SGMII;
    11455 		mediatype = WM_MEDIATYPE_COPPER;
    11456 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11457 		sc->sc_flags |= WM_F_SGMII;
   11458 		mediatype = WM_MEDIATYPE_SERDES;
   11459 	}
   11460 
   11461 out:
   11462 	/* Restore I2C interface setting */
   11463 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11464 
   11465 	return mediatype;
   11466 }
   11467 
   11468 /*
   11469  * NVM related.
   11470  * Microwire, SPI (w/wo EERD) and Flash.
   11471  */
   11472 
   11473 /* Both spi and uwire */
   11474 
   11475 /*
   11476  * wm_eeprom_sendbits:
   11477  *
   11478  *	Send a series of bits to the EEPROM.
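           *
           *	Each bit is presented on EECD_DI (MSB first) and latched
           *	by pulsing EECD_SK high and then low, with a short delay
           *	around every edge.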
   11479  */
   11480 static void
   11481 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11482 {
   11483 	uint32_t reg;
   11484 	int x;
   11485 
   11486 	reg = CSR_READ(sc, WMREG_EECD);
   11487 
   11488 	for (x = nbits; x > 0; x--) {
   11489 		if (bits & (1U << (x - 1)))
   11490 			reg |= EECD_DI;
   11491 		else
   11492 			reg &= ~EECD_DI;
   11493 		CSR_WRITE(sc, WMREG_EECD, reg);
   11494 		CSR_WRITE_FLUSH(sc);
   11495 		delay(2);
   11496 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11497 		CSR_WRITE_FLUSH(sc);
   11498 		delay(2);
   11499 		CSR_WRITE(sc, WMREG_EECD, reg);
   11500 		CSR_WRITE_FLUSH(sc);
   11501 		delay(2);
   11502 	}
   11503 }
   11504 
   11505 /*
   11506  * wm_eeprom_recvbits:
   11507  *
   11508  *	Receive a series of bits from the EEPROM.
   11509  */
   11510 static void
   11511 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11512 {
   11513 	uint32_t reg, val;
   11514 	int x;
   11515 
   11516 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11517 
   11518 	val = 0;
   11519 	for (x = nbits; x > 0; x--) {
   11520 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11521 		CSR_WRITE_FLUSH(sc);
   11522 		delay(2);
   11523 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11524 			val |= (1U << (x - 1));
   11525 		CSR_WRITE(sc, WMREG_EECD, reg);
   11526 		CSR_WRITE_FLUSH(sc);
   11527 		delay(2);
   11528 	}
   11529 	*valp = val;
   11530 }
   11531 
   11532 /* Microwire */
   11533 
   11534 /*
   11535  * wm_nvm_read_uwire:
   11536  *
   11537  *	Read a word from the EEPROM using the MicroWire protocol.
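           *
           *	Each word is a discrete transaction: raise chip select,
           *	shift out the 3-bit READ opcode and the word address,
           *	then clock in 16 data bits and drop chip select again.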
   11538  */
   11539 static int
   11540 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11541 {
   11542 	uint32_t reg, val;
   11543 	int i;
   11544 
   11545 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11546 		device_xname(sc->sc_dev), __func__));
   11547 
   11548 	if (sc->nvm.acquire(sc) != 0)
   11549 		return -1;
   11550 
   11551 	for (i = 0; i < wordcnt; i++) {
   11552 		/* Clear SK and DI. */
   11553 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11554 		CSR_WRITE(sc, WMREG_EECD, reg);
   11555 
   11556 		/*
   11557 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11558 		 * and Xen.
   11559 		 *
    11560 		 * We use this workaround only for the 82540, because
    11561 		 * qemu's e1000 emulation presents itself as an 82540.
   11562 		 */
   11563 		if (sc->sc_type == WM_T_82540) {
   11564 			reg |= EECD_SK;
   11565 			CSR_WRITE(sc, WMREG_EECD, reg);
   11566 			reg &= ~EECD_SK;
   11567 			CSR_WRITE(sc, WMREG_EECD, reg);
   11568 			CSR_WRITE_FLUSH(sc);
   11569 			delay(2);
   11570 		}
   11571 		/* XXX: end of workaround */
   11572 
   11573 		/* Set CHIP SELECT. */
   11574 		reg |= EECD_CS;
   11575 		CSR_WRITE(sc, WMREG_EECD, reg);
   11576 		CSR_WRITE_FLUSH(sc);
   11577 		delay(2);
   11578 
   11579 		/* Shift in the READ command. */
   11580 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11581 
   11582 		/* Shift in address. */
   11583 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11584 
   11585 		/* Shift out the data. */
   11586 		wm_eeprom_recvbits(sc, &val, 16);
   11587 		data[i] = val & 0xffff;
   11588 
   11589 		/* Clear CHIP SELECT. */
   11590 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11591 		CSR_WRITE(sc, WMREG_EECD, reg);
   11592 		CSR_WRITE_FLUSH(sc);
   11593 		delay(2);
   11594 	}
   11595 
   11596 	sc->nvm.release(sc);
   11597 	return 0;
   11598 }
   11599 
   11600 /* SPI */
   11601 
   11602 /*
   11603  * Set SPI and FLASH related information from the EECD register.
   11604  * For 82541 and 82547, the word size is taken from EEPROM.
   11605  */
   11606 static int
   11607 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11608 {
   11609 	int size;
   11610 	uint32_t reg;
   11611 	uint16_t data;
   11612 
   11613 	reg = CSR_READ(sc, WMREG_EECD);
   11614 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11615 
   11616 	/* Read the size of NVM from EECD by default */
   11617 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11618 	switch (sc->sc_type) {
   11619 	case WM_T_82541:
   11620 	case WM_T_82541_2:
   11621 	case WM_T_82547:
   11622 	case WM_T_82547_2:
   11623 		/* Set dummy value to access EEPROM */
   11624 		sc->sc_nvm_wordsize = 64;
   11625 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   11626 			aprint_error_dev(sc->sc_dev,
   11627 			    "%s: failed to read EEPROM size\n", __func__);
   11628 		}
   11629 		reg = data;
   11630 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11631 		if (size == 0)
   11632 			size = 6; /* 64 word size */
   11633 		else
   11634 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11635 		break;
   11636 	case WM_T_80003:
   11637 	case WM_T_82571:
   11638 	case WM_T_82572:
   11639 	case WM_T_82573: /* SPI case */
   11640 	case WM_T_82574: /* SPI case */
   11641 	case WM_T_82583: /* SPI case */
   11642 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11643 		if (size > 14)
   11644 			size = 14;
   11645 		break;
   11646 	case WM_T_82575:
   11647 	case WM_T_82576:
   11648 	case WM_T_82580:
   11649 	case WM_T_I350:
   11650 	case WM_T_I354:
   11651 	case WM_T_I210:
   11652 	case WM_T_I211:
   11653 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11654 		if (size > 15)
   11655 			size = 15;
   11656 		break;
   11657 	default:
   11658 		aprint_error_dev(sc->sc_dev,
   11659 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11660 		return -1;
   11662 	}
   11663 
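          	/* At this point "size" holds log2 of the NVM word count */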
   11664 	sc->sc_nvm_wordsize = 1 << size;
   11665 
   11666 	return 0;
   11667 }
   11668 
   11669 /*
   11670  * wm_nvm_ready_spi:
   11671  *
   11672  *	Wait for a SPI EEPROM to be ready for commands.
   11673  */
   11674 static int
   11675 wm_nvm_ready_spi(struct wm_softc *sc)
   11676 {
   11677 	uint32_t val;
   11678 	int usec;
   11679 
   11680 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11681 		device_xname(sc->sc_dev), __func__));
   11682 
   11683 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11684 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11685 		wm_eeprom_recvbits(sc, &val, 8);
   11686 		if ((val & SPI_SR_RDY) == 0)
   11687 			break;
   11688 	}
   11689 	if (usec >= SPI_MAX_RETRIES) {
    11690 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11691 		return -1;
   11692 	}
   11693 	return 0;
   11694 }
   11695 
   11696 /*
   11697  * wm_nvm_read_spi:
   11698  *
    11699  *	Read a word from the EEPROM using the SPI protocol.
   11700  */
   11701 static int
   11702 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11703 {
   11704 	uint32_t reg, val;
   11705 	int i;
   11706 	uint8_t opc;
   11707 	int rv = 0;
   11708 
   11709 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11710 		device_xname(sc->sc_dev), __func__));
   11711 
   11712 	if (sc->nvm.acquire(sc) != 0)
   11713 		return -1;
   11714 
   11715 	/* Clear SK and CS. */
   11716 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11717 	CSR_WRITE(sc, WMREG_EECD, reg);
   11718 	CSR_WRITE_FLUSH(sc);
   11719 	delay(2);
   11720 
   11721 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11722 		goto out;
   11723 
   11724 	/* Toggle CS to flush commands. */
   11725 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11726 	CSR_WRITE_FLUSH(sc);
   11727 	delay(2);
   11728 	CSR_WRITE(sc, WMREG_EECD, reg);
   11729 	CSR_WRITE_FLUSH(sc);
   11730 	delay(2);
   11731 
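          	/*
          	 * SPI parts with 8 address bits but more than 128 words
          	 * encode the 9th address bit (A8) in the READ opcode, so
          	 * patch the opcode for the upper half of such devices.
          	 */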
   11732 	opc = SPI_OPC_READ;
   11733 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11734 		opc |= SPI_OPC_A8;
   11735 
   11736 	wm_eeprom_sendbits(sc, opc, 8);
   11737 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11738 
   11739 	for (i = 0; i < wordcnt; i++) {
   11740 		wm_eeprom_recvbits(sc, &val, 16);
   11741 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11742 	}
   11743 
   11744 	/* Raise CS and clear SK. */
   11745 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11746 	CSR_WRITE(sc, WMREG_EECD, reg);
   11747 	CSR_WRITE_FLUSH(sc);
   11748 	delay(2);
   11749 
   11750 out:
   11751 	sc->nvm.release(sc);
   11752 	return rv;
   11753 }
   11754 
    11755 /* EERD (EEPROM Read register) based access */
   11756 
   11757 static int
   11758 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11759 {
   11760 	uint32_t attempts = 100000;
   11761 	uint32_t i, reg = 0;
   11762 	int32_t done = -1;
   11763 
   11764 	for (i = 0; i < attempts; i++) {
   11765 		reg = CSR_READ(sc, rw);
   11766 
   11767 		if (reg & EERD_DONE) {
   11768 			done = 0;
   11769 			break;
   11770 		}
   11771 		delay(5);
   11772 	}
   11773 
   11774 	return done;
   11775 }
   11776 
   11777 static int
   11778 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11779     uint16_t *data)
   11780 {
   11781 	int i, eerd = 0;
   11782 	int rv = 0;
   11783 
   11784 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11785 		device_xname(sc->sc_dev), __func__));
   11786 
   11787 	if (sc->nvm.acquire(sc) != 0)
   11788 		return -1;
   11789 
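          	/*
          	 * Each word is read by writing its address plus the START
          	 * bit to EERD, polling the DONE bit, and then extracting
          	 * the data from the upper bits of the same register.
          	 */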
   11790 	for (i = 0; i < wordcnt; i++) {
   11791 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11792 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11793 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11794 		if (rv != 0) {
   11795 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   11796 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   11797 			break;
   11798 		}
   11799 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11800 	}
   11801 
   11802 	sc->nvm.release(sc);
   11803 	return rv;
   11804 }
   11805 
   11806 /* Flash */
   11807 
   11808 static int
   11809 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11810 {
   11811 	uint32_t eecd;
   11812 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11813 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11814 	uint8_t sig_byte = 0;
   11815 
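          	/*
          	 * Each flash bank carries a signature byte next to
          	 * ICH_NVM_SIG_WORD; the bank whose signature matches
          	 * ICH_NVM_SIG_VALUE holds the valid NVM image.  PCH_SPT
          	 * instead publishes the valid bank in CTRL_EXT.
          	 */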
   11816 	switch (sc->sc_type) {
   11817 	case WM_T_PCH_SPT:
   11818 		/*
   11819 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11820 		 * sector valid bits from the NVM.
   11821 		 */
   11822 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11823 		if ((*bank == 0) || (*bank == 1)) {
   11824 			aprint_error_dev(sc->sc_dev,
   11825 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11826 				*bank);
   11827 			return -1;
   11828 		} else {
   11829 			*bank = *bank - 2;
   11830 			return 0;
   11831 		}
   11832 	case WM_T_ICH8:
   11833 	case WM_T_ICH9:
   11834 		eecd = CSR_READ(sc, WMREG_EECD);
   11835 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11836 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11837 			return 0;
   11838 		}
   11839 		/* FALLTHROUGH */
   11840 	default:
   11841 		/* Default to 0 */
   11842 		*bank = 0;
   11843 
   11844 		/* Check bank 0 */
   11845 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11846 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11847 			*bank = 0;
   11848 			return 0;
   11849 		}
   11850 
   11851 		/* Check bank 1 */
   11852 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11853 		    &sig_byte);
   11854 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11855 			*bank = 1;
   11856 			return 0;
   11857 		}
   11858 	}
   11859 
   11860 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11861 		device_xname(sc->sc_dev)));
   11862 	return -1;
   11863 }
   11864 
   11865 /******************************************************************************
   11866  * This function does initial flash setup so that a new read/write/erase cycle
   11867  * can be started.
   11868  *
   11869  * sc - The pointer to the hw structure
   11870  ****************************************************************************/
   11871 static int32_t
   11872 wm_ich8_cycle_init(struct wm_softc *sc)
   11873 {
   11874 	uint16_t hsfsts;
   11875 	int32_t error = 1;
   11876 	int32_t i     = 0;
   11877 
   11878 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11879 
    11880 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   11881 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11882 		return error;
   11883 	}
   11884 
   11885 	/* Clear FCERR in Hw status by writing 1 */
   11886 	/* Clear DAEL in Hw status by writing a 1 */
   11887 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11888 
   11889 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11890 
   11891 	/*
    11892 	 * Ideally we would have either a hardware SPI cycle-in-progress
    11893 	 * bit to check against before starting a new cycle, or an FDONE
    11894 	 * bit that reads as 1 after a hardware reset, which could then
    11895 	 * be used to tell whether a cycle is in progress or has been
    11896 	 * completed.  We should also have a software semaphore mechanism
    11897 	 * guarding FDONE and the cycle-in-progress bit, so that accesses
    11898 	 * by two threads are serialized and two threads cannot start a
    11899 	 * cycle at the same time.
   11900 	 */
   11901 
   11902 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11903 		/*
   11904 		 * There is no cycle running at present, so we can start a
   11905 		 * cycle
   11906 		 */
   11907 
   11908 		/* Begin by setting Flash Cycle Done. */
   11909 		hsfsts |= HSFSTS_DONE;
   11910 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11911 		error = 0;
   11912 	} else {
   11913 		/*
    11914 		 * Otherwise, poll for some time so the current cycle
    11915 		 * has a chance to end before giving up.
   11916 		 */
   11917 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11918 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11919 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11920 				error = 0;
   11921 				break;
   11922 			}
   11923 			delay(1);
   11924 		}
   11925 		if (error == 0) {
   11926 			/*
    11927 			 * The previous cycle completed within the timeout;
    11928 			 * now set the Flash Cycle Done bit.
   11929 			 */
   11930 			hsfsts |= HSFSTS_DONE;
   11931 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11932 		}
   11933 	}
   11934 	return error;
   11935 }
   11936 
   11937 /******************************************************************************
   11938  * This function starts a flash cycle and waits for its completion
   11939  *
   11940  * sc - The pointer to the hw structure
   11941  ****************************************************************************/
   11942 static int32_t
   11943 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11944 {
   11945 	uint16_t hsflctl;
   11946 	uint16_t hsfsts;
   11947 	int32_t error = 1;
   11948 	uint32_t i = 0;
   11949 
   11950 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11951 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11952 	hsflctl |= HSFCTL_GO;
   11953 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11954 
   11955 	/* Wait till FDONE bit is set to 1 */
   11956 	do {
   11957 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11958 		if (hsfsts & HSFSTS_DONE)
   11959 			break;
   11960 		delay(1);
   11961 		i++;
   11962 	} while (i < timeout);
    11963 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11964 		error = 0;
   11965 
   11966 	return error;
   11967 }
   11968 
   11969 /******************************************************************************
   11970  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11971  *
   11972  * sc - The pointer to the hw structure
   11973  * index - The index of the byte or word to read.
   11974  * size - Size of data to read, 1=byte 2=word, 4=dword
   11975  * data - Pointer to the word to store the value read.
   11976  *****************************************************************************/
   11977 static int32_t
   11978 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11979     uint32_t size, uint32_t *data)
   11980 {
   11981 	uint16_t hsfsts;
   11982 	uint16_t hsflctl;
   11983 	uint32_t flash_linear_address;
   11984 	uint32_t flash_data = 0;
   11985 	int32_t error = 1;
   11986 	int32_t count = 0;
   11987 
    11988 	if (size < 1 || size > 4 || data == NULL ||
   11989 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11990 		return error;
   11991 
   11992 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11993 	    sc->sc_ich8_flash_base;
   11994 
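          	/*
          	 * A read cycle is: program the byte count and READ cycle
          	 * type in HSFCTL, write the linear address to FADDR, set
          	 * the GO bit, then poll FDONE and pull the result out of
          	 * FDATA0.
          	 */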
   11995 	do {
   11996 		delay(1);
   11997 		/* Steps */
   11998 		error = wm_ich8_cycle_init(sc);
   11999 		if (error)
   12000 			break;
   12001 
   12002 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12003 		/* BCOUNT is the transfer size in bytes minus one (0b=1, 11b=4) */
   12004 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12005 		    & HSFCTL_BCOUNT_MASK;
   12006 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12007 		if (sc->sc_type == WM_T_PCH_SPT) {
   12008 			/*
    12009 			 * In SPT, this register is in LAN memory space, not
    12010 			 * flash.  Therefore, only 32-bit access is supported.
   12011 			 */
   12012 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   12013 			    (uint32_t)hsflctl);
   12014 		} else
   12015 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12016 
   12017 		/*
   12018 		 * Write the last 24 bits of index into Flash Linear address
   12019 		 * field in Flash Address
   12020 		 */
   12021 		/* TODO: TBD maybe check the index against the size of flash */
   12022 
   12023 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12024 
   12025 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12026 
   12027 		/*
    12028 		 * Check if FCERR is set to 1; if so, clear it and try the
    12029 		 * whole sequence a few more times before giving up.
    12030 		 * Otherwise read the data out of Flash Data0, least
    12031 		 * significant byte first.
   12032 		 */
   12033 		if (error == 0) {
   12034 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12035 			if (size == 1)
   12036 				*data = (uint8_t)(flash_data & 0x000000FF);
   12037 			else if (size == 2)
   12038 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12039 			else if (size == 4)
   12040 				*data = (uint32_t)flash_data;
   12041 			break;
   12042 		} else {
   12043 			/*
   12044 			 * If we've gotten here, then things are probably
   12045 			 * completely hosed, but if the error condition is
   12046 			 * detected, it won't hurt to give it another try...
   12047 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12048 			 */
   12049 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12050 			if (hsfsts & HSFSTS_ERR) {
   12051 				/* Repeat for some time before giving up. */
   12052 				continue;
   12053 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12054 				break;
   12055 		}
   12056 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12057 
   12058 	return error;
   12059 }
   12060 
   12061 /******************************************************************************
   12062  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12063  *
   12064  * sc - pointer to wm_hw structure
   12065  * index - The index of the byte to read.
   12066  * data - Pointer to a byte to store the value read.
   12067  *****************************************************************************/
   12068 static int32_t
   12069 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12070 {
   12071 	int32_t status;
   12072 	uint32_t word = 0;
   12073 
   12074 	status = wm_read_ich8_data(sc, index, 1, &word);
   12075 	if (status == 0)
   12076 		*data = (uint8_t)word;
   12077 	else
   12078 		*data = 0;
   12079 
   12080 	return status;
   12081 }
   12082 
   12083 /******************************************************************************
   12084  * Reads a word from the NVM using the ICH8 flash access registers.
   12085  *
   12086  * sc - pointer to wm_hw structure
   12087  * index - The starting byte index of the word to read.
   12088  * data - Pointer to a word to store the value read.
   12089  *****************************************************************************/
   12090 static int32_t
   12091 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12092 {
   12093 	int32_t status;
   12094 	uint32_t word = 0;
   12095 
   12096 	status = wm_read_ich8_data(sc, index, 2, &word);
   12097 	if (status == 0)
   12098 		*data = (uint16_t)word;
   12099 	else
   12100 		*data = 0;
   12101 
   12102 	return status;
   12103 }
   12104 
   12105 /******************************************************************************
   12106  * Reads a dword from the NVM using the ICH8 flash access registers.
   12107  *
   12108  * sc - pointer to wm_hw structure
   12109  * index - The starting byte index of the word to read.
   12110  * data - Pointer to a word to store the value read.
   12111  *****************************************************************************/
   12112 static int32_t
   12113 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12114 {
   12115 	int32_t status;
   12116 
   12117 	status = wm_read_ich8_data(sc, index, 4, data);
   12118 	return status;
   12119 }
   12120 
   12121 /******************************************************************************
   12122  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12123  * register.
   12124  *
   12125  * sc - Struct containing variables accessed by shared code
   12126  * offset - offset of word in the EEPROM to read
    12127  * words - number of words to read
    12128  * data - words read from the EEPROM
   12129  *****************************************************************************/
   12130 static int
   12131 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12132 {
   12133 	int32_t  rv = 0;
   12134 	uint32_t flash_bank = 0;
   12135 	uint32_t act_offset = 0;
   12136 	uint32_t bank_offset = 0;
   12137 	uint16_t word = 0;
   12138 	uint16_t i = 0;
   12139 
   12140 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12141 		device_xname(sc->sc_dev), __func__));
   12142 
   12143 	if (sc->nvm.acquire(sc) != 0)
   12144 		return -1;
   12145 
   12146 	/*
   12147 	 * We need to know which is the valid flash bank.  In the event
   12148 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12149 	 * managing flash_bank.  So it cannot be trusted and needs
   12150 	 * to be updated with each read.
   12151 	 */
   12152 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12153 	if (rv) {
   12154 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12155 			device_xname(sc->sc_dev)));
   12156 		flash_bank = 0;
   12157 	}
   12158 
   12159 	/*
   12160 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12161 	 * size
   12162 	 */
   12163 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12164 
   12165 	for (i = 0; i < words; i++) {
   12166 		/* The NVM part needs a byte offset, hence * 2 */
   12167 		act_offset = bank_offset + ((offset + i) * 2);
   12168 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12169 		if (rv) {
   12170 			aprint_error_dev(sc->sc_dev,
   12171 			    "%s: failed to read NVM\n", __func__);
   12172 			break;
   12173 		}
   12174 		data[i] = word;
   12175 	}
   12176 
   12177 	sc->nvm.release(sc);
   12178 	return rv;
   12179 }
   12180 
   12181 /******************************************************************************
   12182  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12183  * register.
   12184  *
   12185  * sc - Struct containing variables accessed by shared code
   12186  * offset - offset of word in the EEPROM to read
    12187  * words - number of words to read
    12188  * data - words read from the EEPROM
   12189  *****************************************************************************/
   12190 static int
   12191 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12192 {
   12193 	int32_t  rv = 0;
   12194 	uint32_t flash_bank = 0;
   12195 	uint32_t act_offset = 0;
   12196 	uint32_t bank_offset = 0;
   12197 	uint32_t dword = 0;
   12198 	uint16_t i = 0;
   12199 
   12200 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12201 		device_xname(sc->sc_dev), __func__));
   12202 
   12203 	if (sc->nvm.acquire(sc) != 0)
   12204 		return -1;
   12205 
   12206 	/*
   12207 	 * We need to know which is the valid flash bank.  In the event
   12208 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12209 	 * managing flash_bank.  So it cannot be trusted and needs
   12210 	 * to be updated with each read.
   12211 	 */
   12212 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12213 	if (rv) {
   12214 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12215 			device_xname(sc->sc_dev)));
   12216 		flash_bank = 0;
   12217 	}
   12218 
   12219 	/*
   12220 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12221 	 * size
   12222 	 */
   12223 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12224 
   12225 	for (i = 0; i < words; i++) {
   12226 		/* The NVM part needs a byte offset, hence * 2 */
   12227 		act_offset = bank_offset + ((offset + i) * 2);
   12228 		/* but we must read dword aligned, so mask ... */
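          		/* (e.g. word 3 of a bank is byte offset 6: read the
          		 * dword at byte offset 4 and take its high 16 bits) */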
   12229 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12230 		if (rv) {
   12231 			aprint_error_dev(sc->sc_dev,
   12232 			    "%s: failed to read NVM\n", __func__);
   12233 			break;
   12234 		}
   12235 		/* ... and pick out low or high word */
   12236 		if ((act_offset & 0x2) == 0)
   12237 			data[i] = (uint16_t)(dword & 0xFFFF);
   12238 		else
   12239 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12240 	}
   12241 
   12242 	sc->nvm.release(sc);
   12243 	return rv;
   12244 }
   12245 
   12246 /* iNVM */
   12247 
   12248 static int
   12249 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12250 {
    12251 	int32_t  rv = -1;
   12252 	uint32_t invm_dword;
   12253 	uint16_t i;
   12254 	uint8_t record_type, word_address;
   12255 
   12256 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12257 		device_xname(sc->sc_dev), __func__));
   12258 
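          	/*
          	 * Walk the iNVM autoload records: skip CSR-autoload and
          	 * RSA-key records by their payload sizes, stop at the first
          	 * uninitialized entry, and return the data from a
          	 * word-autoload record whose address matches.
          	 */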
   12259 	for (i = 0; i < INVM_SIZE; i++) {
   12260 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12261 		/* Get record type */
   12262 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12263 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12264 			break;
   12265 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12266 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12267 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12268 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12269 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12270 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12271 			if (word_address == address) {
   12272 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12273 				rv = 0;
   12274 				break;
   12275 			}
   12276 		}
   12277 	}
   12278 
   12279 	return rv;
   12280 }
   12281 
   12282 static int
   12283 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12284 {
   12285 	int rv = 0;
   12286 	int i;
   12287 
   12288 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12289 		device_xname(sc->sc_dev), __func__));
   12290 
   12291 	if (sc->nvm.acquire(sc) != 0)
   12292 		return -1;
   12293 
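          	/*
          	 * iNVM images are sparse: only the MAC address words must
          	 * be present, while the configuration and LED words fall
          	 * back to their documented I211 defaults when unmapped.
          	 */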
   12294 	for (i = 0; i < words; i++) {
   12295 		switch (offset + i) {
   12296 		case NVM_OFF_MACADDR:
   12297 		case NVM_OFF_MACADDR1:
   12298 		case NVM_OFF_MACADDR2:
   12299 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12300 			if (rv != 0) {
   12301 				data[i] = 0xffff;
   12302 				rv = -1;
   12303 			}
   12304 			break;
   12305 		case NVM_OFF_CFG2:
   12306 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12307 			if (rv != 0) {
   12308 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12309 				rv = 0;
   12310 			}
   12311 			break;
   12312 		case NVM_OFF_CFG4:
   12313 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12314 			if (rv != 0) {
   12315 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12316 				rv = 0;
   12317 			}
   12318 			break;
   12319 		case NVM_OFF_LED_1_CFG:
   12320 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12321 			if (rv != 0) {
   12322 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12323 				rv = 0;
   12324 			}
   12325 			break;
   12326 		case NVM_OFF_LED_0_2_CFG:
   12327 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12328 			if (rv != 0) {
   12329 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12330 				rv = 0;
   12331 			}
   12332 			break;
   12333 		case NVM_OFF_ID_LED_SETTINGS:
   12334 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12335 			if (rv != 0) {
   12336 				*data = ID_LED_RESERVED_FFFF;
   12337 				rv = 0;
   12338 			}
   12339 			break;
   12340 		default:
   12341 			DPRINTF(WM_DEBUG_NVM,
    12342 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
   12343 			*data = NVM_RESERVED_WORD;
   12344 			break;
   12345 		}
   12346 	}
   12347 
   12348 	sc->nvm.release(sc);
   12349 	return rv;
   12350 }
   12351 
   12352 /* Lock, detecting NVM type, validate checksum, version and read */
   12353 
   12354 static int
   12355 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12356 {
   12357 	uint32_t eecd = 0;
   12358 
   12359 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12360 	    || sc->sc_type == WM_T_82583) {
   12361 		eecd = CSR_READ(sc, WMREG_EECD);
   12362 
   12363 		/* Isolate bits 15 & 16 */
   12364 		eecd = ((eecd >> 15) & 0x03);
   12365 
   12366 		/* If both bits are set, device is Flash type */
   12367 		if (eecd == 0x03)
   12368 			return 0;
   12369 	}
   12370 	return 1;
   12371 }
   12372 
   12373 static int
   12374 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12375 {
   12376 	uint32_t eec;
   12377 
   12378 	eec = CSR_READ(sc, WMREG_EEC);
   12379 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12380 		return 1;
   12381 
   12382 	return 0;
   12383 }
   12384 
   12385 /*
   12386  * wm_nvm_validate_checksum
   12387  *
   12388  * The checksum is defined as the sum of the first 64 (16 bit) words.
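           * The sum must equal NVM_CHECKSUM; note that on a mismatch this
           * function only logs under WM_DEBUG and still returns success.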
   12389  */
   12390 static int
   12391 wm_nvm_validate_checksum(struct wm_softc *sc)
   12392 {
   12393 	uint16_t checksum;
   12394 	uint16_t eeprom_data;
   12395 #ifdef WM_DEBUG
   12396 	uint16_t csum_wordaddr, valid_checksum;
   12397 #endif
   12398 	int i;
   12399 
   12400 	checksum = 0;
   12401 
   12402 	/* Don't check for I211 */
   12403 	if (sc->sc_type == WM_T_I211)
   12404 		return 0;
   12405 
   12406 #ifdef WM_DEBUG
   12407 	if (sc->sc_type == WM_T_PCH_LPT) {
   12408 		csum_wordaddr = NVM_OFF_COMPAT;
   12409 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12410 	} else {
   12411 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12412 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12413 	}
   12414 
   12415 	/* Dump EEPROM image for debug */
   12416 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12417 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12418 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12419 		/* XXX PCH_SPT? */
   12420 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12421 		if ((eeprom_data & valid_checksum) == 0) {
   12422 			DPRINTF(WM_DEBUG_NVM,
    12423 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12424 				device_xname(sc->sc_dev), eeprom_data,
   12425 				    valid_checksum));
   12426 		}
   12427 	}
   12428 
   12429 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12430 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12431 		for (i = 0; i < NVM_SIZE; i++) {
   12432 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12433 				printf("XXXX ");
   12434 			else
   12435 				printf("%04hx ", eeprom_data);
   12436 			if (i % 8 == 7)
   12437 				printf("\n");
   12438 		}
   12439 	}
   12440 
   12441 #endif /* WM_DEBUG */
   12442 
   12443 	for (i = 0; i < NVM_SIZE; i++) {
   12444 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12445 			return 1;
   12446 		checksum += eeprom_data;
   12447 	}
   12448 
   12449 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12450 #ifdef WM_DEBUG
   12451 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12452 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12453 #endif
   12454 	}
   12455 
   12456 	return 0;
   12457 }
   12458 
   12459 static void
   12460 wm_nvm_version_invm(struct wm_softc *sc)
   12461 {
   12462 	uint32_t dword;
   12463 
   12464 	/*
    12465 	 * Linux's code to decode the version is very strange, so we
    12466 	 * don't follow that algorithm and simply use word 61 as the
    12467 	 * documentation describes.  It may not be perfect, though...
   12468 	 *
   12469 	 * Example:
   12470 	 *
   12471 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12472 	 */
   12473 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12474 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12475 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12476 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12477 }
   12478 
   12479 static void
   12480 wm_nvm_version(struct wm_softc *sc)
   12481 {
   12482 	uint16_t major, minor, build, patch;
   12483 	uint16_t uid0, uid1;
   12484 	uint16_t nvm_data;
   12485 	uint16_t off;
   12486 	bool check_version = false;
   12487 	bool check_optionrom = false;
   12488 	bool have_build = false;
   12489 	bool have_uid = true;
   12490 
   12491 	/*
   12492 	 * Version format:
   12493 	 *
   12494 	 * XYYZ
   12495 	 * X0YZ
   12496 	 * X0YY
   12497 	 *
   12498 	 * Example:
   12499 	 *
   12500 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12501 	 *	82571	0x50a6	5.10.6?
   12502 	 *	82572	0x506a	5.6.10?
   12503 	 *	82572EI	0x5069	5.6.9?
   12504 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12505 	 *		0x2013	2.1.3?
    12506 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12507 	 */
   12508 
   12509 	/*
   12510 	 * XXX
    12511 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12512 	 * I've never seen such a small SPI ROM on real 82574 hardware.
   12513 	 */
   12514 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12515 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12516 		have_uid = false;
   12517 
   12518 	switch (sc->sc_type) {
   12519 	case WM_T_82571:
   12520 	case WM_T_82572:
   12521 	case WM_T_82574:
   12522 	case WM_T_82583:
   12523 		check_version = true;
   12524 		check_optionrom = true;
   12525 		have_build = true;
   12526 		break;
   12527 	case WM_T_82575:
   12528 	case WM_T_82576:
   12529 	case WM_T_82580:
   12530 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12531 			check_version = true;
   12532 		break;
   12533 	case WM_T_I211:
   12534 		wm_nvm_version_invm(sc);
   12535 		have_uid = false;
   12536 		goto printver;
   12537 	case WM_T_I210:
   12538 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12539 			wm_nvm_version_invm(sc);
   12540 			have_uid = false;
   12541 			goto printver;
   12542 		}
   12543 		/* FALLTHROUGH */
   12544 	case WM_T_I350:
   12545 	case WM_T_I354:
   12546 		check_version = true;
   12547 		check_optionrom = true;
   12548 		break;
   12549 	default:
   12550 		return;
   12551 	}
   12552 	if (check_version
   12553 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   12554 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12555 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12556 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12557 			build = nvm_data & NVM_BUILD_MASK;
   12558 			have_build = true;
   12559 		} else
   12560 			minor = nvm_data & 0x00ff;
   12561 
    12562 		/* The minor number is BCD-encoded: e.g. 0x16 -> 16 */
   12563 		minor = (minor / 16) * 10 + (minor % 16);
   12564 		sc->sc_nvm_ver_major = major;
   12565 		sc->sc_nvm_ver_minor = minor;
   12566 
   12567 printver:
   12568 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12569 		    sc->sc_nvm_ver_minor);
   12570 		if (have_build) {
   12571 			sc->sc_nvm_ver_build = build;
   12572 			aprint_verbose(".%d", build);
   12573 		}
   12574 	}
   12575 
    12576 	/* Assume the Option ROM area is above NVM_SIZE */
   12577 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   12578 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   12579 		/* Option ROM Version */
   12580 		if ((off != 0x0000) && (off != 0xffff)) {
   12581 			int rv;
   12582 
   12583 			off += NVM_COMBO_VER_OFF;
   12584 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   12585 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   12586 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   12587 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12588 				/* 16bits */
   12589 				major = uid0 >> 8;
   12590 				build = (uid0 << 8) | (uid1 >> 8);
   12591 				patch = uid1 & 0x00ff;
   12592 				aprint_verbose(", option ROM Version %d.%d.%d",
   12593 				    major, build, patch);
   12594 			}
   12595 		}
   12596 	}
   12597 
   12598 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   12599 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12600 }
   12601 
   12602 /*
   12603  * wm_nvm_read:
   12604  *
   12605  *	Read data from the serial EEPROM.
   12606  */
   12607 static int
   12608 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12609 {
   12610 	int rv;
   12611 
   12612 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12613 		device_xname(sc->sc_dev), __func__));
   12614 
   12615 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12616 		return -1;
   12617 
   12618 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12619 
   12620 	return rv;
   12621 }
   12622 
   12623 /*
   12624  * Hardware semaphores.
    12625  * Very complex: the locking scheme differs by chip generation.
   12626  */
   12627 
   12628 static int
   12629 wm_get_null(struct wm_softc *sc)
   12630 {
   12631 
   12632 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12633 		device_xname(sc->sc_dev), __func__));
   12634 	return 0;
   12635 }
   12636 
   12637 static void
   12638 wm_put_null(struct wm_softc *sc)
   12639 {
   12640 
   12641 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12642 		device_xname(sc->sc_dev), __func__));
   12643 	return;
   12644 }
   12645 
   12646 static int
   12647 wm_get_eecd(struct wm_softc *sc)
   12648 {
   12649 	uint32_t reg;
   12650 	int x;
   12651 
   12652 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12653 		device_xname(sc->sc_dev), __func__));
   12654 
   12655 	reg = CSR_READ(sc, WMREG_EECD);
   12656 
   12657 	/* Request EEPROM access. */
   12658 	reg |= EECD_EE_REQ;
   12659 	CSR_WRITE(sc, WMREG_EECD, reg);
   12660 
   12661 	/* ..and wait for it to be granted. */
   12662 	for (x = 0; x < 1000; x++) {
   12663 		reg = CSR_READ(sc, WMREG_EECD);
   12664 		if (reg & EECD_EE_GNT)
   12665 			break;
   12666 		delay(5);
   12667 	}
   12668 	if ((reg & EECD_EE_GNT) == 0) {
   12669 		aprint_error_dev(sc->sc_dev,
   12670 		    "could not acquire EEPROM GNT\n");
   12671 		reg &= ~EECD_EE_REQ;
   12672 		CSR_WRITE(sc, WMREG_EECD, reg);
   12673 		return -1;
   12674 	}
   12675 
   12676 	return 0;
   12677 }
   12678 
   12679 static void
   12680 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12681 {
   12682 
   12683 	*eecd |= EECD_SK;
   12684 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12685 	CSR_WRITE_FLUSH(sc);
   12686 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12687 		delay(1);
   12688 	else
   12689 		delay(50);
   12690 }
   12691 
   12692 static void
   12693 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12694 {
   12695 
   12696 	*eecd &= ~EECD_SK;
   12697 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12698 	CSR_WRITE_FLUSH(sc);
   12699 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12700 		delay(1);
   12701 	else
   12702 		delay(50);
   12703 }
   12704 
   12705 static void
   12706 wm_put_eecd(struct wm_softc *sc)
   12707 {
   12708 	uint32_t reg;
   12709 
   12710 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12711 		device_xname(sc->sc_dev), __func__));
   12712 
   12713 	/* Stop nvm */
   12714 	reg = CSR_READ(sc, WMREG_EECD);
   12715 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12716 		/* Pull CS high */
   12717 		reg |= EECD_CS;
   12718 		wm_nvm_eec_clock_lower(sc, &reg);
   12719 	} else {
   12720 		/* CS on Microwire is active-high */
   12721 		reg &= ~(EECD_CS | EECD_DI);
   12722 		CSR_WRITE(sc, WMREG_EECD, reg);
   12723 		wm_nvm_eec_clock_raise(sc, &reg);
   12724 		wm_nvm_eec_clock_lower(sc, &reg);
   12725 	}
   12726 
   12727 	reg = CSR_READ(sc, WMREG_EECD);
   12728 	reg &= ~EECD_EE_REQ;
   12729 	CSR_WRITE(sc, WMREG_EECD, reg);
   12730 
   12731 	return;
   12732 }
   12733 
   12734 /*
   12735  * Get hardware semaphore.
   12736  * Same as e1000_get_hw_semaphore_generic()
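           *
           * The lock is taken in two stages: wait for the host SMBI bit
           * to clear, then set SWESMBI and read it back to confirm
           * ownership.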
   12737  */
   12738 static int
   12739 wm_get_swsm_semaphore(struct wm_softc *sc)
   12740 {
   12741 	int32_t timeout;
   12742 	uint32_t swsm;
   12743 
   12744 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12745 		device_xname(sc->sc_dev), __func__));
   12746 	KASSERT(sc->sc_nvm_wordsize > 0);
   12747 
   12748 retry:
   12749 	/* Get the SW semaphore. */
   12750 	timeout = sc->sc_nvm_wordsize + 1;
   12751 	while (timeout) {
   12752 		swsm = CSR_READ(sc, WMREG_SWSM);
   12753 
   12754 		if ((swsm & SWSM_SMBI) == 0)
   12755 			break;
   12756 
   12757 		delay(50);
   12758 		timeout--;
   12759 	}
   12760 
   12761 	if (timeout == 0) {
   12762 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   12763 			/*
   12764 			 * In rare circumstances, the SW semaphore may already
   12765 			 * be held unintentionally. Clear the semaphore once
   12766 			 * before giving up.
   12767 			 */
   12768 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   12769 			wm_put_swsm_semaphore(sc);
   12770 			goto retry;
   12771 		}
   12772 		aprint_error_dev(sc->sc_dev,
   12773 		    "could not acquire SWSM SMBI\n");
   12774 		return 1;
   12775 	}
   12776 
   12777 	/* Get the FW semaphore. */
   12778 	timeout = sc->sc_nvm_wordsize + 1;
   12779 	while (timeout) {
   12780 		swsm = CSR_READ(sc, WMREG_SWSM);
   12781 		swsm |= SWSM_SWESMBI;
   12782 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12783 		/* If we managed to set the bit we got the semaphore. */
   12784 		swsm = CSR_READ(sc, WMREG_SWSM);
   12785 		if (swsm & SWSM_SWESMBI)
   12786 			break;
   12787 
   12788 		delay(50);
   12789 		timeout--;
   12790 	}
   12791 
   12792 	if (timeout == 0) {
   12793 		aprint_error_dev(sc->sc_dev,
   12794 		    "could not acquire SWSM SWESMBI\n");
   12795 		/* Release semaphores */
   12796 		wm_put_swsm_semaphore(sc);
   12797 		return 1;
   12798 	}
   12799 	return 0;
   12800 }
   12801 
   12802 /*
   12803  * Put hardware semaphore.
   12804  * Same as e1000_put_hw_semaphore_generic()
   12805  */
   12806 static void
   12807 wm_put_swsm_semaphore(struct wm_softc *sc)
   12808 {
   12809 	uint32_t swsm;
   12810 
   12811 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12812 		device_xname(sc->sc_dev), __func__));
   12813 
   12814 	swsm = CSR_READ(sc, WMREG_SWSM);
   12815 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12816 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12817 }
   12818 
   12819 /*
   12820  * Get SW/FW semaphore.
   12821  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12822  */
   12823 static int
   12824 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12825 {
   12826 	uint32_t swfw_sync;
   12827 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12828 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12829 	int timeout;
   12830 
   12831 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12832 		device_xname(sc->sc_dev), __func__));
   12833 
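          	/*
          	 * Each resource has a software bit and a firmware bit in
          	 * SW_FW_SYNC; the resource is free only when both are clear,
          	 * and access to SW_FW_SYNC itself is guarded by the SWSM
          	 * semaphore.
          	 */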
   12834 	if (sc->sc_type == WM_T_80003)
   12835 		timeout = 50;
   12836 	else
   12837 		timeout = 200;
   12838 
    12839 	for (; timeout > 0; timeout--) {
   12840 		if (wm_get_swsm_semaphore(sc)) {
   12841 			aprint_error_dev(sc->sc_dev,
   12842 			    "%s: failed to get semaphore\n",
   12843 			    __func__);
   12844 			return 1;
   12845 		}
   12846 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12847 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12848 			swfw_sync |= swmask;
   12849 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12850 			wm_put_swsm_semaphore(sc);
   12851 			return 0;
   12852 		}
   12853 		wm_put_swsm_semaphore(sc);
   12854 		delay(5000);
   12855 	}
   12856 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12857 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12858 	return 1;
   12859 }
   12860 
   12861 static void
   12862 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12863 {
   12864 	uint32_t swfw_sync;
   12865 
   12866 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12867 		device_xname(sc->sc_dev), __func__));
   12868 
   12869 	while (wm_get_swsm_semaphore(sc) != 0)
   12870 		continue;
   12871 
   12872 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12873 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12874 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12875 
   12876 	wm_put_swsm_semaphore(sc);
   12877 }
   12878 
   12879 static int
   12880 wm_get_nvm_80003(struct wm_softc *sc)
   12881 {
   12882 	int rv;
   12883 
   12884 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12885 		device_xname(sc->sc_dev), __func__));
   12886 
   12887 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12888 		aprint_error_dev(sc->sc_dev,
   12889 		    "%s: failed to get semaphore(SWFW)\n",
   12890 		    __func__);
   12891 		return rv;
   12892 	}
   12893 
   12894 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12895 	    && (rv = wm_get_eecd(sc)) != 0) {
   12896 		aprint_error_dev(sc->sc_dev,
   12897 		    "%s: failed to get semaphore(EECD)\n",
   12898 		    __func__);
   12899 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12900 		return rv;
   12901 	}
   12902 
   12903 	return 0;
   12904 }
   12905 
   12906 static void
   12907 wm_put_nvm_80003(struct wm_softc *sc)
   12908 {
   12909 
   12910 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12911 		device_xname(sc->sc_dev), __func__));
   12912 
   12913 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12914 		wm_put_eecd(sc);
   12915 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12916 }
   12917 
   12918 static int
   12919 wm_get_nvm_82571(struct wm_softc *sc)
   12920 {
   12921 	int rv;
   12922 
   12923 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12924 		device_xname(sc->sc_dev), __func__));
   12925 
   12926 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12927 		return rv;
   12928 
   12929 	switch (sc->sc_type) {
   12930 	case WM_T_82573:
   12931 		break;
   12932 	default:
   12933 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12934 			rv = wm_get_eecd(sc);
   12935 		break;
   12936 	}
   12937 
   12938 	if (rv != 0) {
   12939 		aprint_error_dev(sc->sc_dev,
   12940 		    "%s: failed to get semaphore\n",
   12941 		    __func__);
   12942 		wm_put_swsm_semaphore(sc);
   12943 	}
   12944 
   12945 	return rv;
   12946 }
   12947 
   12948 static void
   12949 wm_put_nvm_82571(struct wm_softc *sc)
   12950 {
   12951 
   12952 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12953 		device_xname(sc->sc_dev), __func__));
   12954 
   12955 	switch (sc->sc_type) {
   12956 	case WM_T_82573:
   12957 		break;
   12958 	default:
   12959 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12960 			wm_put_eecd(sc);
   12961 		break;
   12962 	}
   12963 
   12964 	wm_put_swsm_semaphore(sc);
   12965 }
   12966 
   12967 static int
   12968 wm_get_phy_82575(struct wm_softc *sc)
   12969 {
   12970 
   12971 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12972 		device_xname(sc->sc_dev), __func__));
   12973 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12974 }
   12975 
   12976 static void
   12977 wm_put_phy_82575(struct wm_softc *sc)
   12978 {
   12979 
   12980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12981 		device_xname(sc->sc_dev), __func__));
   12982 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12983 }
   12984 
   12985 static int
   12986 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12987 {
   12988 	uint32_t ext_ctrl;
    12989 	int timeout;
   12990 
   12991 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12992 		device_xname(sc->sc_dev), __func__));
   12993 
   12994 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12995 	for (timeout = 0; timeout < 200; timeout++) {
   12996 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12997 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12998 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12999 
   13000 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13001 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13002 			return 0;
   13003 		delay(5000);
   13004 	}
   13005 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13006 	    device_xname(sc->sc_dev), ext_ctrl);
   13007 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13008 	return 1;
   13009 }
   13010 
   13011 static void
   13012 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13013 {
   13014 	uint32_t ext_ctrl;
   13015 
   13016 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13017 		device_xname(sc->sc_dev), __func__));
   13018 
   13019 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13020 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13021 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13022 
   13023 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13024 }
   13025 
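/*
 * Acquire the ICH8 software flag.  This is a two-phase handshake on the
 * same EXTCNFCTR ownership bit: first wait for the bit to read as clear
 * (no other owner), then set it and poll until the write sticks.
 */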
   13026 static int
   13027 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13028 {
   13029 	uint32_t ext_ctrl;
   13030 	int timeout;
   13031 
   13032 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13033 		device_xname(sc->sc_dev), __func__));
   13034 	mutex_enter(sc->sc_ich_phymtx);
   13035 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13036 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13037 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13038 			break;
   13039 		delay(1000);
   13040 	}
   13041 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13042 		printf("%s: SW has already locked the resource\n",
   13043 		    device_xname(sc->sc_dev));
   13044 		goto out;
   13045 	}
   13046 
   13047 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13048 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13049 	for (timeout = 0; timeout < 1000; timeout++) {
   13050 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13051 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13052 			break;
   13053 		delay(1000);
   13054 	}
   13055 	if (timeout >= 1000) {
   13056 		printf("%s: failed to acquire semaphore\n",
   13057 		    device_xname(sc->sc_dev));
   13058 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13059 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13060 		goto out;
   13061 	}
   13062 	return 0;
   13063 
   13064 out:
   13065 	mutex_exit(sc->sc_ich_phymtx);
   13066 	return 1;
   13067 }
   13068 
   13069 static void
   13070 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13071 {
   13072 	uint32_t ext_ctrl;
   13073 
   13074 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13075 		device_xname(sc->sc_dev), __func__));
   13076 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13077 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13078 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13079 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13080 	} else {
   13081 		printf("%s: Semaphore unexpectedly released\n",
   13082 		    device_xname(sc->sc_dev));
   13083 	}
   13084 
   13085 	mutex_exit(sc->sc_ich_phymtx);
   13086 }
   13087 
   13088 static int
   13089 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13090 {
   13091 
   13092 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13093 		device_xname(sc->sc_dev), __func__));
   13094 	mutex_enter(sc->sc_ich_nvmmtx);
   13095 
   13096 	return 0;
   13097 }
   13098 
   13099 static void
   13100 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13101 {
   13102 
   13103 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13104 		device_xname(sc->sc_dev), __func__));
   13105 	mutex_exit(sc->sc_ich_nvmmtx);
   13106 }
   13107 
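/*
 * Acquire the 82573 hardware semaphore, again via the EXTCNFCTR MDIO
 * software ownership bit, retrying every 2ms up to
 * WM_MDIO_OWNERSHIP_TIMEOUT times before giving up.
 */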
   13108 static int
   13109 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13110 {
   13111 	int i = 0;
   13112 	uint32_t reg;
   13113 
   13114 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13115 		device_xname(sc->sc_dev), __func__));
   13116 
   13117 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13118 	do {
   13119 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13120 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13121 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13122 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13123 			break;
   13124 		delay(2*1000);
   13125 		i++;
   13126 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13127 
   13128 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13129 		wm_put_hw_semaphore_82573(sc);
   13130 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13131 		    device_xname(sc->sc_dev));
   13132 		return -1;
   13133 	}
   13134 
   13135 	return 0;
   13136 }
   13137 
   13138 static void
   13139 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13140 {
   13141 	uint32_t reg;
   13142 
   13143 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13144 		device_xname(sc->sc_dev), __func__));
   13145 
   13146 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13147 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13148 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13149 }
   13150 
   13151 /*
   13152  * Management mode and power management related subroutines.
   13153  * BMC, AMT, suspend/resume and EEE.
   13154  */
   13155 
   13156 #ifdef WM_WOL
   13157 static int
   13158 wm_check_mng_mode(struct wm_softc *sc)
   13159 {
   13160 	int rv;
   13161 
   13162 	switch (sc->sc_type) {
   13163 	case WM_T_ICH8:
   13164 	case WM_T_ICH9:
   13165 	case WM_T_ICH10:
   13166 	case WM_T_PCH:
   13167 	case WM_T_PCH2:
   13168 	case WM_T_PCH_LPT:
   13169 	case WM_T_PCH_SPT:
   13170 		rv = wm_check_mng_mode_ich8lan(sc);
   13171 		break;
   13172 	case WM_T_82574:
   13173 	case WM_T_82583:
   13174 		rv = wm_check_mng_mode_82574(sc);
   13175 		break;
   13176 	case WM_T_82571:
   13177 	case WM_T_82572:
   13178 	case WM_T_82573:
   13179 	case WM_T_80003:
   13180 		rv = wm_check_mng_mode_generic(sc);
   13181 		break;
   13182 	default:
		/* Nothing to do */
   13184 		rv = 0;
   13185 		break;
   13186 	}
   13187 
   13188 	return rv;
   13189 }
   13190 
   13191 static int
   13192 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13193 {
   13194 	uint32_t fwsm;
   13195 
   13196 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13197 
   13198 	if (((fwsm & FWSM_FW_VALID) != 0)
   13199 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13200 		return 1;
   13201 
   13202 	return 0;
   13203 }
   13204 
   13205 static int
   13206 wm_check_mng_mode_82574(struct wm_softc *sc)
   13207 {
   13208 	uint16_t data;
   13209 
   13210 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13211 
   13212 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13213 		return 1;
   13214 
   13215 	return 0;
   13216 }
   13217 
   13218 static int
   13219 wm_check_mng_mode_generic(struct wm_softc *sc)
   13220 {
   13221 	uint32_t fwsm;
   13222 
   13223 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13224 
   13225 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13226 		return 1;
   13227 
   13228 	return 0;
   13229 }
   13230 #endif /* WM_WOL */
   13231 
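/*
 * Return 1 if management firmware expects packets to be passed through
 * to the host (e.g. iAMT in pass-through mode), 0 otherwise.  The caller
 * uses a non-zero result to set WM_F_HAS_MANAGE.
 */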
   13232 static int
   13233 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13234 {
   13235 	uint32_t manc, fwsm, factps;
   13236 
   13237 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13238 		return 0;
   13239 
   13240 	manc = CSR_READ(sc, WMREG_MANC);
   13241 
   13242 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13243 		device_xname(sc->sc_dev), manc));
   13244 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13245 		return 0;
   13246 
   13247 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13248 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13249 		factps = CSR_READ(sc, WMREG_FACTPS);
   13250 		if (((factps & FACTPS_MNGCG) == 0)
   13251 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13252 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13254 		uint16_t data;
   13255 
   13256 		factps = CSR_READ(sc, WMREG_FACTPS);
   13257 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13258 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13259 			device_xname(sc->sc_dev), factps, data));
   13260 		if (((factps & FACTPS_MNGCG) == 0)
   13261 		    && ((data & NVM_CFG2_MNGM_MASK)
   13262 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13263 			return 1;
   13264 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13265 	    && ((manc & MANC_ASF_EN) == 0))
   13266 		return 1;
   13267 
   13268 	return 0;
   13269 }
   13270 
   13271 static bool
   13272 wm_phy_resetisblocked(struct wm_softc *sc)
   13273 {
   13274 	bool blocked = false;
   13275 	uint32_t reg;
   13276 	int i = 0;
   13277 
   13278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13279 		device_xname(sc->sc_dev), __func__));
   13280 
   13281 	switch (sc->sc_type) {
   13282 	case WM_T_ICH8:
   13283 	case WM_T_ICH9:
   13284 	case WM_T_ICH10:
   13285 	case WM_T_PCH:
   13286 	case WM_T_PCH2:
   13287 	case WM_T_PCH_LPT:
   13288 	case WM_T_PCH_SPT:
   13289 		do {
   13290 			reg = CSR_READ(sc, WMREG_FWSM);
   13291 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13292 				blocked = true;
   13293 				delay(10*1000);
   13294 				continue;
   13295 			}
   13296 			blocked = false;
   13297 		} while (blocked && (i++ < 30));
   13298 		return blocked;
   13300 	case WM_T_82571:
   13301 	case WM_T_82572:
   13302 	case WM_T_82573:
   13303 	case WM_T_82574:
   13304 	case WM_T_82583:
   13305 	case WM_T_80003:
   13306 		reg = CSR_READ(sc, WMREG_MANC);
   13307 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13308 			return true;
   13309 		else
   13310 			return false;
   13312 	default:
   13313 		/* no problem */
   13314 		break;
   13315 	}
   13316 
   13317 	return false;
   13318 }
   13319 
   13320 static void
   13321 wm_get_hw_control(struct wm_softc *sc)
   13322 {
   13323 	uint32_t reg;
   13324 
   13325 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13326 		device_xname(sc->sc_dev), __func__));
   13327 
   13328 	if (sc->sc_type == WM_T_82573) {
   13329 		reg = CSR_READ(sc, WMREG_SWSM);
   13330 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13331 	} else if (sc->sc_type >= WM_T_82571) {
   13332 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13333 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13334 	}
   13335 }
   13336 
   13337 static void
   13338 wm_release_hw_control(struct wm_softc *sc)
   13339 {
   13340 	uint32_t reg;
   13341 
   13342 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13343 		device_xname(sc->sc_dev), __func__));
   13344 
   13345 	if (sc->sc_type == WM_T_82573) {
   13346 		reg = CSR_READ(sc, WMREG_SWSM);
   13347 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13348 	} else if (sc->sc_type >= WM_T_82571) {
   13349 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13351 	}
   13352 }
   13353 
   13354 static void
   13355 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13356 {
   13357 	uint32_t reg;
   13358 
   13359 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13360 		device_xname(sc->sc_dev), __func__));
   13361 
   13362 	if (sc->sc_type < WM_T_PCH2)
   13363 		return;
   13364 
   13365 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13366 
   13367 	if (gate)
   13368 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13369 	else
   13370 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13371 
   13372 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13373 }
   13374 
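/*
 * Try to switch the PHY from SMBus access mode back to normal (MDIO over
 * PCIe) access.  On PCH and newer the PHY may come up reachable only via
 * SMBus; the code below probes accessibility, temporarily forces SMBus
 * where needed, toggles LANPHYPC to power-cycle the PHY, and finally
 * resets the PHY unless a reset is blocked by firmware.
 */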
   13375 static void
   13376 wm_smbustopci(struct wm_softc *sc)
   13377 {
   13378 	uint32_t fwsm, reg;
   13379 	int rv = 0;
   13380 
   13381 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13382 		device_xname(sc->sc_dev), __func__));
   13383 
   13384 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13385 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13386 
   13387 	/* Disable ULP */
   13388 	wm_ulp_disable(sc);
   13389 
   13390 	/* Acquire PHY semaphore */
   13391 	sc->phy.acquire(sc);
   13392 
   13393 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13394 	switch (sc->sc_type) {
   13395 	case WM_T_PCH_LPT:
   13396 	case WM_T_PCH_SPT:
   13397 		if (wm_phy_is_accessible_pchlan(sc))
   13398 			break;
   13399 
   13400 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13401 		reg |= CTRL_EXT_FORCE_SMBUS;
   13402 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13403 #if 0
   13404 		/* XXX Isn't this required??? */
   13405 		CSR_WRITE_FLUSH(sc);
   13406 #endif
   13407 		delay(50 * 1000);
   13408 		/* FALLTHROUGH */
   13409 	case WM_T_PCH2:
   13410 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13411 			break;
   13412 		/* FALLTHROUGH */
   13413 	case WM_T_PCH:
   13414 		if (sc->sc_type == WM_T_PCH)
   13415 			if ((fwsm & FWSM_FW_VALID) != 0)
   13416 				break;
   13417 
   13418 		if (wm_phy_resetisblocked(sc) == true) {
   13419 			printf("XXX reset is blocked(3)\n");
   13420 			break;
   13421 		}
   13422 
   13423 		wm_toggle_lanphypc_pch_lpt(sc);
   13424 
   13425 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13426 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13427 				break;
   13428 
   13429 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13430 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13431 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13432 
   13433 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13434 				break;
   13435 			rv = -1;
   13436 		}
   13437 		break;
   13438 	default:
   13439 		break;
   13440 	}
   13441 
   13442 	/* Release semaphore */
   13443 	sc->phy.release(sc);
   13444 
   13445 	if (rv == 0) {
   13446 		if (wm_phy_resetisblocked(sc)) {
   13447 			printf("XXX reset is blocked(4)\n");
   13448 			goto out;
   13449 		}
   13450 		wm_reset_phy(sc);
   13451 		if (wm_phy_resetisblocked(sc))
			printf("XXX reset is blocked(5)\n");
   13453 	}
   13454 
   13455 out:
   13456 	/*
   13457 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13458 	 */
   13459 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13460 		delay(10*1000);
   13461 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13462 	}
   13463 }
   13464 
   13465 static void
   13466 wm_init_manageability(struct wm_softc *sc)
   13467 {
   13468 
   13469 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13470 		device_xname(sc->sc_dev), __func__));
   13471 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13472 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13473 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13474 
   13475 		/* Disable hardware interception of ARP */
   13476 		manc &= ~MANC_ARP_EN;
   13477 
   13478 		/* Enable receiving management packets to the host */
   13479 		if (sc->sc_type >= WM_T_82571) {
   13480 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13482 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13483 		}
   13484 
   13485 		CSR_WRITE(sc, WMREG_MANC, manc);
   13486 	}
   13487 }
   13488 
   13489 static void
   13490 wm_release_manageability(struct wm_softc *sc)
   13491 {
   13492 
   13493 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13494 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13495 
   13496 		manc |= MANC_ARP_EN;
   13497 		if (sc->sc_type >= WM_T_82571)
   13498 			manc &= ~MANC_EN_MNG2HOST;
   13499 
   13500 		CSR_WRITE(sc, WMREG_MANC, manc);
   13501 	}
   13502 }
   13503 
   13504 static void
   13505 wm_get_wakeup(struct wm_softc *sc)
   13506 {
   13507 
   13508 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13509 	switch (sc->sc_type) {
   13510 	case WM_T_82573:
   13511 	case WM_T_82583:
   13512 		sc->sc_flags |= WM_F_HAS_AMT;
   13513 		/* FALLTHROUGH */
   13514 	case WM_T_80003:
   13515 	case WM_T_82575:
   13516 	case WM_T_82576:
   13517 	case WM_T_82580:
   13518 	case WM_T_I350:
   13519 	case WM_T_I354:
   13520 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13521 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13522 		/* FALLTHROUGH */
   13523 	case WM_T_82541:
   13524 	case WM_T_82541_2:
   13525 	case WM_T_82547:
   13526 	case WM_T_82547_2:
   13527 	case WM_T_82571:
   13528 	case WM_T_82572:
   13529 	case WM_T_82574:
   13530 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13531 		break;
   13532 	case WM_T_ICH8:
   13533 	case WM_T_ICH9:
   13534 	case WM_T_ICH10:
   13535 	case WM_T_PCH:
   13536 	case WM_T_PCH2:
   13537 	case WM_T_PCH_LPT:
   13538 	case WM_T_PCH_SPT:
   13539 		sc->sc_flags |= WM_F_HAS_AMT;
   13540 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13541 		break;
   13542 	default:
   13543 		break;
   13544 	}
   13545 
   13546 	/* 1: HAS_MANAGE */
   13547 	if (wm_enable_mng_pass_thru(sc) != 0)
   13548 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13549 
	/*
	 * Note that the WOL flag is set after the EEPROM/NVM has been
	 * reset.
	 */
   13554 }
   13555 
/*
 * Unconfigure Ultra Low Power (ULP) mode.
 * Only for PCH_LPT and newer; device IDs without ULP support are
 * excluded below.
 */
   13560 static void
   13561 wm_ulp_disable(struct wm_softc *sc)
   13562 {
   13563 	uint32_t reg;
   13564 	int i = 0;
   13565 
   13566 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13567 		device_xname(sc->sc_dev), __func__));
   13568 	/* Exclude old devices */
   13569 	if ((sc->sc_type < WM_T_PCH_LPT)
   13570 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13571 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13572 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13573 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13574 		return;
   13575 
   13576 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13577 		/* Request ME un-configure ULP mode in the PHY */
   13578 		reg = CSR_READ(sc, WMREG_H2ME);
   13579 		reg &= ~H2ME_ULP;
   13580 		reg |= H2ME_ENFORCE_SETTINGS;
   13581 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13582 
   13583 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13584 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13585 			if (i++ == 30) {
				printf("%s: %s timed out\n",
				    device_xname(sc->sc_dev), __func__);
   13587 				return;
   13588 			}
   13589 			delay(10 * 1000);
   13590 		}
   13591 		reg = CSR_READ(sc, WMREG_H2ME);
   13592 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13593 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13594 
   13595 		return;
   13596 	}
   13597 
   13598 	/* Acquire semaphore */
   13599 	sc->phy.acquire(sc);
   13600 
   13601 	/* Toggle LANPHYPC */
   13602 	wm_toggle_lanphypc_pch_lpt(sc);
   13603 
   13604 	/* Unforce SMBus mode in PHY */
   13605 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13606 	if (reg == 0x0000 || reg == 0xffff) {
   13607 		uint32_t reg2;
   13608 
   13609 		printf("%s: Force SMBus first.\n", __func__);
   13610 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13611 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13612 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13613 		delay(50 * 1000);
   13614 
   13615 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13616 	}
   13617 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13618 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13619 
   13620 	/* Unforce SMBus mode in MAC */
   13621 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13622 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13623 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13624 
   13625 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13626 	reg |= HV_PM_CTRL_K1_ENA;
   13627 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13628 
   13629 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13630 	reg &= ~(I218_ULP_CONFIG1_IND
   13631 	    | I218_ULP_CONFIG1_STICKY_ULP
   13632 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13633 	    | I218_ULP_CONFIG1_WOL_HOST
   13634 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13635 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13636 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13637 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13638 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13639 	reg |= I218_ULP_CONFIG1_START;
   13640 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13641 
   13642 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13643 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13644 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13645 
   13646 	/* Release semaphore */
   13647 	sc->phy.release(sc);
   13648 	wm_gmii_reset(sc);
   13649 	delay(50 * 1000);
   13650 }
   13651 
   13652 /* WOL in the newer chipset interfaces (pchlan) */
   13653 static void
   13654 wm_enable_phy_wakeup(struct wm_softc *sc)
   13655 {
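	/*
	 * Placeholder: PHY-based wakeup for PCH and newer is not
	 * implemented yet; the disabled outline below lists the steps
	 * that would be needed.
	 */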
   13656 #if 0
   13657 	uint16_t preg;
   13658 
   13659 	/* Copy MAC RARs to PHY RARs */
   13660 
   13661 	/* Copy MAC MTA to PHY MTA */
   13662 
   13663 	/* Configure PHY Rx Control register */
   13664 
   13665 	/* Enable PHY wakeup in MAC register */
   13666 
   13667 	/* Configure and enable PHY wakeup in PHY registers */
   13668 
   13669 	/* Activate PHY wakeup */
   13670 
   13671 	/* XXX */
   13672 #endif
   13673 }
   13674 
   13675 /* Power down workaround on D3 */
   13676 static void
   13677 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13678 {
   13679 	uint32_t reg;
   13680 	int i;
   13681 
   13682 	for (i = 0; i < 2; i++) {
   13683 		/* Disable link */
   13684 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13685 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13686 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13687 
   13688 		/*
   13689 		 * Call gig speed drop workaround on Gig disable before
   13690 		 * accessing any PHY registers
   13691 		 */
   13692 		if (sc->sc_type == WM_T_ICH8)
   13693 			wm_gig_downshift_workaround_ich8lan(sc);
   13694 
   13695 		/* Write VR power-down enable */
   13696 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13697 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13698 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13699 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13700 
   13701 		/* Read it back and test */
   13702 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13703 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13704 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13705 			break;
   13706 
   13707 		/* Issue PHY reset and repeat at most one more time */
   13708 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13709 	}
   13710 }
   13711 
   13712 static void
   13713 wm_enable_wakeup(struct wm_softc *sc)
   13714 {
   13715 	uint32_t reg, pmreg;
   13716 	pcireg_t pmode;
   13717 
   13718 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13719 		device_xname(sc->sc_dev), __func__));
   13720 
   13721 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13722 		&pmreg, NULL) == 0)
   13723 		return;
   13724 
   13725 	/* Advertise the wakeup capability */
   13726 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13727 	    | CTRL_SWDPIN(3));
   13728 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13729 
   13730 	/* ICH workaround */
   13731 	switch (sc->sc_type) {
   13732 	case WM_T_ICH8:
   13733 	case WM_T_ICH9:
   13734 	case WM_T_ICH10:
   13735 	case WM_T_PCH:
   13736 	case WM_T_PCH2:
   13737 	case WM_T_PCH_LPT:
   13738 	case WM_T_PCH_SPT:
   13739 		/* Disable gig during WOL */
   13740 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13741 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13742 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13743 		if (sc->sc_type == WM_T_PCH)
   13744 			wm_gmii_reset(sc);
   13745 
   13746 		/* Power down workaround */
   13747 		if (sc->sc_phytype == WMPHY_82577) {
   13748 			struct mii_softc *child;
   13749 
   13750 			/* Assume that the PHY is copper */
   13751 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13752 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13753 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13754 				    (768 << 5) | 25, 0x0444); /* magic num */
   13755 		}
   13756 		break;
   13757 	default:
   13758 		break;
   13759 	}
   13760 
   13761 	/* Keep the laser running on fiber adapters */
   13762 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13763 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13764 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13765 		reg |= CTRL_EXT_SWDPIN(3);
   13766 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13767 	}
   13768 
   13769 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13770 #if 0	/* for the multicast packet */
   13771 	reg |= WUFC_MC;
   13772 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13773 #endif
   13774 
   13775 	if (sc->sc_type >= WM_T_PCH)
   13776 		wm_enable_phy_wakeup(sc);
   13777 	else {
   13778 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13779 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13780 	}
   13781 
   13782 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13783 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13784 		|| (sc->sc_type == WM_T_PCH2))
   13785 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13786 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13787 
   13788 	/* Request PME */
   13789 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13790 #if 0
   13791 	/* Disable WOL */
   13792 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13793 #else
   13794 	/* For WOL */
   13795 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13796 #endif
   13797 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13798 }
   13799 
   13800 /* LPLU */
   13801 
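/*
 * Disable D0 Low Power Link Up (LPLU).  The register holding the LPLU
 * control bit differs between MAC/PHY generations, hence the switch on
 * sc_type below.
 */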
   13802 static void
   13803 wm_lplu_d0_disable(struct wm_softc *sc)
   13804 {
   13805 	struct mii_data *mii = &sc->sc_mii;
   13806 	uint32_t reg;
   13807 
   13808 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13809 		device_xname(sc->sc_dev), __func__));
   13810 
   13811 	if (sc->sc_phytype == WMPHY_IFE)
   13812 		return;
   13813 
   13814 	switch (sc->sc_type) {
   13815 	case WM_T_82571:
   13816 	case WM_T_82572:
   13817 	case WM_T_82573:
   13818 	case WM_T_82575:
   13819 	case WM_T_82576:
   13820 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13821 		reg &= ~PMR_D0_LPLU;
   13822 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13823 		break;
   13824 	case WM_T_82580:
   13825 	case WM_T_I350:
   13826 	case WM_T_I210:
   13827 	case WM_T_I211:
   13828 		reg = CSR_READ(sc, WMREG_PHPM);
   13829 		reg &= ~PHPM_D0A_LPLU;
   13830 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13831 		break;
   13832 	case WM_T_82574:
   13833 	case WM_T_82583:
   13834 	case WM_T_ICH8:
   13835 	case WM_T_ICH9:
   13836 	case WM_T_ICH10:
   13837 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13838 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13839 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13840 		CSR_WRITE_FLUSH(sc);
   13841 		break;
   13842 	case WM_T_PCH:
   13843 	case WM_T_PCH2:
   13844 	case WM_T_PCH_LPT:
   13845 	case WM_T_PCH_SPT:
   13846 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13847 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13848 		if (wm_phy_resetisblocked(sc) == false)
   13849 			reg |= HV_OEM_BITS_ANEGNOW;
   13850 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13851 		break;
   13852 	default:
   13853 		break;
   13854 	}
   13855 }
   13856 
   13857 /* EEE */
   13858 
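/*
 * Enable or disable Energy Efficient Ethernet on I350: advertise EEE for
 * 100M/1G autonegotiation via IPCNFG and turn Tx/Rx low-power-idle on or
 * off via EEER, depending on WM_F_EEE.
 */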
   13859 static void
   13860 wm_set_eee_i350(struct wm_softc *sc)
   13861 {
   13862 	uint32_t ipcnfg, eeer;
   13863 
   13864 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13865 	eeer = CSR_READ(sc, WMREG_EEER);
   13866 
   13867 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13868 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13869 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13870 		    | EEER_LPI_FC);
   13871 	} else {
   13872 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13873 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13874 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13875 		    | EEER_LPI_FC);
   13876 	}
   13877 
   13878 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13879 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13880 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13881 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13882 }
   13883 
   13884 /*
   13885  * Workarounds (mainly PHY related).
   13886  * Basically, PHY's workarounds are in the PHY drivers.
   13887  */
   13888 
   13889 /* Work-around for 82566 Kumeran PCS lock loss */
   13890 static void
   13891 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13892 {
   13893 	struct mii_data *mii = &sc->sc_mii;
   13894 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13895 	int i;
   13896 	int reg;
   13897 
   13898 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13899 		device_xname(sc->sc_dev), __func__));
   13900 
   13901 	/* If the link is not up, do nothing */
   13902 	if ((status & STATUS_LU) == 0)
   13903 		return;
   13904 
	/* Nothing to do if the link speed is not 1Gbps */
   13906 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13907 		return;
   13908 
   13910 	for (i = 0; i < 10; i++) {
   13911 		/* read twice */
   13912 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13913 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13914 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13915 			goto out;	/* GOOD! */
   13916 
   13917 		/* Reset the PHY */
   13918 		wm_reset_phy(sc);
   13919 		delay(5*1000);
   13920 	}
   13921 
   13922 	/* Disable GigE link negotiation */
   13923 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13924 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13925 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13926 
   13927 	/*
   13928 	 * Call gig speed drop workaround on Gig disable before accessing
   13929 	 * any PHY registers.
   13930 	 */
   13931 	wm_gig_downshift_workaround_ich8lan(sc);
   13932 
   13933 out:
   13934 	return;
   13935 }
   13936 
/*
 * Workaround for "WOL from S5 stops working": briefly set and then clear
 * the Kumeran near-end loopback diagnostic bit (IGP3 PHYs only).
 */
   13938 static void
   13939 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13940 {
   13941 	uint16_t kmreg;
   13942 
   13943 	/* Only for igp3 */
   13944 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13945 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13946 			return;
   13947 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13948 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13949 			return;
   13950 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13951 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13952 	}
   13953 }
   13954 
   13955 /*
   13956  * Workaround for pch's PHYs
   13957  * XXX should be moved to new PHY driver?
   13958  */
   13959 static void
   13960 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13961 {
   13962 
   13963 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13964 		device_xname(sc->sc_dev), __func__));
   13965 	KASSERT(sc->sc_type == WM_T_PCH);
   13966 
   13967 	if (sc->sc_phytype == WMPHY_82577)
   13968 		wm_set_mdio_slow_mode_hv(sc);
   13969 
   13970 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13971 
   13972 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13973 
   13974 	/* 82578 */
   13975 	if (sc->sc_phytype == WMPHY_82578) {
   13976 		struct mii_softc *child;
   13977 
   13978 		/*
   13979 		 * Return registers to default by doing a soft reset then
   13980 		 * writing 0x3140 to the control register
   13981 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13982 		 */
   13983 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13984 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13985 			PHY_RESET(child);
   13986 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13987 			    0x3140);
   13988 		}
   13989 	}
   13990 
   13991 	/* Select page 0 */
   13992 	sc->phy.acquire(sc);
   13993 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13994 	sc->phy.release(sc);
   13995 
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming link is
	 * up, so that K1 is disabled while the link runs at 1Gbps.
	 */
   14000 	wm_k1_gig_workaround_hv(sc, 1);
   14001 }
   14002 
   14003 static void
   14004 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14005 {
   14006 
   14007 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14008 		device_xname(sc->sc_dev), __func__));
   14009 	KASSERT(sc->sc_type == WM_T_PCH2);
   14010 
   14011 	wm_set_mdio_slow_mode_hv(sc);
   14012 }
   14013 
   14014 static int
   14015 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14016 {
   14017 	int k1_enable = sc->sc_nvm_k1_enabled;
   14018 
   14019 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14020 		device_xname(sc->sc_dev), __func__));
   14021 
   14022 	if (sc->phy.acquire(sc) != 0)
   14023 		return -1;
   14024 
   14025 	if (link) {
   14026 		k1_enable = 0;
   14027 
   14028 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
   14030 	} else {
   14031 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
   14033 	}
   14034 
   14035 	wm_configure_k1_ich8lan(sc, k1_enable);
   14036 	sc->phy.release(sc);
   14037 
   14038 	return 0;
   14039 }
   14040 
   14041 static void
   14042 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14043 {
   14044 	uint32_t reg;
   14045 
   14046 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14047 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14048 	    reg | HV_KMRN_MDIO_SLOW);
   14049 }
   14050 
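/*
 * Write the K1 (Kumeran power saving state) enable bit to the KMRN K1
 * config register, then briefly force the MAC speed (CTRL_FRCSPD with
 * the speed bits cleared, plus CTRL_EXT_SPD_BYPS) so the change takes
 * effect, and restore the original CTRL/CTRL_EXT values.
 */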
   14051 static void
   14052 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14053 {
   14054 	uint32_t ctrl, ctrl_ext, tmp;
   14055 	uint16_t kmreg;
   14056 	int rv;
   14057 
   14058 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14059 	if (rv != 0)
   14060 		return;
   14061 
   14062 	if (k1_enable)
   14063 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14064 	else
   14065 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14066 
   14067 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14068 	if (rv != 0)
   14069 		return;
   14070 
   14071 	delay(20);
   14072 
   14073 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14074 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14075 
   14076 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14077 	tmp |= CTRL_FRCSPD;
   14078 
   14079 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14080 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14081 	CSR_WRITE_FLUSH(sc);
   14082 	delay(20);
   14083 
   14084 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14085 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14086 	CSR_WRITE_FLUSH(sc);
   14087 	delay(20);
   14090 }
   14091 
   14092 /* special case - for 82575 - need to do manual init ... */
   14093 static void
   14094 wm_reset_init_script_82575(struct wm_softc *sc)
   14095 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD
	 * driver for the i82575.
	 */
   14100 
   14101 	/* SerDes configuration via SERDESCTRL */
   14102 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14103 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14104 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14105 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14106 
   14107 	/* CCM configuration via CCMCTL register */
   14108 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14109 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14110 
   14111 	/* PCIe lanes configuration */
   14112 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14113 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14114 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14115 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14116 
   14117 	/* PCIe PLL Configuration */
   14118 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14119 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14120 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14121 }
   14122 
   14123 static void
   14124 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14125 {
   14126 	uint32_t reg;
   14127 	uint16_t nvmword;
   14128 	int rv;
   14129 
   14130 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14131 		return;
   14132 
   14133 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14134 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14135 	if (rv != 0) {
   14136 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14137 		    __func__);
   14138 		return;
   14139 	}
   14140 
   14141 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14142 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14143 		reg |= MDICNFG_DEST;
   14144 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14145 		reg |= MDICNFG_COM_MDIO;
   14146 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14147 }
   14148 
   14149 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14150 
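/*
 * Check whether the PHY responds on the expected address by reading the
 * PHY ID registers (two attempts, since a read may return an invalid ID
 * right after a mode change).  On pre-LPT parts a failed probe is
 * retried in MDIO slow mode; on LPT/SPT a successful probe also unforces
 * SMBus mode when ME is not active.  Called with the PHY semaphore held.
 */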
   14151 static bool
   14152 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14153 {
   14154 	int i;
   14155 	uint32_t reg;
   14156 	uint16_t id1, id2;
   14157 
   14158 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14159 		device_xname(sc->sc_dev), __func__));
   14160 	id1 = id2 = 0xffff;
   14161 	for (i = 0; i < 2; i++) {
   14162 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14163 		if (MII_INVALIDID(id1))
   14164 			continue;
   14165 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14166 		if (MII_INVALIDID(id2))
   14167 			continue;
   14168 		break;
   14169 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   14173 
   14174 	if (sc->sc_type < WM_T_PCH_LPT) {
   14175 		sc->phy.release(sc);
   14176 		wm_set_mdio_slow_mode_hv(sc);
   14177 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14178 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14179 		sc->phy.acquire(sc);
   14180 	}
   14181 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14182 		printf("XXX return with false\n");
   14183 		return false;
   14184 	}
   14185 out:
   14186 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14187 		/* Only unforce SMBus if ME is not active */
   14188 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14189 			/* Unforce SMBus mode in PHY */
   14190 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14191 			    CV_SMB_CTRL);
   14192 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14193 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14194 			    CV_SMB_CTRL, reg);
   14195 
   14196 			/* Unforce SMBus mode in MAC */
   14197 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14198 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14199 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14200 		}
   14201 	}
   14202 	return true;
   14203 }
   14204 
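/*
 * Toggle the LANPHYPC pin to force the PHY through a power cycle.  The
 * PHY config counter is first shortened to 50ms; afterwards the code
 * waits for the CTRL_EXT_LPCD bit (power cycle done) on LPT and newer,
 * or simply delays 50ms on older parts.
 */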
   14205 static void
   14206 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14207 {
   14208 	uint32_t reg;
   14209 	int i;
   14210 
   14211 	/* Set PHY Config Counter to 50msec */
   14212 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14213 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14214 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14215 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14216 
   14217 	/* Toggle LANPHYPC */
   14218 	reg = CSR_READ(sc, WMREG_CTRL);
   14219 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14220 	reg &= ~CTRL_LANPHYPC_VALUE;
   14221 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14222 	CSR_WRITE_FLUSH(sc);
   14223 	delay(1000);
   14224 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14225 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14226 	CSR_WRITE_FLUSH(sc);
   14227 
	if (sc->sc_type < WM_T_PCH_LPT) {
		delay(50 * 1000);
	} else {
   14231 		i = 20;
   14232 
   14233 		do {
   14234 			delay(5 * 1000);
   14235 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14236 		    && i--);
   14237 
   14238 		delay(30 * 1000);
   14239 	}
   14240 }
   14241 
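/*
 * Program PCIe LTR (Latency Tolerance Reporting) and OBFF for PCH_LPT:
 * compute how long the device can buffer received data from the Rx
 * packet buffer size and link speed, clamp that to the maximum latency
 * the platform advertises in its LTR capability, and derive the OBFF
 * high water mark from whatever headroom remains.
 */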
   14242 static int
   14243 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14244 {
   14245 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14246 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14247 	uint32_t rxa;
   14248 	uint16_t scale = 0, lat_enc = 0;
   14249 	int32_t obff_hwm = 0;
   14250 	int64_t lat_ns, value;
   14251 
   14252 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14253 		device_xname(sc->sc_dev), __func__));
   14254 
   14255 	if (link) {
   14256 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14257 		uint32_t status;
   14258 		uint16_t speed;
   14259 		pcireg_t preg;
   14260 
   14261 		status = CSR_READ(sc, WMREG_STATUS);
   14262 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14263 		case STATUS_SPEED_10:
   14264 			speed = 10;
   14265 			break;
   14266 		case STATUS_SPEED_100:
   14267 			speed = 100;
   14268 			break;
   14269 		case STATUS_SPEED_1000:
   14270 			speed = 1000;
   14271 			break;
   14272 		default:
   14273 			device_printf(sc->sc_dev, "Unknown speed "
   14274 			    "(status = %08x)\n", status);
   14275 			return -1;
   14276 		}
   14277 
   14278 		/* Rx Packet Buffer Allocation size (KB) */
   14279 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14280 
   14281 		/*
   14282 		 * Determine the maximum latency tolerated by the device.
   14283 		 *
   14284 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14285 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14286 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14287 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14288 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14289 		 */
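		/*
		 * For example (illustrative numbers, and assuming
		 * LTRV_VALUE is the 10-bit value mask): lat_ns = 70000
		 * does not fit in 10 bits, so the loop below divides by
		 * 2^5 twice (70000 -> 2188 -> 69), yielding scale = 2 and
		 * value = 69, i.e. an encoded latency of 69 * 2^10 ns
		 * (~70.7us); howmany() rounds up at each step.
		 */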
   14290 		lat_ns = ((int64_t)rxa * 1024 -
   14291 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14292 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14293 		if (lat_ns < 0)
   14294 			lat_ns = 0;
   14295 		else
   14296 			lat_ns /= speed;
   14297 		value = lat_ns;
   14298 
   14299 		while (value > LTRV_VALUE) {
			scale++;
   14301 			value = howmany(value, __BIT(5));
   14302 		}
   14303 		if (scale > LTRV_SCALE_MAX) {
   14304 			printf("%s: Invalid LTR latency scale %d\n",
   14305 			    device_xname(sc->sc_dev), scale);
   14306 			return -1;
   14307 		}
   14308 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14309 
   14310 		/* Determine the maximum latency tolerated by the platform */
   14311 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14312 		    WM_PCI_LTR_CAP_LPT);
   14313 		max_snoop = preg & 0xffff;
   14314 		max_nosnoop = preg >> 16;
   14315 
   14316 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14317 
   14318 		if (lat_enc > max_ltr_enc) {
   14319 			lat_enc = max_ltr_enc;
   14320 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14321 			    * PCI_LTR_SCALETONS(
   14322 				    __SHIFTOUT(lat_enc,
   14323 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14324 		}
   14325 
   14326 		if (lat_ns) {
   14327 			lat_ns *= speed * 1000;
   14328 			lat_ns /= 8;
   14329 			lat_ns /= 1000000000;
   14330 			obff_hwm = (int32_t)(rxa - lat_ns);
   14331 		}
   14332 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d "
			    "(rxa = %d, lat_ns = %d)\n",
   14335 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14336 			return -1;
   14337 		}
   14338 	}
   14339 	/* Snoop and No-Snoop latencies the same */
   14340 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14341 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14342 
   14343 	/* Set OBFF high water mark */
   14344 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14345 	reg |= obff_hwm;
   14346 	CSR_WRITE(sc, WMREG_SVT, reg);
   14347 
   14348 	/* Enable OBFF */
   14349 	reg = CSR_READ(sc, WMREG_SVCR);
   14350 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14351 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14352 
   14353 	return 0;
   14354 }
   14355 
   14356 /*
   14357  * I210 Errata 25 and I211 Errata 10
   14358  * Slow System Clock.
   14359  */
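/*
 * If the PHY PLL frequency register shows the PLL as unconfigured, the
 * loop below resets the internal PHY, rewrites the iNVM autoload word
 * with INVM_PLL_WO_VAL via EEARBC, and bounces the device through PCI
 * power state D3 so the autoload is retried, up to WM_MAX_PLL_TRIES
 * times.
 */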
   14360 static void
   14361 wm_pll_workaround_i210(struct wm_softc *sc)
   14362 {
   14363 	uint32_t mdicnfg, wuc;
   14364 	uint32_t reg;
   14365 	pcireg_t pcireg;
   14366 	uint32_t pmreg;
   14367 	uint16_t nvmword, tmp_nvmword;
   14368 	int phyval;
   14369 	bool wa_done = false;
   14370 	int i;
   14371 
   14372 	/* Save WUC and MDICNFG registers */
   14373 	wuc = CSR_READ(sc, WMREG_WUC);
   14374 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14375 
   14376 	reg = mdicnfg & ~MDICNFG_DEST;
   14377 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14378 
   14379 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14380 		nvmword = INVM_DEFAULT_AL;
   14381 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14382 
   14383 	/* Get Power Management cap offset */
   14384 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14385 		&pmreg, NULL) == 0)
   14386 		return;
   14387 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14388 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14389 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14390 
   14391 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14392 			break; /* OK */
   14393 		}
   14394 
   14395 		wa_done = true;
   14396 		/* Directly reset the internal PHY */
   14397 		reg = CSR_READ(sc, WMREG_CTRL);
   14398 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14399 
   14400 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14401 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14402 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14403 
   14404 		CSR_WRITE(sc, WMREG_WUC, 0);
   14405 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14406 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14407 
   14408 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14409 		    pmreg + PCI_PMCSR);
   14410 		pcireg |= PCI_PMCSR_STATE_D3;
   14411 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14412 		    pmreg + PCI_PMCSR, pcireg);
   14413 		delay(1000);
   14414 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14415 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14416 		    pmreg + PCI_PMCSR, pcireg);
   14417 
   14418 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14419 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14420 
   14421 		/* Restore WUC register */
   14422 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14423 	}
   14424 
   14425 	/* Restore MDICNFG setting */
   14426 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14427 	if (wa_done)
   14428 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14429 }
   14430 
   14431 static void
   14432 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14433 {
   14434 	uint32_t reg;
   14435 
   14436 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14437 		device_xname(sc->sc_dev), __func__));
   14438 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14439 
   14440 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14441 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14442 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14443 
   14444 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14445 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14446 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14447 }
   14448