      1 /*	$NetBSD: if_wm.c,v 1.532 2017/07/26 08:09:59 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
      76  *	- Tx multi-queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.532 2017/07/26 08:09:59 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 #include <dev/mii/ihphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
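
/*
 * Usage sketch (illustrative only, not a call made at this point in the
 * driver): DPRINTF() prints only when the given bit is set in wm_debug,
 * so a link-state message would look like this.
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
		device_xname(sc->sc_dev)));
#endif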
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
     171  * The maximum number of interrupts this device driver supports.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
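
/*
 * Sketch (assumption, not driver code): because the ring sizes above are
 * powers of two, WM_NEXTTX() wraps with a mask instead of a modulo, e.g.
 * in a 256-descriptor ring slot 255 advances back to slot 0.
 */
#if 0
	int next = WM_NEXTTX(txq, 255);	/* (255 + 1) & 0xff == 0 */
#endif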
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for normal
    218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
     241 	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
     290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
     297 	} while (/*CONSTCOND*/0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
    320 	size_t txq_descsize;		/* a tx descriptor size */
    321 	txdescs_t *txq_descs_u;
     322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
     323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
     346 	 * CPUs. This queue mediates between them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
     351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
     352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
     370 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
     376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
    395 	size_t rxq_descsize;		/* a rx descriptor size */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    399 	int rxq_desc_rseg;		/* real number of control segment */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
    428 	int wmq_intr_idx;		/* index of MSI-X tables */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 struct wm_nvmop {
    446 	int (*acquire)(struct wm_softc *);
    447 	void (*release)(struct wm_softc *);
    448 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    449 };
    450 
    451 /*
    452  * Software state per device.
    453  */
    454 struct wm_softc {
    455 	device_t sc_dev;		/* generic device information */
    456 	bus_space_tag_t sc_st;		/* bus space tag */
    457 	bus_space_handle_t sc_sh;	/* bus space handle */
    458 	bus_size_t sc_ss;		/* bus space size */
    459 	bus_space_tag_t sc_iot;		/* I/O space tag */
    460 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    461 	bus_size_t sc_ios;		/* I/O space size */
    462 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    463 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    464 	bus_size_t sc_flashs;		/* flash registers space size */
    465 	off_t sc_flashreg_offset;	/*
    466 					 * offset to flash registers from
    467 					 * start of BAR
    468 					 */
    469 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    470 
    471 	struct ethercom sc_ethercom;	/* ethernet common data */
    472 	struct mii_data sc_mii;		/* MII/media information */
    473 
    474 	pci_chipset_tag_t sc_pc;
    475 	pcitag_t sc_pcitag;
    476 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    477 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    478 
    479 	uint16_t sc_pcidevid;		/* PCI device ID */
    480 	wm_chip_type sc_type;		/* MAC type */
    481 	int sc_rev;			/* MAC revision */
    482 	wm_phy_type sc_phytype;		/* PHY type */
    483 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    484 #define	WM_MEDIATYPE_UNKNOWN		0x00
    485 #define	WM_MEDIATYPE_FIBER		0x01
    486 #define	WM_MEDIATYPE_COPPER		0x02
    487 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    488 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    489 	int sc_flags;			/* flags; see below */
    490 	int sc_if_flags;		/* last if_flags */
    491 	int sc_flowflags;		/* 802.3x flow control flags */
    492 	int sc_align_tweak;
    493 
    494 	void *sc_ihs[WM_MAX_NINTR];	/*
    495 					 * interrupt cookie.
    496 					 * - legacy and msi use sc_ihs[0] only
    497 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    498 					 */
    499 	pci_intr_handle_t *sc_intrs;	/*
    500 					 * legacy and msi use sc_intrs[0] only
     501 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    502 					 */
    503 	int sc_nintrs;			/* number of interrupts */
    504 
    505 	int sc_link_intr_idx;		/* index of MSI-X tables */
    506 
    507 	callout_t sc_tick_ch;		/* tick callout */
    508 	bool sc_core_stopping;
    509 
    510 	int sc_nvm_ver_major;
    511 	int sc_nvm_ver_minor;
    512 	int sc_nvm_ver_build;
    513 	int sc_nvm_addrbits;		/* NVM address bits */
    514 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    515 	int sc_ich8_flash_base;
    516 	int sc_ich8_flash_bank_size;
    517 	int sc_nvm_k1_enabled;
    518 
    519 	int sc_nqueues;
    520 	struct wm_queue *sc_queue;
    521 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    522 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    523 
    524 	int sc_affinity_offset;
    525 
    526 #ifdef WM_EVENT_COUNTERS
    527 	/* Event counters. */
    528 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    529 
     530 	/* WM_T_82542_2_1 only */
    531 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    532 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    533 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    534 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    535 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    536 #endif /* WM_EVENT_COUNTERS */
    537 
     538 	/* This variable is used only on the 82547. */
    539 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    540 
    541 	uint32_t sc_ctrl;		/* prototype CTRL register */
    542 #if 0
    543 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    544 #endif
    545 	uint32_t sc_icr;		/* prototype interrupt bits */
    546 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    547 	uint32_t sc_tctl;		/* prototype TCTL register */
    548 	uint32_t sc_rctl;		/* prototype RCTL register */
    549 	uint32_t sc_txcw;		/* prototype TXCW register */
    550 	uint32_t sc_tipg;		/* prototype TIPG register */
    551 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    552 	uint32_t sc_pba;		/* prototype PBA register */
    553 
    554 	int sc_tbi_linkup;		/* TBI link status */
    555 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    556 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    557 
    558 	int sc_mchash_type;		/* multicast filter offset */
    559 
    560 	krndsource_t rnd_source;	/* random source */
    561 
    562 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    563 
    564 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    565 	kmutex_t *sc_ich_phymtx;	/*
    566 					 * 82574/82583/ICH/PCH specific PHY
    567 					 * mutex. For 82574/82583, the mutex
    568 					 * is used for both PHY and NVM.
    569 					 */
    570 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    571 
    572 	struct wm_phyop phy;
    573 	struct wm_nvmop nvm;
    574 };
    575 
    576 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    577 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    578 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
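
/*
 * Usage sketch (assumption): sc_core_lock may be NULL when the driver is
 * not built MPSAFE, so callers use these macros instead of calling
 * mutex_enter(9)/mutex_exit(9) directly.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify softc state ... */
	WM_CORE_UNLOCK(sc);
#endif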
    579 
    580 #define	WM_RXCHAIN_RESET(rxq)						\
    581 do {									\
    582 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    583 	*(rxq)->rxq_tailp = NULL;					\
    584 	(rxq)->rxq_len = 0;						\
    585 } while (/*CONSTCOND*/0)
    586 
    587 #define	WM_RXCHAIN_LINK(rxq, m)						\
    588 do {									\
    589 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    590 	(rxq)->rxq_tailp = &(m)->m_next;				\
    591 } while (/*CONSTCOND*/0)
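
/*
 * Sketch (assumption): chaining two mbufs of one received packet and
 * then resetting the chain for the next packet.
 */
#if 0
	WM_RXCHAIN_LINK(rxq, m0);	/* rxq_head = rxq_tail = m0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* m0->m_next = m1; rxq_tail = m1 */
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, rxq_len = 0 */
#endif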
    592 
    593 #ifdef WM_EVENT_COUNTERS
    594 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    595 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)			\
    598 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    600 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    601 #else /* !WM_EVENT_COUNTERS */
    602 #define	WM_EVCNT_INCR(ev)	/* nothing */
    603 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    604 
    605 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    606 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    607 #endif /* !WM_EVENT_COUNTERS */
    608 
    609 #define	CSR_READ(sc, reg)						\
    610 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    611 #define	CSR_WRITE(sc, reg, val)						\
    612 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    613 #define	CSR_WRITE_FLUSH(sc)						\
    614 	(void) CSR_READ((sc), WMREG_STATUS)
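
/*
 * Sketch (assumption; register and bit names per if_wmreg.h): a posted
 * write is pushed to the device by reading STATUS back, which is what
 * CSR_WRITE_FLUSH() is for.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);
	delay(5000);			/* hypothetical settle time */
#endif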
    615 
    616 #define ICH8_FLASH_READ32(sc, reg)					\
    617 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    620 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define ICH8_FLASH_READ16(sc, reg)					\
    624 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    625 	    (reg) + sc->sc_flashreg_offset)
    626 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    627 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    628 	    (reg) + sc->sc_flashreg_offset, (data))
    629 
    630 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    631 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    632 
    633 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    634 #define	WM_CDTXADDR_HI(txq, x)						\
    635 	(sizeof(bus_addr_t) == 8 ?					\
    636 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    637 
    638 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    639 #define	WM_CDRXADDR_HI(rxq, x)						\
    640 	(sizeof(bus_addr_t) == 8 ?					\
    641 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
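
/*
 * Sketch (assumption): splitting the 64-bit ring base address into the
 * 32-bit halves that are written to the descriptor base registers; the
 * high half is 0 when bus_addr_t is 32 bits wide.
 */
#if 0
	uint32_t lo = WM_CDTXADDR_LO(txq, 0);
	uint32_t hi = WM_CDTXADDR_HI(txq, 0);
#endif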
    642 
    643 /*
    644  * Register read/write functions.
    645  * Other than CSR_{READ|WRITE}().
    646  */
    647 #if 0
    648 static inline uint32_t wm_io_read(struct wm_softc *, int);
    649 #endif
    650 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    651 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    652 	uint32_t, uint32_t);
    653 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    654 
    655 /*
    656  * Descriptor sync/init functions.
    657  */
    658 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    659 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    660 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    661 
    662 /*
    663  * Device driver interface functions and commonly used functions.
    664  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    665  */
    666 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    667 static int	wm_match(device_t, cfdata_t, void *);
    668 static void	wm_attach(device_t, device_t, void *);
    669 static int	wm_detach(device_t, int);
    670 static bool	wm_suspend(device_t, const pmf_qual_t *);
    671 static bool	wm_resume(device_t, const pmf_qual_t *);
    672 static void	wm_watchdog(struct ifnet *);
    673 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    674 static void	wm_tick(void *);
    675 static int	wm_ifflags_cb(struct ethercom *);
    676 static int	wm_ioctl(struct ifnet *, u_long, void *);
    677 /* MAC address related */
    678 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    679 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    680 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    681 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    682 static void	wm_set_filter(struct wm_softc *);
    683 /* Reset and init related */
    684 static void	wm_set_vlan(struct wm_softc *);
    685 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    686 static void	wm_get_auto_rd_done(struct wm_softc *);
    687 static void	wm_lan_init_done(struct wm_softc *);
    688 static void	wm_get_cfg_done(struct wm_softc *);
    689 static void	wm_phy_post_reset(struct wm_softc *);
    690 static void	wm_write_smbus_addr(struct wm_softc *);
    691 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    692 static void	wm_initialize_hardware_bits(struct wm_softc *);
    693 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    694 static void	wm_reset_phy(struct wm_softc *);
    695 static void	wm_flush_desc_rings(struct wm_softc *);
    696 static void	wm_reset(struct wm_softc *);
    697 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    698 static void	wm_rxdrain(struct wm_rxqueue *);
    699 static void	wm_rss_getkey(uint8_t *);
    700 static void	wm_init_rss(struct wm_softc *);
    701 static void	wm_adjust_qnum(struct wm_softc *, int);
    702 static inline bool	wm_is_using_msix(struct wm_softc *);
    703 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    704 static int	wm_softint_establish(struct wm_softc *, int, int);
    705 static int	wm_setup_legacy(struct wm_softc *);
    706 static int	wm_setup_msix(struct wm_softc *);
    707 static int	wm_init(struct ifnet *);
    708 static int	wm_init_locked(struct ifnet *);
    709 static void	wm_turnon(struct wm_softc *);
    710 static void	wm_turnoff(struct wm_softc *);
    711 static void	wm_stop(struct ifnet *, int);
    712 static void	wm_stop_locked(struct ifnet *, int);
    713 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    714 static void	wm_82547_txfifo_stall(void *);
    715 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    716 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    717 /* DMA related */
    718 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    721 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    722     struct wm_txqueue *);
    723 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    725 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    728 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    729 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    730 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    731 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    732 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    733 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    734     struct wm_txqueue *);
    735 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    736     struct wm_rxqueue *);
    737 static int	wm_alloc_txrx_queues(struct wm_softc *);
    738 static void	wm_free_txrx_queues(struct wm_softc *);
    739 static int	wm_init_txrx_queues(struct wm_softc *);
    740 /* Start */
    741 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    742     struct wm_txsoft *, uint32_t *, uint8_t *);
    743 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    744 static void	wm_start(struct ifnet *);
    745 static void	wm_start_locked(struct ifnet *);
    746 static int	wm_transmit(struct ifnet *, struct mbuf *);
    747 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    748 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    749 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    750     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    751 static void	wm_nq_start(struct ifnet *);
    752 static void	wm_nq_start_locked(struct ifnet *);
    753 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    754 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    755 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    756 static void	wm_deferred_start_locked(struct wm_txqueue *);
    757 static void	wm_handle_queue(void *);
    758 /* Interrupt */
    759 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    761 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    762 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    763 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    764 static void	wm_linkintr(struct wm_softc *, uint32_t);
    765 static int	wm_intr_legacy(void *);
    766 static inline void	wm_txrxintr_disable(struct wm_queue *);
    767 static inline void	wm_txrxintr_enable(struct wm_queue *);
    768 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    769 static int	wm_txrxintr_msix(void *);
    770 static int	wm_linkintr_msix(void *);
    771 
    772 /*
    773  * Media related.
    774  * GMII, SGMII, TBI, SERDES and SFP.
    775  */
    776 /* Common */
    777 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    778 /* GMII related */
    779 static void	wm_gmii_reset(struct wm_softc *);
    780 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    781 static int	wm_get_phy_id_82575(struct wm_softc *);
    782 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    783 static int	wm_gmii_mediachange(struct ifnet *);
    784 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    785 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    786 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    787 static int	wm_gmii_i82543_readreg(device_t, int, int);
    788 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    789 static int	wm_gmii_mdic_readreg(device_t, int, int);
    790 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    791 static int	wm_gmii_i82544_readreg(device_t, int, int);
    792 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    793 static int	wm_gmii_i80003_readreg(device_t, int, int);
    794 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    795 static int	wm_gmii_bm_readreg(device_t, int, int);
    796 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    797 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    798 static int	wm_gmii_hv_readreg(device_t, int, int);
    799 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    800 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    801 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    802 static int	wm_gmii_82580_readreg(device_t, int, int);
    803 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    804 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    805 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    806 static void	wm_gmii_statchg(struct ifnet *);
    807 /*
     808  * Kumeran related (80003, ICH* and PCH*).
     809  * These functions are not for accessing MII registers but for accessing
     810  * Kumeran-specific registers.
    811  */
    812 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    813 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    814 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    815 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    816 /* SGMII */
    817 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    818 static int	wm_sgmii_readreg(device_t, int, int);
    819 static void	wm_sgmii_writereg(device_t, int, int, int);
    820 /* TBI related */
    821 static void	wm_tbi_mediainit(struct wm_softc *);
    822 static int	wm_tbi_mediachange(struct ifnet *);
    823 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    824 static int	wm_check_for_link(struct wm_softc *);
    825 static void	wm_tbi_tick(struct wm_softc *);
    826 /* SERDES related */
    827 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    828 static int	wm_serdes_mediachange(struct ifnet *);
    829 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    830 static void	wm_serdes_tick(struct wm_softc *);
    831 /* SFP related */
    832 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    833 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    834 
    835 /*
    836  * NVM related.
    837  * Microwire, SPI (w/wo EERD) and Flash.
    838  */
    839 /* Misc functions */
    840 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    841 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    842 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    843 /* Microwire */
    844 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    845 /* SPI */
    846 static int	wm_nvm_ready_spi(struct wm_softc *);
    847 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    848 /* Using with EERD */
    849 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    850 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    851 /* Flash */
    852 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    853     unsigned int *);
    854 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    855 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    856 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    857 	uint32_t *);
    858 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    859 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    860 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    861 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    862 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    863 /* iNVM */
    864 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    865 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    866 /* Lock, detecting NVM type, validate checksum and read */
    867 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    868 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    869 static int	wm_nvm_validate_checksum(struct wm_softc *);
    870 static void	wm_nvm_version_invm(struct wm_softc *);
    871 static void	wm_nvm_version(struct wm_softc *);
    872 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    873 
    874 /*
    875  * Hardware semaphores.
     876  * Very complex...
    877  */
    878 static int	wm_get_null(struct wm_softc *);
    879 static void	wm_put_null(struct wm_softc *);
    880 static int	wm_get_eecd(struct wm_softc *);
    881 static void	wm_put_eecd(struct wm_softc *);
    882 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    883 static void	wm_put_swsm_semaphore(struct wm_softc *);
    884 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    885 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    886 static int	wm_get_nvm_80003(struct wm_softc *);
    887 static void	wm_put_nvm_80003(struct wm_softc *);
    888 static int	wm_get_nvm_82571(struct wm_softc *);
    889 static void	wm_put_nvm_82571(struct wm_softc *);
    890 static int	wm_get_phy_82575(struct wm_softc *);
    891 static void	wm_put_phy_82575(struct wm_softc *);
    892 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    893 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    894 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    895 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    896 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    897 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    898 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    899 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    900 
    901 /*
    902  * Management mode and power management related subroutines.
    903  * BMC, AMT, suspend/resume and EEE.
    904  */
    905 #if 0
    906 static int	wm_check_mng_mode(struct wm_softc *);
    907 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    908 static int	wm_check_mng_mode_82574(struct wm_softc *);
    909 static int	wm_check_mng_mode_generic(struct wm_softc *);
    910 #endif
    911 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    912 static bool	wm_phy_resetisblocked(struct wm_softc *);
    913 static void	wm_get_hw_control(struct wm_softc *);
    914 static void	wm_release_hw_control(struct wm_softc *);
    915 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    916 static void	wm_smbustopci(struct wm_softc *);
    917 static void	wm_init_manageability(struct wm_softc *);
    918 static void	wm_release_manageability(struct wm_softc *);
    919 static void	wm_get_wakeup(struct wm_softc *);
    920 static void	wm_ulp_disable(struct wm_softc *);
    921 static void	wm_enable_phy_wakeup(struct wm_softc *);
    922 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_enable_wakeup(struct wm_softc *);
    924 /* LPLU (Low Power Link Up) */
    925 static void	wm_lplu_d0_disable(struct wm_softc *);
    926 /* EEE */
    927 static void	wm_set_eee_i350(struct wm_softc *);
    928 
    929 /*
    930  * Workarounds (mainly PHY related).
     931  * Basically, PHY workarounds are in the PHY drivers.
    932  */
    933 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    934 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    935 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    936 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    937 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    938 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    939 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    940 static void	wm_reset_init_script_82575(struct wm_softc *);
    941 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    942 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    943 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    944 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    945 static void	wm_pll_workaround_i210(struct wm_softc *);
    946 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    947 
    948 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    949     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    950 
    951 /*
    952  * Devices supported by this driver.
    953  */
    954 static const struct wm_product {
    955 	pci_vendor_id_t		wmp_vendor;
    956 	pci_product_id_t	wmp_product;
    957 	const char		*wmp_name;
    958 	wm_chip_type		wmp_type;
    959 	uint32_t		wmp_flags;
    960 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    961 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    962 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    963 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    964 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    965 } wm_products[] = {
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    967 	  "Intel i82542 1000BASE-X Ethernet",
    968 	  WM_T_82542_2_1,	WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    971 	  "Intel i82543GC 1000BASE-X Ethernet",
    972 	  WM_T_82543,		WMP_F_FIBER },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    975 	  "Intel i82543GC 1000BASE-T Ethernet",
    976 	  WM_T_82543,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    979 	  "Intel i82544EI 1000BASE-T Ethernet",
    980 	  WM_T_82544,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    983 	  "Intel i82544EI 1000BASE-X Ethernet",
    984 	  WM_T_82544,		WMP_F_FIBER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    987 	  "Intel i82544GC 1000BASE-T Ethernet",
    988 	  WM_T_82544,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    991 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    992 	  WM_T_82544,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    995 	  "Intel i82540EM 1000BASE-T Ethernet",
    996 	  WM_T_82540,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    999 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1000 	  WM_T_82540,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1003 	  "Intel i82540EP 1000BASE-T Ethernet",
   1004 	  WM_T_82540,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1007 	  "Intel i82540EP 1000BASE-T Ethernet",
   1008 	  WM_T_82540,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1011 	  "Intel i82540EP 1000BASE-T Ethernet",
   1012 	  WM_T_82540,		WMP_F_COPPER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1015 	  "Intel i82545EM 1000BASE-T Ethernet",
   1016 	  WM_T_82545,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1019 	  "Intel i82545GM 1000BASE-T Ethernet",
   1020 	  WM_T_82545_3,		WMP_F_COPPER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1023 	  "Intel i82545GM 1000BASE-X Ethernet",
   1024 	  WM_T_82545_3,		WMP_F_FIBER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1027 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1028 	  WM_T_82545_3,		WMP_F_SERDES },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1031 	  "Intel i82546EB 1000BASE-T Ethernet",
   1032 	  WM_T_82546,		WMP_F_COPPER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1035 	  "Intel i82546EB 1000BASE-T Ethernet",
   1036 	  WM_T_82546,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1039 	  "Intel i82545EM 1000BASE-X Ethernet",
   1040 	  WM_T_82545,		WMP_F_FIBER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1043 	  "Intel i82546EB 1000BASE-X Ethernet",
   1044 	  WM_T_82546,		WMP_F_FIBER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1047 	  "Intel i82546GB 1000BASE-T Ethernet",
   1048 	  WM_T_82546_3,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1051 	  "Intel i82546GB 1000BASE-X Ethernet",
   1052 	  WM_T_82546_3,		WMP_F_FIBER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1055 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1056 	  WM_T_82546_3,		WMP_F_SERDES },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1059 	  "i82546GB quad-port Gigabit Ethernet",
   1060 	  WM_T_82546_3,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1063 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1064 	  WM_T_82546_3,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1067 	  "Intel PRO/1000MT (82546GB)",
   1068 	  WM_T_82546_3,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1071 	  "Intel i82541EI 1000BASE-T Ethernet",
   1072 	  WM_T_82541,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1075 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1076 	  WM_T_82541,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1079 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1080 	  WM_T_82541,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1083 	  "Intel i82541ER 1000BASE-T Ethernet",
   1084 	  WM_T_82541_2,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1087 	  "Intel i82541GI 1000BASE-T Ethernet",
   1088 	  WM_T_82541_2,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1091 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1092 	  WM_T_82541_2,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1095 	  "Intel i82541PI 1000BASE-T Ethernet",
   1096 	  WM_T_82541_2,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1099 	  "Intel i82547EI 1000BASE-T Ethernet",
   1100 	  WM_T_82547,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1103 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1104 	  WM_T_82547,		WMP_F_COPPER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1107 	  "Intel i82547GI 1000BASE-T Ethernet",
   1108 	  WM_T_82547_2,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1111 	  "Intel PRO/1000 PT (82571EB)",
   1112 	  WM_T_82571,		WMP_F_COPPER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1115 	  "Intel PRO/1000 PF (82571EB)",
   1116 	  WM_T_82571,		WMP_F_FIBER },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1119 	  "Intel PRO/1000 PB (82571EB)",
   1120 	  WM_T_82571,		WMP_F_SERDES },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1123 	  "Intel PRO/1000 QT (82571EB)",
   1124 	  WM_T_82571,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1127 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1128 	  WM_T_82571,		WMP_F_COPPER, },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1131 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1132 	  WM_T_82571,		WMP_F_COPPER, },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1135 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1136 	  WM_T_82571,		WMP_F_SERDES, },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1139 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1140 	  WM_T_82571,		WMP_F_SERDES, },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1143 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1144 	  WM_T_82571,		WMP_F_FIBER, },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1147 	  "Intel i82572EI 1000baseT Ethernet",
   1148 	  WM_T_82572,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1151 	  "Intel i82572EI 1000baseX Ethernet",
   1152 	  WM_T_82572,		WMP_F_FIBER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1155 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1156 	  WM_T_82572,		WMP_F_SERDES },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1159 	  "Intel i82572EI 1000baseT Ethernet",
   1160 	  WM_T_82572,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1163 	  "Intel i82573E",
   1164 	  WM_T_82573,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1167 	  "Intel i82573E IAMT",
   1168 	  WM_T_82573,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1171 	  "Intel i82573L Gigabit Ethernet",
   1172 	  WM_T_82573,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1175 	  "Intel i82574L",
   1176 	  WM_T_82574,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1179 	  "Intel i82574L",
   1180 	  WM_T_82574,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1183 	  "Intel i82583V",
   1184 	  WM_T_82583,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1187 	  "i80003 dual 1000baseT Ethernet",
   1188 	  WM_T_80003,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1191 	  "i80003 dual 1000baseX Ethernet",
   1192 	  WM_T_80003,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1195 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1196 	  WM_T_80003,		WMP_F_SERDES },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1199 	  "Intel i80003 1000baseT Ethernet",
   1200 	  WM_T_80003,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1203 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1204 	  WM_T_80003,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1207 	  "Intel i82801H (M_AMT) LAN Controller",
   1208 	  WM_T_ICH8,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1210 	  "Intel i82801H (AMT) LAN Controller",
   1211 	  WM_T_ICH8,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1213 	  "Intel i82801H LAN Controller",
   1214 	  WM_T_ICH8,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1216 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1217 	  WM_T_ICH8,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1219 	  "Intel i82801H (M) LAN Controller",
   1220 	  WM_T_ICH8,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1222 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1223 	  WM_T_ICH8,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1225 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1226 	  WM_T_ICH8,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1228 	  "82567V-3 LAN Controller",
   1229 	  WM_T_ICH8,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1231 	  "82801I (AMT) LAN Controller",
   1232 	  WM_T_ICH9,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1234 	  "82801I 10/100 LAN Controller",
   1235 	  WM_T_ICH9,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1237 	  "82801I (G) 10/100 LAN Controller",
   1238 	  WM_T_ICH9,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1240 	  "82801I (GT) 10/100 LAN Controller",
   1241 	  WM_T_ICH9,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1243 	  "82801I (C) LAN Controller",
   1244 	  WM_T_ICH9,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1246 	  "82801I mobile LAN Controller",
   1247 	  WM_T_ICH9,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1249 	  "82801I mobile (V) LAN Controller",
   1250 	  WM_T_ICH9,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1252 	  "82801I mobile (AMT) LAN Controller",
   1253 	  WM_T_ICH9,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1255 	  "82567LM-4 LAN Controller",
   1256 	  WM_T_ICH9,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1258 	  "82567LM-2 LAN Controller",
   1259 	  WM_T_ICH10,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1261 	  "82567LF-2 LAN Controller",
   1262 	  WM_T_ICH10,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1264 	  "82567LM-3 LAN Controller",
   1265 	  WM_T_ICH10,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1267 	  "82567LF-3 LAN Controller",
   1268 	  WM_T_ICH10,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1270 	  "82567V-2 LAN Controller",
   1271 	  WM_T_ICH10,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1273 	  "82567V-3? LAN Controller",
   1274 	  WM_T_ICH10,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1276 	  "HANKSVILLE LAN Controller",
   1277 	  WM_T_ICH10,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1279 	  "PCH LAN (82577LM) Controller",
   1280 	  WM_T_PCH,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1282 	  "PCH LAN (82577LC) Controller",
   1283 	  WM_T_PCH,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1285 	  "PCH LAN (82578DM) Controller",
   1286 	  WM_T_PCH,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1288 	  "PCH LAN (82578DC) Controller",
   1289 	  WM_T_PCH,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1291 	  "PCH2 LAN (82579LM) Controller",
   1292 	  WM_T_PCH2,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1294 	  "PCH2 LAN (82579V) Controller",
   1295 	  WM_T_PCH2,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1297 	  "82575EB dual-1000baseT Ethernet",
   1298 	  WM_T_82575,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1300 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1301 	  WM_T_82575,		WMP_F_SERDES },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1303 	  "82575GB quad-1000baseT Ethernet",
   1304 	  WM_T_82575,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1306 	  "82575GB quad-1000baseT Ethernet (PM)",
   1307 	  WM_T_82575,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1309 	  "82576 1000BaseT Ethernet",
   1310 	  WM_T_82576,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1312 	  "82576 1000BaseX Ethernet",
   1313 	  WM_T_82576,		WMP_F_FIBER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1316 	  "82576 gigabit Ethernet (SERDES)",
   1317 	  WM_T_82576,		WMP_F_SERDES },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1320 	  "82576 quad-1000BaseT Ethernet",
   1321 	  WM_T_82576,		WMP_F_COPPER },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1324 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1325 	  WM_T_82576,		WMP_F_COPPER },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1328 	  "82576 gigabit Ethernet",
   1329 	  WM_T_82576,		WMP_F_COPPER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
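         	/* Sentinel: wm_lookup() stops at the first entry with a NULL name. */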
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
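         /*
          * Indirect register access through the I/O BAR: the target register
          * offset is first written to the 32-bit window at offset 0, and the
          * data is then transferred through the window at offset 4, as both
          * wm_io_read() above (currently disabled) and wm_io_write() below do.
          */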
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
   1523 
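         /*
          * Write one byte to an 8-bit register behind an indirect control
          * register on the 82575 family (judging by the SCTL_* names, the
          * SERDES control register): the data and the target offset are packed
          * into a single register write, after which the READY bit is polled
          * in 5us steps, for up to SCTL_CTL_POLL_TIMEOUT iterations.
          */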
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
   1546 
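         /*
          * Store a bus address into the two little-endian 32-bit halves of a
          * descriptor address field.  The high half is only meaningful when
          * bus_addr_t is 64-bit wide; otherwise it is cleared.
          */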
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
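         /*
          * Sync "num" Tx descriptors starting at "start", splitting the
          * bus_dmamap_sync() into two calls when the range wraps past the
          * end of the ring.  For example, with WM_NTXDESC(txq) == 256,
          * start == 250 and num == 10, the first sync covers descriptors
          * 250-255 and the second covers descriptors 0-3.
          */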
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
   1599 
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
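         	/*
         	 * Advancing the receive descriptor tail register makes the
         	 * freshly initialized descriptor available to the hardware
         	 * again.
         	 */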
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
   1644 /* Lookup supported device table */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
    1770 	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
    1813 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
    1814 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1815 			 * that is no problem, because the newer chips don't
    1816 			 * have this bug.
    1817 			 *
    1818 			 * The i8254x apparently doesn't respond when the
    1819 			 * I/O BAR is 0, which looks somewhat like it has
    1820 			 * not been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1857 	counts[PCI_INTR_TYPE_MSI] = 1;
   1858 	counts[PCI_INTR_TYPE_INTX] = 1;
   1859 	/* overridden by disable flags */
   1860 	if (wm_disable_msi != 0) {
   1861 		counts[PCI_INTR_TYPE_MSI] = 0;
   1862 		if (wm_disable_msix != 0) {
   1863 			max_type = PCI_INTR_TYPE_INTX;
   1864 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1865 		}
   1866 	} else if (wm_disable_msix != 0) {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
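         	/*
         	 * Interrupt allocation fallback ladder: MSI-X is tried first,
         	 * with one vector per queue plus one for link status; if setting
         	 * up MSI-X fails, the allocation is retried with MSI, and
         	 * finally with a single INTx line.
         	 */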
   1871 alloc_retry:
   1872 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1873 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1874 		return;
   1875 	}
   1876 
   1877 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1878 		error = wm_setup_msix(sc);
   1879 		if (error) {
   1880 			pci_intr_release(pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSIX]);
   1882 
   1883 			/* Setup for MSI: Disable MSI-X */
   1884 			max_type = PCI_INTR_TYPE_MSI;
   1885 			counts[PCI_INTR_TYPE_MSI] = 1;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
    1889 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_MSI]);
   1895 
   1896 			/* The next try is for INTx: Disable MSI */
   1897 			max_type = PCI_INTR_TYPE_INTX;
   1898 			counts[PCI_INTR_TYPE_INTX] = 1;
   1899 			goto alloc_retry;
   1900 		}
   1901 	} else {
   1902 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1903 		error = wm_setup_legacy(sc);
   1904 		if (error) {
   1905 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1906 			    counts[PCI_INTR_TYPE_INTX]);
   1907 			return;
   1908 		}
   1909 	}
   1910 
   1911 	/*
   1912 	 * Check the function ID (unit number of the chip).
   1913 	 */
   1914 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1915 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1916 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1917 	    || (sc->sc_type == WM_T_82580)
   1918 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1919 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1920 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1921 	else
   1922 		sc->sc_funcid = 0;
   1923 
   1924 	/*
   1925 	 * Determine a few things about the bus we're connected to.
   1926 	 */
   1927 	if (sc->sc_type < WM_T_82543) {
   1928 		/* We don't really know the bus characteristics here. */
   1929 		sc->sc_bus_speed = 33;
   1930 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1931 		/*
    1932 		 * CSA (Communication Streaming Architecture) is about as
    1933 		 * fast as a 32-bit 66MHz PCI bus.
   1934 		 */
   1935 		sc->sc_flags |= WM_F_CSA;
   1936 		sc->sc_bus_speed = 66;
   1937 		aprint_verbose_dev(sc->sc_dev,
   1938 		    "Communication Streaming Architecture\n");
   1939 		if (sc->sc_type == WM_T_82547) {
   1940 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1941 			callout_setfunc(&sc->sc_txfifo_ch,
   1942 					wm_82547_txfifo_stall, sc);
   1943 			aprint_verbose_dev(sc->sc_dev,
   1944 			    "using 82547 Tx FIFO stall work-around\n");
   1945 		}
   1946 	} else if (sc->sc_type >= WM_T_82571) {
   1947 		sc->sc_flags |= WM_F_PCIE;
   1948 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1949 		    && (sc->sc_type != WM_T_ICH10)
   1950 		    && (sc->sc_type != WM_T_PCH)
   1951 		    && (sc->sc_type != WM_T_PCH2)
   1952 		    && (sc->sc_type != WM_T_PCH_LPT)
   1953 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1954 			/* ICH* and PCH* have no PCIe capability registers */
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1957 				NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIe capability\n");
   1960 		}
   1961 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1962 	} else {
   1963 		reg = CSR_READ(sc, WMREG_STATUS);
   1964 		if (reg & STATUS_BUS64)
   1965 			sc->sc_flags |= WM_F_BUS64;
   1966 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1967 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1968 
   1969 			sc->sc_flags |= WM_F_PCIX;
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1972 				aprint_error_dev(sc->sc_dev,
   1973 				    "unable to find PCIX capability\n");
   1974 			else if (sc->sc_type != WM_T_82545_3 &&
   1975 				 sc->sc_type != WM_T_82546_3) {
   1976 				/*
   1977 				 * Work around a problem caused by the BIOS
   1978 				 * setting the max memory read byte count
   1979 				 * incorrectly.
   1980 				 */
   1981 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1982 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1983 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1984 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1985 
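         				/*
         				 * Both fields encode the byte count
         				 * as 512 << n, so comparing the raw
         				 * field values compares the byte
         				 * counts themselves.
         				 */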
   1986 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1987 				    PCIX_CMD_BYTECNT_SHIFT;
   1988 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1989 				    PCIX_STATUS_MAXB_SHIFT;
   1990 				if (bytecnt > maxb) {
   1991 					aprint_verbose_dev(sc->sc_dev,
   1992 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1993 					    512 << bytecnt, 512 << maxb);
   1994 					pcix_cmd = (pcix_cmd &
   1995 					    ~PCIX_CMD_BYTECNT_MASK) |
   1996 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1997 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1998 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1999 					    pcix_cmd);
   2000 				}
   2001 			}
   2002 		}
   2003 		/*
   2004 		 * The quad port adapter is special; it has a PCIX-PCIX
   2005 		 * bridge on the board, and can run the secondary bus at
   2006 		 * a higher speed.
   2007 		 */
   2008 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2009 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2010 								      : 66;
   2011 		} else if (sc->sc_flags & WM_F_PCIX) {
   2012 			switch (reg & STATUS_PCIXSPD_MASK) {
   2013 			case STATUS_PCIXSPD_50_66:
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			case STATUS_PCIXSPD_66_100:
   2017 				sc->sc_bus_speed = 100;
   2018 				break;
   2019 			case STATUS_PCIXSPD_100_133:
   2020 				sc->sc_bus_speed = 133;
   2021 				break;
   2022 			default:
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2025 				    reg & STATUS_PCIXSPD_MASK);
   2026 				sc->sc_bus_speed = 66;
   2027 				break;
   2028 			}
   2029 		} else
   2030 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2031 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2032 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2033 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2034 	}
   2035 
   2036 	/* clear interesting stat counters */
   2037 	CSR_READ(sc, WMREG_COLC);
   2038 	CSR_READ(sc, WMREG_RXERRC);
   2039 
   2040 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2041 	    || (sc->sc_type >= WM_T_ICH8))
   2042 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2043 	if (sc->sc_type >= WM_T_ICH8)
   2044 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2045 
   2046 	/* Set PHY, NVM mutex related stuff */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82542_2_0:
   2049 	case WM_T_82542_2_1:
   2050 	case WM_T_82543:
   2051 	case WM_T_82544:
   2052 		/* Microwire */
   2053 		sc->nvm.read = wm_nvm_read_uwire;
   2054 		sc->sc_nvm_wordsize = 64;
   2055 		sc->sc_nvm_addrbits = 6;
   2056 		break;
   2057 	case WM_T_82540:
   2058 	case WM_T_82545:
   2059 	case WM_T_82545_3:
   2060 	case WM_T_82546:
   2061 	case WM_T_82546_3:
   2062 		/* Microwire */
   2063 		sc->nvm.read = wm_nvm_read_uwire;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_SIZE) {
   2066 			sc->sc_nvm_wordsize = 256;
   2067 			sc->sc_nvm_addrbits = 8;
   2068 		} else {
   2069 			sc->sc_nvm_wordsize = 64;
   2070 			sc->sc_nvm_addrbits = 6;
   2071 		}
   2072 		sc->sc_flags |= WM_F_LOCK_EECD;
   2073 		sc->nvm.acquire = wm_get_eecd;
   2074 		sc->nvm.release = wm_put_eecd;
   2075 		break;
   2076 	case WM_T_82541:
   2077 	case WM_T_82541_2:
   2078 	case WM_T_82547:
   2079 	case WM_T_82547_2:
   2080 		reg = CSR_READ(sc, WMREG_EECD);
   2081 		/*
    2082 		 * wm_nvm_set_addrbits_size_eecd() itself accesses the SPI only
    2083 		 * on 8254[17], so set flags and functions before calling it.
   2084 		 */
   2085 		sc->sc_flags |= WM_F_LOCK_EECD;
   2086 		sc->nvm.acquire = wm_get_eecd;
   2087 		sc->nvm.release = wm_put_eecd;
   2088 		if (reg & EECD_EE_TYPE) {
   2089 			/* SPI */
   2090 			sc->nvm.read = wm_nvm_read_spi;
   2091 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2092 			wm_nvm_set_addrbits_size_eecd(sc);
   2093 		} else {
   2094 			/* Microwire */
   2095 			sc->nvm.read = wm_nvm_read_uwire;
   2096 			if ((reg & EECD_EE_ABITS) != 0) {
   2097 				sc->sc_nvm_wordsize = 256;
   2098 				sc->sc_nvm_addrbits = 8;
   2099 			} else {
   2100 				sc->sc_nvm_wordsize = 64;
   2101 				sc->sc_nvm_addrbits = 6;
   2102 			}
   2103 		}
   2104 		break;
   2105 	case WM_T_82571:
   2106 	case WM_T_82572:
   2107 		/* SPI */
   2108 		sc->nvm.read = wm_nvm_read_eerd;
    2109 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2110 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2111 		wm_nvm_set_addrbits_size_eecd(sc);
   2112 		sc->phy.acquire = wm_get_swsm_semaphore;
   2113 		sc->phy.release = wm_put_swsm_semaphore;
   2114 		sc->nvm.acquire = wm_get_nvm_82571;
   2115 		sc->nvm.release = wm_put_nvm_82571;
   2116 		break;
   2117 	case WM_T_82573:
   2118 	case WM_T_82574:
   2119 	case WM_T_82583:
   2120 		sc->nvm.read = wm_nvm_read_eerd;
    2121 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2122 		if (sc->sc_type == WM_T_82573) {
   2123 			sc->phy.acquire = wm_get_swsm_semaphore;
   2124 			sc->phy.release = wm_put_swsm_semaphore;
   2125 			sc->nvm.acquire = wm_get_nvm_82571;
   2126 			sc->nvm.release = wm_put_nvm_82571;
   2127 		} else {
   2128 			/* Both PHY and NVM use the same semaphore. */
   2129 			sc->phy.acquire = sc->nvm.acquire
   2130 			    = wm_get_swfwhw_semaphore;
   2131 			sc->phy.release = sc->nvm.release
   2132 			    = wm_put_swfwhw_semaphore;
   2133 		}
   2134 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2135 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2136 			sc->sc_nvm_wordsize = 2048;
   2137 		} else {
   2138 			/* SPI */
   2139 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2140 			wm_nvm_set_addrbits_size_eecd(sc);
   2141 		}
   2142 		break;
   2143 	case WM_T_82575:
   2144 	case WM_T_82576:
   2145 	case WM_T_82580:
   2146 	case WM_T_I350:
   2147 	case WM_T_I354:
   2148 	case WM_T_80003:
   2149 		/* SPI */
   2150 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2151 		wm_nvm_set_addrbits_size_eecd(sc);
    2152 		if ((sc->sc_type == WM_T_80003)
   2153 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2154 			sc->nvm.read = wm_nvm_read_eerd;
   2155 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2156 		} else {
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_LOCK_EECD;
   2159 		}
   2160 		sc->phy.acquire = wm_get_phy_82575;
   2161 		sc->phy.release = wm_put_phy_82575;
   2162 		sc->nvm.acquire = wm_get_nvm_80003;
   2163 		sc->nvm.release = wm_put_nvm_80003;
   2164 		break;
   2165 	case WM_T_ICH8:
   2166 	case WM_T_ICH9:
   2167 	case WM_T_ICH10:
   2168 	case WM_T_PCH:
   2169 	case WM_T_PCH2:
   2170 	case WM_T_PCH_LPT:
   2171 		sc->nvm.read = wm_nvm_read_ich8;
   2172 		/* FLASH */
   2173 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2174 		sc->sc_nvm_wordsize = 2048;
   2175 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2176 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2177 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2178 			aprint_error_dev(sc->sc_dev,
   2179 			    "can't map FLASH registers\n");
   2180 			goto out;
   2181 		}
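         		/*
         		 * GFPREG holds the first sector of the flash NVM region
         		 * in its low bits and the last sector in the same-width
         		 * field starting at bit 16.  The computation below turns
         		 * that into a byte count and then divides by four: by two
         		 * because the region holds two banks, and by two again to
         		 * count 16-bit words rather than bytes.
         		 */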
   2182 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2183 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2184 		    ICH_FLASH_SECTOR_SIZE;
   2185 		sc->sc_ich8_flash_bank_size =
   2186 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2187 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2188 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2189 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2190 		sc->sc_flashreg_offset = 0;
   2191 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2192 		sc->phy.release = wm_put_swflag_ich8lan;
   2193 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2194 		sc->nvm.release = wm_put_nvm_ich8lan;
   2195 		break;
   2196 	case WM_T_PCH_SPT:
   2197 		sc->nvm.read = wm_nvm_read_spt;
   2198 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2199 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2200 		sc->sc_flasht = sc->sc_st;
   2201 		sc->sc_flashh = sc->sc_sh;
   2202 		sc->sc_ich8_flash_base = 0;
   2203 		sc->sc_nvm_wordsize =
   2204 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2205 			* NVM_SIZE_MULTIPLIER;
    2206 		/* It is a size in bytes; we want it in words */
   2207 		sc->sc_nvm_wordsize /= 2;
   2208 		/* assume 2 banks */
   2209 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2210 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2211 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2212 		sc->phy.release = wm_put_swflag_ich8lan;
   2213 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2214 		sc->nvm.release = wm_put_nvm_ich8lan;
   2215 		break;
   2216 	case WM_T_I210:
   2217 	case WM_T_I211:
   2218 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2219 			sc->nvm.read = wm_nvm_read_eerd;
   2220 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2221 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2222 			wm_nvm_set_addrbits_size_eecd(sc);
   2223 		} else {
   2224 			sc->nvm.read = wm_nvm_read_invm;
   2225 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2226 			sc->sc_nvm_wordsize = INVM_SIZE;
   2227 		}
   2228 		sc->phy.acquire = wm_get_phy_82575;
   2229 		sc->phy.release = wm_put_phy_82575;
   2230 		sc->nvm.acquire = wm_get_nvm_80003;
   2231 		sc->nvm.release = wm_put_nvm_80003;
   2232 		break;
   2233 	default:
   2234 		break;
   2235 	}
   2236 
   2237 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2238 	switch (sc->sc_type) {
   2239 	case WM_T_82571:
   2240 	case WM_T_82572:
   2241 		reg = CSR_READ(sc, WMREG_SWSM2);
   2242 		if ((reg & SWSM2_LOCK) == 0) {
   2243 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2244 			force_clear_smbi = true;
   2245 		} else
   2246 			force_clear_smbi = false;
   2247 		break;
   2248 	case WM_T_82573:
   2249 	case WM_T_82574:
   2250 	case WM_T_82583:
   2251 		force_clear_smbi = true;
   2252 		break;
   2253 	default:
   2254 		force_clear_smbi = false;
   2255 		break;
   2256 	}
   2257 	if (force_clear_smbi) {
   2258 		reg = CSR_READ(sc, WMREG_SWSM);
   2259 		if ((reg & SWSM_SMBI) != 0)
   2260 			aprint_error_dev(sc->sc_dev,
   2261 			    "Please update the Bootagent\n");
   2262 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2263 	}
   2264 
   2265 	/*
    2266 	 * Defer printing the EEPROM type until after verifying the checksum.
   2267 	 * This allows the EEPROM type to be printed correctly in the case
   2268 	 * that no EEPROM is attached.
   2269 	 */
   2270 	/*
   2271 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2272 	 * this for later, so we can fail future reads from the EEPROM.
   2273 	 */
   2274 	if (wm_nvm_validate_checksum(sc)) {
   2275 		/*
    2276 		 * Validate a second time, because some PCIe parts fail the
    2277 		 * first check due to the link being in a sleep state.
   2278 		 */
   2279 		if (wm_nvm_validate_checksum(sc))
   2280 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2281 	}
   2282 
   2283 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2284 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2285 	else {
   2286 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2287 		    sc->sc_nvm_wordsize);
   2288 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2289 			aprint_verbose("iNVM");
   2290 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2291 			aprint_verbose("FLASH(HW)");
   2292 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2293 			aprint_verbose("FLASH");
   2294 		else {
   2295 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2296 				eetype = "SPI";
   2297 			else
   2298 				eetype = "MicroWire";
   2299 			aprint_verbose("(%d address bits) %s EEPROM",
   2300 			    sc->sc_nvm_addrbits, eetype);
   2301 		}
   2302 	}
   2303 	wm_nvm_version(sc);
   2304 	aprint_verbose("\n");
   2305 
   2306 	/*
    2307 	 * XXX The first call to wm_gmii_setup_phytype.  The result might
    2308 	 * be incorrect.
   2309 	 */
   2310 	wm_gmii_setup_phytype(sc, 0, 0);
   2311 
   2312 	/* Reset the chip to a known state. */
   2313 	wm_reset(sc);
   2314 
   2315 	/* Check for I21[01] PLL workaround */
   2316 	if (sc->sc_type == WM_T_I210)
   2317 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2318 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2319 		/* NVM image release 3.25 has a workaround */
   2320 		if ((sc->sc_nvm_ver_major < 3)
   2321 		    || ((sc->sc_nvm_ver_major == 3)
   2322 			&& (sc->sc_nvm_ver_minor < 25))) {
   2323 			aprint_verbose_dev(sc->sc_dev,
   2324 			    "ROM image version %d.%d is older than 3.25\n",
   2325 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2326 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2327 		}
   2328 	}
   2329 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2330 		wm_pll_workaround_i210(sc);
   2331 
   2332 	wm_get_wakeup(sc);
   2333 
   2334 	/* Non-AMT based hardware can now take control from firmware */
   2335 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2336 		wm_get_hw_control(sc);
   2337 
   2338 	/*
   2339 	 * Read the Ethernet address from the EEPROM, if not first found
   2340 	 * in device properties.
   2341 	 */
   2342 	ea = prop_dictionary_get(dict, "mac-address");
   2343 	if (ea != NULL) {
   2344 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2345 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2346 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2347 	} else {
   2348 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2349 			aprint_error_dev(sc->sc_dev,
   2350 			    "unable to read Ethernet address\n");
   2351 			goto out;
   2352 		}
   2353 	}
   2354 
   2355 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2356 	    ether_sprintf(enaddr));
   2357 
   2358 	/*
   2359 	 * Read the config info from the EEPROM, and set up various
   2360 	 * bits in the control registers based on their contents.
   2361 	 */
   2362 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2363 	if (pn != NULL) {
   2364 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2365 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2366 	} else {
   2367 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2368 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2369 			goto out;
   2370 		}
   2371 	}
   2372 
   2373 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2374 	if (pn != NULL) {
   2375 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2376 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2377 	} else {
   2378 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2379 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2380 			goto out;
   2381 		}
   2382 	}
   2383 
   2384 	/* check for WM_F_WOL */
   2385 	switch (sc->sc_type) {
   2386 	case WM_T_82542_2_0:
   2387 	case WM_T_82542_2_1:
   2388 	case WM_T_82543:
   2389 		/* dummy? */
   2390 		eeprom_data = 0;
   2391 		apme_mask = NVM_CFG3_APME;
   2392 		break;
   2393 	case WM_T_82544:
   2394 		apme_mask = NVM_CFG2_82544_APM_EN;
   2395 		eeprom_data = cfg2;
   2396 		break;
   2397 	case WM_T_82546:
   2398 	case WM_T_82546_3:
   2399 	case WM_T_82571:
   2400 	case WM_T_82572:
   2401 	case WM_T_82573:
   2402 	case WM_T_82574:
   2403 	case WM_T_82583:
   2404 	case WM_T_80003:
   2405 	default:
   2406 		apme_mask = NVM_CFG3_APME;
   2407 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2408 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2409 		break;
   2410 	case WM_T_82575:
   2411 	case WM_T_82576:
   2412 	case WM_T_82580:
   2413 	case WM_T_I350:
   2414 	case WM_T_I354: /* XXX ok? */
   2415 	case WM_T_ICH8:
   2416 	case WM_T_ICH9:
   2417 	case WM_T_ICH10:
   2418 	case WM_T_PCH:
   2419 	case WM_T_PCH2:
   2420 	case WM_T_PCH_LPT:
   2421 	case WM_T_PCH_SPT:
   2422 		/* XXX The funcid should be checked on some devices */
   2423 		apme_mask = WUC_APME;
   2424 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2425 		break;
   2426 	}
   2427 
   2428 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2429 	if ((eeprom_data & apme_mask) != 0)
   2430 		sc->sc_flags |= WM_F_WOL;
   2431 
   2432 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2433 		/* Check NVM for autonegotiation */
   2434 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2435 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2436 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2437 		}
   2438 	}
   2439 
   2440 	/*
    2441 	 * XXX need special handling for some multiple-port cards
    2442 	 * to disable a particular port.
   2443 	 */
   2444 
   2445 	if (sc->sc_type >= WM_T_82544) {
   2446 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2447 		if (pn != NULL) {
   2448 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2449 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2450 		} else {
   2451 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2452 				aprint_error_dev(sc->sc_dev,
   2453 				    "unable to read SWDPIN\n");
   2454 				goto out;
   2455 			}
   2456 		}
   2457 	}
   2458 
   2459 	if (cfg1 & NVM_CFG1_ILOS)
   2460 		sc->sc_ctrl |= CTRL_ILOS;
   2461 
   2462 	/*
   2463 	 * XXX
    2464 	 * This code isn't correct because pins 2 and 3 are located
    2465 	 * at different positions on newer chips.  Check all the datasheets.
    2466 	 *
    2467 	 * Until this is resolved, only apply it to chips up to the 82580.
   2468 	 */
   2469 	if (sc->sc_type <= WM_T_82580) {
   2470 		if (sc->sc_type >= WM_T_82544) {
   2471 			sc->sc_ctrl |=
   2472 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2473 			    CTRL_SWDPIO_SHIFT;
   2474 			sc->sc_ctrl |=
   2475 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2476 			    CTRL_SWDPINS_SHIFT;
   2477 		} else {
   2478 			sc->sc_ctrl |=
   2479 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2480 			    CTRL_SWDPIO_SHIFT;
   2481 		}
   2482 	}
   2483 
   2484 	/* XXX For other than 82580? */
   2485 	if (sc->sc_type == WM_T_82580) {
   2486 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2487 		if (nvmword & __BIT(13))
   2488 			sc->sc_ctrl |= CTRL_ILOS;
   2489 	}
   2490 
   2491 #if 0
   2492 	if (sc->sc_type >= WM_T_82544) {
   2493 		if (cfg1 & NVM_CFG1_IPS0)
   2494 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2495 		if (cfg1 & NVM_CFG1_IPS1)
   2496 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2497 		sc->sc_ctrl_ext |=
   2498 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2499 		    CTRL_EXT_SWDPIO_SHIFT;
   2500 		sc->sc_ctrl_ext |=
   2501 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2502 		    CTRL_EXT_SWDPINS_SHIFT;
   2503 	} else {
   2504 		sc->sc_ctrl_ext |=
   2505 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2506 		    CTRL_EXT_SWDPIO_SHIFT;
   2507 	}
   2508 #endif
   2509 
   2510 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2511 #if 0
   2512 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2513 #endif
   2514 
   2515 	if (sc->sc_type == WM_T_PCH) {
   2516 		uint16_t val;
   2517 
   2518 		/* Save the NVM K1 bit setting */
   2519 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2520 
   2521 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2522 			sc->sc_nvm_k1_enabled = 1;
   2523 		else
   2524 			sc->sc_nvm_k1_enabled = 0;
   2525 	}
   2526 
    2527 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2528 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2529 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2530 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2531 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2532 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2533 		/* Copper only */
   2534 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2535 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2536 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2537 	    || (sc->sc_type == WM_T_I211)) {
   2538 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2539 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2540 		switch (link_mode) {
   2541 		case CTRL_EXT_LINK_MODE_1000KX:
   2542 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2543 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2544 			break;
   2545 		case CTRL_EXT_LINK_MODE_SGMII:
   2546 			if (wm_sgmii_uses_mdio(sc)) {
   2547 				aprint_verbose_dev(sc->sc_dev,
   2548 				    "SGMII(MDIO)\n");
   2549 				sc->sc_flags |= WM_F_SGMII;
   2550 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2551 				break;
   2552 			}
   2553 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2554 			/*FALLTHROUGH*/
   2555 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2556 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2557 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2558 				if (link_mode
   2559 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2560 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2561 					sc->sc_flags |= WM_F_SGMII;
   2562 				} else {
   2563 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2564 					aprint_verbose_dev(sc->sc_dev,
   2565 					    "SERDES\n");
   2566 				}
   2567 				break;
   2568 			}
   2569 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2570 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2571 
   2572 			/* Change current link mode setting */
   2573 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2574 			switch (sc->sc_mediatype) {
   2575 			case WM_MEDIATYPE_COPPER:
   2576 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2577 				break;
   2578 			case WM_MEDIATYPE_SERDES:
   2579 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2580 				break;
   2581 			default:
   2582 				break;
   2583 			}
   2584 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2585 			break;
   2586 		case CTRL_EXT_LINK_MODE_GMII:
   2587 		default:
   2588 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2589 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2590 			break;
   2591 		}
   2592 
    2594 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2595 			reg |= CTRL_EXT_I2C_ENA;
    2596 		else
    2597 			reg &= ~CTRL_EXT_I2C_ENA;
   2598 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2599 	} else if (sc->sc_type < WM_T_82543 ||
   2600 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2601 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2602 			aprint_error_dev(sc->sc_dev,
   2603 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2604 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2605 		}
   2606 	} else {
   2607 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2608 			aprint_error_dev(sc->sc_dev,
   2609 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2610 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2611 		}
   2612 	}
   2613 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2614 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2615 
   2616 	/* Set device properties (macflags) */
   2617 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2618 
   2619 	/* Initialize the media structures accordingly. */
   2620 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2621 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2622 	else
   2623 		wm_tbi_mediainit(sc); /* All others */
   2624 
   2625 	ifp = &sc->sc_ethercom.ec_if;
   2626 	xname = device_xname(sc->sc_dev);
   2627 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2628 	ifp->if_softc = sc;
   2629 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2630 #ifdef WM_MPSAFE
   2631 	ifp->if_extflags = IFEF_START_MPSAFE;
   2632 #endif
   2633 	ifp->if_ioctl = wm_ioctl;
   2634 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2635 		ifp->if_start = wm_nq_start;
   2636 		/*
    2637 		 * When the number of CPUs is one and the controller can use
    2638 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2639 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2640 		 * the other for link status changes.
    2641 		 * In this situation, wm_nq_transmit() is disadvantageous
    2642 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2643 		 */
   2644 		if (wm_is_using_multiqueue(sc))
   2645 			ifp->if_transmit = wm_nq_transmit;
   2646 	} else {
   2647 		ifp->if_start = wm_start;
   2648 		/*
    2649 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2650 		 */
   2651 		if (wm_is_using_multiqueue(sc))
   2652 			ifp->if_transmit = wm_transmit;
   2653 	}
   2654 	ifp->if_watchdog = wm_watchdog;
   2655 	ifp->if_init = wm_init;
   2656 	ifp->if_stop = wm_stop;
   2657 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2658 	IFQ_SET_READY(&ifp->if_snd);
   2659 
   2660 	/* Check for jumbo frame */
   2661 	switch (sc->sc_type) {
   2662 	case WM_T_82573:
   2663 		/* XXX limited to 9234 if ASPM is disabled */
   2664 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2665 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2666 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2667 		break;
   2668 	case WM_T_82571:
   2669 	case WM_T_82572:
   2670 	case WM_T_82574:
   2671 	case WM_T_82575:
   2672 	case WM_T_82576:
   2673 	case WM_T_82580:
   2674 	case WM_T_I350:
    2675 	case WM_T_I354: /* XXX ok? */
   2676 	case WM_T_I210:
   2677 	case WM_T_I211:
   2678 	case WM_T_80003:
   2679 	case WM_T_ICH9:
   2680 	case WM_T_ICH10:
   2681 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2682 	case WM_T_PCH_LPT:
   2683 	case WM_T_PCH_SPT:
   2684 		/* XXX limited to 9234 */
   2685 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2686 		break;
   2687 	case WM_T_PCH:
   2688 		/* XXX limited to 4096 */
   2689 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2690 		break;
   2691 	case WM_T_82542_2_0:
   2692 	case WM_T_82542_2_1:
   2693 	case WM_T_82583:
   2694 	case WM_T_ICH8:
   2695 		/* No support for jumbo frame */
   2696 		break;
   2697 	default:
   2698 		/* ETHER_MAX_LEN_JUMBO */
   2699 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2700 		break;
   2701 	}
   2702 
    2703 	/* If we're an i82543 or greater, we can support VLANs. */
   2704 	if (sc->sc_type >= WM_T_82543)
   2705 		sc->sc_ethercom.ec_capabilities |=
   2706 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2707 
   2708 	/*
    2709 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2710 	 * on i82543 and later.
   2711 	 */
   2712 	if (sc->sc_type >= WM_T_82543) {
   2713 		ifp->if_capabilities |=
   2714 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2715 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2716 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2717 		    IFCAP_CSUM_TCPv6_Tx |
   2718 		    IFCAP_CSUM_UDPv6_Tx;
   2719 	}
   2720 
   2721 	/*
   2722 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2723 	 *
   2724 	 *	82541GI (8086:1076) ... no
   2725 	 *	82572EI (8086:10b9) ... yes
   2726 	 */
   2727 	if (sc->sc_type >= WM_T_82571) {
   2728 		ifp->if_capabilities |=
   2729 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2730 	}
   2731 
   2732 	/*
    2733 	 * If we're an i82544 or greater (except i82547), we can do
   2734 	 * TCP segmentation offload.
   2735 	 */
   2736 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2737 		ifp->if_capabilities |= IFCAP_TSOv4;
   2738 	}
   2739 
   2740 	if (sc->sc_type >= WM_T_82571) {
   2741 		ifp->if_capabilities |= IFCAP_TSOv6;
   2742 	}
   2743 
   2744 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2745 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2746 
   2747 #ifdef WM_MPSAFE
   2748 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2749 #else
   2750 	sc->sc_core_lock = NULL;
   2751 #endif
   2752 
   2753 	/* Attach the interface. */
   2754 	if_initialize(ifp);
   2755 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2756 	ether_ifattach(ifp, enaddr);
   2757 	if_register(ifp);
   2758 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2759 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2760 			  RND_FLAG_DEFAULT);
   2761 
   2762 #ifdef WM_EVENT_COUNTERS
   2763 	/* Attach event counters. */
   2764 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2765 	    NULL, xname, "linkintr");
   2766 
   2767 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2768 	    NULL, xname, "tx_xoff");
   2769 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2770 	    NULL, xname, "tx_xon");
   2771 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2772 	    NULL, xname, "rx_xoff");
   2773 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2774 	    NULL, xname, "rx_xon");
   2775 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2776 	    NULL, xname, "rx_macctl");
   2777 #endif /* WM_EVENT_COUNTERS */
   2778 
   2779 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2780 		pmf_class_network_register(self, ifp);
   2781 	else
   2782 		aprint_error_dev(self, "couldn't establish power handler\n");
   2783 
   2784 	sc->sc_flags |= WM_F_ATTACHED;
   2785  out:
   2786 	return;
   2787 }
   2788 
   2789 /* The detach function (ca_detach) */
   2790 static int
   2791 wm_detach(device_t self, int flags __unused)
   2792 {
   2793 	struct wm_softc *sc = device_private(self);
   2794 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2795 	int i;
   2796 
   2797 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2798 		return 0;
   2799 
	/* Stop the interface.  Callouts are stopped inside wm_stop(). */
   2801 	wm_stop(ifp, 1);
   2802 
   2803 	pmf_device_deregister(self);
   2804 
   2805 #ifdef WM_EVENT_COUNTERS
   2806 	evcnt_detach(&sc->sc_ev_linkintr);
   2807 
   2808 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2809 	evcnt_detach(&sc->sc_ev_tx_xon);
   2810 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2811 	evcnt_detach(&sc->sc_ev_rx_xon);
   2812 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2813 #endif /* WM_EVENT_COUNTERS */
   2814 
   2815 	/* Tell the firmware about the release */
   2816 	WM_CORE_LOCK(sc);
   2817 	wm_release_manageability(sc);
   2818 	wm_release_hw_control(sc);
   2819 	wm_enable_wakeup(sc);
   2820 	WM_CORE_UNLOCK(sc);
   2821 
   2822 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2823 
   2824 	/* Delete all remaining media. */
   2825 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2826 
   2827 	ether_ifdetach(ifp);
   2828 	if_detach(ifp);
   2829 	if_percpuq_destroy(sc->sc_ipq);
   2830 
   2831 	/* Unload RX dmamaps and free mbufs */
   2832 	for (i = 0; i < sc->sc_nqueues; i++) {
   2833 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2834 		mutex_enter(rxq->rxq_lock);
   2835 		wm_rxdrain(rxq);
   2836 		mutex_exit(rxq->rxq_lock);
   2837 	}
   2838 	/* Must unlock here */
   2839 
   2840 	/* Disestablish the interrupt handler */
   2841 	for (i = 0; i < sc->sc_nintrs; i++) {
   2842 		if (sc->sc_ihs[i] != NULL) {
   2843 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2844 			sc->sc_ihs[i] = NULL;
   2845 		}
   2846 	}
   2847 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2848 
   2849 	wm_free_txrx_queues(sc);
   2850 
   2851 	/* Unmap the registers */
   2852 	if (sc->sc_ss) {
   2853 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2854 		sc->sc_ss = 0;
   2855 	}
   2856 	if (sc->sc_ios) {
   2857 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2858 		sc->sc_ios = 0;
   2859 	}
   2860 	if (sc->sc_flashs) {
   2861 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2862 		sc->sc_flashs = 0;
   2863 	}
   2864 
   2865 	if (sc->sc_core_lock)
   2866 		mutex_obj_free(sc->sc_core_lock);
   2867 	if (sc->sc_ich_phymtx)
   2868 		mutex_obj_free(sc->sc_ich_phymtx);
   2869 	if (sc->sc_ich_nvmmtx)
   2870 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2871 
   2872 	return 0;
   2873 }
   2874 
   2875 static bool
   2876 wm_suspend(device_t self, const pmf_qual_t *qual)
   2877 {
   2878 	struct wm_softc *sc = device_private(self);
   2879 
   2880 	wm_release_manageability(sc);
   2881 	wm_release_hw_control(sc);
   2882 	wm_enable_wakeup(sc);
   2883 
   2884 	return true;
   2885 }
   2886 
   2887 static bool
   2888 wm_resume(device_t self, const pmf_qual_t *qual)
   2889 {
   2890 	struct wm_softc *sc = device_private(self);
   2891 
   2892 	wm_init_manageability(sc);
   2893 
   2894 	return true;
   2895 }
   2896 
   2897 /*
   2898  * wm_watchdog:		[ifnet interface function]
   2899  *
   2900  *	Watchdog timer handler.
   2901  */
   2902 static void
   2903 wm_watchdog(struct ifnet *ifp)
   2904 {
   2905 	int qid;
   2906 	struct wm_softc *sc = ifp->if_softc;
   2907 
   2908 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2909 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2910 
   2911 		wm_watchdog_txq(ifp, txq);
   2912 	}
   2913 
   2914 	/* Reset the interface. */
   2915 	(void) wm_init(ifp);
   2916 
   2917 	/*
	 * Some upper-layer processing may still call ifp->if_start()
	 * directly, e.g. ALTQ or a single-CPU system.
   2920 	 */
   2921 	/* Try to get more packets going. */
   2922 	ifp->if_start(ifp);
   2923 }
   2924 
   2925 static void
   2926 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2927 {
   2928 	struct wm_softc *sc = ifp->if_softc;
   2929 
   2930 	/*
   2931 	 * Since we're using delayed interrupts, sweep up
   2932 	 * before we report an error.
   2933 	 */
   2934 	mutex_enter(txq->txq_lock);
   2935 	wm_txeof(sc, txq);
   2936 	mutex_exit(txq->txq_lock);
   2937 
   2938 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2939 #ifdef WM_DEBUG
   2940 		int i, j;
   2941 		struct wm_txsoft *txs;
   2942 #endif
   2943 		log(LOG_ERR,
   2944 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2945 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2946 		    txq->txq_next);
   2947 		ifp->if_oerrors++;
   2948 #ifdef WM_DEBUG
		for (i = txq->txq_sdirty; i != txq->txq_snext;
		    i = WM_NEXTTXS(txq, i)) {
			txs = &txq->txq_soft[i];
			printf("txs %d tx %d -> %d\n",
			    i, txs->txs_firstdesc, txs->txs_lastdesc);
			for (j = txs->txs_firstdesc; ;
			    j = WM_NEXTTX(txq, j)) {
				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
				printf("\t %#08x%08x\n",
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
				if (j == txs->txs_lastdesc)
					break;
			}
		}
   2965 #endif
   2966 	}
   2967 }
   2968 
   2969 /*
   2970  * wm_tick:
   2971  *
   2972  *	One second timer, used to check link status, sweep up
   2973  *	completed transmit jobs, etc.
   2974  */
   2975 static void
   2976 wm_tick(void *arg)
   2977 {
   2978 	struct wm_softc *sc = arg;
   2979 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2980 #ifndef WM_MPSAFE
   2981 	int s = splnet();
   2982 #endif
   2983 
   2984 	WM_CORE_LOCK(sc);
   2985 
   2986 	if (sc->sc_core_stopping)
   2987 		goto out;
   2988 
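	/*
	 * The statistics registers read below are clear-on-read, so
	 * each read both accumulates the count and resets the hardware
	 * counter.
	 */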
   2989 	if (sc->sc_type >= WM_T_82542_2_1) {
   2990 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2991 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2992 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2993 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2994 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2995 	}
   2996 
   2997 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2998 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2999 	    + CSR_READ(sc, WMREG_CRCERRS)
   3000 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3001 	    + CSR_READ(sc, WMREG_SYMERRC)
   3002 	    + CSR_READ(sc, WMREG_RXERRC)
   3003 	    + CSR_READ(sc, WMREG_SEC)
   3004 	    + CSR_READ(sc, WMREG_CEXTERR)
   3005 	    + CSR_READ(sc, WMREG_RLEC);
   3006 	/*
	 * WMREG_RNBC is incremented when there are no available buffers
	 * in host memory.  It does not count dropped packets, because the
	 * Ethernet controller can still receive packets in that case if
	 * there is space in the PHY's FIFO.
	 *
	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
	 * if_iqdrops.
   3014 	 */
   3015 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3016 
   3017 	if (sc->sc_flags & WM_F_HAS_MII)
   3018 		mii_tick(&sc->sc_mii);
   3019 	else if ((sc->sc_type >= WM_T_82575)
   3020 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3021 		wm_serdes_tick(sc);
   3022 	else
   3023 		wm_tbi_tick(sc);
   3024 
   3025 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3026 out:
   3027 	WM_CORE_UNLOCK(sc);
   3028 #ifndef WM_MPSAFE
   3029 	splx(s);
   3030 #endif
   3031 }
   3032 
   3033 static int
   3034 wm_ifflags_cb(struct ethercom *ec)
   3035 {
   3036 	struct ifnet *ifp = &ec->ec_if;
   3037 	struct wm_softc *sc = ifp->if_softc;
   3038 	int rc = 0;
   3039 
   3040 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3041 		device_xname(sc->sc_dev), __func__));
   3042 
   3043 	WM_CORE_LOCK(sc);
   3044 
   3045 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3046 	sc->sc_if_flags = ifp->if_flags;
   3047 
   3048 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3049 		rc = ENETRESET;
   3050 		goto out;
   3051 	}
   3052 
   3053 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3054 		wm_set_filter(sc);
   3055 
   3056 	wm_set_vlan(sc);
   3057 
   3058 out:
   3059 	WM_CORE_UNLOCK(sc);
   3060 
   3061 	return rc;
   3062 }
   3063 
   3064 /*
   3065  * wm_ioctl:		[ifnet interface function]
   3066  *
   3067  *	Handle control requests from the operator.
   3068  */
   3069 static int
   3070 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3071 {
   3072 	struct wm_softc *sc = ifp->if_softc;
   3073 	struct ifreq *ifr = (struct ifreq *) data;
   3074 	struct ifaddr *ifa = (struct ifaddr *)data;
   3075 	struct sockaddr_dl *sdl;
   3076 	int s, error;
   3077 
   3078 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3079 		device_xname(sc->sc_dev), __func__));
   3080 
   3081 #ifndef WM_MPSAFE
   3082 	s = splnet();
   3083 #endif
   3084 	switch (cmd) {
   3085 	case SIOCSIFMEDIA:
   3086 	case SIOCGIFMEDIA:
   3087 		WM_CORE_LOCK(sc);
   3088 		/* Flow control requires full-duplex mode. */
   3089 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3090 		    (ifr->ifr_media & IFM_FDX) == 0)
   3091 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3092 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3093 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3094 				/* We can do both TXPAUSE and RXPAUSE. */
   3095 				ifr->ifr_media |=
   3096 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3097 			}
   3098 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3099 		}
   3100 		WM_CORE_UNLOCK(sc);
   3101 #ifdef WM_MPSAFE
   3102 		s = splnet();
   3103 #endif
   3104 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3105 #ifdef WM_MPSAFE
   3106 		splx(s);
   3107 #endif
   3108 		break;
   3109 	case SIOCINITIFADDR:
   3110 		WM_CORE_LOCK(sc);
   3111 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3112 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3113 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3114 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
			/* The unicast address is the first receive address table entry */
   3116 			wm_set_filter(sc);
   3117 			error = 0;
   3118 			WM_CORE_UNLOCK(sc);
   3119 			break;
   3120 		}
   3121 		WM_CORE_UNLOCK(sc);
   3122 		/*FALLTHROUGH*/
   3123 	default:
   3124 #ifdef WM_MPSAFE
   3125 		s = splnet();
   3126 #endif
   3127 		/* It may call wm_start, so unlock here */
   3128 		error = ether_ioctl(ifp, cmd, data);
   3129 #ifdef WM_MPSAFE
   3130 		splx(s);
   3131 #endif
   3132 		if (error != ENETRESET)
   3133 			break;
   3134 
   3135 		error = 0;
   3136 
   3137 		if (cmd == SIOCSIFCAP) {
   3138 			error = (*ifp->if_init)(ifp);
   3139 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3140 			;
   3141 		else if (ifp->if_flags & IFF_RUNNING) {
   3142 			/*
   3143 			 * Multicast list has changed; set the hardware filter
   3144 			 * accordingly.
   3145 			 */
   3146 			WM_CORE_LOCK(sc);
   3147 			wm_set_filter(sc);
   3148 			WM_CORE_UNLOCK(sc);
   3149 		}
   3150 		break;
   3151 	}
   3152 
   3153 #ifndef WM_MPSAFE
   3154 	splx(s);
   3155 #endif
   3156 	return error;
   3157 }
   3158 
   3159 /* MAC address related */
   3160 
   3161 /*
 * Get the offset of the MAC address and return it.
 * If an error occurs, offset 0 is used.
   3164  */
   3165 static uint16_t
   3166 wm_check_alt_mac_addr(struct wm_softc *sc)
   3167 {
   3168 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3169 	uint16_t offset = NVM_OFF_MACADDR;
   3170 
   3171 	/* Try to read alternative MAC address pointer */
   3172 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3173 		return 0;
   3174 
	/* Check whether the pointer is valid. */
   3176 	if ((offset == 0x0000) || (offset == 0xffff))
   3177 		return 0;
   3178 
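	/* Advance to this LAN function's entry in the alternative area. */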
   3179 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3180 	/*
	 * Check whether the alternative MAC address is valid.  Some
	 * cards have a non-0xffff pointer but don't actually use an
	 * alternative MAC address.
	 *
	 * A valid address must not have the multicast (group) bit set.
   3186 	 */
   3187 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3188 		if (((myea[0] & 0xff) & 0x01) == 0)
   3189 			return offset; /* Found */
   3190 
   3191 	/* Not found */
   3192 	return 0;
   3193 }
   3194 
   3195 static int
   3196 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3197 {
   3198 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3199 	uint16_t offset = NVM_OFF_MACADDR;
   3200 	int do_invert = 0;
   3201 
   3202 	switch (sc->sc_type) {
   3203 	case WM_T_82580:
   3204 	case WM_T_I350:
   3205 	case WM_T_I354:
   3206 		/* EEPROM Top Level Partitioning */
   3207 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3208 		break;
   3209 	case WM_T_82571:
   3210 	case WM_T_82575:
   3211 	case WM_T_82576:
   3212 	case WM_T_80003:
   3213 	case WM_T_I210:
   3214 	case WM_T_I211:
   3215 		offset = wm_check_alt_mac_addr(sc);
   3216 		if (offset == 0)
   3217 			if ((sc->sc_funcid & 0x01) == 1)
   3218 				do_invert = 1;
   3219 		break;
   3220 	default:
   3221 		if ((sc->sc_funcid & 0x01) == 1)
   3222 			do_invert = 1;
   3223 		break;
   3224 	}
   3225 
   3226 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3227 		goto bad;
   3228 
   3229 	enaddr[0] = myea[0] & 0xff;
   3230 	enaddr[1] = myea[0] >> 8;
   3231 	enaddr[2] = myea[1] & 0xff;
   3232 	enaddr[3] = myea[1] >> 8;
   3233 	enaddr[4] = myea[2] & 0xff;
   3234 	enaddr[5] = myea[2] >> 8;
   3235 
   3236 	/*
   3237 	 * Toggle the LSB of the MAC address on the second port
   3238 	 * of some dual port cards.
   3239 	 */
   3240 	if (do_invert != 0)
   3241 		enaddr[5] ^= 1;
   3242 
   3243 	return 0;
   3244 
   3245  bad:
   3246 	return -1;
   3247 }
   3248 
   3249 /*
   3250  * wm_set_ral:
   3251  *
 *	Set an entry in the receive address list.
   3253  */
   3254 static void
   3255 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3256 {
   3257 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3258 	uint32_t wlock_mac;
   3259 	int rv;
   3260 
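	/*
	 * RAL holds the low 32 bits of the MAC address and RAH the high
	 * 16 bits, together with the Address Valid bit.
	 */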
   3261 	if (enaddr != NULL) {
   3262 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3263 		    (enaddr[3] << 24);
   3264 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3265 		ral_hi |= RAL_AV;
   3266 	} else {
   3267 		ral_lo = 0;
   3268 		ral_hi = 0;
   3269 	}
   3270 
   3271 	switch (sc->sc_type) {
   3272 	case WM_T_82542_2_0:
   3273 	case WM_T_82542_2_1:
   3274 	case WM_T_82543:
   3275 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3276 		CSR_WRITE_FLUSH(sc);
   3277 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3278 		CSR_WRITE_FLUSH(sc);
   3279 		break;
   3280 	case WM_T_PCH2:
   3281 	case WM_T_PCH_LPT:
   3282 	case WM_T_PCH_SPT:
   3283 		if (idx == 0) {
   3284 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3285 			CSR_WRITE_FLUSH(sc);
   3286 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3287 			CSR_WRITE_FLUSH(sc);
   3288 			return;
   3289 		}
   3290 		if (sc->sc_type != WM_T_PCH2) {
   3291 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3292 			    FWSM_WLOCK_MAC);
   3293 			addrl = WMREG_SHRAL(idx - 1);
   3294 			addrh = WMREG_SHRAH(idx - 1);
   3295 		} else {
   3296 			wlock_mac = 0;
   3297 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3298 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3299 		}
   3300 
   3301 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3302 			rv = wm_get_swflag_ich8lan(sc);
   3303 			if (rv != 0)
   3304 				return;
   3305 			CSR_WRITE(sc, addrl, ral_lo);
   3306 			CSR_WRITE_FLUSH(sc);
   3307 			CSR_WRITE(sc, addrh, ral_hi);
   3308 			CSR_WRITE_FLUSH(sc);
   3309 			wm_put_swflag_ich8lan(sc);
   3310 		}
   3311 
   3312 		break;
   3313 	default:
   3314 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3315 		CSR_WRITE_FLUSH(sc);
   3316 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3317 		CSR_WRITE_FLUSH(sc);
   3318 		break;
   3319 	}
   3320 }
   3321 
   3322 /*
   3323  * wm_mchash:
   3324  *
   3325  *	Compute the hash of the multicast address for the 4096-bit
   3326  *	multicast filter.
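 *	(1024-bit on the ICH/PCH variants).  The shift tables select
 *	which bits of the two high-order address octets feed the hash,
 *	according to the filter offset stored in sc_mchash_type.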
   3327  */
   3328 static uint32_t
   3329 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3330 {
   3331 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3332 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3333 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3334 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3335 	uint32_t hash;
   3336 
   3337 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3338 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3339 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3340 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3341 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3342 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3343 		return (hash & 0x3ff);
   3344 	}
   3345 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3346 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3347 
   3348 	return (hash & 0xfff);
   3349 }
   3350 
   3351 /*
   3352  * wm_set_filter:
   3353  *
   3354  *	Set up the receive filter.
   3355  */
   3356 static void
   3357 wm_set_filter(struct wm_softc *sc)
   3358 {
   3359 	struct ethercom *ec = &sc->sc_ethercom;
   3360 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3361 	struct ether_multi *enm;
   3362 	struct ether_multistep step;
   3363 	bus_addr_t mta_reg;
   3364 	uint32_t hash, reg, bit;
   3365 	int i, size, ralmax;
   3366 
   3367 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3368 		device_xname(sc->sc_dev), __func__));
   3369 
   3370 	if (sc->sc_type >= WM_T_82544)
   3371 		mta_reg = WMREG_CORDOVA_MTA;
   3372 	else
   3373 		mta_reg = WMREG_MTA;
   3374 
   3375 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3376 
   3377 	if (ifp->if_flags & IFF_BROADCAST)
   3378 		sc->sc_rctl |= RCTL_BAM;
   3379 	if (ifp->if_flags & IFF_PROMISC) {
   3380 		sc->sc_rctl |= RCTL_UPE;
   3381 		goto allmulti;
   3382 	}
   3383 
   3384 	/*
   3385 	 * Set the station address in the first RAL slot, and
   3386 	 * clear the remaining slots.
   3387 	 */
   3388 	if (sc->sc_type == WM_T_ICH8)
		size = WM_RAL_TABSIZE_ICH8 - 1;
   3390 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3391 	    || (sc->sc_type == WM_T_PCH))
   3392 		size = WM_RAL_TABSIZE_ICH8;
   3393 	else if (sc->sc_type == WM_T_PCH2)
   3394 		size = WM_RAL_TABSIZE_PCH2;
	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3396 		size = WM_RAL_TABSIZE_PCH_LPT;
   3397 	else if (sc->sc_type == WM_T_82575)
   3398 		size = WM_RAL_TABSIZE_82575;
   3399 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3400 		size = WM_RAL_TABSIZE_82576;
   3401 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3402 		size = WM_RAL_TABSIZE_I350;
   3403 	else
   3404 		size = WM_RAL_TABSIZE;
   3405 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3406 
   3407 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3408 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3409 		switch (i) {
   3410 		case 0:
   3411 			/* We can use all entries */
   3412 			ralmax = size;
   3413 			break;
   3414 		case 1:
   3415 			/* Only RAR[0] */
   3416 			ralmax = 1;
   3417 			break;
   3418 		default:
   3419 			/* available SHRA + RAR[0] */
   3420 			ralmax = i + 1;
   3421 		}
   3422 	} else
   3423 		ralmax = size;
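	/*
	 * Clear the remaining writable entries; entries at or above
	 * ralmax are locked by firmware and must be left untouched.
	 */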
   3424 	for (i = 1; i < size; i++) {
   3425 		if (i < ralmax)
   3426 			wm_set_ral(sc, NULL, i);
   3427 	}
   3428 
   3429 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3430 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3431 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3432 	    || (sc->sc_type == WM_T_PCH_SPT))
   3433 		size = WM_ICH8_MC_TABSIZE;
   3434 	else
   3435 		size = WM_MC_TABSIZE;
   3436 	/* Clear out the multicast table. */
   3437 	for (i = 0; i < size; i++) {
   3438 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3439 		CSR_WRITE_FLUSH(sc);
   3440 	}
   3441 
   3442 	ETHER_LOCK(ec);
   3443 	ETHER_FIRST_MULTI(step, ec, enm);
   3444 	while (enm != NULL) {
   3445 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3446 			ETHER_UNLOCK(ec);
   3447 			/*
   3448 			 * We must listen to a range of multicast addresses.
   3449 			 * For now, just accept all multicasts, rather than
   3450 			 * trying to set only those filter bits needed to match
   3451 			 * the range.  (At this time, the only use of address
   3452 			 * ranges is for IP multicast routing, for which the
   3453 			 * range is big enough to require all bits set.)
   3454 			 */
   3455 			goto allmulti;
   3456 		}
   3457 
   3458 		hash = wm_mchash(sc, enm->enm_addrlo);
   3459 
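		/*
		 * The hash picks one bit in the multicast table: the
		 * upper bits select the 32-bit MTA register and the low
		 * five bits the bit within it.
		 */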
   3460 		reg = (hash >> 5);
   3461 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3462 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3463 		    || (sc->sc_type == WM_T_PCH2)
   3464 		    || (sc->sc_type == WM_T_PCH_LPT)
   3465 		    || (sc->sc_type == WM_T_PCH_SPT))
   3466 			reg &= 0x1f;
   3467 		else
   3468 			reg &= 0x7f;
   3469 		bit = hash & 0x1f;
   3470 
   3471 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3472 		hash |= 1U << bit;
   3473 
   3474 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3475 			/*
   3476 			 * 82544 Errata 9: Certain register cannot be written
   3477 			 * with particular alignments in PCI-X bus operation
   3478 			 * (FCAH, MTA and VFTA).
   3479 			 */
   3480 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3481 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3482 			CSR_WRITE_FLUSH(sc);
   3483 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3484 			CSR_WRITE_FLUSH(sc);
   3485 		} else {
   3486 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3487 			CSR_WRITE_FLUSH(sc);
   3488 		}
   3489 
   3490 		ETHER_NEXT_MULTI(step, enm);
   3491 	}
   3492 	ETHER_UNLOCK(ec);
   3493 
   3494 	ifp->if_flags &= ~IFF_ALLMULTI;
   3495 	goto setit;
   3496 
   3497  allmulti:
   3498 	ifp->if_flags |= IFF_ALLMULTI;
   3499 	sc->sc_rctl |= RCTL_MPE;
   3500 
   3501  setit:
   3502 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3503 }
   3504 
   3505 /* Reset and init related */
   3506 
   3507 static void
   3508 wm_set_vlan(struct wm_softc *sc)
   3509 {
   3510 
   3511 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3512 		device_xname(sc->sc_dev), __func__));
   3513 
   3514 	/* Deal with VLAN enables. */
   3515 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3516 		sc->sc_ctrl |= CTRL_VME;
   3517 	else
   3518 		sc->sc_ctrl &= ~CTRL_VME;
   3519 
   3520 	/* Write the control registers. */
   3521 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3522 }
   3523 
   3524 static void
   3525 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3526 {
   3527 	uint32_t gcr;
   3528 	pcireg_t ctrl2;
   3529 
   3530 	gcr = CSR_READ(sc, WMREG_GCR);
   3531 
   3532 	/* Only take action if timeout value is defaulted to 0 */
   3533 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3534 		goto out;
   3535 
   3536 	if ((gcr & GCR_CAP_VER2) == 0) {
   3537 		gcr |= GCR_CMPL_TMOUT_10MS;
   3538 		goto out;
   3539 	}
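	/*
	 * For PCIe capability version 2 devices, the completion timeout
	 * is set via the Device Control 2 register instead.
	 */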
   3540 
   3541 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3542 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3543 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3544 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3545 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3546 
   3547 out:
   3548 	/* Disable completion timeout resend */
   3549 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3550 
   3551 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3552 }
   3553 
   3554 void
   3555 wm_get_auto_rd_done(struct wm_softc *sc)
   3556 {
   3557 	int i;
   3558 
	/* Wait for eeprom to reload */
   3560 	switch (sc->sc_type) {
   3561 	case WM_T_82571:
   3562 	case WM_T_82572:
   3563 	case WM_T_82573:
   3564 	case WM_T_82574:
   3565 	case WM_T_82583:
   3566 	case WM_T_82575:
   3567 	case WM_T_82576:
   3568 	case WM_T_82580:
   3569 	case WM_T_I350:
   3570 	case WM_T_I354:
   3571 	case WM_T_I210:
   3572 	case WM_T_I211:
   3573 	case WM_T_80003:
   3574 	case WM_T_ICH8:
   3575 	case WM_T_ICH9:
   3576 		for (i = 0; i < 10; i++) {
   3577 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3578 				break;
   3579 			delay(1000);
   3580 		}
   3581 		if (i == 10) {
   3582 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3583 			    "complete\n", device_xname(sc->sc_dev));
   3584 		}
   3585 		break;
   3586 	default:
   3587 		break;
   3588 	}
   3589 }
   3590 
   3591 void
   3592 wm_lan_init_done(struct wm_softc *sc)
   3593 {
   3594 	uint32_t reg = 0;
   3595 	int i;
   3596 
   3597 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3598 		device_xname(sc->sc_dev), __func__));
   3599 
   3600 	/* Wait for eeprom to reload */
   3601 	switch (sc->sc_type) {
   3602 	case WM_T_ICH10:
   3603 	case WM_T_PCH:
   3604 	case WM_T_PCH2:
   3605 	case WM_T_PCH_LPT:
   3606 	case WM_T_PCH_SPT:
   3607 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3608 			reg = CSR_READ(sc, WMREG_STATUS);
   3609 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3610 				break;
   3611 			delay(100);
   3612 		}
   3613 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3614 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3615 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3616 		}
   3617 		break;
   3618 	default:
   3619 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3620 		    __func__);
   3621 		break;
   3622 	}
   3623 
   3624 	reg &= ~STATUS_LAN_INIT_DONE;
   3625 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3626 }
   3627 
   3628 void
   3629 wm_get_cfg_done(struct wm_softc *sc)
   3630 {
   3631 	int mask;
   3632 	uint32_t reg;
   3633 	int i;
   3634 
   3635 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3636 		device_xname(sc->sc_dev), __func__));
   3637 
   3638 	/* Wait for eeprom to reload */
   3639 	switch (sc->sc_type) {
   3640 	case WM_T_82542_2_0:
   3641 	case WM_T_82542_2_1:
   3642 		/* null */
   3643 		break;
   3644 	case WM_T_82543:
   3645 	case WM_T_82544:
   3646 	case WM_T_82540:
   3647 	case WM_T_82545:
   3648 	case WM_T_82545_3:
   3649 	case WM_T_82546:
   3650 	case WM_T_82546_3:
   3651 	case WM_T_82541:
   3652 	case WM_T_82541_2:
   3653 	case WM_T_82547:
   3654 	case WM_T_82547_2:
   3655 	case WM_T_82573:
   3656 	case WM_T_82574:
   3657 	case WM_T_82583:
   3658 		/* generic */
   3659 		delay(10*1000);
   3660 		break;
   3661 	case WM_T_80003:
   3662 	case WM_T_82571:
   3663 	case WM_T_82572:
   3664 	case WM_T_82575:
   3665 	case WM_T_82576:
   3666 	case WM_T_82580:
   3667 	case WM_T_I350:
   3668 	case WM_T_I354:
   3669 	case WM_T_I210:
   3670 	case WM_T_I211:
   3671 		if (sc->sc_type == WM_T_82571) {
   3672 			/* Only 82571 shares port 0 */
   3673 			mask = EEMNGCTL_CFGDONE_0;
   3674 		} else
   3675 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3676 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3677 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3678 				break;
   3679 			delay(1000);
   3680 		}
   3681 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3682 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3683 				device_xname(sc->sc_dev), __func__));
   3684 		}
   3685 		break;
   3686 	case WM_T_ICH8:
   3687 	case WM_T_ICH9:
   3688 	case WM_T_ICH10:
   3689 	case WM_T_PCH:
   3690 	case WM_T_PCH2:
   3691 	case WM_T_PCH_LPT:
   3692 	case WM_T_PCH_SPT:
   3693 		delay(10*1000);
   3694 		if (sc->sc_type >= WM_T_ICH10)
   3695 			wm_lan_init_done(sc);
   3696 		else
   3697 			wm_get_auto_rd_done(sc);
   3698 
   3699 		reg = CSR_READ(sc, WMREG_STATUS);
   3700 		if ((reg & STATUS_PHYRA) != 0)
   3701 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3702 		break;
   3703 	default:
   3704 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3705 		    __func__);
   3706 		break;
   3707 	}
   3708 }
   3709 
   3710 void
   3711 wm_phy_post_reset(struct wm_softc *sc)
   3712 {
   3713 	uint32_t reg;
   3714 
   3715 	/* This function is only for ICH8 and newer. */
   3716 	if (sc->sc_type < WM_T_ICH8)
   3717 		return;
   3718 
   3719 	if (wm_phy_resetisblocked(sc)) {
   3720 		/* XXX */
   3721 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3722 		return;
   3723 	}
   3724 
   3725 	/* Allow time for h/w to get to quiescent state after reset */
   3726 	delay(10*1000);
   3727 
   3728 	/* Perform any necessary post-reset workarounds */
   3729 	if (sc->sc_type == WM_T_PCH)
   3730 		wm_hv_phy_workaround_ich8lan(sc);
   3731 	if (sc->sc_type == WM_T_PCH2)
   3732 		wm_lv_phy_workaround_ich8lan(sc);
   3733 
   3734 	/* Clear the host wakeup bit after lcd reset */
   3735 	if (sc->sc_type >= WM_T_PCH) {
   3736 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3737 		    BM_PORT_GEN_CFG);
   3738 		reg &= ~BM_WUC_HOST_WU_BIT;
   3739 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3740 		    BM_PORT_GEN_CFG, reg);
   3741 	}
   3742 
   3743 	/* Configure the LCD with the extended configuration region in NVM */
   3744 	wm_init_lcd_from_nvm(sc);
   3745 
   3746 	/* Configure the LCD with the OEM bits in NVM */
   3747 }
   3748 
   3749 /* Only for PCH and newer */
   3750 static void
   3751 wm_write_smbus_addr(struct wm_softc *sc)
   3752 {
   3753 	uint32_t strap, freq;
   3754 	uint32_t phy_data;
   3755 
   3756 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3757 		device_xname(sc->sc_dev), __func__));
   3758 
   3759 	strap = CSR_READ(sc, WMREG_STRAP);
   3760 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3761 
   3762 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3763 
   3764 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3765 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3766 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3767 
   3768 	if (sc->sc_phytype == WMPHY_I217) {
   3769 		/* Restore SMBus frequency */
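		/*
		 * The two-bit STRAP_FREQ field encodes the frequency
		 * plus one; a raw value of zero means unsupported.
		 */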
		if (freq--) {
   3771 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3772 			    | HV_SMB_ADDR_FREQ_HIGH);
   3773 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3774 			    HV_SMB_ADDR_FREQ_LOW);
   3775 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3776 			    HV_SMB_ADDR_FREQ_HIGH);
   3777 		} else {
   3778 			DPRINTF(WM_DEBUG_INIT,
   3779 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3780 				device_xname(sc->sc_dev), __func__));
   3781 		}
   3782 	}
   3783 
   3784 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3785 }
   3786 
   3787 void
   3788 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3789 {
   3790 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3791 	uint16_t phy_page = 0;
   3792 
   3793 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3794 		device_xname(sc->sc_dev), __func__));
   3795 
   3796 	switch (sc->sc_type) {
   3797 	case WM_T_ICH8:
   3798 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3799 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3800 			return;
   3801 
   3802 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3803 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3804 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3805 			break;
   3806 		}
   3807 		/* FALLTHROUGH */
   3808 	case WM_T_PCH:
   3809 	case WM_T_PCH2:
   3810 	case WM_T_PCH_LPT:
   3811 	case WM_T_PCH_SPT:
   3812 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3813 		break;
   3814 	default:
   3815 		return;
   3816 	}
   3817 
   3818 	sc->phy.acquire(sc);
   3819 
   3820 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3821 	if ((reg & sw_cfg_mask) == 0)
   3822 		goto release;
   3823 
   3824 	/*
   3825 	 * Make sure HW does not configure LCD from PHY extended configuration
   3826 	 * before SW configuration
   3827 	 */
   3828 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3829 	if ((sc->sc_type < WM_T_PCH2)
   3830 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3831 		goto release;
   3832 
   3833 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3834 		device_xname(sc->sc_dev), __func__));
	/* The NVM pointer is in DWORDs; convert it to a word address */
   3836 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3837 
   3838 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3839 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3840 
   3841 	if (((sc->sc_type == WM_T_PCH)
   3842 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3843 	    || (sc->sc_type > WM_T_PCH)) {
   3844 		/*
   3845 		 * HW configures the SMBus address and LEDs when the OEM and
   3846 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3847 		 * are cleared, SW will configure them instead.
   3848 		 */
   3849 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3850 			device_xname(sc->sc_dev), __func__));
   3851 		wm_write_smbus_addr(sc);
   3852 
   3853 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3854 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3855 	}
   3856 
   3857 	/* Configure LCD from extended configuration region. */
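	/*
	 * Each entry is a pair of NVM words: the register data followed
	 * by the PHY register address.
	 */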
   3858 	for (i = 0; i < cnf_size; i++) {
   3859 		uint16_t reg_data, reg_addr;
   3860 
   3861 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3862 			goto release;
   3863 
		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
   3865 			goto release;
   3866 
   3867 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3868 			phy_page = reg_data;
   3869 
   3870 		reg_addr &= IGPHY_MAXREGADDR;
   3871 		reg_addr |= phy_page;
   3872 
   3873 		sc->phy.release(sc); /* XXX */
   3874 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3875 		sc->phy.acquire(sc); /* XXX */
   3876 	}
   3877 
   3878 release:
   3879 	sc->phy.release(sc);
   3880 	return;
   3881 }
   3882 
   3883 
   3884 /* Init hardware bits */
   3885 void
   3886 wm_initialize_hardware_bits(struct wm_softc *sc)
   3887 {
   3888 	uint32_t tarc0, tarc1, reg;
   3889 
   3890 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3891 		device_xname(sc->sc_dev), __func__));
   3892 
   3893 	/* For 82571 variant, 80003 and ICHs */
   3894 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3895 	    || (sc->sc_type >= WM_T_80003)) {
   3896 
   3897 		/* Transmit Descriptor Control 0 */
   3898 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3899 		reg |= TXDCTL_COUNT_DESC;
   3900 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3901 
   3902 		/* Transmit Descriptor Control 1 */
   3903 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3904 		reg |= TXDCTL_COUNT_DESC;
   3905 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3906 
   3907 		/* TARC0 */
   3908 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3909 		switch (sc->sc_type) {
   3910 		case WM_T_82571:
   3911 		case WM_T_82572:
   3912 		case WM_T_82573:
   3913 		case WM_T_82574:
   3914 		case WM_T_82583:
   3915 		case WM_T_80003:
   3916 			/* Clear bits 30..27 */
   3917 			tarc0 &= ~__BITS(30, 27);
   3918 			break;
   3919 		default:
   3920 			break;
   3921 		}
   3922 
   3923 		switch (sc->sc_type) {
   3924 		case WM_T_82571:
   3925 		case WM_T_82572:
   3926 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3927 
   3928 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3929 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3930 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3931 			/* 8257[12] Errata No.7 */
			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3933 
   3934 			/* TARC1 bit 28 */
   3935 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3936 				tarc1 &= ~__BIT(28);
   3937 			else
   3938 				tarc1 |= __BIT(28);
   3939 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3940 
   3941 			/*
   3942 			 * 8257[12] Errata No.13
			 * Disable Dynamic Clock Gating.
   3944 			 */
   3945 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3946 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3947 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3948 			break;
   3949 		case WM_T_82573:
   3950 		case WM_T_82574:
   3951 		case WM_T_82583:
   3952 			if ((sc->sc_type == WM_T_82574)
   3953 			    || (sc->sc_type == WM_T_82583))
   3954 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3955 
   3956 			/* Extended Device Control */
   3957 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3958 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3959 			reg |= __BIT(22);	/* Set bit 22 */
   3960 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3961 
   3962 			/* Device Control */
   3963 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3964 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3965 
   3966 			/* PCIe Control Register */
   3967 			/*
   3968 			 * 82573 Errata (unknown).
   3969 			 *
   3970 			 * 82574 Errata 25 and 82583 Errata 12
   3971 			 * "Dropped Rx Packets":
			 *   NVM image version 2.1.4 and newer does not have this bug.
   3973 			 */
   3974 			reg = CSR_READ(sc, WMREG_GCR);
   3975 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3976 			CSR_WRITE(sc, WMREG_GCR, reg);
   3977 
   3978 			if ((sc->sc_type == WM_T_82574)
   3979 			    || (sc->sc_type == WM_T_82583)) {
   3980 				/*
   3981 				 * Document says this bit must be set for
   3982 				 * proper operation.
   3983 				 */
   3984 				reg = CSR_READ(sc, WMREG_GCR);
   3985 				reg |= __BIT(22);
   3986 				CSR_WRITE(sc, WMREG_GCR, reg);
   3987 
   3988 				/*
				 * Apply a workaround for a documented
				 * hardware erratum: error-prone or
				 * unreliable PCIe completions can occur,
				 * particularly with ASPM enabled.  Without
				 * the fix, this can cause Tx timeouts.
   3995 				 */
   3996 				reg = CSR_READ(sc, WMREG_GCR2);
   3997 				reg |= __BIT(0);
   3998 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3999 			}
   4000 			break;
   4001 		case WM_T_80003:
   4002 			/* TARC0 */
   4003 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4004 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4006 
   4007 			/* TARC1 bit 28 */
   4008 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4009 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4010 				tarc1 &= ~__BIT(28);
   4011 			else
   4012 				tarc1 |= __BIT(28);
   4013 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4014 			break;
   4015 		case WM_T_ICH8:
   4016 		case WM_T_ICH9:
   4017 		case WM_T_ICH10:
   4018 		case WM_T_PCH:
   4019 		case WM_T_PCH2:
   4020 		case WM_T_PCH_LPT:
   4021 		case WM_T_PCH_SPT:
   4022 			/* TARC0 */
   4023 			if ((sc->sc_type == WM_T_ICH8)
   4024 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   4025 				/* Set TARC0 bits 29 and 28 */
   4026 				tarc0 |= __BITS(29, 28);
   4027 			}
   4028 			/* Set TARC0 bits 23,24,26,27 */
   4029 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4030 
   4031 			/* CTRL_EXT */
   4032 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4033 			reg |= __BIT(22);	/* Set bit 22 */
   4034 			/*
   4035 			 * Enable PHY low-power state when MAC is at D3
   4036 			 * w/o WoL
   4037 			 */
   4038 			if (sc->sc_type >= WM_T_PCH)
   4039 				reg |= CTRL_EXT_PHYPDEN;
   4040 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4041 
   4042 			/* TARC1 */
   4043 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4044 			/* bit 28 */
   4045 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4046 				tarc1 &= ~__BIT(28);
   4047 			else
   4048 				tarc1 |= __BIT(28);
   4049 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4050 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4051 
   4052 			/* Device Status */
   4053 			if (sc->sc_type == WM_T_ICH8) {
   4054 				reg = CSR_READ(sc, WMREG_STATUS);
   4055 				reg &= ~__BIT(31);
   4056 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4057 
   4058 			}
   4059 
   4060 			/* IOSFPC */
   4061 			if (sc->sc_type == WM_T_PCH_SPT) {
   4062 				reg = CSR_READ(sc, WMREG_IOSFPC);
				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4064 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4065 			}
   4066 			/*
			 * To work around a descriptor data corruption issue
			 * with NFSv2 UDP traffic, just disable the NFS
			 * filtering capability.
   4070 			 */
   4071 			reg = CSR_READ(sc, WMREG_RFCTL);
   4072 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4073 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4074 			break;
   4075 		default:
   4076 			break;
   4077 		}
   4078 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4079 
   4080 		switch (sc->sc_type) {
   4081 		/*
   4082 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4083 		 * Avoid RSS Hash Value bug.
   4084 		 */
   4085 		case WM_T_82571:
   4086 		case WM_T_82572:
   4087 		case WM_T_82573:
   4088 		case WM_T_80003:
   4089 		case WM_T_ICH8:
   4090 			reg = CSR_READ(sc, WMREG_RFCTL);
			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4092 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4093 			break;
   4094 		case WM_T_82574:
			/* Use extended Rx descriptors. */
   4096 			reg = CSR_READ(sc, WMREG_RFCTL);
   4097 			reg |= WMREG_RFCTL_EXSTEN;
   4098 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4099 			break;
   4100 		default:
   4101 			break;
   4102 		}
   4103 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4104 		/*
   4105 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4106 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4107 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4108 		 * Correctly by the Device"
   4109 		 *
   4110 		 * I354(C2000) Errata AVR53:
   4111 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4112 		 * Hang"
   4113 		 */
   4114 		reg = CSR_READ(sc, WMREG_RFCTL);
   4115 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4116 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4117 	}
   4118 }
   4119 
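/*
 * wm_rxpbs_adjust_82580:
 *
 *	Translate the raw RXPBS register value into a packet buffer size
 *	using the wm_82580_rxpbs_table lookup table.
 */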
   4120 static uint32_t
   4121 wm_rxpbs_adjust_82580(uint32_t val)
   4122 {
   4123 	uint32_t rv = 0;
   4124 
   4125 	if (val < __arraycount(wm_82580_rxpbs_table))
   4126 		rv = wm_82580_rxpbs_table[val];
   4127 
   4128 	return rv;
   4129 }
   4130 
   4131 /*
   4132  * wm_reset_phy:
   4133  *
   4134  *	generic PHY reset function.
   4135  *	Same as e1000_phy_hw_reset_generic()
   4136  */
   4137 static void
   4138 wm_reset_phy(struct wm_softc *sc)
   4139 {
   4140 	uint32_t reg;
   4141 
   4142 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4143 		device_xname(sc->sc_dev), __func__));
   4144 	if (wm_phy_resetisblocked(sc))
   4145 		return;
   4146 
   4147 	sc->phy.acquire(sc);
   4148 
   4149 	reg = CSR_READ(sc, WMREG_CTRL);
   4150 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4151 	CSR_WRITE_FLUSH(sc);
   4152 
   4153 	delay(sc->phy.reset_delay_us);
   4154 
   4155 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4156 	CSR_WRITE_FLUSH(sc);
   4157 
   4158 	delay(150);
   4159 
   4160 	sc->phy.release(sc);
   4161 
   4162 	wm_get_cfg_done(sc);
   4163 	wm_phy_post_reset(sc);
   4164 }
   4165 
   4166 static void
   4167 wm_flush_desc_rings(struct wm_softc *sc)
   4168 {
   4169 	pcireg_t preg;
   4170 	uint32_t reg;
   4171 	struct wm_txqueue *txq;
   4172 	wiseman_txdesc_t *txd;
   4173 	int nexttx;
   4174 	uint32_t rctl;
   4175 
   4176 	/* First, disable MULR fix in FEXTNVM11 */
   4177 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4178 	reg |= FEXTNVM11_DIS_MULRFIX;
   4179 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4180 
   4181 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4182 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4183 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4184 		return;
   4185 
   4186 	/* TX */
   4187 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4188 	    device_xname(sc->sc_dev), preg, reg);
   4189 	reg = CSR_READ(sc, WMREG_TCTL);
   4190 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4191 
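	/*
	 * Queue a single dummy descriptor (IFCS, 512 bytes) and advance
	 * the tail pointer so that the hardware drains the stuck ring.
	 */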
   4192 	txq = &sc->sc_queue[0].wmq_txq;
   4193 	nexttx = txq->txq_next;
   4194 	txd = &txq->txq_descs[nexttx];
   4195 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4197 	txd->wtx_fields.wtxu_status = 0;
   4198 	txd->wtx_fields.wtxu_options = 0;
   4199 	txd->wtx_fields.wtxu_vlan = 0;
   4200 
   4201 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4202 	    BUS_SPACE_BARRIER_WRITE);
   4203 
   4204 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4205 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4206 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4207 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4208 	delay(250);
   4209 
   4210 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4211 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4212 		return;
   4213 
   4214 	/* RX */
   4215 	printf("%s: Need RX flush (reg = %08x)\n",
   4216 	    device_xname(sc->sc_dev), preg);
   4217 	rctl = CSR_READ(sc, WMREG_RCTL);
   4218 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4219 	CSR_WRITE_FLUSH(sc);
   4220 	delay(150);
   4221 
   4222 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4223 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4224 	reg &= 0xffffc000;
   4225 	/*
   4226 	 * update thresholds: prefetch threshold to 31, host threshold
   4227 	 * to 1 and make sure the granularity is "descriptors" and not
   4228 	 * "cache lines"
   4229 	 */
   4230 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4231 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4232 
   4233 	/*
   4234 	 * momentarily enable the RX ring for the changes to take
   4235 	 * effect
   4236 	 */
   4237 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4238 	CSR_WRITE_FLUSH(sc);
   4239 	delay(150);
   4240 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4241 }
   4242 
   4243 /*
   4244  * wm_reset:
   4245  *
   4246  *	Reset the i82542 chip.
   4247  */
   4248 static void
   4249 wm_reset(struct wm_softc *sc)
   4250 {
   4251 	int phy_reset = 0;
   4252 	int i, error = 0;
   4253 	uint32_t reg;
   4254 	uint16_t kmreg;
   4255 	int rv;
   4256 
   4257 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4258 		device_xname(sc->sc_dev), __func__));
   4259 	KASSERT(sc->sc_type != 0);
   4260 
   4261 	/*
   4262 	 * Allocate on-chip memory according to the MTU size.
   4263 	 * The Packet Buffer Allocation register must be written
   4264 	 * before the chip is reset.
   4265 	 */
   4266 	switch (sc->sc_type) {
   4267 	case WM_T_82547:
   4268 	case WM_T_82547_2:
   4269 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4270 		    PBA_22K : PBA_30K;
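		/*
		 * The Tx FIFO takes whatever is left of the 40KB packet
		 * buffer above the Rx allocation.
		 */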
   4271 		for (i = 0; i < sc->sc_nqueues; i++) {
   4272 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4273 			txq->txq_fifo_head = 0;
   4274 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4275 			txq->txq_fifo_size =
   4276 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4277 			txq->txq_fifo_stall = 0;
   4278 		}
   4279 		break;
   4280 	case WM_T_82571:
   4281 	case WM_T_82572:
	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4283 	case WM_T_80003:
   4284 		sc->sc_pba = PBA_32K;
   4285 		break;
   4286 	case WM_T_82573:
   4287 		sc->sc_pba = PBA_12K;
   4288 		break;
   4289 	case WM_T_82574:
   4290 	case WM_T_82583:
   4291 		sc->sc_pba = PBA_20K;
   4292 		break;
   4293 	case WM_T_82576:
   4294 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4295 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4296 		break;
   4297 	case WM_T_82580:
   4298 	case WM_T_I350:
   4299 	case WM_T_I354:
   4300 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4301 		break;
   4302 	case WM_T_I210:
   4303 	case WM_T_I211:
   4304 		sc->sc_pba = PBA_34K;
   4305 		break;
   4306 	case WM_T_ICH8:
   4307 		/* Workaround for a bit corruption issue in FIFO memory */
   4308 		sc->sc_pba = PBA_8K;
   4309 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4310 		break;
   4311 	case WM_T_ICH9:
   4312 	case WM_T_ICH10:
   4313 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4314 		    PBA_14K : PBA_10K;
   4315 		break;
   4316 	case WM_T_PCH:
   4317 	case WM_T_PCH2:
   4318 	case WM_T_PCH_LPT:
   4319 	case WM_T_PCH_SPT:
   4320 		sc->sc_pba = PBA_26K;
   4321 		break;
   4322 	default:
   4323 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4324 		    PBA_40K : PBA_48K;
   4325 		break;
   4326 	}
   4327 	/*
   4328 	 * Only old or non-multiqueue devices have the PBA register
   4329 	 * XXX Need special handling for 82575.
   4330 	 */
   4331 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4332 	    || (sc->sc_type == WM_T_82575))
   4333 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4334 
   4335 	/* Prevent the PCI-E bus from sticking */
   4336 	if (sc->sc_flags & WM_F_PCIE) {
   4337 		int timeout = 800;
   4338 
   4339 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4340 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4341 
   4342 		while (timeout--) {
   4343 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4344 			    == 0)
   4345 				break;
   4346 			delay(100);
   4347 		}
   4348 		if (timeout == 0)
   4349 			device_printf(sc->sc_dev,
   4350 			    "failed to disable busmastering\n");
   4351 	}
   4352 
   4353 	/* Set the completion timeout for interface */
   4354 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4355 	    || (sc->sc_type == WM_T_82580)
   4356 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4357 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4358 		wm_set_pcie_completion_timeout(sc);
   4359 
   4360 	/* Clear interrupt */
   4361 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4362 	if (wm_is_using_msix(sc)) {
   4363 		if (sc->sc_type != WM_T_82574) {
   4364 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4365 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4366 		} else {
   4367 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4368 		}
   4369 	}
   4370 
   4371 	/* Stop the transmit and receive processes. */
   4372 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4373 	sc->sc_rctl &= ~RCTL_EN;
   4374 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4375 	CSR_WRITE_FLUSH(sc);
   4376 
   4377 	/* XXX set_tbi_sbp_82543() */
   4378 
   4379 	delay(10*1000);
   4380 
   4381 	/* Must acquire the MDIO ownership before MAC reset */
   4382 	switch (sc->sc_type) {
   4383 	case WM_T_82573:
   4384 	case WM_T_82574:
   4385 	case WM_T_82583:
   4386 		error = wm_get_hw_semaphore_82573(sc);
   4387 		break;
   4388 	default:
   4389 		break;
   4390 	}
   4391 
   4392 	/*
   4393 	 * 82541 Errata 29? & 82547 Errata 28?
   4394 	 * See also the description about PHY_RST bit in CTRL register
   4395 	 * in 8254x_GBe_SDM.pdf.
   4396 	 */
   4397 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4398 		CSR_WRITE(sc, WMREG_CTRL,
   4399 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4400 		CSR_WRITE_FLUSH(sc);
   4401 		delay(5000);
   4402 	}
   4403 
   4404 	switch (sc->sc_type) {
   4405 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4406 	case WM_T_82541:
   4407 	case WM_T_82541_2:
   4408 	case WM_T_82547:
   4409 	case WM_T_82547_2:
   4410 		/*
   4411 		 * On some chipsets, a reset through a memory-mapped write
   4412 		 * cycle can cause the chip to reset before completing the
   4413 		 * write cycle.  This causes major headache that can be
   4414 		 * avoided by issuing the reset via indirect register writes
   4415 		 * through I/O space.
   4416 		 *
   4417 		 * So, if we successfully mapped the I/O BAR at attach time,
   4418 		 * use that.  Otherwise, try our luck with a memory-mapped
   4419 		 * reset.
   4420 		 */
   4421 		if (sc->sc_flags & WM_F_IOH_VALID)
   4422 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4423 		else
   4424 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4425 		break;
   4426 	case WM_T_82545_3:
   4427 	case WM_T_82546_3:
   4428 		/* Use the shadow control register on these chips. */
   4429 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4430 		break;
   4431 	case WM_T_80003:
   4432 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4433 		sc->phy.acquire(sc);
   4434 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4435 		sc->phy.release(sc);
   4436 		break;
   4437 	case WM_T_ICH8:
   4438 	case WM_T_ICH9:
   4439 	case WM_T_ICH10:
   4440 	case WM_T_PCH:
   4441 	case WM_T_PCH2:
   4442 	case WM_T_PCH_LPT:
   4443 	case WM_T_PCH_SPT:
   4444 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4445 		if (wm_phy_resetisblocked(sc) == false) {
   4446 			/*
   4447 			 * Gate automatic PHY configuration by hardware on
   4448 			 * non-managed 82579
   4449 			 */
   4450 			if ((sc->sc_type == WM_T_PCH2)
   4451 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4452 				== 0))
   4453 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4454 
   4455 			reg |= CTRL_PHY_RESET;
   4456 			phy_reset = 1;
   4457 		} else
   4458 			printf("XXX reset is blocked!!!\n");
   4459 		sc->phy.acquire(sc);
   4460 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
   4462 		delay(20*1000);
   4463 		mutex_exit(sc->sc_ich_phymtx);
   4464 		break;
   4465 	case WM_T_82580:
   4466 	case WM_T_I350:
   4467 	case WM_T_I354:
   4468 	case WM_T_I210:
   4469 	case WM_T_I211:
   4470 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4471 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4472 			CSR_WRITE_FLUSH(sc);
   4473 		delay(5000);
   4474 		break;
   4475 	case WM_T_82542_2_0:
   4476 	case WM_T_82542_2_1:
   4477 	case WM_T_82543:
   4478 	case WM_T_82540:
   4479 	case WM_T_82545:
   4480 	case WM_T_82546:
   4481 	case WM_T_82571:
   4482 	case WM_T_82572:
   4483 	case WM_T_82573:
   4484 	case WM_T_82574:
   4485 	case WM_T_82575:
   4486 	case WM_T_82576:
   4487 	case WM_T_82583:
   4488 	default:
   4489 		/* Everything else can safely use the documented method. */
   4490 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4491 		break;
   4492 	}
   4493 
   4494 	/* Must release the MDIO ownership after MAC reset */
   4495 	switch (sc->sc_type) {
   4496 	case WM_T_82573:
   4497 	case WM_T_82574:
   4498 	case WM_T_82583:
   4499 		if (error == 0)
   4500 			wm_put_hw_semaphore_82573(sc);
   4501 		break;
   4502 	default:
   4503 		break;
   4504 	}
   4505 
   4506 	if (phy_reset != 0)
   4507 		wm_get_cfg_done(sc);
   4508 
   4509 	/* reload EEPROM */
   4510 	switch (sc->sc_type) {
   4511 	case WM_T_82542_2_0:
   4512 	case WM_T_82542_2_1:
   4513 	case WM_T_82543:
   4514 	case WM_T_82544:
   4515 		delay(10);
   4516 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4517 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4518 		CSR_WRITE_FLUSH(sc);
   4519 		delay(2000);
   4520 		break;
   4521 	case WM_T_82540:
   4522 	case WM_T_82545:
   4523 	case WM_T_82545_3:
   4524 	case WM_T_82546:
   4525 	case WM_T_82546_3:
   4526 		delay(5*1000);
   4527 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4528 		break;
   4529 	case WM_T_82541:
   4530 	case WM_T_82541_2:
   4531 	case WM_T_82547:
   4532 	case WM_T_82547_2:
   4533 		delay(20000);
   4534 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4535 		break;
   4536 	case WM_T_82571:
   4537 	case WM_T_82572:
   4538 	case WM_T_82573:
   4539 	case WM_T_82574:
   4540 	case WM_T_82583:
   4541 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4542 			delay(10);
   4543 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4544 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4545 			CSR_WRITE_FLUSH(sc);
   4546 		}
   4547 		/* check EECD_EE_AUTORD */
   4548 		wm_get_auto_rd_done(sc);
   4549 		/*
    4550 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
   4551 		 * is set.
   4552 		 */
   4553 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4554 		    || (sc->sc_type == WM_T_82583))
   4555 			delay(25*1000);
   4556 		break;
   4557 	case WM_T_82575:
   4558 	case WM_T_82576:
   4559 	case WM_T_82580:
   4560 	case WM_T_I350:
   4561 	case WM_T_I354:
   4562 	case WM_T_I210:
   4563 	case WM_T_I211:
   4564 	case WM_T_80003:
   4565 		/* check EECD_EE_AUTORD */
   4566 		wm_get_auto_rd_done(sc);
   4567 		break;
   4568 	case WM_T_ICH8:
   4569 	case WM_T_ICH9:
   4570 	case WM_T_ICH10:
   4571 	case WM_T_PCH:
   4572 	case WM_T_PCH2:
   4573 	case WM_T_PCH_LPT:
   4574 	case WM_T_PCH_SPT:
   4575 		break;
   4576 	default:
   4577 		panic("%s: unknown type\n", __func__);
   4578 	}
   4579 
   4580 	/* Check whether EEPROM is present or not */
   4581 	switch (sc->sc_type) {
   4582 	case WM_T_82575:
   4583 	case WM_T_82576:
   4584 	case WM_T_82580:
   4585 	case WM_T_I350:
   4586 	case WM_T_I354:
   4587 	case WM_T_ICH8:
   4588 	case WM_T_ICH9:
   4589 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4590 			/* Not found */
   4591 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4592 			if (sc->sc_type == WM_T_82575)
   4593 				wm_reset_init_script_82575(sc);
   4594 		}
   4595 		break;
   4596 	default:
   4597 		break;
   4598 	}
   4599 
   4600 	if (phy_reset != 0)
   4601 		wm_phy_post_reset(sc);
   4602 
   4603 	if ((sc->sc_type == WM_T_82580)
   4604 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4605 		/* clear global device reset status bit */
   4606 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4607 	}
   4608 
   4609 	/* Clear any pending interrupt events. */
   4610 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4611 	reg = CSR_READ(sc, WMREG_ICR);
   4612 	if (wm_is_using_msix(sc)) {
   4613 		if (sc->sc_type != WM_T_82574) {
   4614 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4615 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4616 		} else
   4617 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4618 	}
   4619 
   4620 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4621 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4622 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4623 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4624 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4625 		reg |= KABGTXD_BGSQLBIAS;
   4626 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4627 	}
   4628 
   4629 	/* reload sc_ctrl */
   4630 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4631 
   4632 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4633 		wm_set_eee_i350(sc);
   4634 
   4635 	/*
   4636 	 * For PCH, this write will make sure that any noise will be detected
   4637 	 * as a CRC error and be dropped rather than show up as a bad packet
   4638 	 * to the DMA engine
   4639 	 */
   4640 	if (sc->sc_type == WM_T_PCH)
   4641 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4642 
   4643 	if (sc->sc_type >= WM_T_82544)
   4644 		CSR_WRITE(sc, WMREG_WUC, 0);
   4645 
   4646 	wm_reset_mdicnfg_82580(sc);
   4647 
   4648 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4649 		wm_pll_workaround_i210(sc);
   4650 
   4651 	if (sc->sc_type == WM_T_80003) {
   4652 		/* default to TRUE to enable the MDIC W/A */
   4653 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4654 
   4655 		rv = wm_kmrn_readreg(sc,
   4656 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4657 		if (rv == 0) {
   4658 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4659 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4660 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4661 			else
   4662 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4663 		}
   4664 	}
   4665 }
   4666 
   4667 /*
   4668  * wm_add_rxbuf:
   4669  *
    4670  *	Add a receive buffer to the indicated descriptor.
   4671  */
   4672 static int
   4673 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4674 {
   4675 	struct wm_softc *sc = rxq->rxq_sc;
   4676 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4677 	struct mbuf *m;
   4678 	int error;
   4679 
   4680 	KASSERT(mutex_owned(rxq->rxq_lock));
   4681 
   4682 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4683 	if (m == NULL)
   4684 		return ENOBUFS;
   4685 
   4686 	MCLGET(m, M_DONTWAIT);
   4687 	if ((m->m_flags & M_EXT) == 0) {
   4688 		m_freem(m);
   4689 		return ENOBUFS;
   4690 	}
   4691 
   4692 	if (rxs->rxs_mbuf != NULL)
   4693 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4694 
   4695 	rxs->rxs_mbuf = m;
   4696 
   4697 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4698 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4699 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4700 	if (error) {
   4701 		/* XXX XXX XXX */
   4702 		aprint_error_dev(sc->sc_dev,
   4703 		    "unable to load rx DMA map %d, error = %d\n",
   4704 		    idx, error);
   4705 		panic("wm_add_rxbuf");
   4706 	}
   4707 
   4708 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4709 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4710 
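         	/*
         	 * On NEWQUEUE chips, only touch the Rx descriptor while the
         	 * receiver is enabled; cf. the "set RDT only if RX enabled"
         	 * note in wm_init_locked().
         	 */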
   4711 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4712 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4713 			wm_init_rxdesc(rxq, idx);
   4714 	} else
   4715 		wm_init_rxdesc(rxq, idx);
   4716 
   4717 	return 0;
   4718 }
   4719 
   4720 /*
   4721  * wm_rxdrain:
   4722  *
   4723  *	Drain the receive queue.
   4724  */
   4725 static void
   4726 wm_rxdrain(struct wm_rxqueue *rxq)
   4727 {
   4728 	struct wm_softc *sc = rxq->rxq_sc;
   4729 	struct wm_rxsoft *rxs;
   4730 	int i;
   4731 
   4732 	KASSERT(mutex_owned(rxq->rxq_lock));
   4733 
   4734 	for (i = 0; i < WM_NRXDESC; i++) {
   4735 		rxs = &rxq->rxq_soft[i];
   4736 		if (rxs->rxs_mbuf != NULL) {
   4737 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4738 			m_freem(rxs->rxs_mbuf);
   4739 			rxs->rxs_mbuf = NULL;
   4740 		}
   4741 	}
   4742 }
   4743 
   4744 
   4745 /*
   4746  * XXX copy from FreeBSD's sys/net/rss_config.c
   4747  */
   4748 /*
   4749  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4750  * effectiveness may be limited by algorithm choice and available entropy
   4751  * during the boot.
   4752  *
   4753  * XXXRW: And that we don't randomize it yet!
   4754  *
   4755  * This is the default Microsoft RSS specification key which is also
   4756  * the Chelsio T5 firmware default key.
   4757  */
   4758 #define RSS_KEYSIZE 40
   4759 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4760 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4761 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4762 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4763 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4764 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4765 };
   4766 
   4767 /*
   4768  * Caller must pass an array of size sizeof(rss_key).
   4769  *
   4770  * XXX
    4771  * As if_ixgbe may use this function, it should not be an
    4772  * if_wm-specific function.
   4773  */
   4774 static void
   4775 wm_rss_getkey(uint8_t *key)
   4776 {
   4777 
   4778 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4779 }
   4780 
   4781 /*
   4782  * Setup registers for RSS.
   4783  *
    4784  * XXX VMDq is not yet supported.
   4785  */
   4786 static void
   4787 wm_init_rss(struct wm_softc *sc)
   4788 {
   4789 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4790 	int i;
   4791 
   4792 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4793 
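         	/*
         	 * Fill the RSS redirection table (RETA) round-robin: entry i
         	 * steers packets to queue (i % sc_nqueues), so with e.g. four
         	 * queues the entries repeat the pattern 0,1,2,3,0,1,2,3,...
         	 */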
   4794 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4795 		int qid, reta_ent;
   4796 
   4797 		qid  = i % sc->sc_nqueues;
   4798 		switch(sc->sc_type) {
   4799 		case WM_T_82574:
   4800 			reta_ent = __SHIFTIN(qid,
   4801 			    RETA_ENT_QINDEX_MASK_82574);
   4802 			break;
   4803 		case WM_T_82575:
   4804 			reta_ent = __SHIFTIN(qid,
   4805 			    RETA_ENT_QINDEX1_MASK_82575);
   4806 			break;
   4807 		default:
   4808 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4809 			break;
   4810 		}
   4811 
   4812 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4813 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4814 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4815 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4816 	}
   4817 
   4818 	wm_rss_getkey((uint8_t *)rss_key);
   4819 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4820 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4821 
   4822 	if (sc->sc_type == WM_T_82574)
   4823 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4824 	else
   4825 		mrqc = MRQC_ENABLE_RSS_MQ;
   4826 
   4827 	/*
   4828 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4829 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4830 	 */
   4831 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4832 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4833 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4834 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4835 
   4836 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4837 }
   4838 
   4839 /*
    4840  * Adjust the TX and RX queue numbers which the system actually uses.
    4841  *
    4842  * The numbers are affected by the parameters below:
    4843  *     - The number of hardware queues
   4844  *     - The number of MSI-X vectors (= "nvectors" argument)
   4845  *     - ncpu
   4846  */
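         /*
          * For example (illustrative numbers only): the 82576 exposes 16
          * hardware queues, so with nvectors == 5 and ncpu == 8 this
          * reduces to min(16, 5 - 1, 8) == 4 queues.
          */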
   4847 static void
   4848 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4849 {
   4850 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4851 
   4852 	if (nvectors < 2) {
   4853 		sc->sc_nqueues = 1;
   4854 		return;
   4855 	}
   4856 
   4857 	switch(sc->sc_type) {
   4858 	case WM_T_82572:
   4859 		hw_ntxqueues = 2;
   4860 		hw_nrxqueues = 2;
   4861 		break;
   4862 	case WM_T_82574:
   4863 		hw_ntxqueues = 2;
   4864 		hw_nrxqueues = 2;
   4865 		break;
   4866 	case WM_T_82575:
   4867 		hw_ntxqueues = 4;
   4868 		hw_nrxqueues = 4;
   4869 		break;
   4870 	case WM_T_82576:
   4871 		hw_ntxqueues = 16;
   4872 		hw_nrxqueues = 16;
   4873 		break;
   4874 	case WM_T_82580:
   4875 	case WM_T_I350:
   4876 	case WM_T_I354:
   4877 		hw_ntxqueues = 8;
   4878 		hw_nrxqueues = 8;
   4879 		break;
   4880 	case WM_T_I210:
   4881 		hw_ntxqueues = 4;
   4882 		hw_nrxqueues = 4;
   4883 		break;
   4884 	case WM_T_I211:
   4885 		hw_ntxqueues = 2;
   4886 		hw_nrxqueues = 2;
   4887 		break;
   4888 		/*
    4889 		 * As the ethernet controllers below do not support MSI-X,
    4890 		 * this driver does not use multiqueue on them.
   4891 		 *     - WM_T_80003
   4892 		 *     - WM_T_ICH8
   4893 		 *     - WM_T_ICH9
   4894 		 *     - WM_T_ICH10
   4895 		 *     - WM_T_PCH
   4896 		 *     - WM_T_PCH2
   4897 		 *     - WM_T_PCH_LPT
   4898 		 */
   4899 	default:
   4900 		hw_ntxqueues = 1;
   4901 		hw_nrxqueues = 1;
   4902 		break;
   4903 	}
   4904 
   4905 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4906 
   4907 	/*
    4908 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    4909 	 * the number of queues actually used.
   4910 	 */
   4911 	if (nvectors < hw_nqueues + 1) {
   4912 		sc->sc_nqueues = nvectors - 1;
   4913 	} else {
   4914 		sc->sc_nqueues = hw_nqueues;
   4915 	}
   4916 
   4917 	/*
    4918 	 * As more queues than CPUs cannot improve scaling, we limit
    4919 	 * the number of queues actually used.
   4920 	 */
   4921 	if (ncpu < sc->sc_nqueues)
   4922 		sc->sc_nqueues = ncpu;
   4923 }
   4924 
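         /*
          * MSI-X is in use iff more than one interrupt was established:
          * wm_setup_legacy() sets sc_nintrs to 1, whereas wm_setup_msix()
          * sets it to sc_nqueues + 1.
          */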
   4925 static inline bool
   4926 wm_is_using_msix(struct wm_softc *sc)
   4927 {
   4928 
   4929 	return (sc->sc_nintrs > 1);
   4930 }
   4931 
   4932 static inline bool
   4933 wm_is_using_multiqueue(struct wm_softc *sc)
   4934 {
   4935 
   4936 	return (sc->sc_nqueues > 1);
   4937 }
   4938 
   4939 static int
   4940 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4941 {
   4942 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4943 	wmq->wmq_id = qidx;
   4944 	wmq->wmq_intr_idx = intr_idx;
   4945 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4946 #ifdef WM_MPSAFE
   4947 	    | SOFTINT_MPSAFE
   4948 #endif
   4949 	    , wm_handle_queue, wmq);
   4950 	if (wmq->wmq_si != NULL)
   4951 		return 0;
   4952 
   4953 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4954 	    wmq->wmq_id);
   4955 
   4956 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4957 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4958 	return ENOMEM;
   4959 }
   4960 
   4961 /*
   4962  * Both single interrupt MSI and INTx can use this function.
   4963  */
   4964 static int
   4965 wm_setup_legacy(struct wm_softc *sc)
   4966 {
   4967 	pci_chipset_tag_t pc = sc->sc_pc;
   4968 	const char *intrstr = NULL;
   4969 	char intrbuf[PCI_INTRSTR_LEN];
   4970 	int error;
   4971 
   4972 	error = wm_alloc_txrx_queues(sc);
   4973 	if (error) {
   4974 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4975 		    error);
   4976 		return ENOMEM;
   4977 	}
   4978 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4979 	    sizeof(intrbuf));
   4980 #ifdef WM_MPSAFE
   4981 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4982 #endif
   4983 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4984 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4985 	if (sc->sc_ihs[0] == NULL) {
   4986 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4987 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4988 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4989 		return ENOMEM;
   4990 	}
   4991 
   4992 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4993 	sc->sc_nintrs = 1;
   4994 
   4995 	return wm_softint_establish(sc, 0, 0);
   4996 }
   4997 
   4998 static int
   4999 wm_setup_msix(struct wm_softc *sc)
   5000 {
   5001 	void *vih;
   5002 	kcpuset_t *affinity;
   5003 	int qidx, error, intr_idx, txrx_established;
   5004 	pci_chipset_tag_t pc = sc->sc_pc;
   5005 	const char *intrstr = NULL;
   5006 	char intrbuf[PCI_INTRSTR_LEN];
   5007 	char intr_xname[INTRDEVNAMEBUF];
   5008 
   5009 	if (sc->sc_nqueues < ncpu) {
   5010 		/*
    5011 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5012 		 * interrupts starts from CPU#1.
   5013 		 */
   5014 		sc->sc_affinity_offset = 1;
   5015 	} else {
   5016 		/*
    5017 		 * In this case, this device uses all CPUs, so for readability
    5018 		 * we make each affinitized cpu_index equal its MSI-X vector number.
   5019 		 */
   5020 		sc->sc_affinity_offset = 0;
   5021 	}
   5022 
   5023 	error = wm_alloc_txrx_queues(sc);
   5024 	if (error) {
   5025 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5026 		    error);
   5027 		return ENOMEM;
   5028 	}
   5029 
   5030 	kcpuset_create(&affinity, false);
   5031 	intr_idx = 0;
   5032 
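         	/*
         	 * MSI-X vector layout: vectors 0 .. sc_nqueues-1 serve the
         	 * Tx/Rx queue pairs and vector sc_nqueues serves the link
         	 * interrupt, so sc_nintrs ends up as sc_nqueues + 1.
         	 */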
   5033 	/*
   5034 	 * TX and RX
   5035 	 */
   5036 	txrx_established = 0;
   5037 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5038 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5039 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5040 
   5041 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5042 		    sizeof(intrbuf));
   5043 #ifdef WM_MPSAFE
   5044 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5045 		    PCI_INTR_MPSAFE, true);
   5046 #endif
   5047 		memset(intr_xname, 0, sizeof(intr_xname));
   5048 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5049 		    device_xname(sc->sc_dev), qidx);
   5050 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5051 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5052 		if (vih == NULL) {
   5053 			aprint_error_dev(sc->sc_dev,
   5054 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5055 			    intrstr ? " at " : "",
   5056 			    intrstr ? intrstr : "");
   5057 
   5058 			goto fail;
   5059 		}
   5060 		kcpuset_zero(affinity);
   5061 		/* Round-robin affinity */
   5062 		kcpuset_set(affinity, affinity_to);
   5063 		error = interrupt_distribute(vih, affinity, NULL);
   5064 		if (error == 0) {
   5065 			aprint_normal_dev(sc->sc_dev,
   5066 			    "for TX and RX interrupting at %s affinity to %u\n",
   5067 			    intrstr, affinity_to);
   5068 		} else {
   5069 			aprint_normal_dev(sc->sc_dev,
   5070 			    "for TX and RX interrupting at %s\n", intrstr);
   5071 		}
   5072 		sc->sc_ihs[intr_idx] = vih;
   5073 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5074 			goto fail;
   5075 		txrx_established++;
   5076 		intr_idx++;
   5077 	}
   5078 
   5079 	/*
   5080 	 * LINK
   5081 	 */
   5082 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5083 	    sizeof(intrbuf));
   5084 #ifdef WM_MPSAFE
   5085 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5086 #endif
   5087 	memset(intr_xname, 0, sizeof(intr_xname));
   5088 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5089 	    device_xname(sc->sc_dev));
   5090 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5091 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5092 	if (vih == NULL) {
   5093 		aprint_error_dev(sc->sc_dev,
   5094 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5095 		    intrstr ? " at " : "",
   5096 		    intrstr ? intrstr : "");
   5097 
   5098 		goto fail;
   5099 	}
    5100 	/* Keep the default affinity for the LINK interrupt */
   5101 	aprint_normal_dev(sc->sc_dev,
   5102 	    "for LINK interrupting at %s\n", intrstr);
   5103 	sc->sc_ihs[intr_idx] = vih;
   5104 	sc->sc_link_intr_idx = intr_idx;
   5105 
   5106 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5107 	kcpuset_destroy(affinity);
   5108 	return 0;
   5109 
   5110  fail:
   5111 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5112 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5113 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5114 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5115 	}
   5116 
   5117 	kcpuset_destroy(affinity);
   5118 	return ENOMEM;
   5119 }
   5120 
   5121 static void
   5122 wm_turnon(struct wm_softc *sc)
   5123 {
   5124 	int i;
   5125 
   5126 	KASSERT(WM_CORE_LOCKED(sc));
   5127 
   5128 	/*
   5129 	 * must unset stopping flags in ascending order.
   5130 	 */
   5131 	for(i = 0; i < sc->sc_nqueues; i++) {
   5132 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5133 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5134 
   5135 		mutex_enter(txq->txq_lock);
   5136 		txq->txq_stopping = false;
   5137 		mutex_exit(txq->txq_lock);
   5138 
   5139 		mutex_enter(rxq->rxq_lock);
   5140 		rxq->rxq_stopping = false;
   5141 		mutex_exit(rxq->rxq_lock);
   5142 	}
   5143 
   5144 	sc->sc_core_stopping = false;
   5145 }
   5146 
   5147 static void
   5148 wm_turnoff(struct wm_softc *sc)
   5149 {
   5150 	int i;
   5151 
   5152 	KASSERT(WM_CORE_LOCKED(sc));
   5153 
   5154 	sc->sc_core_stopping = true;
   5155 
   5156 	/*
   5157 	 * must set stopping flags in ascending order.
   5158 	 */
   5159 	for(i = 0; i < sc->sc_nqueues; i++) {
   5160 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5161 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5162 
   5163 		mutex_enter(rxq->rxq_lock);
   5164 		rxq->rxq_stopping = true;
   5165 		mutex_exit(rxq->rxq_lock);
   5166 
   5167 		mutex_enter(txq->txq_lock);
   5168 		txq->txq_stopping = true;
   5169 		mutex_exit(txq->txq_lock);
   5170 	}
   5171 }
   5172 
   5173 /*
   5174  * write interrupt interval value to ITR or EITR
   5175  */
   5176 static void
   5177 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5178 {
   5179 
   5180 	if (!wmq->wmq_set_itr)
   5181 		return;
   5182 
   5183 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5184 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5185 
   5186 		/*
    5187 		 * The 82575 doesn't have the CNT_INGR field,
    5188 		 * so overwrite the counter field by software.
   5189 		 */
   5190 		if (sc->sc_type == WM_T_82575)
   5191 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5192 		else
   5193 			eitr |= EITR_CNT_INGR;
   5194 
   5195 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5196 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5197 		/*
    5198 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5199 		 * the multiqueue function with MSI-X.
   5200 		 */
   5201 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5202 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5203 	} else {
   5204 		KASSERT(wmq->wmq_id == 0);
   5205 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5206 	}
   5207 
   5208 	wmq->wmq_set_itr = false;
   5209 }
   5210 
   5211 /*
   5212  * TODO
    5213  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5214  * however, it does not fit wm(4) well, so AIM stays disabled until we
    5215  * find an appropriate ITR calculation.
   5216  */
   5217 /*
    5218  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5219  * write. This function does not write the ITR/EITR register itself.
   5220  */
   5221 static void
   5222 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5223 {
   5224 #ifdef NOTYET
   5225 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5226 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5227 	uint32_t avg_size = 0;
   5228 	uint32_t new_itr;
   5229 
   5230 	if (rxq->rxq_packets)
   5231 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5232 	if (txq->txq_packets)
   5233 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5234 
   5235 	if (avg_size == 0) {
   5236 		new_itr = 450; /* restore default value */
   5237 		goto out;
   5238 	}
   5239 
   5240 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5241 	avg_size += 24;
   5242 
   5243 	/* Don't starve jumbo frames */
   5244 	avg_size = min(avg_size, 3000);
   5245 
   5246 	/* Give a little boost to mid-size frames */
   5247 	if ((avg_size > 300) && (avg_size < 1200))
   5248 		new_itr = avg_size / 3;
   5249 	else
   5250 		new_itr = avg_size / 2;
   5251 
   5252 out:
   5253 	/*
    5254 	 * The usage of the 82574 and 82575 EITR differs from other NEWQUEUE
   5255 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5256 	 */
   5257 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5258 		new_itr *= 4;
   5259 
   5260 	if (new_itr != wmq->wmq_itr) {
   5261 		wmq->wmq_itr = new_itr;
   5262 		wmq->wmq_set_itr = true;
   5263 	} else
   5264 		wmq->wmq_set_itr = false;
   5265 
   5266 	rxq->rxq_packets = 0;
   5267 	rxq->rxq_bytes = 0;
   5268 	txq->txq_packets = 0;
   5269 	txq->txq_bytes = 0;
   5270 #endif
   5271 }
   5272 
   5273 /*
   5274  * wm_init:		[ifnet interface function]
   5275  *
   5276  *	Initialize the interface.
   5277  */
   5278 static int
   5279 wm_init(struct ifnet *ifp)
   5280 {
   5281 	struct wm_softc *sc = ifp->if_softc;
   5282 	int ret;
   5283 
   5284 	WM_CORE_LOCK(sc);
   5285 	ret = wm_init_locked(ifp);
   5286 	WM_CORE_UNLOCK(sc);
   5287 
   5288 	return ret;
   5289 }
   5290 
   5291 static int
   5292 wm_init_locked(struct ifnet *ifp)
   5293 {
   5294 	struct wm_softc *sc = ifp->if_softc;
   5295 	int i, j, trynum, error = 0;
   5296 	uint32_t reg;
   5297 
   5298 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5299 		device_xname(sc->sc_dev), __func__));
   5300 	KASSERT(WM_CORE_LOCKED(sc));
   5301 
   5302 	/*
    5303 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5304 	 * There is a small but measurable benefit to avoiding the adjustment
    5305 	 * of the descriptor so that the headers are aligned, for normal MTU,
   5306 	 * on such platforms.  One possibility is that the DMA itself is
   5307 	 * slightly more efficient if the front of the entire packet (instead
   5308 	 * of the front of the headers) is aligned.
   5309 	 *
   5310 	 * Note we must always set align_tweak to 0 if we are using
   5311 	 * jumbo frames.
   5312 	 */
   5313 #ifdef __NO_STRICT_ALIGNMENT
   5314 	sc->sc_align_tweak = 0;
   5315 #else
   5316 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5317 		sc->sc_align_tweak = 0;
   5318 	else
   5319 		sc->sc_align_tweak = 2;
   5320 #endif /* __NO_STRICT_ALIGNMENT */
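         	/*
         	 * (The 2-byte tweak offsets the 14-byte Ethernet header so
         	 * that the IP header lands on a 4-byte boundary.)
         	 */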
   5321 
   5322 	/* Cancel any pending I/O. */
   5323 	wm_stop_locked(ifp, 0);
   5324 
   5325 	/* update statistics before reset */
   5326 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5327 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5328 
   5329 	/* PCH_SPT hardware workaround */
   5330 	if (sc->sc_type == WM_T_PCH_SPT)
   5331 		wm_flush_desc_rings(sc);
   5332 
   5333 	/* Reset the chip to a known state. */
   5334 	wm_reset(sc);
   5335 
   5336 	/*
   5337 	 * AMT based hardware can now take control from firmware
   5338 	 * Do this after reset.
   5339 	 */
   5340 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5341 		wm_get_hw_control(sc);
   5342 
   5343 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5344 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5345 		wm_legacy_irq_quirk_spt(sc);
   5346 
   5347 	/* Init hardware bits */
   5348 	wm_initialize_hardware_bits(sc);
   5349 
   5350 	/* Reset the PHY. */
   5351 	if (sc->sc_flags & WM_F_HAS_MII)
   5352 		wm_gmii_reset(sc);
   5353 
   5354 	/* Calculate (E)ITR value */
   5355 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5356 		/*
   5357 		 * For NEWQUEUE's EITR (except for 82575).
    5358 		 * The 82575's EITR should be set to the same throttling value
    5359 		 * as other old controllers' ITR because the interrupts/sec
    5360 		 * calculation is the same: 1,000,000,000 / (N * 256).
    5361 		 *
    5362 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5363 		 *
    5364 		 * For N interrupts/sec, set this value to:
    5365 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5366 		 */
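         		/*
         		 * e.g. sc_itr_init == 450 corresponds to roughly
         		 * 1,000,000 / 450 ~= 2222 interrupts/sec.
         		 */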
   5367 		sc->sc_itr_init = 450;
   5368 	} else if (sc->sc_type >= WM_T_82543) {
   5369 		/*
   5370 		 * Set up the interrupt throttling register (units of 256ns)
   5371 		 * Note that a footnote in Intel's documentation says this
   5372 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5373 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5374 		 * that that is also true for the 1024ns units of the other
   5375 		 * interrupt-related timer registers -- so, really, we ought
   5376 		 * to divide this value by 4 when the link speed is low.
   5377 		 *
   5378 		 * XXX implement this division at link speed change!
   5379 		 */
   5380 
   5381 		/*
   5382 		 * For N interrupts/sec, set this value to:
   5383 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5384 		 * absolute and packet timer values to this value
   5385 		 * divided by 4 to get "simple timer" behavior.
   5386 		 */
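         		/*
         		 * e.g. 1,000,000,000 / (2604 * 256) ~= 1500, matching
         		 * the value set below.
         		 */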
   5387 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5388 	}
   5389 
   5390 	error = wm_init_txrx_queues(sc);
   5391 	if (error)
   5392 		goto out;
   5393 
   5394 	/*
   5395 	 * Clear out the VLAN table -- we don't use it (yet).
   5396 	 */
   5397 	CSR_WRITE(sc, WMREG_VET, 0);
   5398 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5399 		trynum = 10; /* Due to hw errata */
   5400 	else
   5401 		trynum = 1;
   5402 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5403 		for (j = 0; j < trynum; j++)
   5404 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5405 
   5406 	/*
   5407 	 * Set up flow-control parameters.
   5408 	 *
   5409 	 * XXX Values could probably stand some tuning.
   5410 	 */
   5411 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5412 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5413 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5414 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5415 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5416 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5417 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5418 	}
   5419 
   5420 	sc->sc_fcrtl = FCRTL_DFLT;
   5421 	if (sc->sc_type < WM_T_82543) {
   5422 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5423 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5424 	} else {
   5425 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5426 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5427 	}
   5428 
   5429 	if (sc->sc_type == WM_T_80003)
   5430 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5431 	else
   5432 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5433 
   5434 	/* Writes the control register. */
   5435 	wm_set_vlan(sc);
   5436 
   5437 	if (sc->sc_flags & WM_F_HAS_MII) {
   5438 		uint16_t kmreg;
   5439 
   5440 		switch (sc->sc_type) {
   5441 		case WM_T_80003:
   5442 		case WM_T_ICH8:
   5443 		case WM_T_ICH9:
   5444 		case WM_T_ICH10:
   5445 		case WM_T_PCH:
   5446 		case WM_T_PCH2:
   5447 		case WM_T_PCH_LPT:
   5448 		case WM_T_PCH_SPT:
   5449 			/*
   5450 			 * Set the mac to wait the maximum time between each
   5451 			 * iteration and increase the max iterations when
   5452 			 * polling the phy; this fixes erroneous timeouts at
   5453 			 * 10Mbps.
   5454 			 */
   5455 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5456 			    0xFFFF);
   5457 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5458 			    &kmreg);
   5459 			kmreg |= 0x3F;
   5460 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5461 			    kmreg);
   5462 			break;
   5463 		default:
   5464 			break;
   5465 		}
   5466 
   5467 		if (sc->sc_type == WM_T_80003) {
   5468 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5469 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5470 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5471 
   5472 			/* Bypass RX and TX FIFO's */
   5473 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5474 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5475 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5476 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5477 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5478 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5479 		}
   5480 	}
   5481 #if 0
   5482 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5483 #endif
   5484 
   5485 	/* Set up checksum offload parameters. */
   5486 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5487 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5488 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5489 		reg |= RXCSUM_IPOFL;
   5490 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5491 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5492 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5493 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5494 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5495 
   5496 	/* Set registers about MSI-X */
   5497 	if (wm_is_using_msix(sc)) {
   5498 		uint32_t ivar;
   5499 		struct wm_queue *wmq;
   5500 		int qid, qintr_idx;
   5501 
   5502 		if (sc->sc_type == WM_T_82575) {
   5503 			/* Interrupt control */
   5504 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5505 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5506 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5507 
   5508 			/* TX and RX */
   5509 			for (i = 0; i < sc->sc_nqueues; i++) {
   5510 				wmq = &sc->sc_queue[i];
   5511 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5512 				    EITR_TX_QUEUE(wmq->wmq_id)
   5513 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5514 			}
   5515 			/* Link status */
   5516 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5517 			    EITR_OTHER);
   5518 		} else if (sc->sc_type == WM_T_82574) {
   5519 			/* Interrupt control */
   5520 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5521 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5522 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5523 
   5524 			/*
    5525 			 * Work around an issue with spurious interrupts
    5526 			 * in MSI-X mode.
    5527 			 * At wm_initialize_hardware_bits(), sc_nintrs has not been
    5528 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5529 			 */
   5530 			reg = CSR_READ(sc, WMREG_RFCTL);
   5531 			reg |= WMREG_RFCTL_ACKDIS;
   5532 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5533 
   5534 			ivar = 0;
   5535 			/* TX and RX */
   5536 			for (i = 0; i < sc->sc_nqueues; i++) {
   5537 				wmq = &sc->sc_queue[i];
   5538 				qid = wmq->wmq_id;
   5539 				qintr_idx = wmq->wmq_intr_idx;
   5540 
   5541 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5542 				    IVAR_TX_MASK_Q_82574(qid));
   5543 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5544 				    IVAR_RX_MASK_Q_82574(qid));
   5545 			}
   5546 			/* Link status */
   5547 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5548 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5549 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5550 		} else {
   5551 			/* Interrupt control */
   5552 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5553 			    | GPIE_EIAME | GPIE_PBA);
   5554 
   5555 			switch (sc->sc_type) {
   5556 			case WM_T_82580:
   5557 			case WM_T_I350:
   5558 			case WM_T_I354:
   5559 			case WM_T_I210:
   5560 			case WM_T_I211:
   5561 				/* TX and RX */
   5562 				for (i = 0; i < sc->sc_nqueues; i++) {
   5563 					wmq = &sc->sc_queue[i];
   5564 					qid = wmq->wmq_id;
   5565 					qintr_idx = wmq->wmq_intr_idx;
   5566 
   5567 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5568 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5569 					ivar |= __SHIFTIN((qintr_idx
   5570 						| IVAR_VALID),
   5571 					    IVAR_TX_MASK_Q(qid));
   5572 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5573 					ivar |= __SHIFTIN((qintr_idx
   5574 						| IVAR_VALID),
   5575 					    IVAR_RX_MASK_Q(qid));
   5576 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5577 				}
   5578 				break;
   5579 			case WM_T_82576:
   5580 				/* TX and RX */
   5581 				for (i = 0; i < sc->sc_nqueues; i++) {
   5582 					wmq = &sc->sc_queue[i];
   5583 					qid = wmq->wmq_id;
   5584 					qintr_idx = wmq->wmq_intr_idx;
   5585 
   5586 					ivar = CSR_READ(sc,
   5587 					    WMREG_IVAR_Q_82576(qid));
   5588 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5589 					ivar |= __SHIFTIN((qintr_idx
   5590 						| IVAR_VALID),
   5591 					    IVAR_TX_MASK_Q_82576(qid));
   5592 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5593 					ivar |= __SHIFTIN((qintr_idx
   5594 						| IVAR_VALID),
   5595 					    IVAR_RX_MASK_Q_82576(qid));
   5596 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5597 					    ivar);
   5598 				}
   5599 				break;
   5600 			default:
   5601 				break;
   5602 			}
   5603 
   5604 			/* Link status */
   5605 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5606 			    IVAR_MISC_OTHER);
   5607 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5608 		}
   5609 
   5610 		if (wm_is_using_multiqueue(sc)) {
   5611 			wm_init_rss(sc);
   5612 
   5613 			/*
   5614 			** NOTE: Receive Full-Packet Checksum Offload
    5615 			** is mutually exclusive with Multiqueue. However,
    5616 			** this is not the same as TCP/IP checksums, which
   5617 			** still work.
   5618 			*/
   5619 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5620 			reg |= RXCSUM_PCSD;
   5621 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5622 		}
   5623 	}
   5624 
   5625 	/* Set up the interrupt registers. */
   5626 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5627 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5628 	    ICR_RXO | ICR_RXT0;
   5629 	if (wm_is_using_msix(sc)) {
   5630 		uint32_t mask;
   5631 		struct wm_queue *wmq;
   5632 
   5633 		switch (sc->sc_type) {
   5634 		case WM_T_82574:
   5635 			mask = 0;
   5636 			for (i = 0; i < sc->sc_nqueues; i++) {
   5637 				wmq = &sc->sc_queue[i];
   5638 				mask |= ICR_TXQ(wmq->wmq_id);
   5639 				mask |= ICR_RXQ(wmq->wmq_id);
   5640 			}
   5641 			mask |= ICR_OTHER;
   5642 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5643 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5644 			break;
   5645 		default:
   5646 			if (sc->sc_type == WM_T_82575) {
   5647 				mask = 0;
   5648 				for (i = 0; i < sc->sc_nqueues; i++) {
   5649 					wmq = &sc->sc_queue[i];
   5650 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5651 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5652 				}
   5653 				mask |= EITR_OTHER;
   5654 			} else {
   5655 				mask = 0;
   5656 				for (i = 0; i < sc->sc_nqueues; i++) {
   5657 					wmq = &sc->sc_queue[i];
   5658 					mask |= 1 << wmq->wmq_intr_idx;
   5659 				}
   5660 				mask |= 1 << sc->sc_link_intr_idx;
   5661 			}
   5662 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5663 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5664 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5665 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5666 			break;
   5667 		}
   5668 	} else
   5669 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5670 
   5671 	/* Set up the inter-packet gap. */
   5672 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5673 
   5674 	if (sc->sc_type >= WM_T_82543) {
   5675 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5676 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5677 			wm_itrs_writereg(sc, wmq);
   5678 		}
   5679 		/*
   5680 		 * Link interrupts occur much less than TX
   5681 		 * interrupts and RX interrupts. So, we don't
   5682 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5683 		 * FreeBSD's if_igb.
   5684 		 */
   5685 	}
   5686 
   5687 	/* Set the VLAN ethernetype. */
   5688 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5689 
   5690 	/*
   5691 	 * Set up the transmit control register; we start out with
    5692 	 * a collision distance suitable for FDX, but update it when
   5693 	 * we resolve the media type.
   5694 	 */
   5695 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5696 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5697 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5698 	if (sc->sc_type >= WM_T_82571)
   5699 		sc->sc_tctl |= TCTL_MULR;
   5700 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5701 
   5702 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5703 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5704 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5705 	}
   5706 
   5707 	if (sc->sc_type == WM_T_80003) {
   5708 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5709 		reg &= ~TCTL_EXT_GCEX_MASK;
   5710 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5711 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5712 	}
   5713 
   5714 	/* Set the media. */
   5715 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5716 		goto out;
   5717 
   5718 	/* Configure for OS presence */
   5719 	wm_init_manageability(sc);
   5720 
   5721 	/*
   5722 	 * Set up the receive control register; we actually program
   5723 	 * the register when we set the receive filter.  Use multicast
   5724 	 * address offset type 0.
   5725 	 *
   5726 	 * Only the i82544 has the ability to strip the incoming
   5727 	 * CRC, so we don't enable that feature.
   5728 	 */
   5729 	sc->sc_mchash_type = 0;
   5730 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5731 	    | RCTL_MO(sc->sc_mchash_type);
   5732 
   5733 	/*
    5734 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5735 	 */
   5736 	if (sc->sc_type == WM_T_82574)
   5737 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5738 
   5739 	/*
   5740 	 * The I350 has a bug where it always strips the CRC whether
    5741 	 * asked to or not. So ask for the stripped CRC here and cope in rxeof.
   5742 	 */
   5743 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5744 	    || (sc->sc_type == WM_T_I210))
   5745 		sc->sc_rctl |= RCTL_SECRC;
   5746 
   5747 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5748 	    && (ifp->if_mtu > ETHERMTU)) {
   5749 		sc->sc_rctl |= RCTL_LPE;
   5750 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5751 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5752 	}
   5753 
   5754 	if (MCLBYTES == 2048) {
   5755 		sc->sc_rctl |= RCTL_2k;
   5756 	} else {
   5757 		if (sc->sc_type >= WM_T_82543) {
   5758 			switch (MCLBYTES) {
   5759 			case 4096:
   5760 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5761 				break;
   5762 			case 8192:
   5763 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5764 				break;
   5765 			case 16384:
   5766 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5767 				break;
   5768 			default:
   5769 				panic("wm_init: MCLBYTES %d unsupported",
   5770 				    MCLBYTES);
   5771 				break;
   5772 			}
   5773 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5774 	}
   5775 
   5776 	/* Enable ECC */
   5777 	switch (sc->sc_type) {
   5778 	case WM_T_82571:
   5779 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5780 		reg |= PBA_ECC_CORR_EN;
   5781 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5782 		break;
   5783 	case WM_T_PCH_LPT:
   5784 	case WM_T_PCH_SPT:
   5785 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5786 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5787 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5788 
   5789 		sc->sc_ctrl |= CTRL_MEHE;
   5790 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5791 		break;
   5792 	default:
   5793 		break;
   5794 	}
   5795 
   5796 	/* On 575 and later set RDT only if RX enabled */
   5797 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5798 		int qidx;
   5799 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5800 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5801 			for (i = 0; i < WM_NRXDESC; i++) {
   5802 				mutex_enter(rxq->rxq_lock);
   5803 				wm_init_rxdesc(rxq, i);
   5804 				mutex_exit(rxq->rxq_lock);
   5805 
   5806 			}
   5807 		}
   5808 	}
   5809 
   5810 	/* Set the receive filter. */
   5811 	wm_set_filter(sc);
   5812 
   5813 	wm_turnon(sc);
   5814 
   5815 	/* Start the one second link check clock. */
   5816 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5817 
   5818 	/* ...all done! */
   5819 	ifp->if_flags |= IFF_RUNNING;
   5820 	ifp->if_flags &= ~IFF_OACTIVE;
   5821 
   5822  out:
   5823 	sc->sc_if_flags = ifp->if_flags;
   5824 	if (error)
   5825 		log(LOG_ERR, "%s: interface not running\n",
   5826 		    device_xname(sc->sc_dev));
   5827 	return error;
   5828 }
   5829 
   5830 /*
   5831  * wm_stop:		[ifnet interface function]
   5832  *
   5833  *	Stop transmission on the interface.
   5834  */
   5835 static void
   5836 wm_stop(struct ifnet *ifp, int disable)
   5837 {
   5838 	struct wm_softc *sc = ifp->if_softc;
   5839 
   5840 	WM_CORE_LOCK(sc);
   5841 	wm_stop_locked(ifp, disable);
   5842 	WM_CORE_UNLOCK(sc);
   5843 }
   5844 
   5845 static void
   5846 wm_stop_locked(struct ifnet *ifp, int disable)
   5847 {
   5848 	struct wm_softc *sc = ifp->if_softc;
   5849 	struct wm_txsoft *txs;
   5850 	int i, qidx;
   5851 
   5852 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5853 		device_xname(sc->sc_dev), __func__));
   5854 	KASSERT(WM_CORE_LOCKED(sc));
   5855 
   5856 	wm_turnoff(sc);
   5857 
   5858 	/* Stop the one second clock. */
   5859 	callout_stop(&sc->sc_tick_ch);
   5860 
   5861 	/* Stop the 82547 Tx FIFO stall check timer. */
   5862 	if (sc->sc_type == WM_T_82547)
   5863 		callout_stop(&sc->sc_txfifo_ch);
   5864 
   5865 	if (sc->sc_flags & WM_F_HAS_MII) {
   5866 		/* Down the MII. */
   5867 		mii_down(&sc->sc_mii);
   5868 	} else {
   5869 #if 0
   5870 		/* Should we clear PHY's status properly? */
   5871 		wm_reset(sc);
   5872 #endif
   5873 	}
   5874 
   5875 	/* Stop the transmit and receive processes. */
   5876 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5877 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5878 	sc->sc_rctl &= ~RCTL_EN;
   5879 
   5880 	/*
   5881 	 * Clear the interrupt mask to ensure the device cannot assert its
   5882 	 * interrupt line.
   5883 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5884 	 * service any currently pending or shared interrupt.
   5885 	 */
   5886 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5887 	sc->sc_icr = 0;
   5888 	if (wm_is_using_msix(sc)) {
   5889 		if (sc->sc_type != WM_T_82574) {
   5890 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5891 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5892 		} else
   5893 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5894 	}
   5895 
   5896 	/* Release any queued transmit buffers. */
   5897 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5898 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5899 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5900 		mutex_enter(txq->txq_lock);
   5901 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5902 			txs = &txq->txq_soft[i];
   5903 			if (txs->txs_mbuf != NULL) {
   5904 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5905 				m_freem(txs->txs_mbuf);
   5906 				txs->txs_mbuf = NULL;
   5907 			}
   5908 		}
   5909 		mutex_exit(txq->txq_lock);
   5910 	}
   5911 
   5912 	/* Mark the interface as down and cancel the watchdog timer. */
   5913 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5914 	ifp->if_timer = 0;
   5915 
   5916 	if (disable) {
   5917 		for (i = 0; i < sc->sc_nqueues; i++) {
   5918 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5919 			mutex_enter(rxq->rxq_lock);
   5920 			wm_rxdrain(rxq);
   5921 			mutex_exit(rxq->rxq_lock);
   5922 		}
   5923 	}
   5924 
   5925 #if 0 /* notyet */
   5926 	if (sc->sc_type >= WM_T_82544)
   5927 		CSR_WRITE(sc, WMREG_WUC, 0);
   5928 #endif
   5929 }
   5930 
   5931 static void
   5932 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5933 {
   5934 	struct mbuf *m;
   5935 	int i;
   5936 
   5937 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5938 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5939 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5940 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5941 		    m->m_data, m->m_len, m->m_flags);
   5942 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5943 	    i, i == 1 ? "" : "s");
   5944 }
   5945 
   5946 /*
   5947  * wm_82547_txfifo_stall:
   5948  *
   5949  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5950  *	reset the FIFO pointers, and restart packet transmission.
   5951  */
   5952 static void
   5953 wm_82547_txfifo_stall(void *arg)
   5954 {
   5955 	struct wm_softc *sc = arg;
   5956 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5957 
   5958 	mutex_enter(txq->txq_lock);
   5959 
   5960 	if (txq->txq_stopping)
   5961 		goto out;
   5962 
   5963 	if (txq->txq_fifo_stall) {
   5964 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5965 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5966 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5967 			/*
   5968 			 * Packets have drained.  Stop transmitter, reset
   5969 			 * FIFO pointers, restart transmitter, and kick
   5970 			 * the packet queue.
   5971 			 */
   5972 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5973 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5974 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5975 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5976 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5977 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5978 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5979 			CSR_WRITE_FLUSH(sc);
   5980 
   5981 			txq->txq_fifo_head = 0;
   5982 			txq->txq_fifo_stall = 0;
   5983 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5984 		} else {
   5985 			/*
   5986 			 * Still waiting for packets to drain; try again in
   5987 			 * another tick.
   5988 			 */
   5989 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5990 		}
   5991 	}
   5992 
   5993 out:
   5994 	mutex_exit(txq->txq_lock);
   5995 }
   5996 
   5997 /*
   5998  * wm_82547_txfifo_bugchk:
   5999  *
    6000  *	Check for a bug condition in the 82547 Tx FIFO.  We need to
    6001  *	prevent enqueueing a packet that would wrap around the end
    6002  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6003  *
   6004  *	We do this by checking the amount of space before the end
   6005  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6006  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6007  *	the internal FIFO pointers to the beginning, and restart
   6008  *	transmission on the interface.
   6009  */
   6010 #define	WM_FIFO_HDR		0x10
   6011 #define	WM_82547_PAD_LEN	0x3e0
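         /*
          * Illustrative example: a 1514-byte frame consumes
          * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) == 1536 (0x600) bytes
          * of Tx FIFO space in the check below.
          */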
   6012 static int
   6013 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6014 {
   6015 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6016 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6017 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6018 
   6019 	/* Just return if already stalled. */
   6020 	if (txq->txq_fifo_stall)
   6021 		return 1;
   6022 
   6023 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6024 		/* Stall only occurs in half-duplex mode. */
   6025 		goto send_packet;
   6026 	}
   6027 
   6028 	if (len >= WM_82547_PAD_LEN + space) {
   6029 		txq->txq_fifo_stall = 1;
   6030 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6031 		return 1;
   6032 	}
   6033 
   6034  send_packet:
   6035 	txq->txq_fifo_head += len;
   6036 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6037 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6038 
   6039 	return 0;
   6040 }
   6041 
   6042 static int
   6043 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6044 {
   6045 	int error;
   6046 
   6047 	/*
   6048 	 * Allocate the control data structures, and create and load the
   6049 	 * DMA map for it.
   6050 	 *
   6051 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6052 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6053 	 * both sets within the same 4G segment.
   6054 	 */
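         	/*
         	 * The 4G constraint is enforced by the 0x100000000 boundary
         	 * argument to bus_dmamem_alloc() below.
         	 */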
   6055 	if (sc->sc_type < WM_T_82544)
   6056 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6057 	else
   6058 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6059 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6060 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6061 	else
   6062 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6063 
   6064 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6065 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6066 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6067 		aprint_error_dev(sc->sc_dev,
   6068 		    "unable to allocate TX control data, error = %d\n",
   6069 		    error);
   6070 		goto fail_0;
   6071 	}
   6072 
   6073 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6074 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6075 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6076 		aprint_error_dev(sc->sc_dev,
   6077 		    "unable to map TX control data, error = %d\n", error);
   6078 		goto fail_1;
   6079 	}
   6080 
   6081 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6082 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6083 		aprint_error_dev(sc->sc_dev,
   6084 		    "unable to create TX control data DMA map, error = %d\n",
   6085 		    error);
   6086 		goto fail_2;
   6087 	}
   6088 
   6089 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6090 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6091 		aprint_error_dev(sc->sc_dev,
   6092 		    "unable to load TX control data DMA map, error = %d\n",
   6093 		    error);
   6094 		goto fail_3;
   6095 	}
   6096 
   6097 	return 0;
   6098 
   6099  fail_3:
   6100 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6101  fail_2:
   6102 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6103 	    WM_TXDESCS_SIZE(txq));
   6104  fail_1:
   6105 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6106  fail_0:
   6107 	return error;
   6108 }
   6109 
   6110 static void
   6111 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6112 {
   6113 
   6114 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6115 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6116 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6117 	    WM_TXDESCS_SIZE(txq));
   6118 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6119 }
   6120 
   6121 static int
   6122 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6123 {
   6124 	int error;
   6125 	size_t rxq_descs_size;
   6126 
   6127 	/*
   6128 	 * Allocate the control data structures, and create and load the
   6129 	 * DMA map for it.
   6130 	 *
   6131 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6132 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6133 	 * both sets within the same 4G segment.
   6134 	 */
   6135 	rxq->rxq_ndesc = WM_NRXDESC;
   6136 	if (sc->sc_type == WM_T_82574)
   6137 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6138 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6139 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6140 	else
   6141 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6142 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6143 
   6144 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6145 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6146 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6147 		aprint_error_dev(sc->sc_dev,
   6148 		    "unable to allocate RX control data, error = %d\n",
   6149 		    error);
   6150 		goto fail_0;
   6151 	}
   6152 
   6153 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6154 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6155 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6156 		aprint_error_dev(sc->sc_dev,
   6157 		    "unable to map RX control data, error = %d\n", error);
   6158 		goto fail_1;
   6159 	}
   6160 
   6161 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6162 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6163 		aprint_error_dev(sc->sc_dev,
   6164 		    "unable to create RX control data DMA map, error = %d\n",
   6165 		    error);
   6166 		goto fail_2;
   6167 	}
   6168 
   6169 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6170 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6171 		aprint_error_dev(sc->sc_dev,
   6172 		    "unable to load RX control data DMA map, error = %d\n",
   6173 		    error);
   6174 		goto fail_3;
   6175 	}
   6176 
   6177 	return 0;
   6178 
   6179  fail_3:
   6180 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6181  fail_2:
   6182 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6183 	    rxq_descs_size);
   6184  fail_1:
   6185 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6186  fail_0:
   6187 	return error;
   6188 }
   6189 
   6190 static void
   6191 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6192 {
   6193 
   6194 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6195 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6196 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6197 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6198 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6199 }
   6200 
   6201 
   6202 static int
   6203 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6204 {
   6205 	int i, error;
   6206 
   6207 	/* Create the transmit buffer DMA maps. */
   6208 	WM_TXQUEUELEN(txq) =
   6209 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6210 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6211 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6212 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6213 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6214 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6215 			aprint_error_dev(sc->sc_dev,
   6216 			    "unable to create Tx DMA map %d, error = %d\n",
   6217 			    i, error);
   6218 			goto fail;
   6219 		}
   6220 	}
   6221 
   6222 	return 0;
   6223 
   6224  fail:
   6225 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6226 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6227 			bus_dmamap_destroy(sc->sc_dmat,
   6228 			    txq->txq_soft[i].txs_dmamap);
   6229 	}
   6230 	return error;
   6231 }
   6232 
   6233 static void
   6234 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6235 {
   6236 	int i;
   6237 
   6238 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6239 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6240 			bus_dmamap_destroy(sc->sc_dmat,
   6241 			    txq->txq_soft[i].txs_dmamap);
   6242 	}
   6243 }
   6244 
   6245 static int
   6246 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6247 {
   6248 	int i, error;
   6249 
   6250 	/* Create the receive buffer DMA maps. */
   6251 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6252 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6253 			    MCLBYTES, 0, 0,
   6254 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6255 			aprint_error_dev(sc->sc_dev,
   6256 			    "unable to create Rx DMA map %d error = %d\n",
   6257 			    i, error);
   6258 			goto fail;
   6259 		}
   6260 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6261 	}
   6262 
   6263 	return 0;
   6264 
   6265  fail:
   6266 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6267 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6268 			bus_dmamap_destroy(sc->sc_dmat,
   6269 			    rxq->rxq_soft[i].rxs_dmamap);
   6270 	}
   6271 	return error;
   6272 }
   6273 
   6274 static void
   6275 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6276 {
   6277 	int i;
   6278 
   6279 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6280 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6281 			bus_dmamap_destroy(sc->sc_dmat,
   6282 			    rxq->rxq_soft[i].rxs_dmamap);
   6283 	}
   6284 }
   6285 
   6286 /*
   6287  * wm_alloc_txrx_queues:
   6288  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
   6289  */
   6290 static int
   6291 wm_alloc_txrx_queues(struct wm_softc *sc)
   6292 {
   6293 	int i, error, tx_done, rx_done;
   6294 
   6295 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6296 	    KM_SLEEP);
   6297 	if (sc->sc_queue == NULL) {
   6298 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6299 		error = ENOMEM;
   6300 		goto fail_0;
   6301 	}
   6302 
   6303 	/*
   6304 	 * For transmission
   6305 	 */
   6306 	error = 0;
   6307 	tx_done = 0;
   6308 	for (i = 0; i < sc->sc_nqueues; i++) {
   6309 #ifdef WM_EVENT_COUNTERS
   6310 		int j;
   6311 		const char *xname;
   6312 #endif
   6313 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6314 		txq->txq_sc = sc;
   6315 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6316 
   6317 		error = wm_alloc_tx_descs(sc, txq);
   6318 		if (error)
   6319 			break;
   6320 		error = wm_alloc_tx_buffer(sc, txq);
   6321 		if (error) {
   6322 			wm_free_tx_descs(sc, txq);
   6323 			break;
   6324 		}
   6325 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6326 		if (txq->txq_interq == NULL) {
   6327 			wm_free_tx_descs(sc, txq);
   6328 			wm_free_tx_buffer(sc, txq);
   6329 			error = ENOMEM;
   6330 			break;
   6331 		}
   6332 
   6333 #ifdef WM_EVENT_COUNTERS
   6334 		xname = device_xname(sc->sc_dev);
   6335 
   6336 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6337 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6338 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6339 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6340 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6341 
   6342 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6343 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6344 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6345 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6346 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6347 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6348 
   6349 		for (j = 0; j < WM_NTXSEGS; j++) {
   6350 			snprintf(txq->txq_txseg_evcnt_names[j],
   6351 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6352 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6353 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6354 		}
   6355 
   6356 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6357 
   6358 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6359 #endif /* WM_EVENT_COUNTERS */
   6360 
   6361 		tx_done++;
   6362 	}
   6363 	if (error)
   6364 		goto fail_1;
   6365 
   6366 	/*
   6367 	 * For receive
   6368 	 */
   6369 	error = 0;
   6370 	rx_done = 0;
   6371 	for (i = 0; i < sc->sc_nqueues; i++) {
   6372 #ifdef WM_EVENT_COUNTERS
   6373 		const char *xname;
   6374 #endif
   6375 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6376 		rxq->rxq_sc = sc;
   6377 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6378 
   6379 		error = wm_alloc_rx_descs(sc, rxq);
   6380 		if (error)
   6381 			break;
   6382 
   6383 		error = wm_alloc_rx_buffer(sc, rxq);
   6384 		if (error) {
   6385 			wm_free_rx_descs(sc, rxq);
   6386 			break;
   6387 		}
   6388 
   6389 #ifdef WM_EVENT_COUNTERS
   6390 		xname = device_xname(sc->sc_dev);
   6391 
   6392 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6393 
   6394 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6395 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6396 #endif /* WM_EVENT_COUNTERS */
   6397 
   6398 		rx_done++;
   6399 	}
   6400 	if (error)
   6401 		goto fail_2;
   6402 
   6403 	return 0;
   6404 
   6405  fail_2:
   6406 	for (i = 0; i < rx_done; i++) {
   6407 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6408 		wm_free_rx_buffer(sc, rxq);
   6409 		wm_free_rx_descs(sc, rxq);
   6410 		if (rxq->rxq_lock)
   6411 			mutex_obj_free(rxq->rxq_lock);
   6412 	}
   6413  fail_1:
   6414 	for (i = 0; i < tx_done; i++) {
   6415 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6416 		pcq_destroy(txq->txq_interq);
   6417 		wm_free_tx_buffer(sc, txq);
   6418 		wm_free_tx_descs(sc, txq);
   6419 		if (txq->txq_lock)
   6420 			mutex_obj_free(txq->txq_lock);
   6421 	}
   6422 
   6423 	kmem_free(sc->sc_queue,
   6424 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6425  fail_0:
   6426 	return error;
   6427 }
   6428 
   6429 /*
   6430  * wm_free_txrx_queues:
   6431  *	Free {tx,rx} descriptors and {tx,rx} buffers
   6432  */
   6433 static void
   6434 wm_free_txrx_queues(struct wm_softc *sc)
   6435 {
   6436 	int i;
   6437 
   6438 	for (i = 0; i < sc->sc_nqueues; i++) {
   6439 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6440 
   6441 #ifdef WM_EVENT_COUNTERS
   6442 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6443 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6444 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6445 #endif /* WM_EVENT_COUNTERS */
   6446 
   6447 		wm_free_rx_buffer(sc, rxq);
   6448 		wm_free_rx_descs(sc, rxq);
   6449 		if (rxq->rxq_lock)
   6450 			mutex_obj_free(rxq->rxq_lock);
   6451 	}
   6452 
   6453 	for (i = 0; i < sc->sc_nqueues; i++) {
   6454 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6455 		struct mbuf *m;
   6456 #ifdef WM_EVENT_COUNTERS
   6457 		int j;
   6458 
   6459 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6460 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6461 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6462 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6463 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6464 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6465 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6466 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6467 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6468 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6469 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6470 
   6471 		for (j = 0; j < WM_NTXSEGS; j++)
   6472 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6473 
   6474 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6475 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6476 #endif /* WM_EVENT_COUNTERS */
   6477 
   6478 		/* drain txq_interq */
   6479 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6480 			m_freem(m);
   6481 		pcq_destroy(txq->txq_interq);
   6482 
   6483 		wm_free_tx_buffer(sc, txq);
   6484 		wm_free_tx_descs(sc, txq);
   6485 		if (txq->txq_lock)
   6486 			mutex_obj_free(txq->txq_lock);
   6487 	}
   6488 
   6489 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6490 }
   6491 
   6492 static void
   6493 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6494 {
   6495 
   6496 	KASSERT(mutex_owned(txq->txq_lock));
   6497 
   6498 	/* Initialize the transmit descriptor ring. */
   6499 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6500 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6501 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6502 	txq->txq_free = WM_NTXDESC(txq);
   6503 	txq->txq_next = 0;
   6504 }
   6505 
   6506 static void
   6507 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6508     struct wm_txqueue *txq)
   6509 {
   6510 
   6511 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6512 		device_xname(sc->sc_dev), __func__));
   6513 	KASSERT(mutex_owned(txq->txq_lock));
   6514 
   6515 	if (sc->sc_type < WM_T_82543) {
   6516 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6517 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6518 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6519 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6520 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6521 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6522 	} else {
   6523 		int qid = wmq->wmq_id;
   6524 
   6525 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6526 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6527 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6528 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6529 
   6530 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6531 			/*
   6532 			 * Don't write TDT before TCTL.EN is set.
   6533 			 * See the datasheet.
   6534 			 */
   6535 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6536 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6537 			    | TXDCTL_WTHRESH(0));
   6538 		else {
   6539 			/* XXX should update with AIM? */
   6540 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6541 			if (sc->sc_type >= WM_T_82540) {
   6542 				/* TADV should hold the same value as TIDV */
   6543 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6544 			}
   6545 
   6546 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6547 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6548 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6549 		}
   6550 	}
   6551 }
   6552 
   6553 static void
   6554 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6555 {
   6556 	int i;
   6557 
   6558 	KASSERT(mutex_owned(txq->txq_lock));
   6559 
   6560 	/* Initialize the transmit job descriptors. */
   6561 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6562 		txq->txq_soft[i].txs_mbuf = NULL;
   6563 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6564 	txq->txq_snext = 0;
   6565 	txq->txq_sdirty = 0;
   6566 }
   6567 
   6568 static void
   6569 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6570     struct wm_txqueue *txq)
   6571 {
   6572 
   6573 	KASSERT(mutex_owned(txq->txq_lock));
   6574 
   6575 	/*
   6576 	 * Set up some register offsets that are different between
   6577 	 * the i82542 and the i82543 and later chips.
   6578 	 */
   6579 	if (sc->sc_type < WM_T_82543)
   6580 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6581 	else
   6582 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6583 
   6584 	wm_init_tx_descs(sc, txq);
   6585 	wm_init_tx_regs(sc, wmq, txq);
   6586 	wm_init_tx_buffer(sc, txq);
   6587 }
   6588 
   6589 static void
   6590 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6591     struct wm_rxqueue *rxq)
   6592 {
   6593 
   6594 	KASSERT(mutex_owned(rxq->rxq_lock));
   6595 
   6596 	/*
   6597 	 * Initialize the receive descriptor and receive job
   6598 	 * descriptor rings.
   6599 	 */
   6600 	if (sc->sc_type < WM_T_82543) {
   6601 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6602 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6603 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6604 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6605 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6606 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6607 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6608 
   6609 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6610 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6611 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6612 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6613 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6614 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6615 	} else {
   6616 		int qid = wmq->wmq_id;
   6617 
   6618 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6619 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6620 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6621 
   6622 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6623 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6624 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6625 
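        			/*
        			 * SRRCTL.BSIZEPKT is programmed in units of
        			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, hence
        			 * the check above that MCLBYTES is a multiple
        			 * of that unit.
        			 */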
   6626 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6627 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6628 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6629 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6630 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6631 			    | RXDCTL_WTHRESH(1));
   6632 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6633 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6634 		} else {
   6635 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6636 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6637 			/* XXX should update with AIM? */
   6638 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6639 			/* RADV MUST hold the same value as RDTR */
   6640 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6641 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6642 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6643 		}
   6644 	}
   6645 }
   6646 
   6647 static int
   6648 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6649 {
   6650 	struct wm_rxsoft *rxs;
   6651 	int error, i;
   6652 
   6653 	KASSERT(mutex_owned(rxq->rxq_lock));
   6654 
   6655 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6656 		rxs = &rxq->rxq_soft[i];
   6657 		if (rxs->rxs_mbuf == NULL) {
   6658 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6659 				log(LOG_ERR, "%s: unable to allocate or map "
   6660 				    "rx buffer %d, error = %d\n",
   6661 				    device_xname(sc->sc_dev), i, error);
   6662 				/*
   6663 				 * XXX Should attempt to run with fewer receive
   6664 				 * XXX buffers instead of just failing.
   6665 				 */
   6666 				wm_rxdrain(rxq);
   6667 				return ENOMEM;
   6668 			}
   6669 		} else {
   6670 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6671 				wm_init_rxdesc(rxq, i);
   6672 			/*
   6673 			 * On 82575 and newer devices, the RX descriptors
   6674 			 * must be initialized after RCTL.EN is set in
   6675 			 * wm_set_filter().
   6676 			 */
   6677 		}
   6678 	}
   6679 	rxq->rxq_ptr = 0;
   6680 	rxq->rxq_discard = 0;
   6681 	WM_RXCHAIN_RESET(rxq);
   6682 
   6683 	return 0;
   6684 }
   6685 
   6686 static int
   6687 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6688     struct wm_rxqueue *rxq)
   6689 {
   6690 
   6691 	KASSERT(mutex_owned(rxq->rxq_lock));
   6692 
   6693 	/*
   6694 	 * Set up some register offsets that are different between
   6695 	 * the i82542 and the i82543 and later chips.
   6696 	 */
   6697 	if (sc->sc_type < WM_T_82543)
   6698 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6699 	else
   6700 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6701 
   6702 	wm_init_rx_regs(sc, wmq, rxq);
   6703 	return wm_init_rx_buffer(sc, rxq);
   6704 }
   6705 
   6706 /*
   6707  * wm_init_txrx_queues:
   6708  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
   6709  */
   6710 static int
   6711 wm_init_txrx_queues(struct wm_softc *sc)
   6712 {
   6713 	int i, error = 0;
   6714 
   6715 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6716 		device_xname(sc->sc_dev), __func__));
   6717 
   6718 	for (i = 0; i < sc->sc_nqueues; i++) {
   6719 		struct wm_queue *wmq = &sc->sc_queue[i];
   6720 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6721 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6722 
   6723 		/*
   6724 		 * TODO
   6725 		 * Currently, a constant value is used instead of AIM
   6726 		 * (adaptive interrupt moderation).  Furthermore, the interrupt
   6727 		 * interval for multiqueue operation (polling mode) is smaller
   6728 		 * than the default value.  More tuning and AIM are required.
   6729 		 */
   6730 		if (wm_is_using_multiqueue(sc))
   6731 			wmq->wmq_itr = 50;
   6732 		else
   6733 			wmq->wmq_itr = sc->sc_itr_init;
   6734 		wmq->wmq_set_itr = true;
   6735 
   6736 		mutex_enter(txq->txq_lock);
   6737 		wm_init_tx_queue(sc, wmq, txq);
   6738 		mutex_exit(txq->txq_lock);
   6739 
   6740 		mutex_enter(rxq->rxq_lock);
   6741 		error = wm_init_rx_queue(sc, wmq, rxq);
   6742 		mutex_exit(rxq->rxq_lock);
   6743 		if (error)
   6744 			break;
   6745 	}
   6746 
   6747 	return error;
   6748 }
   6749 
   6750 /*
   6751  * wm_tx_offload:
   6752  *
   6753  *	Set up TCP/IP checksumming parameters for the
   6754  *	specified packet.
   6755  */
   6756 static int
   6757 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6758     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6759 {
   6760 	struct mbuf *m0 = txs->txs_mbuf;
   6761 	struct livengood_tcpip_ctxdesc *t;
   6762 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6763 	uint32_t ipcse;
   6764 	struct ether_header *eh;
   6765 	int offset, iphl;
   6766 	uint8_t fields;
   6767 
   6768 	/*
   6769 	 * XXX It would be nice if the mbuf pkthdr had offset
   6770 	 * fields for the protocol headers.
   6771 	 */
   6772 
   6773 	eh = mtod(m0, struct ether_header *);
   6774 	switch (htons(eh->ether_type)) {
   6775 	case ETHERTYPE_IP:
   6776 	case ETHERTYPE_IPV6:
   6777 		offset = ETHER_HDR_LEN;
   6778 		break;
   6779 
   6780 	case ETHERTYPE_VLAN:
   6781 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6782 		break;
   6783 
   6784 	default:
   6785 		/*
   6786 		 * Don't support this protocol or encapsulation.
   6787 		 */
   6788 		*fieldsp = 0;
   6789 		*cmdp = 0;
   6790 		return 0;
   6791 	}
   6792 
   6793 	if ((m0->m_pkthdr.csum_flags &
   6794 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6795 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6796 	} else {
   6797 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6798 	}
   6799 	ipcse = offset + iphl - 1;
   6800 
   6801 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6802 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6803 	seg = 0;
   6804 	fields = 0;
   6805 
   6806 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6807 		int hlen = offset + iphl;
   6808 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6809 
   6810 		if (__predict_false(m0->m_len <
   6811 				    (hlen + sizeof(struct tcphdr)))) {
   6812 			/*
   6813 			 * TCP/IP headers are not in the first mbuf; we need
   6814 			 * to do this the slow and painful way.  Let's just
   6815 			 * hope this doesn't happen very often.
   6816 			 */
   6817 			struct tcphdr th;
   6818 
   6819 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6820 
   6821 			m_copydata(m0, hlen, sizeof(th), &th);
   6822 			if (v4) {
   6823 				struct ip ip;
   6824 
   6825 				m_copydata(m0, offset, sizeof(ip), &ip);
   6826 				ip.ip_len = 0;
   6827 				m_copyback(m0,
   6828 				    offset + offsetof(struct ip, ip_len),
   6829 				    sizeof(ip.ip_len), &ip.ip_len);
   6830 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6831 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6832 			} else {
   6833 				struct ip6_hdr ip6;
   6834 
   6835 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6836 				ip6.ip6_plen = 0;
   6837 				m_copyback(m0,
   6838 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6839 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6840 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6841 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6842 			}
   6843 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6844 			    sizeof(th.th_sum), &th.th_sum);
   6845 
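        			/*
        			 * th_off counts 32-bit words; the shift
        			 * converts it to the TCP header length in
        			 * bytes.
        			 */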
   6846 			hlen += th.th_off << 2;
   6847 		} else {
   6848 			/*
   6849 			 * TCP/IP headers are in the first mbuf; we can do
   6850 			 * this the easy way.
   6851 			 */
   6852 			struct tcphdr *th;
   6853 
   6854 			if (v4) {
   6855 				struct ip *ip =
   6856 				    (void *)(mtod(m0, char *) + offset);
   6857 				th = (void *)(mtod(m0, char *) + hlen);
   6858 
   6859 				ip->ip_len = 0;
   6860 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6861 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6862 			} else {
   6863 				struct ip6_hdr *ip6 =
   6864 				    (void *)(mtod(m0, char *) + offset);
   6865 				th = (void *)(mtod(m0, char *) + hlen);
   6866 
   6867 				ip6->ip6_plen = 0;
   6868 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6869 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6870 			}
   6871 			hlen += th->th_off << 2;
   6872 		}
   6873 
   6874 		if (v4) {
   6875 			WM_Q_EVCNT_INCR(txq, txtso);
   6876 			cmdlen |= WTX_TCPIP_CMD_IP;
   6877 		} else {
   6878 			WM_Q_EVCNT_INCR(txq, txtso6);
   6879 			ipcse = 0;
   6880 		}
   6881 		cmd |= WTX_TCPIP_CMD_TSE;
   6882 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6883 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6884 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6885 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6886 	}
   6887 
   6888 	/*
   6889 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6890 	 * offload feature, if we load the context descriptor, we
   6891 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6892 	 */
   6893 
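        	/*
        	 * In the context descriptor, IPCSS/IPCSO/IPCSE give the start,
        	 * checksum-field and end offsets for the IP checksum; TUCSS/
        	 * TUCSO/TUCSE play the same roles for the TCP/UDP checksum.
        	 */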
   6894 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6895 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6896 	    WTX_TCPIP_IPCSE(ipcse);
   6897 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6898 		WM_Q_EVCNT_INCR(txq, txipsum);
   6899 		fields |= WTX_IXSM;
   6900 	}
   6901 
   6902 	offset += iphl;
   6903 
   6904 	if (m0->m_pkthdr.csum_flags &
   6905 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6906 		WM_Q_EVCNT_INCR(txq, txtusum);
   6907 		fields |= WTX_TXSM;
   6908 		tucs = WTX_TCPIP_TUCSS(offset) |
   6909 		    WTX_TCPIP_TUCSO(offset +
   6910 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6911 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6912 	} else if ((m0->m_pkthdr.csum_flags &
   6913 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6914 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6915 		fields |= WTX_TXSM;
   6916 		tucs = WTX_TCPIP_TUCSS(offset) |
   6917 		    WTX_TCPIP_TUCSO(offset +
   6918 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6919 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6920 	} else {
   6921 		/* Just initialize it to a valid TCP context. */
   6922 		tucs = WTX_TCPIP_TUCSS(offset) |
   6923 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6924 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6925 	}
   6926 
   6927 	/*
   6928 	 * We don't have to write a context descriptor for every packet,
   6929 	 * except on the 82574.  On the 82574, the context descriptor must
   6930 	 * be written for every packet when two descriptor queues are used.
   6931 	 * Writing a context descriptor for every packet adds overhead,
   6932 	 * but it does not cause problems.
   6933 	 */
   6934 	/* Fill in the context descriptor. */
   6935 	t = (struct livengood_tcpip_ctxdesc *)
   6936 	    &txq->txq_descs[txq->txq_next];
   6937 	t->tcpip_ipcs = htole32(ipcs);
   6938 	t->tcpip_tucs = htole32(tucs);
   6939 	t->tcpip_cmdlen = htole32(cmdlen);
   6940 	t->tcpip_seg = htole32(seg);
   6941 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6942 
   6943 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6944 	txs->txs_ndesc++;
   6945 
   6946 	*cmdp = cmd;
   6947 	*fieldsp = fields;
   6948 
   6949 	return 0;
   6950 }
   6951 
   6952 static inline int
   6953 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6954 {
   6955 	struct wm_softc *sc = ifp->if_softc;
   6956 	u_int cpuid = cpu_index(curcpu());
   6957 
   6958 	/*
   6959 	 * Currently, a simple CPU-based distribution strategy.
   6960 	 * TODO:
   6961 	 * distribute by flowid (RSS hash value); see the sketch below.
   6962 	 */
   6963 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6964 }
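
        /*
         * A sketch of the flowid-based selection mentioned in the TODO
         * above (hypothetical: it assumes the stack has stored a usable
         * RSS hash for the mbuf in a field called "m_flowid" here; no
         * such field is guaranteed to exist):
         *
         *	static inline int
         *	wm_select_txqueue_by_flowid(struct wm_softc *sc, struct mbuf *m)
         *	{
         *		return m->m_flowid % sc->sc_nqueues;
         *	}
         */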
   6965 
   6966 /*
   6967  * wm_start:		[ifnet interface function]
   6968  *
   6969  *	Start packet transmission on the interface.
   6970  */
   6971 static void
   6972 wm_start(struct ifnet *ifp)
   6973 {
   6974 	struct wm_softc *sc = ifp->if_softc;
   6975 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6976 
   6977 #ifdef WM_MPSAFE
   6978 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6979 #endif
   6980 	/*
   6981 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6982 	 */
   6983 
   6984 	mutex_enter(txq->txq_lock);
   6985 	if (!txq->txq_stopping)
   6986 		wm_start_locked(ifp);
   6987 	mutex_exit(txq->txq_lock);
   6988 }
   6989 
   6990 static void
   6991 wm_start_locked(struct ifnet *ifp)
   6992 {
   6993 	struct wm_softc *sc = ifp->if_softc;
   6994 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6995 
   6996 	wm_send_common_locked(ifp, txq, false);
   6997 }
   6998 
   6999 static int
   7000 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7001 {
   7002 	int qid;
   7003 	struct wm_softc *sc = ifp->if_softc;
   7004 	struct wm_txqueue *txq;
   7005 
   7006 	qid = wm_select_txqueue(ifp, m);
   7007 	txq = &sc->sc_queue[qid].wmq_txq;
   7008 
   7009 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7010 		m_freem(m);
   7011 		WM_Q_EVCNT_INCR(txq, txdrop);
   7012 		return ENOBUFS;
   7013 	}
   7014 
   7015 	/*
   7016 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7017 	 */
   7018 	ifp->if_obytes += m->m_pkthdr.len;
   7019 	if (m->m_flags & M_MCAST)
   7020 		ifp->if_omcasts++;
   7021 
   7022 	if (mutex_tryenter(txq->txq_lock)) {
   7023 		if (!txq->txq_stopping)
   7024 			wm_transmit_locked(ifp, txq);
   7025 		mutex_exit(txq->txq_lock);
   7026 	}
   7027 
   7028 	return 0;
   7029 }
   7030 
   7031 static void
   7032 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7033 {
   7034 
   7035 	wm_send_common_locked(ifp, txq, true);
   7036 }
   7037 
   7038 static void
   7039 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7040     bool is_transmit)
   7041 {
   7042 	struct wm_softc *sc = ifp->if_softc;
   7043 	struct mbuf *m0;
   7044 	struct m_tag *mtag;
   7045 	struct wm_txsoft *txs;
   7046 	bus_dmamap_t dmamap;
   7047 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7048 	bus_addr_t curaddr;
   7049 	bus_size_t seglen, curlen;
   7050 	uint32_t cksumcmd;
   7051 	uint8_t cksumfields;
   7052 
   7053 	KASSERT(mutex_owned(txq->txq_lock));
   7054 
   7055 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7056 		return;
   7057 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7058 		return;
   7059 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7060 		return;
   7061 
   7062 	/* Remember the previous number of free descriptors. */
   7063 	ofree = txq->txq_free;
   7064 
   7065 	/*
   7066 	 * Loop through the send queue, setting up transmit descriptors
   7067 	 * until we drain the queue, or use up all available transmit
   7068 	 * descriptors.
   7069 	 */
   7070 	for (;;) {
   7071 		m0 = NULL;
   7072 
   7073 		/* Get a work queue entry. */
   7074 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7075 			wm_txeof(sc, txq);
   7076 			if (txq->txq_sfree == 0) {
   7077 				DPRINTF(WM_DEBUG_TX,
   7078 				    ("%s: TX: no free job descriptors\n",
   7079 					device_xname(sc->sc_dev)));
   7080 				WM_Q_EVCNT_INCR(txq, txsstall);
   7081 				break;
   7082 			}
   7083 		}
   7084 
   7085 		/* Grab a packet off the queue. */
   7086 		if (is_transmit)
   7087 			m0 = pcq_get(txq->txq_interq);
   7088 		else
   7089 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7090 		if (m0 == NULL)
   7091 			break;
   7092 
   7093 		DPRINTF(WM_DEBUG_TX,
   7094 		    ("%s: TX: have packet to transmit: %p\n",
   7095 		    device_xname(sc->sc_dev), m0));
   7096 
   7097 		txs = &txq->txq_soft[txq->txq_snext];
   7098 		dmamap = txs->txs_dmamap;
   7099 
   7100 		use_tso = (m0->m_pkthdr.csum_flags &
   7101 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7102 
   7103 		/*
   7104 		 * So says the Linux driver:
   7105 		 * The controller does a simple calculation to make sure
   7106 		 * there is enough room in the FIFO before initiating the
   7107 		 * DMA for each buffer.  The calc is:
   7108 		 *	4 = ceil(buffer len / MSS)
   7109 		 * To make sure we don't overrun the FIFO, adjust the max
   7110 		 * buffer len if the MSS drops.
   7111 		 */
   7112 		dmamap->dm_maxsegsz =
   7113 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7114 		    ? m0->m_pkthdr.segsz << 2
   7115 		    : WTX_MAX_LEN;
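        		/*
        		 * For example, with an MSS of 1448 the per-segment
        		 * limit becomes 4 * 1448 = 5792 bytes.
        		 */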
   7116 
   7117 		/*
   7118 		 * Load the DMA map.  If this fails, the packet either
   7119 		 * didn't fit in the allotted number of segments, or we
   7120 		 * were short on resources.  For the too-many-segments
   7121 		 * case, we simply report an error and drop the packet,
   7122 		 * since we can't sanely copy a jumbo packet to a single
   7123 		 * buffer.
   7124 		 */
   7125 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7126 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7127 		if (error) {
   7128 			if (error == EFBIG) {
   7129 				WM_Q_EVCNT_INCR(txq, txdrop);
   7130 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7131 				    "DMA segments, dropping...\n",
   7132 				    device_xname(sc->sc_dev));
   7133 				wm_dump_mbuf_chain(sc, m0);
   7134 				m_freem(m0);
   7135 				continue;
   7136 			}
   7137 			/* Short on resources, just stop for now. */
   7138 			DPRINTF(WM_DEBUG_TX,
   7139 			    ("%s: TX: dmamap load failed: %d\n",
   7140 			    device_xname(sc->sc_dev), error));
   7141 			break;
   7142 		}
   7143 
   7144 		segs_needed = dmamap->dm_nsegs;
   7145 		if (use_tso) {
   7146 			/* For sentinel descriptor; see below. */
   7147 			segs_needed++;
   7148 		}
   7149 
   7150 		/*
   7151 		 * Ensure we have enough descriptors free to describe
   7152 		 * the packet.  Note, we always reserve one descriptor
   7153 		 * at the end of the ring due to the semantics of the
   7154 		 * TDT register, plus one more in the event we need
   7155 		 * to load offload context.
   7156 		 */
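        		/*
        		 * For example, a 4-segment packet needs 4 data
        		 * descriptors and must leave the 2 reserved slots
        		 * free, so txq_free must be at least 6 here.
        		 */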
   7157 		if (segs_needed > txq->txq_free - 2) {
   7158 			/*
   7159 			 * Not enough free descriptors to transmit this
   7160 			 * packet.  We haven't committed anything yet,
   7161 			 * so just unload the DMA map, put the packet
   7162 			 * back on the queue, and punt.  Notify the upper
   7163 			 * layer that there are no more slots left.
   7164 			 */
   7165 			DPRINTF(WM_DEBUG_TX,
   7166 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7167 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7168 			    segs_needed, txq->txq_free - 1));
   7169 			if (!is_transmit)
   7170 				ifp->if_flags |= IFF_OACTIVE;
   7171 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7172 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7173 			WM_Q_EVCNT_INCR(txq, txdstall);
   7174 			break;
   7175 		}
   7176 
   7177 		/*
   7178 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7179 		 * once we know we can transmit the packet, since we
   7180 		 * do some internal FIFO space accounting here.
   7181 		 */
   7182 		if (sc->sc_type == WM_T_82547 &&
   7183 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7184 			DPRINTF(WM_DEBUG_TX,
   7185 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7186 			    device_xname(sc->sc_dev)));
   7187 			if (!is_transmit)
   7188 				ifp->if_flags |= IFF_OACTIVE;
   7189 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7190 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7191 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7192 			break;
   7193 		}
   7194 
   7195 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7196 
   7197 		DPRINTF(WM_DEBUG_TX,
   7198 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7199 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7200 
   7201 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7202 
   7203 		/*
   7204 		 * Store a pointer to the packet so that we can free it
   7205 		 * later.
   7206 		 *
   7207 		 * Initially, we consider the number of descriptors the
   7208 		 * packet uses the number of DMA segments.  This may be
   7209 		 * incremented by 1 if we do checksum offload (a descriptor
   7210 		 * is used to set the checksum context).
   7211 		 */
   7212 		txs->txs_mbuf = m0;
   7213 		txs->txs_firstdesc = txq->txq_next;
   7214 		txs->txs_ndesc = segs_needed;
   7215 
   7216 		/* Set up offload parameters for this packet. */
   7217 		if (m0->m_pkthdr.csum_flags &
   7218 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7219 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7220 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7221 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7222 					  &cksumfields) != 0) {
   7223 				/* Error message already displayed. */
   7224 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7225 				continue;
   7226 			}
   7227 		} else {
   7228 			cksumcmd = 0;
   7229 			cksumfields = 0;
   7230 		}
   7231 
   7232 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7233 
   7234 		/* Sync the DMA map. */
   7235 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7236 		    BUS_DMASYNC_PREWRITE);
   7237 
   7238 		/* Initialize the transmit descriptor. */
   7239 		for (nexttx = txq->txq_next, seg = 0;
   7240 		     seg < dmamap->dm_nsegs; seg++) {
   7241 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7242 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7243 			     seglen != 0;
   7244 			     curaddr += curlen, seglen -= curlen,
   7245 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7246 				curlen = seglen;
   7247 
   7248 				/*
   7249 				 * So says the Linux driver:
   7250 				 * Work around for premature descriptor
   7251 				 * write-backs in TSO mode.  Append a
   7252 				 * 4-byte sentinel descriptor.
   7253 				 */
   7254 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7255 				    curlen > 8)
   7256 					curlen -= 4;
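        				/*
        				 * With curlen shortened, the enclosing
        				 * loop runs one extra iteration and
        				 * emits the 4-byte sentinel descriptor
        				 * counted in segs_needed above.
        				 */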
   7257 
   7258 				wm_set_dma_addr(
   7259 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7260 				txq->txq_descs[nexttx].wtx_cmdlen
   7261 				    = htole32(cksumcmd | curlen);
   7262 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7263 				    = 0;
   7264 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7265 				    = cksumfields;
   7266 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7267 				lasttx = nexttx;
   7268 
   7269 				DPRINTF(WM_DEBUG_TX,
   7270 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7271 				     "len %#04zx\n",
   7272 				    device_xname(sc->sc_dev), nexttx,
   7273 				    (uint64_t)curaddr, curlen));
   7274 			}
   7275 		}
   7276 
   7277 		KASSERT(lasttx != -1);
   7278 
   7279 		/*
   7280 		 * Set up the command byte on the last descriptor of
   7281 		 * the packet.  If we're in the interrupt delay window,
   7282 		 * delay the interrupt.
   7283 		 */
   7284 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7285 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7286 
   7287 		/*
   7288 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7289 		 * up the descriptor to encapsulate the packet for us.
   7290 		 *
   7291 		 * This is only valid on the last descriptor of the packet.
   7292 		 */
   7293 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7294 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7295 			    htole32(WTX_CMD_VLE);
   7296 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7297 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7298 		}
   7299 
   7300 		txs->txs_lastdesc = lasttx;
   7301 
   7302 		DPRINTF(WM_DEBUG_TX,
   7303 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7304 		    device_xname(sc->sc_dev),
   7305 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7306 
   7307 		/* Sync the descriptors we're using. */
   7308 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7309 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7310 
   7311 		/* Give the packet to the chip. */
   7312 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7313 
   7314 		DPRINTF(WM_DEBUG_TX,
   7315 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7316 
   7317 		DPRINTF(WM_DEBUG_TX,
   7318 		    ("%s: TX: finished transmitting packet, job %d\n",
   7319 		    device_xname(sc->sc_dev), txq->txq_snext));
   7320 
   7321 		/* Advance the tx pointer. */
   7322 		txq->txq_free -= txs->txs_ndesc;
   7323 		txq->txq_next = nexttx;
   7324 
   7325 		txq->txq_sfree--;
   7326 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7327 
   7328 		/* Pass the packet to any BPF listeners. */
   7329 		bpf_mtap(ifp, m0);
   7330 	}
   7331 
   7332 	if (m0 != NULL) {
   7333 		if (!is_transmit)
   7334 			ifp->if_flags |= IFF_OACTIVE;
   7335 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7336 		WM_Q_EVCNT_INCR(txq, txdrop);
   7337 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7338 			__func__));
   7339 		m_freem(m0);
   7340 	}
   7341 
   7342 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7343 		/* No more slots; notify upper layer. */
   7344 		if (!is_transmit)
   7345 			ifp->if_flags |= IFF_OACTIVE;
   7346 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7347 	}
   7348 
   7349 	if (txq->txq_free != ofree) {
   7350 		/* Set a watchdog timer in case the chip flakes out. */
   7351 		ifp->if_timer = 5;
   7352 	}
   7353 }
   7354 
   7355 /*
   7356  * wm_nq_tx_offload:
   7357  *
   7358  *	Set up TCP/IP checksumming parameters for the
   7359  *	specified packet, for NEWQUEUE devices
   7360  */
   7361 static int
   7362 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7363     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7364 {
   7365 	struct mbuf *m0 = txs->txs_mbuf;
   7366 	struct m_tag *mtag;
   7367 	uint32_t vl_len, mssidx, cmdc;
   7368 	struct ether_header *eh;
   7369 	int offset, iphl;
   7370 
   7371 	/*
   7372 	 * XXX It would be nice if the mbuf pkthdr had offset
   7373 	 * fields for the protocol headers.
   7374 	 */
   7375 	*cmdlenp = 0;
   7376 	*fieldsp = 0;
   7377 
   7378 	eh = mtod(m0, struct ether_header *);
   7379 	switch (htons(eh->ether_type)) {
   7380 	case ETHERTYPE_IP:
   7381 	case ETHERTYPE_IPV6:
   7382 		offset = ETHER_HDR_LEN;
   7383 		break;
   7384 
   7385 	case ETHERTYPE_VLAN:
   7386 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7387 		break;
   7388 
   7389 	default:
   7390 		/* Don't support this protocol or encapsulation. */
   7391 		*do_csum = false;
   7392 		return 0;
   7393 	}
   7394 	*do_csum = true;
   7395 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7396 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7397 
   7398 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7399 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7400 
   7401 	if ((m0->m_pkthdr.csum_flags &
   7402 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7403 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7404 	} else {
   7405 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7406 	}
   7407 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7408 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7409 
   7410 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7411 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7412 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7413 		*cmdlenp |= NQTX_CMD_VLE;
   7414 	}
   7415 
   7416 	mssidx = 0;
   7417 
   7418 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7419 		int hlen = offset + iphl;
   7420 		int tcp_hlen;
   7421 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7422 
   7423 		if (__predict_false(m0->m_len <
   7424 				    (hlen + sizeof(struct tcphdr)))) {
   7425 			/*
   7426 			 * TCP/IP headers are not in the first mbuf; we need
   7427 			 * to do this the slow and painful way.  Let's just
   7428 			 * hope this doesn't happen very often.
   7429 			 */
   7430 			struct tcphdr th;
   7431 
   7432 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7433 
   7434 			m_copydata(m0, hlen, sizeof(th), &th);
   7435 			if (v4) {
   7436 				struct ip ip;
   7437 
   7438 				m_copydata(m0, offset, sizeof(ip), &ip);
   7439 				ip.ip_len = 0;
   7440 				m_copyback(m0,
   7441 				    offset + offsetof(struct ip, ip_len),
   7442 				    sizeof(ip.ip_len), &ip.ip_len);
   7443 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7444 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7445 			} else {
   7446 				struct ip6_hdr ip6;
   7447 
   7448 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7449 				ip6.ip6_plen = 0;
   7450 				m_copyback(m0,
   7451 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7452 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7453 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7454 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7455 			}
   7456 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7457 			    sizeof(th.th_sum), &th.th_sum);
   7458 
   7459 			tcp_hlen = th.th_off << 2;
   7460 		} else {
   7461 			/*
   7462 			 * TCP/IP headers are in the first mbuf; we can do
   7463 			 * this the easy way.
   7464 			 */
   7465 			struct tcphdr *th;
   7466 
   7467 			if (v4) {
   7468 				struct ip *ip =
   7469 				    (void *)(mtod(m0, char *) + offset);
   7470 				th = (void *)(mtod(m0, char *) + hlen);
   7471 
   7472 				ip->ip_len = 0;
   7473 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7474 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7475 			} else {
   7476 				struct ip6_hdr *ip6 =
   7477 				    (void *)(mtod(m0, char *) + offset);
   7478 				th = (void *)(mtod(m0, char *) + hlen);
   7479 
   7480 				ip6->ip6_plen = 0;
   7481 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7482 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7483 			}
   7484 			tcp_hlen = th->th_off << 2;
   7485 		}
   7486 		hlen += tcp_hlen;
   7487 		*cmdlenp |= NQTX_CMD_TSE;
   7488 
   7489 		if (v4) {
   7490 			WM_Q_EVCNT_INCR(txq, txtso);
   7491 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7492 		} else {
   7493 			WM_Q_EVCNT_INCR(txq, txtso6);
   7494 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7495 		}
   7496 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7497 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7498 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7499 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7500 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7501 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7502 	} else {
   7503 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7504 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7505 	}
   7506 
   7507 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7508 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7509 		cmdc |= NQTXC_CMD_IP4;
   7510 	}
   7511 
   7512 	if (m0->m_pkthdr.csum_flags &
   7513 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7514 		WM_Q_EVCNT_INCR(txq, txtusum);
   7515 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7516 			cmdc |= NQTXC_CMD_TCP;
   7517 		} else {
   7518 			cmdc |= NQTXC_CMD_UDP;
   7519 		}
   7520 		cmdc |= NQTXC_CMD_IP4;
   7521 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7522 	}
   7523 	if (m0->m_pkthdr.csum_flags &
   7524 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7525 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7526 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7527 			cmdc |= NQTXC_CMD_TCP;
   7528 		} else {
   7529 			cmdc |= NQTXC_CMD_UDP;
   7530 		}
   7531 		cmdc |= NQTXC_CMD_IP6;
   7532 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7533 	}
   7534 
   7535 	/*
   7536 	 * We don't have to write a context descriptor for every packet on
   7537 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
   7538 	 * I354, I210 and I211.  Writing it once per Tx queue is enough
   7539 	 * for these controllers.
   7540 	 * Writing a context descriptor for every packet adds overhead,
   7541 	 * but it does not cause problems.
   7542 	 */
   7543 	/* Fill in the context descriptor. */
   7544 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7545 	    htole32(vl_len);
   7546 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7547 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7548 	    htole32(cmdc);
   7549 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7550 	    htole32(mssidx);
   7551 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7552 	DPRINTF(WM_DEBUG_TX,
   7553 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7554 	    txq->txq_next, 0, vl_len));
   7555 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7556 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7557 	txs->txs_ndesc++;
   7558 	return 0;
   7559 }
   7560 
   7561 /*
   7562  * wm_nq_start:		[ifnet interface function]
   7563  *
   7564  *	Start packet transmission on the interface for NEWQUEUE devices
   7565  */
   7566 static void
   7567 wm_nq_start(struct ifnet *ifp)
   7568 {
   7569 	struct wm_softc *sc = ifp->if_softc;
   7570 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7571 
   7572 #ifdef WM_MPSAFE
   7573 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7574 #endif
   7575 	/*
   7576 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7577 	 */
   7578 
   7579 	mutex_enter(txq->txq_lock);
   7580 	if (!txq->txq_stopping)
   7581 		wm_nq_start_locked(ifp);
   7582 	mutex_exit(txq->txq_lock);
   7583 }
   7584 
   7585 static void
   7586 wm_nq_start_locked(struct ifnet *ifp)
   7587 {
   7588 	struct wm_softc *sc = ifp->if_softc;
   7589 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7590 
   7591 	wm_nq_send_common_locked(ifp, txq, false);
   7592 }
   7593 
   7594 static int
   7595 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7596 {
   7597 	int qid;
   7598 	struct wm_softc *sc = ifp->if_softc;
   7599 	struct wm_txqueue *txq;
   7600 
   7601 	qid = wm_select_txqueue(ifp, m);
   7602 	txq = &sc->sc_queue[qid].wmq_txq;
   7603 
   7604 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7605 		m_freem(m);
   7606 		WM_Q_EVCNT_INCR(txq, txdrop);
   7607 		return ENOBUFS;
   7608 	}
   7609 
   7610 	/*
   7611 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7612 	 */
   7613 	ifp->if_obytes += m->m_pkthdr.len;
   7614 	if (m->m_flags & M_MCAST)
   7615 		ifp->if_omcasts++;
   7616 
   7617 	/*
   7618 	 * There are two situations in which this mutex_tryenter() can
   7619 	 * fail at run time:
   7620 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   7621 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
   7622 	 * In case (1), the last packet enqueued to txq->txq_interq is
   7623 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
   7624 	 * In case (2), the last packet is likewise dequeued by
   7625 	 * wm_deferred_start_locked(), so it does not get stuck either.
   7626 	 */
   7627 	if (mutex_tryenter(txq->txq_lock)) {
   7628 		if (!txq->txq_stopping)
   7629 			wm_nq_transmit_locked(ifp, txq);
   7630 		mutex_exit(txq->txq_lock);
   7631 	}
   7632 
   7633 	return 0;
   7634 }
   7635 
   7636 static void
   7637 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7638 {
   7639 
   7640 	wm_nq_send_common_locked(ifp, txq, true);
   7641 }
   7642 
   7643 static void
   7644 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7645     bool is_transmit)
   7646 {
   7647 	struct wm_softc *sc = ifp->if_softc;
   7648 	struct mbuf *m0;
   7649 	struct m_tag *mtag;
   7650 	struct wm_txsoft *txs;
   7651 	bus_dmamap_t dmamap;
   7652 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7653 	bool do_csum, sent;
   7654 
   7655 	KASSERT(mutex_owned(txq->txq_lock));
   7656 
   7657 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7658 		return;
   7659 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7660 		return;
   7661 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7662 		return;
   7663 
   7664 	sent = false;
   7665 
   7666 	/*
   7667 	 * Loop through the send queue, setting up transmit descriptors
   7668 	 * until we drain the queue, or use up all available transmit
   7669 	 * descriptors.
   7670 	 */
   7671 	for (;;) {
   7672 		m0 = NULL;
   7673 
   7674 		/* Get a work queue entry. */
   7675 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7676 			wm_txeof(sc, txq);
   7677 			if (txq->txq_sfree == 0) {
   7678 				DPRINTF(WM_DEBUG_TX,
   7679 				    ("%s: TX: no free job descriptors\n",
   7680 					device_xname(sc->sc_dev)));
   7681 				WM_Q_EVCNT_INCR(txq, txsstall);
   7682 				break;
   7683 			}
   7684 		}
   7685 
   7686 		/* Grab a packet off the queue. */
   7687 		if (is_transmit)
   7688 			m0 = pcq_get(txq->txq_interq);
   7689 		else
   7690 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7691 		if (m0 == NULL)
   7692 			break;
   7693 
   7694 		DPRINTF(WM_DEBUG_TX,
   7695 		    ("%s: TX: have packet to transmit: %p\n",
   7696 		    device_xname(sc->sc_dev), m0));
   7697 
   7698 		txs = &txq->txq_soft[txq->txq_snext];
   7699 		dmamap = txs->txs_dmamap;
   7700 
   7701 		/*
   7702 		 * Load the DMA map.  If this fails, the packet either
   7703 		 * didn't fit in the allotted number of segments, or we
   7704 		 * were short on resources.  For the too-many-segments
   7705 		 * case, we simply report an error and drop the packet,
   7706 		 * since we can't sanely copy a jumbo packet to a single
   7707 		 * buffer.
   7708 		 */
   7709 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7710 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7711 		if (error) {
   7712 			if (error == EFBIG) {
   7713 				WM_Q_EVCNT_INCR(txq, txdrop);
   7714 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7715 				    "DMA segments, dropping...\n",
   7716 				    device_xname(sc->sc_dev));
   7717 				wm_dump_mbuf_chain(sc, m0);
   7718 				m_freem(m0);
   7719 				continue;
   7720 			}
   7721 			/* Short on resources, just stop for now. */
   7722 			DPRINTF(WM_DEBUG_TX,
   7723 			    ("%s: TX: dmamap load failed: %d\n",
   7724 			    device_xname(sc->sc_dev), error));
   7725 			break;
   7726 		}
   7727 
   7728 		segs_needed = dmamap->dm_nsegs;
   7729 
   7730 		/*
   7731 		 * Ensure we have enough descriptors free to describe
   7732 		 * the packet.  Note, we always reserve one descriptor
   7733 		 * at the end of the ring due to the semantics of the
   7734 		 * TDT register, plus one more in the event we need
   7735 		 * to load offload context.
   7736 		 */
   7737 		if (segs_needed > txq->txq_free - 2) {
   7738 			/*
   7739 			 * Not enough free descriptors to transmit this
   7740 			 * packet.  We haven't committed anything yet,
   7741 			 * so just unload the DMA map, put the packet
   7742 			 * back on the queue, and punt.  Notify the upper
   7743 			 * layer that there are no more slots left.
   7744 			 */
   7745 			DPRINTF(WM_DEBUG_TX,
   7746 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7747 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7748 			    segs_needed, txq->txq_free - 1));
   7749 			if (!is_transmit)
   7750 				ifp->if_flags |= IFF_OACTIVE;
   7751 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7752 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7753 			WM_Q_EVCNT_INCR(txq, txdstall);
   7754 			break;
   7755 		}
   7756 
   7757 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7758 
   7759 		DPRINTF(WM_DEBUG_TX,
   7760 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7761 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7762 
   7763 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7764 
   7765 		/*
   7766 		 * Store a pointer to the packet so that we can free it
   7767 		 * later.
   7768 		 *
   7769 		 * Initially, we consider the number of descriptors the
   7770 		 * packet uses the number of DMA segments.  This may be
   7771 		 * incremented by 1 if we do checksum offload (a descriptor
   7772 		 * is used to set the checksum context).
   7773 		 */
   7774 		txs->txs_mbuf = m0;
   7775 		txs->txs_firstdesc = txq->txq_next;
   7776 		txs->txs_ndesc = segs_needed;
   7777 
   7778 		/* Set up offload parameters for this packet. */
   7779 		uint32_t cmdlen, fields, dcmdlen;
   7780 		if (m0->m_pkthdr.csum_flags &
   7781 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7782 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7783 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7784 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7785 			    &do_csum) != 0) {
   7786 				/* Error message already displayed. */
   7787 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7788 				continue;
   7789 			}
   7790 		} else {
   7791 			do_csum = false;
   7792 			cmdlen = 0;
   7793 			fields = 0;
   7794 		}
   7795 
   7796 		/* Sync the DMA map. */
   7797 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7798 		    BUS_DMASYNC_PREWRITE);
   7799 
   7800 		/* Initialize the first transmit descriptor. */
   7801 		nexttx = txq->txq_next;
   7802 		if (!do_csum) {
   7803 			/* setup a legacy descriptor */
   7804 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7805 			    dmamap->dm_segs[0].ds_addr);
   7806 			txq->txq_descs[nexttx].wtx_cmdlen =
   7807 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7808 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7809 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7810 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7811 			    NULL) {
   7812 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7813 				    htole32(WTX_CMD_VLE);
   7814 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7815 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7816 			} else {
   7817 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7818 			}
   7819 			dcmdlen = 0;
   7820 		} else {
   7821 			/* setup an advanced data descriptor */
   7822 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7823 			    htole64(dmamap->dm_segs[0].ds_addr);
   7824 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7825 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7826 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7827 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7828 			    htole32(fields);
   7829 			DPRINTF(WM_DEBUG_TX,
   7830 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7831 			    device_xname(sc->sc_dev), nexttx,
   7832 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7833 			DPRINTF(WM_DEBUG_TX,
   7834 			    ("\t 0x%08x%08x\n", fields,
   7835 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7836 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7837 		}
   7838 
   7839 		lasttx = nexttx;
   7840 		nexttx = WM_NEXTTX(txq, nexttx);
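        		/*
        		 * The dcmdlen chosen above tags the remaining data
        		 * descriptors as legacy (0) or advanced
        		 * (NQTX_DTYP_D | NQTX_CMD_DEXT) to match the first
        		 * descriptor's format.
        		 */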
   7841 		/*
   7842 		 * Fill in the next descriptors.  The legacy and advanced
   7843 		 * formats are the same here.
   7844 		 */
   7845 		for (seg = 1; seg < dmamap->dm_nsegs;
   7846 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7847 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7848 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7849 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7850 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7851 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7852 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7853 			lasttx = nexttx;
   7854 
   7855 			DPRINTF(WM_DEBUG_TX,
   7856 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7857 			     "len %#04zx\n",
   7858 			    device_xname(sc->sc_dev), nexttx,
   7859 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7860 			    dmamap->dm_segs[seg].ds_len));
   7861 		}
   7862 
   7863 		KASSERT(lasttx != -1);
   7864 
   7865 		/*
   7866 		 * Set up the command byte on the last descriptor of
   7867 		 * the packet.  If we're in the interrupt delay window,
   7868 		 * delay the interrupt.
   7869 		 */
   7870 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7871 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7872 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7873 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7874 
   7875 		txs->txs_lastdesc = lasttx;
   7876 
   7877 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7878 		    device_xname(sc->sc_dev),
   7879 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7880 
   7881 		/* Sync the descriptors we're using. */
   7882 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7883 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7884 
   7885 		/* Give the packet to the chip. */
   7886 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7887 		sent = true;
   7888 
   7889 		DPRINTF(WM_DEBUG_TX,
   7890 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7891 
   7892 		DPRINTF(WM_DEBUG_TX,
   7893 		    ("%s: TX: finished transmitting packet, job %d\n",
   7894 		    device_xname(sc->sc_dev), txq->txq_snext));
   7895 
   7896 		/* Advance the tx pointer. */
   7897 		txq->txq_free -= txs->txs_ndesc;
   7898 		txq->txq_next = nexttx;
   7899 
   7900 		txq->txq_sfree--;
   7901 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7902 
   7903 		/* Pass the packet to any BPF listeners. */
   7904 		bpf_mtap(ifp, m0);
   7905 	}
   7906 
   7907 	if (m0 != NULL) {
   7908 		if (!is_transmit)
   7909 			ifp->if_flags |= IFF_OACTIVE;
   7910 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7911 		WM_Q_EVCNT_INCR(txq, txdrop);
   7912 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7913 			__func__));
   7914 		m_freem(m0);
   7915 	}
   7916 
   7917 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7918 		/* No more slots; notify upper layer. */
   7919 		if (!is_transmit)
   7920 			ifp->if_flags |= IFF_OACTIVE;
   7921 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7922 	}
   7923 
   7924 	if (sent) {
   7925 		/* Set a watchdog timer in case the chip flakes out. */
   7926 		ifp->if_timer = 5;
   7927 	}
   7928 }
   7929 
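        /*
         * wm_deferred_start_locked:
         *
         *	Restart packet transmission from the softint handler once
         *	wm_txeof() has reclaimed descriptors.  Called with txq_lock
         *	held.
         */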
   7930 static void
   7931 wm_deferred_start_locked(struct wm_txqueue *txq)
   7932 {
   7933 	struct wm_softc *sc = txq->txq_sc;
   7934 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7935 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7936 	int qid = wmq->wmq_id;
   7937 
   7938 	KASSERT(mutex_owned(txq->txq_lock));
   7939 
    7940 	/* The caller holds and releases txq_lock; don't drop it here. */
    7941 	if (txq->txq_stopping)
    7942 		return;
   7944 
   7945 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7946 		/* XXX needed for ALTQ or single-CPU systems */
   7947 		if (qid == 0)
   7948 			wm_nq_start_locked(ifp);
   7949 		wm_nq_transmit_locked(ifp, txq);
   7950 	} else {
    7951 		/* XXX needed for ALTQ or single-CPU systems */
   7952 		if (qid == 0)
   7953 			wm_start_locked(ifp);
   7954 		wm_transmit_locked(ifp, txq);
   7955 	}
   7956 }
   7957 
   7958 /* Interrupt */
   7959 
   7960 /*
   7961  * wm_txeof:
   7962  *
   7963  *	Helper; handle transmit interrupts.
   7964  */
   7965 static int
   7966 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7967 {
   7968 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7969 	struct wm_txsoft *txs;
   7970 	bool processed = false;
   7971 	int count = 0;
   7972 	int i;
   7973 	uint8_t status;
   7974 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7975 
   7976 	KASSERT(mutex_owned(txq->txq_lock));
   7977 
   7978 	if (txq->txq_stopping)
   7979 		return 0;
   7980 
   7981 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7982 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7983 	if (wmq->wmq_id == 0)
   7984 		ifp->if_flags &= ~IFF_OACTIVE;
   7985 
   7986 	/*
   7987 	 * Go through the Tx list and free mbufs for those
   7988 	 * frames which have been transmitted.
   7989 	 */
   7990 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7991 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7992 		txs = &txq->txq_soft[i];
   7993 
   7994 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7995 			device_xname(sc->sc_dev), i));
   7996 
   7997 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7998 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7999 
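        		/*
        		 * The hardware sets the DD (descriptor done) bit in the
        		 * write-back status once it has finished with a
        		 * descriptor; if it is still clear, this job and all
        		 * later ones are still in flight.
        		 */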
   8000 		status =
   8001 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8002 		if ((status & WTX_ST_DD) == 0) {
   8003 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8004 			    BUS_DMASYNC_PREREAD);
   8005 			break;
   8006 		}
   8007 
   8008 		processed = true;
   8009 		count++;
   8010 		DPRINTF(WM_DEBUG_TX,
   8011 		    ("%s: TX: job %d done: descs %d..%d\n",
   8012 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8013 		    txs->txs_lastdesc));
   8014 
   8015 		/*
   8016 		 * XXX We should probably be using the statistics
   8017 		 * XXX registers, but I don't know if they exist
   8018 		 * XXX on chips before the i82544.
   8019 		 */
   8020 
   8021 #ifdef WM_EVENT_COUNTERS
   8022 		if (status & WTX_ST_TU)
   8023 			WM_Q_EVCNT_INCR(txq, tu);
   8024 #endif /* WM_EVENT_COUNTERS */
   8025 
   8026 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8027 			ifp->if_oerrors++;
   8028 			if (status & WTX_ST_LC)
   8029 				log(LOG_WARNING, "%s: late collision\n",
   8030 				    device_xname(sc->sc_dev));
   8031 			else if (status & WTX_ST_EC) {
   8032 				ifp->if_collisions += 16;
   8033 				log(LOG_WARNING, "%s: excessive collisions\n",
   8034 				    device_xname(sc->sc_dev));
   8035 			}
   8036 		} else
   8037 			ifp->if_opackets++;
   8038 
   8039 		txq->txq_packets++;
   8040 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8041 
   8042 		txq->txq_free += txs->txs_ndesc;
   8043 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8044 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8045 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8046 		m_freem(txs->txs_mbuf);
   8047 		txs->txs_mbuf = NULL;
   8048 	}
   8049 
   8050 	/* Update the dirty transmit buffer pointer. */
   8051 	txq->txq_sdirty = i;
   8052 	DPRINTF(WM_DEBUG_TX,
   8053 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8054 
   8055 	if (count != 0)
   8056 		rnd_add_uint32(&sc->rnd_source, count);
   8057 
   8058 	/*
   8059 	 * If there are no more pending transmissions, cancel the watchdog
   8060 	 * timer.
   8061 	 */
   8062 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8063 		ifp->if_timer = 0;
   8064 
   8065 	return processed;
   8066 }
   8067 
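        /*
         * The helpers below hide the three receive descriptor layouts used
         * by this driver: the legacy format (wrx_*), the 82574 extended
         * format (erx_*), and the advanced "newqueue" format (nqrx_*) used
         * by the 82575 and newer.
         */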
   8068 static inline uint32_t
   8069 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8070 {
   8071 	struct wm_softc *sc = rxq->rxq_sc;
   8072 
   8073 	if (sc->sc_type == WM_T_82574)
   8074 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8075 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8076 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8077 	else
   8078 		return rxq->rxq_descs[idx].wrx_status;
   8079 }
   8080 
   8081 static inline uint32_t
   8082 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8083 {
   8084 	struct wm_softc *sc = rxq->rxq_sc;
   8085 
   8086 	if (sc->sc_type == WM_T_82574)
   8087 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8088 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8089 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8090 	else
   8091 		return rxq->rxq_descs[idx].wrx_errors;
   8092 }
   8093 
   8094 static inline uint16_t
   8095 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8096 {
   8097 	struct wm_softc *sc = rxq->rxq_sc;
   8098 
   8099 	if (sc->sc_type == WM_T_82574)
   8100 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8101 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8102 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8103 	else
   8104 		return rxq->rxq_descs[idx].wrx_special;
   8105 }
   8106 
   8107 static inline int
   8108 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8109 {
   8110 	struct wm_softc *sc = rxq->rxq_sc;
   8111 
   8112 	if (sc->sc_type == WM_T_82574)
   8113 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8114 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8115 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8116 	else
   8117 		return rxq->rxq_descs[idx].wrx_len;
   8118 }
   8119 
   8120 #ifdef WM_DEBUG
   8121 static inline uint32_t
   8122 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8123 {
   8124 	struct wm_softc *sc = rxq->rxq_sc;
   8125 
   8126 	if (sc->sc_type == WM_T_82574)
   8127 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8128 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8129 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8130 	else
   8131 		return 0;
   8132 }
   8133 
   8134 static inline uint8_t
   8135 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8136 {
   8137 	struct wm_softc *sc = rxq->rxq_sc;
   8138 
   8139 	if (sc->sc_type == WM_T_82574)
   8140 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8141 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8142 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8143 	else
   8144 		return 0;
   8145 }
   8146 #endif /* WM_DEBUG */
   8147 
   8148 static inline bool
   8149 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8150     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8151 {
   8152 
   8153 	if (sc->sc_type == WM_T_82574)
   8154 		return (status & ext_bit) != 0;
   8155 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8156 		return (status & nq_bit) != 0;
   8157 	else
   8158 		return (status & legacy_bit) != 0;
   8159 }
   8160 
   8161 static inline bool
   8162 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8163     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8164 {
   8165 
   8166 	if (sc->sc_type == WM_T_82574)
   8167 		return (error & ext_bit) != 0;
   8168 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8169 		return (error & nq_bit) != 0;
   8170 	else
   8171 		return (error & legacy_bit) != 0;
   8172 }
   8173 
   8174 static inline bool
   8175 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8176 {
   8177 
   8178 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8179 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8180 		return true;
   8181 	else
   8182 		return false;
   8183 }
   8184 
   8185 static inline bool
   8186 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8187 {
   8188 	struct wm_softc *sc = rxq->rxq_sc;
   8189 
    8190 	/* XXX are error bits missing for the newqueue format? */
   8191 	if (wm_rxdesc_is_set_error(sc, errors,
   8192 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8193 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8194 		NQRXC_ERROR_RXE)) {
   8195 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8196 			log(LOG_WARNING, "%s: symbol error\n",
   8197 			    device_xname(sc->sc_dev));
   8198 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8199 			log(LOG_WARNING, "%s: receive sequence error\n",
   8200 			    device_xname(sc->sc_dev));
   8201 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8202 			log(LOG_WARNING, "%s: CRC error\n",
   8203 			    device_xname(sc->sc_dev));
   8204 		return true;
   8205 	}
   8206 
   8207 	return false;
   8208 }
   8209 
   8210 static inline bool
   8211 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8212 {
   8213 	struct wm_softc *sc = rxq->rxq_sc;
   8214 
   8215 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8216 		NQRXC_STATUS_DD)) {
   8217 		/* We have processed all of the receive descriptors. */
   8218 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8219 		return false;
   8220 	}
   8221 
   8222 	return true;
   8223 }
   8224 
   8225 static inline bool
   8226 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8227     struct mbuf *m)
   8228 {
   8229 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8230 
   8231 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8232 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8233 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8234 	}
   8235 
   8236 	return true;
   8237 }
   8238 
   8239 static inline void
   8240 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8241     uint32_t errors, struct mbuf *m)
   8242 {
   8243 	struct wm_softc *sc = rxq->rxq_sc;
   8244 
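        	/*
        	 * IXSM ("ignore checksum indication") means the hardware did
        	 * not validate the checksums of this packet, so the IPCS and
        	 * TCPCS status bits can't be trusted.
        	 */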
   8245 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8246 		if (wm_rxdesc_is_set_status(sc, status,
   8247 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8248 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8249 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8250 			if (wm_rxdesc_is_set_error(sc, errors,
   8251 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8252 				m->m_pkthdr.csum_flags |=
   8253 					M_CSUM_IPv4_BAD;
   8254 		}
   8255 		if (wm_rxdesc_is_set_status(sc, status,
   8256 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8257 			/*
   8258 			 * Note: we don't know if this was TCP or UDP,
   8259 			 * so we just set both bits, and expect the
   8260 			 * upper layers to deal.
   8261 			 */
   8262 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8263 			m->m_pkthdr.csum_flags |=
   8264 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8265 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8266 			if (wm_rxdesc_is_set_error(sc, errors,
   8267 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8268 				m->m_pkthdr.csum_flags |=
   8269 					M_CSUM_TCP_UDP_BAD;
   8270 		}
   8271 	}
   8272 }
   8273 
   8274 /*
   8275  * wm_rxeof:
   8276  *
   8277  *	Helper; handle receive interrupts.
   8278  */
   8279 static void
   8280 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8281 {
   8282 	struct wm_softc *sc = rxq->rxq_sc;
   8283 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8284 	struct wm_rxsoft *rxs;
   8285 	struct mbuf *m;
   8286 	int i, len;
   8287 	int count = 0;
   8288 	uint32_t status, errors;
   8289 	uint16_t vlantag;
   8290 
   8291 	KASSERT(mutex_owned(rxq->rxq_lock));
   8292 
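        	/*
        	 * Process at most "limit" descriptors per call so that a busy
        	 * ring cannot monopolize the CPU; the legacy interrupt path
        	 * passes UINT_MAX, i.e. effectively no limit.
        	 */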
   8293 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8294 		if (limit-- == 0) {
   8295 			rxq->rxq_ptr = i;
   8296 			break;
   8297 		}
   8298 
   8299 		rxs = &rxq->rxq_soft[i];
   8300 
   8301 		DPRINTF(WM_DEBUG_RX,
   8302 		    ("%s: RX: checking descriptor %d\n",
   8303 		    device_xname(sc->sc_dev), i));
    8304 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8305 
   8306 		status = wm_rxdesc_get_status(rxq, i);
   8307 		errors = wm_rxdesc_get_errors(rxq, i);
   8308 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8309 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8310 #ifdef WM_DEBUG
   8311 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8312 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8313 #endif
   8314 
   8315 		if (!wm_rxdesc_dd(rxq, i, status)) {
    8316 			/*
    8317 			 * Update the receive pointer while still holding
    8318 			 * rxq_lock, so it stays consistent with the counters.
    8319 			 */
   8320 			rxq->rxq_ptr = i;
   8321 			break;
   8322 		}
   8323 
   8324 		count++;
   8325 		if (__predict_false(rxq->rxq_discard)) {
   8326 			DPRINTF(WM_DEBUG_RX,
   8327 			    ("%s: RX: discarding contents of descriptor %d\n",
   8328 			    device_xname(sc->sc_dev), i));
   8329 			wm_init_rxdesc(rxq, i);
   8330 			if (wm_rxdesc_is_eop(rxq, status)) {
   8331 				/* Reset our state. */
   8332 				DPRINTF(WM_DEBUG_RX,
   8333 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8334 				    device_xname(sc->sc_dev)));
   8335 				rxq->rxq_discard = 0;
   8336 			}
   8337 			continue;
   8338 		}
   8339 
   8340 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8341 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8342 
   8343 		m = rxs->rxs_mbuf;
   8344 
   8345 		/*
   8346 		 * Add a new receive buffer to the ring, unless of
   8347 		 * course the length is zero. Treat the latter as a
   8348 		 * failed mapping.
   8349 		 */
   8350 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8351 			/*
   8352 			 * Failed, throw away what we've done so
   8353 			 * far, and discard the rest of the packet.
   8354 			 */
   8355 			ifp->if_ierrors++;
   8356 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8357 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8358 			wm_init_rxdesc(rxq, i);
   8359 			if (!wm_rxdesc_is_eop(rxq, status))
   8360 				rxq->rxq_discard = 1;
   8361 			if (rxq->rxq_head != NULL)
   8362 				m_freem(rxq->rxq_head);
   8363 			WM_RXCHAIN_RESET(rxq);
   8364 			DPRINTF(WM_DEBUG_RX,
   8365 			    ("%s: RX: Rx buffer allocation failed, "
   8366 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8367 			    rxq->rxq_discard ? " (discard)" : ""));
   8368 			continue;
   8369 		}
   8370 
   8371 		m->m_len = len;
   8372 		rxq->rxq_len += len;
   8373 		DPRINTF(WM_DEBUG_RX,
   8374 		    ("%s: RX: buffer at %p len %d\n",
   8375 		    device_xname(sc->sc_dev), m->m_data, len));
   8376 
   8377 		/* If this is not the end of the packet, keep looking. */
   8378 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8379 			WM_RXCHAIN_LINK(rxq, m);
   8380 			DPRINTF(WM_DEBUG_RX,
   8381 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8382 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8383 			continue;
   8384 		}
   8385 
    8386 		/*
    8387 		 * Okay, we have the entire packet now.  The chip is
    8388 		 * configured to include the FCS except on I350, I354 and
    8389 		 * I21[01] (not all chips can be configured to strip it),
    8390 		 * so we need to trim it off here.  We may also need to
    8391 		 * adjust the length of the previous mbuf in the chain if
    8392 		 * the current mbuf is too short.
    8393 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8394 		 * register is always set on I350, so we don't trim there.
    8395 		 */
   8396 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8397 		    && (sc->sc_type != WM_T_I210)
   8398 		    && (sc->sc_type != WM_T_I211)) {
   8399 			if (m->m_len < ETHER_CRC_LEN) {
   8400 				rxq->rxq_tail->m_len
   8401 				    -= (ETHER_CRC_LEN - m->m_len);
   8402 				m->m_len = 0;
   8403 			} else
   8404 				m->m_len -= ETHER_CRC_LEN;
   8405 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8406 		} else
   8407 			len = rxq->rxq_len;
   8408 
   8409 		WM_RXCHAIN_LINK(rxq, m);
   8410 
   8411 		*rxq->rxq_tailp = NULL;
   8412 		m = rxq->rxq_head;
   8413 
   8414 		WM_RXCHAIN_RESET(rxq);
   8415 
   8416 		DPRINTF(WM_DEBUG_RX,
   8417 		    ("%s: RX: have entire packet, len -> %d\n",
   8418 		    device_xname(sc->sc_dev), len));
   8419 
   8420 		/* If an error occurred, update stats and drop the packet. */
   8421 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8422 			m_freem(m);
   8423 			continue;
   8424 		}
   8425 
   8426 		/* No errors.  Receive the packet. */
   8427 		m_set_rcvif(m, ifp);
   8428 		m->m_pkthdr.len = len;
    8429 		/*
    8430 		 * TODO: the rsshash and rsstype should be saved in
    8431 		 * this mbuf.
    8432 		 */
   8433 		DPRINTF(WM_DEBUG_RX,
   8434 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8435 			device_xname(sc->sc_dev), rsstype, rsshash));
   8436 
   8437 		/*
   8438 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8439 		 * for us.  Associate the tag with the packet.
   8440 		 */
   8441 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8442 			continue;
   8443 
   8444 		/* Set up checksum info for this packet. */
   8445 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
    8446 		/*
    8447 		 * Update the receive pointer while still holding rxq_lock,
    8448 		 * so it stays consistent with the counters.
    8449 		 */
   8450 		rxq->rxq_ptr = i;
   8451 		rxq->rxq_packets++;
   8452 		rxq->rxq_bytes += len;
   8453 		mutex_exit(rxq->rxq_lock);
   8454 
   8455 		/* Pass it on. */
   8456 		if_percpuq_enqueue(sc->sc_ipq, m);
   8457 
   8458 		mutex_enter(rxq->rxq_lock);
   8459 
   8460 		if (rxq->rxq_stopping)
   8461 			break;
   8462 	}
   8463 
   8464 	if (count != 0)
   8465 		rnd_add_uint32(&sc->rnd_source, count);
   8466 
   8467 	DPRINTF(WM_DEBUG_RX,
   8468 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8469 }
   8470 
   8471 /*
   8472  * wm_linkintr_gmii:
   8473  *
   8474  *	Helper; handle link interrupts for GMII.
   8475  */
   8476 static void
   8477 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8478 {
   8479 
   8480 	KASSERT(WM_CORE_LOCKED(sc));
   8481 
   8482 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8483 		__func__));
   8484 
   8485 	if (icr & ICR_LSC) {
   8486 		uint32_t reg;
   8487 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8488 
   8489 		if ((status & STATUS_LU) != 0) {
   8490 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8491 				device_xname(sc->sc_dev),
   8492 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8493 		} else {
   8494 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8495 				device_xname(sc->sc_dev)));
   8496 		}
   8497 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8498 			wm_gig_downshift_workaround_ich8lan(sc);
   8499 
   8500 		if ((sc->sc_type == WM_T_ICH8)
   8501 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8502 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8503 		}
   8504 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8505 			device_xname(sc->sc_dev)));
   8506 		mii_pollstat(&sc->sc_mii);
   8507 		if (sc->sc_type == WM_T_82543) {
   8508 			int miistatus, active;
   8509 
   8510 			/*
   8511 			 * With 82543, we need to force speed and
   8512 			 * duplex on the MAC equal to what the PHY
   8513 			 * speed and duplex configuration is.
   8514 			 */
   8515 			miistatus = sc->sc_mii.mii_media_status;
   8516 
   8517 			if (miistatus & IFM_ACTIVE) {
   8518 				active = sc->sc_mii.mii_media_active;
   8519 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8520 				switch (IFM_SUBTYPE(active)) {
   8521 				case IFM_10_T:
   8522 					sc->sc_ctrl |= CTRL_SPEED_10;
   8523 					break;
   8524 				case IFM_100_TX:
   8525 					sc->sc_ctrl |= CTRL_SPEED_100;
   8526 					break;
   8527 				case IFM_1000_T:
   8528 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8529 					break;
   8530 				default:
    8531 					/*
    8532 					 * Fiber?
    8533 					 * Should not get here.
    8534 					 */
   8535 					printf("unknown media (%x)\n", active);
   8536 					break;
   8537 				}
   8538 				if (active & IFM_FDX)
   8539 					sc->sc_ctrl |= CTRL_FD;
   8540 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8541 			}
   8542 		} else if (sc->sc_type == WM_T_PCH) {
   8543 			wm_k1_gig_workaround_hv(sc,
   8544 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8545 		}
   8546 
   8547 		if ((sc->sc_phytype == WMPHY_82578)
   8548 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8549 			== IFM_1000_T)) {
   8550 
   8551 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8552 				delay(200*1000); /* XXX too big */
   8553 
   8554 				/* Link stall fix for link up */
   8555 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8556 				    HV_MUX_DATA_CTRL,
   8557 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8558 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8559 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8560 				    HV_MUX_DATA_CTRL,
   8561 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8562 			}
   8563 		}
   8564 		/*
   8565 		 * I217 Packet Loss issue:
   8566 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8567 		 * on power up.
   8568 		 * Set the Beacon Duration for I217 to 8 usec
   8569 		 */
   8570 		if ((sc->sc_type == WM_T_PCH_LPT)
   8571 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8572 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8573 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8574 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8575 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8576 		}
   8577 
   8578 		/* XXX Work-around I218 hang issue */
   8579 		/* e1000_k1_workaround_lpt_lp() */
   8580 
   8581 		if ((sc->sc_type == WM_T_PCH_LPT)
   8582 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8583 			/*
   8584 			 * Set platform power management values for Latency
   8585 			 * Tolerance Reporting (LTR)
   8586 			 */
   8587 			wm_platform_pm_pch_lpt(sc,
   8588 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8589 				    != 0));
   8590 		}
   8591 
   8592 		/* FEXTNVM6 K1-off workaround */
   8593 		if (sc->sc_type == WM_T_PCH_SPT) {
   8594 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8595 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8596 			    & FEXTNVM6_K1_OFF_ENABLE)
   8597 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8598 			else
   8599 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8600 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8601 		}
   8602 	} else if (icr & ICR_RXSEQ) {
    8603 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8604 			device_xname(sc->sc_dev)));
   8605 	}
   8606 }
   8607 
   8608 /*
   8609  * wm_linkintr_tbi:
   8610  *
   8611  *	Helper; handle link interrupts for TBI mode.
   8612  */
   8613 static void
   8614 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8615 {
   8616 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8617 	uint32_t status;
   8618 
   8619 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8620 		__func__));
   8621 
   8622 	status = CSR_READ(sc, WMREG_STATUS);
   8623 	if (icr & ICR_LSC) {
   8624 		if (status & STATUS_LU) {
   8625 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8626 			    device_xname(sc->sc_dev),
   8627 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8628 			/*
   8629 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8630 			 * so we should update sc->sc_ctrl
   8631 			 */
   8632 
   8633 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8634 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8635 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8636 			if (status & STATUS_FD)
   8637 				sc->sc_tctl |=
   8638 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8639 			else
   8640 				sc->sc_tctl |=
   8641 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8642 			if (sc->sc_ctrl & CTRL_TFCE)
   8643 				sc->sc_fcrtl |= FCRTL_XONE;
   8644 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8645 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8646 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8647 				      sc->sc_fcrtl);
   8648 			sc->sc_tbi_linkup = 1;
   8649 			if_link_state_change(ifp, LINK_STATE_UP);
   8650 		} else {
   8651 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8652 			    device_xname(sc->sc_dev)));
   8653 			sc->sc_tbi_linkup = 0;
   8654 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8655 		}
   8656 		/* Update LED */
   8657 		wm_tbi_serdes_set_linkled(sc);
   8658 	} else if (icr & ICR_RXSEQ) {
   8659 		DPRINTF(WM_DEBUG_LINK,
   8660 		    ("%s: LINK: Receive sequence error\n",
   8661 		    device_xname(sc->sc_dev)));
   8662 	}
   8663 }
   8664 
   8665 /*
   8666  * wm_linkintr_serdes:
   8667  *
    8668  *	Helper; handle link interrupts for SERDES mode.
   8669  */
   8670 static void
   8671 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8672 {
   8673 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8674 	struct mii_data *mii = &sc->sc_mii;
   8675 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8676 	uint32_t pcs_adv, pcs_lpab, reg;
   8677 
   8678 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8679 		__func__));
   8680 
   8681 	if (icr & ICR_LSC) {
   8682 		/* Check PCS */
   8683 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8684 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8685 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8686 				device_xname(sc->sc_dev)));
   8687 			mii->mii_media_status |= IFM_ACTIVE;
   8688 			sc->sc_tbi_linkup = 1;
   8689 			if_link_state_change(ifp, LINK_STATE_UP);
   8690 		} else {
   8691 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8692 				device_xname(sc->sc_dev)));
   8693 			mii->mii_media_status |= IFM_NONE;
   8694 			sc->sc_tbi_linkup = 0;
   8695 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8696 			wm_tbi_serdes_set_linkled(sc);
   8697 			return;
   8698 		}
   8699 		mii->mii_media_active |= IFM_1000_SX;
   8700 		if ((reg & PCS_LSTS_FDX) != 0)
   8701 			mii->mii_media_active |= IFM_FDX;
   8702 		else
   8703 			mii->mii_media_active |= IFM_HDX;
   8704 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8705 			/* Check flow */
   8706 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8707 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8708 				DPRINTF(WM_DEBUG_LINK,
   8709 				    ("XXX LINKOK but not ACOMP\n"));
   8710 				return;
   8711 			}
   8712 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8713 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8714 			DPRINTF(WM_DEBUG_LINK,
   8715 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
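        			/*
        			 * Resolve flow control from our own and the link
        			 * partner's pause advertisements, following the
        			 * pause resolution rules of IEEE 802.3 Annex 28B.
        			 */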
   8716 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8717 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8718 				mii->mii_media_active |= IFM_FLOW
   8719 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8720 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8721 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8722 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8723 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8724 				mii->mii_media_active |= IFM_FLOW
   8725 				    | IFM_ETH_TXPAUSE;
   8726 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8727 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8728 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8729 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8730 				mii->mii_media_active |= IFM_FLOW
   8731 				    | IFM_ETH_RXPAUSE;
   8732 		}
   8733 		/* Update LED */
   8734 		wm_tbi_serdes_set_linkled(sc);
   8735 	} else {
   8736 		DPRINTF(WM_DEBUG_LINK,
   8737 		    ("%s: LINK: Receive sequence error\n",
   8738 		    device_xname(sc->sc_dev)));
   8739 	}
   8740 }
   8741 
   8742 /*
   8743  * wm_linkintr:
   8744  *
   8745  *	Helper; handle link interrupts.
   8746  */
   8747 static void
   8748 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8749 {
   8750 
   8751 	KASSERT(WM_CORE_LOCKED(sc));
   8752 
   8753 	if (sc->sc_flags & WM_F_HAS_MII)
   8754 		wm_linkintr_gmii(sc, icr);
   8755 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8756 	    && (sc->sc_type >= WM_T_82575))
   8757 		wm_linkintr_serdes(sc, icr);
   8758 	else
   8759 		wm_linkintr_tbi(sc, icr);
   8760 }
   8761 
   8762 /*
   8763  * wm_intr_legacy:
   8764  *
   8765  *	Interrupt service routine for INTx and MSI.
   8766  */
   8767 static int
   8768 wm_intr_legacy(void *arg)
   8769 {
   8770 	struct wm_softc *sc = arg;
   8771 	struct wm_queue *wmq = &sc->sc_queue[0];
   8772 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8773 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8774 	uint32_t icr, rndval = 0;
   8775 	int handled = 0;
   8776 
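        	/*
        	 * Reading ICR clears the asserted interrupt causes, so loop
        	 * until no cause we are interested in remains set.
        	 */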
   8777 	while (1 /* CONSTCOND */) {
   8778 		icr = CSR_READ(sc, WMREG_ICR);
   8779 		if ((icr & sc->sc_icr) == 0)
   8780 			break;
   8781 		if (handled == 0) {
   8782 			DPRINTF(WM_DEBUG_TX,
    8783 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8784 		}
   8785 		if (rndval == 0)
   8786 			rndval = icr;
   8787 
   8788 		mutex_enter(rxq->rxq_lock);
   8789 
   8790 		if (rxq->rxq_stopping) {
   8791 			mutex_exit(rxq->rxq_lock);
   8792 			break;
   8793 		}
   8794 
   8795 		handled = 1;
   8796 
   8797 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8798 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8799 			DPRINTF(WM_DEBUG_RX,
   8800 			    ("%s: RX: got Rx intr 0x%08x\n",
   8801 			    device_xname(sc->sc_dev),
   8802 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8803 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8804 		}
   8805 #endif
   8806 		wm_rxeof(rxq, UINT_MAX);
   8807 
   8808 		mutex_exit(rxq->rxq_lock);
   8809 		mutex_enter(txq->txq_lock);
   8810 
   8811 		if (txq->txq_stopping) {
   8812 			mutex_exit(txq->txq_lock);
   8813 			break;
   8814 		}
   8815 
   8816 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8817 		if (icr & ICR_TXDW) {
   8818 			DPRINTF(WM_DEBUG_TX,
   8819 			    ("%s: TX: got TXDW interrupt\n",
   8820 			    device_xname(sc->sc_dev)));
   8821 			WM_Q_EVCNT_INCR(txq, txdw);
   8822 		}
   8823 #endif
   8824 		wm_txeof(sc, txq);
   8825 
   8826 		mutex_exit(txq->txq_lock);
   8827 		WM_CORE_LOCK(sc);
   8828 
   8829 		if (sc->sc_core_stopping) {
   8830 			WM_CORE_UNLOCK(sc);
   8831 			break;
   8832 		}
   8833 
   8834 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8835 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8836 			wm_linkintr(sc, icr);
   8837 		}
   8838 
   8839 		WM_CORE_UNLOCK(sc);
   8840 
   8841 		if (icr & ICR_RXO) {
   8842 #if defined(WM_DEBUG)
   8843 			log(LOG_WARNING, "%s: Receive overrun\n",
   8844 			    device_xname(sc->sc_dev));
   8845 #endif /* defined(WM_DEBUG) */
   8846 		}
   8847 	}
   8848 
   8849 	rnd_add_uint32(&sc->rnd_source, rndval);
   8850 
   8851 	if (handled) {
   8852 		/* Try to get more packets going. */
   8853 		softint_schedule(wmq->wmq_si);
   8854 	}
   8855 
   8856 	return handled;
   8857 }
   8858 
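        /*
         * wm_txrxintr_disable:
         *
         *	Mask this queue's TX/RX interrupt.  The mask register and bit
         *	layout depend on the MAC type: 82574 uses IMC with per-queue
         *	ICR bits, 82575 uses EIMC with EITR queue bits, and newer
         *	chips use one EIMC bit per MSI-X vector.
         */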
   8859 static inline void
   8860 wm_txrxintr_disable(struct wm_queue *wmq)
   8861 {
   8862 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8863 
   8864 	if (sc->sc_type == WM_T_82574)
   8865 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8866 	else if (sc->sc_type == WM_T_82575)
   8867 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8868 	else
   8869 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8870 }
   8871 
   8872 static inline void
   8873 wm_txrxintr_enable(struct wm_queue *wmq)
   8874 {
   8875 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8876 
   8877 	wm_itrs_calculate(sc, wmq);
   8878 
   8879 	if (sc->sc_type == WM_T_82574)
   8880 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8881 	else if (sc->sc_type == WM_T_82575)
   8882 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8883 	else
   8884 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8885 }
   8886 
   8887 static int
   8888 wm_txrxintr_msix(void *arg)
   8889 {
   8890 	struct wm_queue *wmq = arg;
   8891 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8892 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8893 	struct wm_softc *sc = txq->txq_sc;
   8894 	u_int limit = sc->sc_rx_intr_process_limit;
   8895 
   8896 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8897 
   8898 	DPRINTF(WM_DEBUG_TX,
   8899 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8900 
   8901 	wm_txrxintr_disable(wmq);
   8902 
   8903 	mutex_enter(txq->txq_lock);
   8904 
   8905 	if (txq->txq_stopping) {
   8906 		mutex_exit(txq->txq_lock);
   8907 		return 0;
   8908 	}
   8909 
   8910 	WM_Q_EVCNT_INCR(txq, txdw);
   8911 	wm_txeof(sc, txq);
   8912 	/* wm_deferred start() is done in wm_handle_queue(). */
   8913 	mutex_exit(txq->txq_lock);
   8914 
   8915 	DPRINTF(WM_DEBUG_RX,
   8916 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8917 	mutex_enter(rxq->rxq_lock);
   8918 
   8919 	if (rxq->rxq_stopping) {
   8920 		mutex_exit(rxq->rxq_lock);
   8921 		return 0;
   8922 	}
   8923 
   8924 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8925 	wm_rxeof(rxq, limit);
   8926 	mutex_exit(rxq->rxq_lock);
   8927 
   8928 	wm_itrs_writereg(sc, wmq);
   8929 
   8930 	softint_schedule(wmq->wmq_si);
   8931 
   8932 	return 1;
   8933 }
   8934 
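        /*
         * wm_handle_queue:
         *
         *	Softint handler; finish the TX/RX work deferred from the
         *	interrupt handlers and re-enable this queue's interrupt.
         */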
   8935 static void
   8936 wm_handle_queue(void *arg)
   8937 {
   8938 	struct wm_queue *wmq = arg;
   8939 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8940 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8941 	struct wm_softc *sc = txq->txq_sc;
   8942 	u_int limit = sc->sc_rx_process_limit;
   8943 
   8944 	mutex_enter(txq->txq_lock);
   8945 	if (txq->txq_stopping) {
   8946 		mutex_exit(txq->txq_lock);
   8947 		return;
   8948 	}
   8949 	wm_txeof(sc, txq);
   8950 	wm_deferred_start_locked(txq);
   8951 	mutex_exit(txq->txq_lock);
   8952 
   8953 	mutex_enter(rxq->rxq_lock);
   8954 	if (rxq->rxq_stopping) {
   8955 		mutex_exit(rxq->rxq_lock);
   8956 		return;
   8957 	}
   8958 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8959 	wm_rxeof(rxq, limit);
   8960 	mutex_exit(rxq->rxq_lock);
   8961 
   8962 	wm_txrxintr_enable(wmq);
   8963 }
   8964 
   8965 /*
   8966  * wm_linkintr_msix:
   8967  *
   8968  *	Interrupt service routine for link status change for MSI-X.
   8969  */
   8970 static int
   8971 wm_linkintr_msix(void *arg)
   8972 {
   8973 	struct wm_softc *sc = arg;
   8974 	uint32_t reg;
   8975 
   8976 	DPRINTF(WM_DEBUG_LINK,
   8977 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8978 
   8979 	reg = CSR_READ(sc, WMREG_ICR);
   8980 	WM_CORE_LOCK(sc);
   8981 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8982 		goto out;
   8983 
   8984 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8985 	wm_linkintr(sc, ICR_LSC);
   8986 
   8987 out:
   8988 	WM_CORE_UNLOCK(sc);
   8989 
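        	/*
        	 * Re-arm the link interrupt; as with the queue interrupts,
        	 * the mask register depends on the MAC type.
        	 */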
   8990 	if (sc->sc_type == WM_T_82574)
   8991 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8992 	else if (sc->sc_type == WM_T_82575)
   8993 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8994 	else
   8995 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8996 
   8997 	return 1;
   8998 }
   8999 
   9000 /*
   9001  * Media related.
   9002  * GMII, SGMII, TBI (and SERDES)
   9003  */
   9004 
   9005 /* Common */
   9006 
   9007 /*
   9008  * wm_tbi_serdes_set_linkled:
   9009  *
   9010  *	Update the link LED on TBI and SERDES devices.
   9011  */
   9012 static void
   9013 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9014 {
   9015 
   9016 	if (sc->sc_tbi_linkup)
   9017 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9018 	else
   9019 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9020 
   9021 	/* 82540 or newer devices are active low */
   9022 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9023 
   9024 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9025 }
   9026 
   9027 /* GMII related */
   9028 
   9029 /*
   9030  * wm_gmii_reset:
   9031  *
   9032  *	Reset the PHY.
   9033  */
   9034 static void
   9035 wm_gmii_reset(struct wm_softc *sc)
   9036 {
   9037 	uint32_t reg;
   9038 	int rv;
   9039 
   9040 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9041 		device_xname(sc->sc_dev), __func__));
   9042 
   9043 	rv = sc->phy.acquire(sc);
   9044 	if (rv != 0) {
   9045 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9046 		    __func__);
   9047 		return;
   9048 	}
   9049 
   9050 	switch (sc->sc_type) {
   9051 	case WM_T_82542_2_0:
   9052 	case WM_T_82542_2_1:
   9053 		/* null */
   9054 		break;
   9055 	case WM_T_82543:
   9056 		/*
   9057 		 * With 82543, we need to force speed and duplex on the MAC
   9058 		 * equal to what the PHY speed and duplex configuration is.
   9059 		 * In addition, we need to perform a hardware reset on the PHY
   9060 		 * to take it out of reset.
   9061 		 */
   9062 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9063 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9064 
   9065 		/* The PHY reset pin is active-low. */
   9066 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9067 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9068 		    CTRL_EXT_SWDPIN(4));
   9069 		reg |= CTRL_EXT_SWDPIO(4);
   9070 
   9071 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9072 		CSR_WRITE_FLUSH(sc);
   9073 		delay(10*1000);
   9074 
   9075 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9076 		CSR_WRITE_FLUSH(sc);
   9077 		delay(150);
   9078 #if 0
   9079 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9080 #endif
   9081 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9082 		break;
   9083 	case WM_T_82544:	/* reset 10000us */
   9084 	case WM_T_82540:
   9085 	case WM_T_82545:
   9086 	case WM_T_82545_3:
   9087 	case WM_T_82546:
   9088 	case WM_T_82546_3:
   9089 	case WM_T_82541:
   9090 	case WM_T_82541_2:
   9091 	case WM_T_82547:
   9092 	case WM_T_82547_2:
   9093 	case WM_T_82571:	/* reset 100us */
   9094 	case WM_T_82572:
   9095 	case WM_T_82573:
   9096 	case WM_T_82574:
   9097 	case WM_T_82575:
   9098 	case WM_T_82576:
   9099 	case WM_T_82580:
   9100 	case WM_T_I350:
   9101 	case WM_T_I354:
   9102 	case WM_T_I210:
   9103 	case WM_T_I211:
   9104 	case WM_T_82583:
   9105 	case WM_T_80003:
   9106 		/* generic reset */
   9107 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9108 		CSR_WRITE_FLUSH(sc);
   9109 		delay(20000);
   9110 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9111 		CSR_WRITE_FLUSH(sc);
   9112 		delay(20000);
   9113 
   9114 		if ((sc->sc_type == WM_T_82541)
   9115 		    || (sc->sc_type == WM_T_82541_2)
   9116 		    || (sc->sc_type == WM_T_82547)
   9117 		    || (sc->sc_type == WM_T_82547_2)) {
    9118 			/* Workarounds for IGP are done in igp_reset() */
   9119 			/* XXX add code to set LED after phy reset */
   9120 		}
   9121 		break;
   9122 	case WM_T_ICH8:
   9123 	case WM_T_ICH9:
   9124 	case WM_T_ICH10:
   9125 	case WM_T_PCH:
   9126 	case WM_T_PCH2:
   9127 	case WM_T_PCH_LPT:
   9128 	case WM_T_PCH_SPT:
   9129 		/* generic reset */
   9130 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9131 		CSR_WRITE_FLUSH(sc);
   9132 		delay(100);
   9133 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9134 		CSR_WRITE_FLUSH(sc);
   9135 		delay(150);
   9136 		break;
   9137 	default:
   9138 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9139 		    __func__);
   9140 		break;
   9141 	}
   9142 
   9143 	sc->phy.release(sc);
   9144 
   9145 	/* get_cfg_done */
   9146 	wm_get_cfg_done(sc);
   9147 
   9148 	/* extra setup */
   9149 	switch (sc->sc_type) {
   9150 	case WM_T_82542_2_0:
   9151 	case WM_T_82542_2_1:
   9152 	case WM_T_82543:
   9153 	case WM_T_82544:
   9154 	case WM_T_82540:
   9155 	case WM_T_82545:
   9156 	case WM_T_82545_3:
   9157 	case WM_T_82546:
   9158 	case WM_T_82546_3:
   9159 	case WM_T_82541_2:
   9160 	case WM_T_82547_2:
   9161 	case WM_T_82571:
   9162 	case WM_T_82572:
   9163 	case WM_T_82573:
   9164 	case WM_T_82574:
   9165 	case WM_T_82583:
   9166 	case WM_T_82575:
   9167 	case WM_T_82576:
   9168 	case WM_T_82580:
   9169 	case WM_T_I350:
   9170 	case WM_T_I354:
   9171 	case WM_T_I210:
   9172 	case WM_T_I211:
   9173 	case WM_T_80003:
   9174 		/* null */
   9175 		break;
   9176 	case WM_T_82541:
   9177 	case WM_T_82547:
    9178 		/* XXX Actively configure the LED after PHY reset */
   9179 		break;
   9180 	case WM_T_ICH8:
   9181 	case WM_T_ICH9:
   9182 	case WM_T_ICH10:
   9183 	case WM_T_PCH:
   9184 	case WM_T_PCH2:
   9185 	case WM_T_PCH_LPT:
   9186 	case WM_T_PCH_SPT:
   9187 		wm_phy_post_reset(sc);
   9188 		break;
   9189 	default:
   9190 		panic("%s: unknown type\n", __func__);
   9191 		break;
   9192 	}
   9193 }
   9194 
    9195 /*
    9196  * Set up sc_phytype and mii_{read|write}reg.
    9197  *
    9198  *  To identify the PHY type, the correct read/write functions must be
    9199  * selected. To select them, the PCI ID or MAC type is required, without
    9200  * accessing any PHY registers.
    9201  *
    9202  *  On the first call of this function, the PHY ID is not known yet.
    9203  * Check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    9204  * so the result might be incorrect.
    9205  *
    9206  *  On the second call, the PHY OUI and model are used to identify the
    9207  * PHY type. This might still not be perfect because of missing entries
    9208  * in the comparison table, but it is better than the first call.
    9209  *
    9210  *  If the newly detected result differs from the previous assumption,
    9211  * a diagnostic message is printed.
    9212  */
   9213 static void
   9214 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9215     uint16_t phy_model)
   9216 {
   9217 	device_t dev = sc->sc_dev;
   9218 	struct mii_data *mii = &sc->sc_mii;
   9219 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9220 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9221 	mii_readreg_t new_readreg;
   9222 	mii_writereg_t new_writereg;
   9223 
   9224 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9225 		device_xname(sc->sc_dev), __func__));
   9226 
   9227 	if (mii->mii_readreg == NULL) {
   9228 		/*
   9229 		 *  This is the first call of this function. For ICH and PCH
   9230 		 * variants, it's difficult to determine the PHY access method
   9231 		 * by sc_type, so use the PCI product ID for some devices.
   9232 		 */
   9233 
   9234 		switch (sc->sc_pcidevid) {
   9235 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9236 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9237 			/* 82577 */
   9238 			new_phytype = WMPHY_82577;
   9239 			break;
   9240 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9241 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9242 			/* 82578 */
   9243 			new_phytype = WMPHY_82578;
   9244 			break;
   9245 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9246 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9247 			/* 82579 */
   9248 			new_phytype = WMPHY_82579;
   9249 			break;
   9250 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9251 		case PCI_PRODUCT_INTEL_82801I_BM:
   9252 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9253 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9254 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9255 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9256 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9257 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9258 			/* ICH8, 9, 10 with 82567 */
   9259 			new_phytype = WMPHY_BM;
   9260 			break;
   9261 		default:
   9262 			break;
   9263 		}
   9264 	} else {
   9265 		/* It's not the first call. Use PHY OUI and model */
   9266 		switch (phy_oui) {
   9267 		case MII_OUI_ATHEROS: /* XXX ??? */
   9268 			switch (phy_model) {
   9269 			case 0x0004: /* XXX */
   9270 				new_phytype = WMPHY_82578;
   9271 				break;
   9272 			default:
   9273 				break;
   9274 			}
   9275 			break;
   9276 		case MII_OUI_xxMARVELL:
   9277 			switch (phy_model) {
   9278 			case MII_MODEL_xxMARVELL_I210:
   9279 				new_phytype = WMPHY_I210;
   9280 				break;
   9281 			case MII_MODEL_xxMARVELL_E1011:
   9282 			case MII_MODEL_xxMARVELL_E1000_3:
   9283 			case MII_MODEL_xxMARVELL_E1000_5:
   9284 			case MII_MODEL_xxMARVELL_E1112:
   9285 				new_phytype = WMPHY_M88;
   9286 				break;
   9287 			case MII_MODEL_xxMARVELL_E1149:
   9288 				new_phytype = WMPHY_BM;
   9289 				break;
   9290 			case MII_MODEL_xxMARVELL_E1111:
   9291 			case MII_MODEL_xxMARVELL_I347:
   9292 			case MII_MODEL_xxMARVELL_E1512:
   9293 			case MII_MODEL_xxMARVELL_E1340M:
   9294 			case MII_MODEL_xxMARVELL_E1543:
   9295 				new_phytype = WMPHY_M88;
   9296 				break;
   9297 			case MII_MODEL_xxMARVELL_I82563:
   9298 				new_phytype = WMPHY_GG82563;
   9299 				break;
   9300 			default:
   9301 				break;
   9302 			}
   9303 			break;
   9304 		case MII_OUI_INTEL:
   9305 			switch (phy_model) {
   9306 			case MII_MODEL_INTEL_I82577:
   9307 				new_phytype = WMPHY_82577;
   9308 				break;
   9309 			case MII_MODEL_INTEL_I82579:
   9310 				new_phytype = WMPHY_82579;
   9311 				break;
   9312 			case MII_MODEL_INTEL_I217:
   9313 				new_phytype = WMPHY_I217;
   9314 				break;
   9315 			case MII_MODEL_INTEL_I82580:
   9316 			case MII_MODEL_INTEL_I350:
   9317 				new_phytype = WMPHY_82580;
   9318 				break;
   9319 			default:
   9320 				break;
   9321 			}
   9322 			break;
   9323 		case MII_OUI_yyINTEL:
   9324 			switch (phy_model) {
   9325 			case MII_MODEL_yyINTEL_I82562G:
   9326 			case MII_MODEL_yyINTEL_I82562EM:
   9327 			case MII_MODEL_yyINTEL_I82562ET:
   9328 				new_phytype = WMPHY_IFE;
   9329 				break;
   9330 			case MII_MODEL_yyINTEL_IGP01E1000:
   9331 				new_phytype = WMPHY_IGP;
   9332 				break;
   9333 			case MII_MODEL_yyINTEL_I82566:
   9334 				new_phytype = WMPHY_IGP_3;
   9335 				break;
   9336 			default:
   9337 				break;
   9338 			}
   9339 			break;
   9340 		default:
   9341 			break;
   9342 		}
   9343 		if (new_phytype == WMPHY_UNKNOWN)
   9344 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9345 			    __func__);
   9346 
    9347 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9348 		    && (sc->sc_phytype != new_phytype)) {
    9349 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9350 			    "was incorrect. PHY type from PHY ID = %u\n",
    9351 			    sc->sc_phytype, new_phytype);
   9352 		}
   9353 	}
   9354 
   9355 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9356 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9357 		/* SGMII */
   9358 		new_readreg = wm_sgmii_readreg;
   9359 		new_writereg = wm_sgmii_writereg;
    9360 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9361 		/* BM2 (phyaddr == 1) */
   9362 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9363 		    && (new_phytype != WMPHY_BM)
   9364 		    && (new_phytype != WMPHY_UNKNOWN))
   9365 			doubt_phytype = new_phytype;
   9366 		new_phytype = WMPHY_BM;
   9367 		new_readreg = wm_gmii_bm_readreg;
   9368 		new_writereg = wm_gmii_bm_writereg;
   9369 	} else if (sc->sc_type >= WM_T_PCH) {
   9370 		/* All PCH* use _hv_ */
   9371 		new_readreg = wm_gmii_hv_readreg;
   9372 		new_writereg = wm_gmii_hv_writereg;
   9373 	} else if (sc->sc_type >= WM_T_ICH8) {
   9374 		/* non-82567 ICH8, 9 and 10 */
   9375 		new_readreg = wm_gmii_i82544_readreg;
   9376 		new_writereg = wm_gmii_i82544_writereg;
   9377 	} else if (sc->sc_type >= WM_T_80003) {
   9378 		/* 80003 */
   9379 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9380 		    && (new_phytype != WMPHY_GG82563)
   9381 		    && (new_phytype != WMPHY_UNKNOWN))
   9382 			doubt_phytype = new_phytype;
   9383 		new_phytype = WMPHY_GG82563;
   9384 		new_readreg = wm_gmii_i80003_readreg;
   9385 		new_writereg = wm_gmii_i80003_writereg;
   9386 	} else if (sc->sc_type >= WM_T_I210) {
   9387 		/* I210 and I211 */
   9388 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9389 		    && (new_phytype != WMPHY_I210)
   9390 		    && (new_phytype != WMPHY_UNKNOWN))
   9391 			doubt_phytype = new_phytype;
   9392 		new_phytype = WMPHY_I210;
   9393 		new_readreg = wm_gmii_gs40g_readreg;
   9394 		new_writereg = wm_gmii_gs40g_writereg;
   9395 	} else if (sc->sc_type >= WM_T_82580) {
   9396 		/* 82580, I350 and I354 */
   9397 		new_readreg = wm_gmii_82580_readreg;
   9398 		new_writereg = wm_gmii_82580_writereg;
   9399 	} else if (sc->sc_type >= WM_T_82544) {
    9400 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9401 		new_readreg = wm_gmii_i82544_readreg;
   9402 		new_writereg = wm_gmii_i82544_writereg;
   9403 	} else {
   9404 		new_readreg = wm_gmii_i82543_readreg;
   9405 		new_writereg = wm_gmii_i82543_writereg;
   9406 	}
   9407 
   9408 	if (new_phytype == WMPHY_BM) {
   9409 		/* All BM use _bm_ */
   9410 		new_readreg = wm_gmii_bm_readreg;
   9411 		new_writereg = wm_gmii_bm_writereg;
   9412 	}
   9413 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9414 		/* All PCH* use _hv_ */
   9415 		new_readreg = wm_gmii_hv_readreg;
   9416 		new_writereg = wm_gmii_hv_writereg;
   9417 	}
   9418 
   9419 	/* Diag output */
   9420 	if (doubt_phytype != WMPHY_UNKNOWN)
   9421 		aprint_error_dev(dev, "Assumed new PHY type was "
   9422 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9423 		    new_phytype);
    9424 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9425 	    && (sc->sc_phytype != new_phytype))
    9426 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9427 		    "was incorrect. New PHY type = %u\n",
    9428 		    sc->sc_phytype, new_phytype);
   9429 
   9430 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9431 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9432 
   9433 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9434 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9435 		    "function was incorrect.\n");
   9436 
   9437 	/* Update now */
   9438 	sc->sc_phytype = new_phytype;
   9439 	mii->mii_readreg = new_readreg;
   9440 	mii->mii_writereg = new_writereg;
   9441 }
   9442 
   9443 /*
   9444  * wm_get_phy_id_82575:
   9445  *
   9446  * Return PHY ID. Return -1 if it failed.
   9447  */
   9448 static int
   9449 wm_get_phy_id_82575(struct wm_softc *sc)
   9450 {
   9451 	uint32_t reg;
   9452 	int phyid = -1;
   9453 
   9454 	/* XXX */
   9455 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9456 		return -1;
   9457 
   9458 	if (wm_sgmii_uses_mdio(sc)) {
   9459 		switch (sc->sc_type) {
   9460 		case WM_T_82575:
   9461 		case WM_T_82576:
   9462 			reg = CSR_READ(sc, WMREG_MDIC);
   9463 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9464 			break;
   9465 		case WM_T_82580:
   9466 		case WM_T_I350:
   9467 		case WM_T_I354:
   9468 		case WM_T_I210:
   9469 		case WM_T_I211:
   9470 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9471 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9472 			break;
   9473 		default:
   9474 			return -1;
   9475 		}
   9476 	}
   9477 
   9478 	return phyid;
   9479 }
   9480 
   9481 
   9482 /*
   9483  * wm_gmii_mediainit:
   9484  *
   9485  *	Initialize media for use on 1000BASE-T devices.
   9486  */
   9487 static void
   9488 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9489 {
   9490 	device_t dev = sc->sc_dev;
   9491 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9492 	struct mii_data *mii = &sc->sc_mii;
   9493 	uint32_t reg;
   9494 
   9495 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9496 		device_xname(sc->sc_dev), __func__));
   9497 
   9498 	/* We have GMII. */
   9499 	sc->sc_flags |= WM_F_HAS_MII;
   9500 
   9501 	if (sc->sc_type == WM_T_80003)
   9502 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9503 	else
   9504 		sc->sc_tipg = TIPG_1000T_DFLT;
   9505 
   9506 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9507 	if ((sc->sc_type == WM_T_82580)
   9508 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9509 	    || (sc->sc_type == WM_T_I211)) {
   9510 		reg = CSR_READ(sc, WMREG_PHPM);
   9511 		reg &= ~PHPM_GO_LINK_D;
   9512 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9513 	}
   9514 
   9515 	/*
   9516 	 * Let the chip set speed/duplex on its own based on
   9517 	 * signals from the PHY.
   9518 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9519 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9520 	 */
   9521 	sc->sc_ctrl |= CTRL_SLU;
   9522 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9523 
   9524 	/* Initialize our media structures and probe the GMII. */
   9525 	mii->mii_ifp = ifp;
   9526 
   9527 	mii->mii_statchg = wm_gmii_statchg;
   9528 
   9529 	/* get PHY control from SMBus to PCIe */
   9530 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9531 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9532 		wm_smbustopci(sc);
   9533 
   9534 	wm_gmii_reset(sc);
   9535 
   9536 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9537 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9538 	    wm_gmii_mediastatus);
   9539 
   9540 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9541 	    || (sc->sc_type == WM_T_82580)
   9542 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9543 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9544 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9545 			/* Attach only one port */
   9546 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9547 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9548 		} else {
   9549 			int i, id;
   9550 			uint32_t ctrl_ext;
   9551 
   9552 			id = wm_get_phy_id_82575(sc);
   9553 			if (id != -1) {
   9554 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9555 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9556 			}
   9557 			if ((id == -1)
   9558 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9559 				/* Power on sgmii phy if it is disabled */
   9560 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9561 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9562 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9563 				CSR_WRITE_FLUSH(sc);
   9564 				delay(300*1000); /* XXX too long */
   9565 
9566 				/* Try PHY addresses 1 through 7 */
   9567 				for (i = 1; i < 8; i++)
   9568 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9569 					    0xffffffff, i, MII_OFFSET_ANY,
   9570 					    MIIF_DOPAUSE);
   9571 
9572 				/* Restore the previous SFP cage power state */
   9573 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9574 			}
   9575 		}
   9576 	} else {
   9577 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9578 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9579 	}
   9580 
   9581 	/*
   9582 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9583 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9584 	 */
   9585 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9586 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9587 		wm_set_mdio_slow_mode_hv(sc);
   9588 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9589 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9590 	}
   9591 
   9592 	/*
   9593 	 * (For ICH8 variants)
   9594 	 * If PHY detection failed, use BM's r/w function and retry.
   9595 	 */
   9596 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9597 		/* If it failed, retry with the *_bm_* functions */
9598 		aprint_verbose_dev(dev, "Assumed PHY access function "
9599 		    "(type = %d) might be incorrect. Using BM and retrying.\n",
   9600 		    sc->sc_phytype);
   9601 		sc->sc_phytype = WMPHY_BM;
   9602 		mii->mii_readreg = wm_gmii_bm_readreg;
   9603 		mii->mii_writereg = wm_gmii_bm_writereg;
   9604 
   9605 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9606 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9607 	}
   9608 
   9609 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9610 		/* No PHY was found */
   9611 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9612 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9613 		sc->sc_phytype = WMPHY_NONE;
   9614 	} else {
   9615 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9616 
   9617 		/*
9618 		 * A PHY was found. Check the PHY type again with this second
9619 		 * call of wm_gmii_setup_phytype().
   9620 		 */
   9621 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9622 		    child->mii_mpd_model);
   9623 
   9624 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9625 	}
   9626 }
   9627 
   9628 /*
   9629  * wm_gmii_mediachange:	[ifmedia interface function]
   9630  *
   9631  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9632  */
   9633 static int
   9634 wm_gmii_mediachange(struct ifnet *ifp)
   9635 {
   9636 	struct wm_softc *sc = ifp->if_softc;
   9637 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9638 	int rc;
   9639 
   9640 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9641 		device_xname(sc->sc_dev), __func__));
   9642 	if ((ifp->if_flags & IFF_UP) == 0)
   9643 		return 0;
   9644 
   9645 	/* Disable D0 LPLU. */
   9646 	wm_lplu_d0_disable(sc);
   9647 
   9648 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9649 	sc->sc_ctrl |= CTRL_SLU;
   9650 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9651 	    || (sc->sc_type > WM_T_82543)) {
   9652 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9653 	} else {
   9654 		sc->sc_ctrl &= ~CTRL_ASDE;
   9655 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9656 		if (ife->ifm_media & IFM_FDX)
   9657 			sc->sc_ctrl |= CTRL_FD;
   9658 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9659 		case IFM_10_T:
   9660 			sc->sc_ctrl |= CTRL_SPEED_10;
   9661 			break;
   9662 		case IFM_100_TX:
   9663 			sc->sc_ctrl |= CTRL_SPEED_100;
   9664 			break;
   9665 		case IFM_1000_T:
   9666 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9667 			break;
   9668 		default:
   9669 			panic("wm_gmii_mediachange: bad media 0x%x",
   9670 			    ife->ifm_media);
   9671 		}
   9672 	}
   9673 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9674 	CSR_WRITE_FLUSH(sc);
   9675 	if (sc->sc_type <= WM_T_82543)
   9676 		wm_gmii_reset(sc);
   9677 
   9678 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9679 		return 0;
   9680 	return rc;
   9681 }
   9682 
   9683 /*
   9684  * wm_gmii_mediastatus:	[ifmedia interface function]
   9685  *
   9686  *	Get the current interface media status on a 1000BASE-T device.
   9687  */
   9688 static void
   9689 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9690 {
   9691 	struct wm_softc *sc = ifp->if_softc;
   9692 
   9693 	ether_mediastatus(ifp, ifmr);
   9694 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9695 	    | sc->sc_flowflags;
   9696 }
   9697 
   9698 #define	MDI_IO		CTRL_SWDPIN(2)
   9699 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9700 #define	MDI_CLK		CTRL_SWDPIN(3)
   9701 
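/*
 * The i82543 has no MDIC register; MII management frames are bit-banged
 * through the software-definable pins defined above.  The frames follow
 * the IEEE 802.3 clause 22 format: a 32-bit preamble of all ones, two
 * start bits, a two-bit opcode (read or write), a 5-bit PHY address, a
 * 5-bit register address, a two-bit turnaround and 16 data bits.
 * wm_i82543_mii_sendbits() shifts a frame out MSB first, toggling
 * MDI_CLK around each bit; wm_i82543_mii_recvbits() clocks the 16 data
 * bits back in.
 */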
   9702 static void
   9703 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9704 {
   9705 	uint32_t i, v;
   9706 
   9707 	v = CSR_READ(sc, WMREG_CTRL);
   9708 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9709 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9710 
   9711 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9712 		if (data & i)
   9713 			v |= MDI_IO;
   9714 		else
   9715 			v &= ~MDI_IO;
   9716 		CSR_WRITE(sc, WMREG_CTRL, v);
   9717 		CSR_WRITE_FLUSH(sc);
   9718 		delay(10);
   9719 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9720 		CSR_WRITE_FLUSH(sc);
   9721 		delay(10);
   9722 		CSR_WRITE(sc, WMREG_CTRL, v);
   9723 		CSR_WRITE_FLUSH(sc);
   9724 		delay(10);
   9725 	}
   9726 }
   9727 
   9728 static uint32_t
   9729 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9730 {
   9731 	uint32_t v, i, data = 0;
   9732 
   9733 	v = CSR_READ(sc, WMREG_CTRL);
   9734 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9735 	v |= CTRL_SWDPIO(3);
   9736 
   9737 	CSR_WRITE(sc, WMREG_CTRL, v);
   9738 	CSR_WRITE_FLUSH(sc);
   9739 	delay(10);
   9740 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9741 	CSR_WRITE_FLUSH(sc);
   9742 	delay(10);
   9743 	CSR_WRITE(sc, WMREG_CTRL, v);
   9744 	CSR_WRITE_FLUSH(sc);
   9745 	delay(10);
   9746 
   9747 	for (i = 0; i < 16; i++) {
   9748 		data <<= 1;
   9749 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9750 		CSR_WRITE_FLUSH(sc);
   9751 		delay(10);
   9752 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9753 			data |= 1;
   9754 		CSR_WRITE(sc, WMREG_CTRL, v);
   9755 		CSR_WRITE_FLUSH(sc);
   9756 		delay(10);
   9757 	}
   9758 
   9759 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9760 	CSR_WRITE_FLUSH(sc);
   9761 	delay(10);
   9762 	CSR_WRITE(sc, WMREG_CTRL, v);
   9763 	CSR_WRITE_FLUSH(sc);
   9764 	delay(10);
   9765 
   9766 	return data;
   9767 }
   9768 
   9769 #undef MDI_IO
   9770 #undef MDI_DIR
   9771 #undef MDI_CLK
   9772 
   9773 /*
   9774  * wm_gmii_i82543_readreg:	[mii interface function]
   9775  *
   9776  *	Read a PHY register on the GMII (i82543 version).
   9777  */
   9778 static int
   9779 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9780 {
   9781 	struct wm_softc *sc = device_private(dev);
   9782 	int rv;
   9783 
   9784 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9785 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9786 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9787 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9788 
   9789 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9790 	    device_xname(dev), phy, reg, rv));
   9791 
   9792 	return rv;
   9793 }
   9794 
   9795 /*
   9796  * wm_gmii_i82543_writereg:	[mii interface function]
   9797  *
   9798  *	Write a PHY register on the GMII (i82543 version).
   9799  */
   9800 static void
   9801 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9802 {
   9803 	struct wm_softc *sc = device_private(dev);
   9804 
   9805 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9806 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9807 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9808 	    (MII_COMMAND_START << 30), 32);
   9809 }
   9810 
   9811 /*
   9812  * wm_gmii_mdic_readreg:	[mii interface function]
   9813  *
   9814  *	Read a PHY register on the GMII.
   9815  */
   9816 static int
   9817 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9818 {
   9819 	struct wm_softc *sc = device_private(dev);
   9820 	uint32_t mdic = 0;
   9821 	int i, rv;
   9822 
   9823 	if (reg > MII_ADDRMASK) {
   9824 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9825 		    __func__, sc->sc_phytype, reg);
   9826 		reg &= MII_ADDRMASK;
   9827 	}
   9828 
   9829 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9830 	    MDIC_REGADD(reg));
   9831 
   9832 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9833 		mdic = CSR_READ(sc, WMREG_MDIC);
   9834 		if (mdic & MDIC_READY)
   9835 			break;
   9836 		delay(50);
   9837 	}
   9838 
   9839 	if ((mdic & MDIC_READY) == 0) {
   9840 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9841 		    device_xname(dev), phy, reg);
   9842 		rv = 0;
   9843 	} else if (mdic & MDIC_E) {
   9844 #if 0 /* This is normal if no PHY is present. */
   9845 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9846 		    device_xname(dev), phy, reg);
   9847 #endif
   9848 		rv = 0;
   9849 	} else {
   9850 		rv = MDIC_DATA(mdic);
   9851 		if (rv == 0xffff)
   9852 			rv = 0;
   9853 	}
   9854 
   9855 	return rv;
   9856 }
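
/*
 * Usage sketch (illustrative only, not part of the driver): with the
 * PHY semaphore held, a conventional MII register can be fetched in a
 * single call, e.g. the status register of the PHY at address 1:
 *
 *	int bmsr = wm_gmii_mdic_readreg(dev, 1, MII_BMSR);
 *	if (bmsr & BMSR_LINK)
 *		... link is up ...
 */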
   9857 
   9858 /*
   9859  * wm_gmii_mdic_writereg:	[mii interface function]
   9860  *
   9861  *	Write a PHY register on the GMII.
   9862  */
   9863 static void
   9864 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9865 {
   9866 	struct wm_softc *sc = device_private(dev);
   9867 	uint32_t mdic = 0;
   9868 	int i;
   9869 
   9870 	if (reg > MII_ADDRMASK) {
   9871 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9872 		    __func__, sc->sc_phytype, reg);
   9873 		reg &= MII_ADDRMASK;
   9874 	}
   9875 
   9876 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9877 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9878 
   9879 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9880 		mdic = CSR_READ(sc, WMREG_MDIC);
   9881 		if (mdic & MDIC_READY)
   9882 			break;
   9883 		delay(50);
   9884 	}
   9885 
   9886 	if ((mdic & MDIC_READY) == 0)
   9887 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9888 		    device_xname(dev), phy, reg);
   9889 	else if (mdic & MDIC_E)
   9890 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9891 		    device_xname(dev), phy, reg);
   9892 }
   9893 
   9894 /*
   9895  * wm_gmii_i82544_readreg:	[mii interface function]
   9896  *
   9897  *	Read a PHY register on the GMII.
   9898  */
   9899 static int
   9900 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9901 {
   9902 	struct wm_softc *sc = device_private(dev);
   9903 	int rv;
   9904 
   9905 	if (sc->phy.acquire(sc)) {
   9906 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9907 		return 0;
   9908 	}
   9909 
   9910 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9911 		switch (sc->sc_phytype) {
   9912 		case WMPHY_IGP:
   9913 		case WMPHY_IGP_2:
   9914 		case WMPHY_IGP_3:
   9915 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9916 			break;
   9917 		default:
   9918 #ifdef WM_DEBUG
   9919 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9920 			    __func__, sc->sc_phytype, reg);
   9921 #endif
   9922 			break;
   9923 		}
   9924 	}
   9925 
   9926 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9927 	sc->phy.release(sc);
   9928 
   9929 	return rv;
   9930 }
   9931 
   9932 /*
   9933  * wm_gmii_i82544_writereg:	[mii interface function]
   9934  *
   9935  *	Write a PHY register on the GMII.
   9936  */
   9937 static void
   9938 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9939 {
   9940 	struct wm_softc *sc = device_private(dev);
   9941 
   9942 	if (sc->phy.acquire(sc)) {
   9943 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9944 		return;
   9945 	}
   9946 
   9947 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9948 		switch (sc->sc_phytype) {
   9949 		case WMPHY_IGP:
   9950 		case WMPHY_IGP_2:
   9951 		case WMPHY_IGP_3:
   9952 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9953 			break;
   9954 		default:
   9955 #ifdef WM_DEBUG
9956 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9957 			    __func__, sc->sc_phytype, reg);
   9958 #endif
   9959 			break;
   9960 		}
   9961 	}
   9962 
   9963 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9964 	sc->phy.release(sc);
   9965 }
   9966 
   9967 /*
   9968  * wm_gmii_i80003_readreg:	[mii interface function]
   9969  *
9970  *	Read a PHY register on the Kumeran interface (80003).
9971  * This could be handled by the PHY layer if we didn't have to lock the
9972  * resource ...
   9973  */
   9974 static int
   9975 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9976 {
   9977 	struct wm_softc *sc = device_private(dev);
   9978 	int page_select, temp;
   9979 	int rv;
   9980 
9981 	if (phy != 1) /* only one PHY on the Kumeran bus */
   9982 		return 0;
   9983 
   9984 	if (sc->phy.acquire(sc)) {
   9985 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9986 		return 0;
   9987 	}
   9988 
   9989 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   9990 		page_select = GG82563_PHY_PAGE_SELECT;
   9991 	else {
   9992 		/*
   9993 		 * Use Alternative Page Select register to access registers
   9994 		 * 30 and 31.
   9995 		 */
   9996 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   9997 	}
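	/* The GG82563 page number is carried in the high bits of 'reg'. */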
   9998 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   9999 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10000 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10001 		/*
10002 		 * Wait an additional 200us to work around a bug in the ready
10003 		 * bit of the MDIC register.
   10004 		 */
   10005 		delay(200);
   10006 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10007 			device_printf(dev, "%s failed\n", __func__);
   10008 			rv = 0; /* XXX */
   10009 			goto out;
   10010 		}
   10011 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10012 		delay(200);
   10013 	} else
   10014 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10015 
   10016 out:
   10017 	sc->phy.release(sc);
   10018 	return rv;
   10019 }
   10020 
   10021 /*
   10022  * wm_gmii_i80003_writereg:	[mii interface function]
   10023  *
10024  *	Write a PHY register on the Kumeran interface (80003).
10025  * This could be handled by the PHY layer if we didn't have to lock the
10026  * resource ...
   10027  */
   10028 static void
   10029 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10030 {
   10031 	struct wm_softc *sc = device_private(dev);
   10032 	int page_select, temp;
   10033 
10034 	if (phy != 1) /* only one PHY on the Kumeran bus */
   10035 		return;
   10036 
   10037 	if (sc->phy.acquire(sc)) {
   10038 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10039 		return;
   10040 	}
   10041 
   10042 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10043 		page_select = GG82563_PHY_PAGE_SELECT;
   10044 	else {
   10045 		/*
   10046 		 * Use Alternative Page Select register to access registers
   10047 		 * 30 and 31.
   10048 		 */
   10049 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10050 	}
   10051 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10052 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10053 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10054 		/*
10055 		 * Wait an additional 200us to work around a bug in the ready
10056 		 * bit of the MDIC register.
   10057 		 */
   10058 		delay(200);
   10059 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10060 			device_printf(dev, "%s failed\n", __func__);
   10061 			goto out;
   10062 		}
   10063 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10064 		delay(200);
   10065 	} else
   10066 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10067 
   10068 out:
   10069 	sc->phy.release(sc);
   10070 }
   10071 
   10072 /*
   10073  * wm_gmii_bm_readreg:	[mii interface function]
   10074  *
10075  *	Read a PHY register on the BM PHY.
10076  * This could be handled by the PHY layer if we didn't have to lock the
10077  * resource ...
   10078  */
   10079 static int
   10080 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10081 {
   10082 	struct wm_softc *sc = device_private(dev);
   10083 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10084 	uint16_t val;
   10085 	int rv;
   10086 
   10087 	if (sc->phy.acquire(sc)) {
   10088 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10089 		return 0;
   10090 	}
   10091 
   10092 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10093 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10094 		    || (reg == 31)) ? 1 : phy;
   10095 	/* Page 800 works differently than the rest so it has its own func */
   10096 	if (page == BM_WUC_PAGE) {
   10097 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10098 		rv = val;
   10099 		goto release;
   10100 	}
   10101 
   10102 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10103 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10104 		    && (sc->sc_type != WM_T_82583))
   10105 			wm_gmii_mdic_writereg(dev, phy,
   10106 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10107 		else
   10108 			wm_gmii_mdic_writereg(dev, phy,
   10109 			    BME1000_PHY_PAGE_SELECT, page);
   10110 	}
   10111 
   10112 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10113 
   10114 release:
   10115 	sc->phy.release(sc);
   10116 	return rv;
   10117 }
   10118 
   10119 /*
   10120  * wm_gmii_bm_writereg:	[mii interface function]
   10121  *
10122  *	Write a PHY register on the BM PHY.
10123  * This could be handled by the PHY layer if we didn't have to lock the
10124  * resource ...
   10125  */
   10126 static void
   10127 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10128 {
   10129 	struct wm_softc *sc = device_private(dev);
   10130 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10131 
   10132 	if (sc->phy.acquire(sc)) {
   10133 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10134 		return;
   10135 	}
   10136 
   10137 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10138 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10139 		    || (reg == 31)) ? 1 : phy;
   10140 	/* Page 800 works differently than the rest so it has its own func */
   10141 	if (page == BM_WUC_PAGE) {
   10142 		uint16_t tmp;
   10143 
   10144 		tmp = val;
   10145 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10146 		goto release;
   10147 	}
   10148 
   10149 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10150 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10151 		    && (sc->sc_type != WM_T_82583))
   10152 			wm_gmii_mdic_writereg(dev, phy,
   10153 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10154 		else
   10155 			wm_gmii_mdic_writereg(dev, phy,
   10156 			    BME1000_PHY_PAGE_SELECT, page);
   10157 	}
   10158 
   10159 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10160 
   10161 release:
   10162 	sc->phy.release(sc);
   10163 }
   10164 
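/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register.
 *	Registers on page 800 (BM_WUC_PAGE) are not reachable with a plain
 *	page select, so access is first enabled through the WUC enable
 *	register on page 769, then performed through the address/data
 *	opcode registers, and finally disabled again.
 */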
   10165 static void
10166 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10167 {
   10168 	struct wm_softc *sc = device_private(dev);
   10169 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10170 	uint16_t wuce, reg;
   10171 
   10172 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10173 		device_xname(dev), __func__));
   10174 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10175 	if (sc->sc_type == WM_T_PCH) {
10176 		/* XXX The e1000 driver does nothing here... why? */
   10177 	}
   10178 
   10179 	/*
   10180 	 * 1) Enable PHY wakeup register first.
   10181 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10182 	 */
   10183 
   10184 	/* Set page 769 */
   10185 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10186 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10187 
   10188 	/* Read WUCE and save it */
   10189 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10190 
   10191 	reg = wuce | BM_WUC_ENABLE_BIT;
   10192 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10193 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10194 
   10195 	/* Select page 800 */
   10196 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10197 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10198 
   10199 	/*
   10200 	 * 2) Access PHY wakeup register.
   10201 	 * See e1000_access_phy_wakeup_reg_bm.
   10202 	 */
   10203 
   10204 	/* Write page 800 */
   10205 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10206 
   10207 	if (rd)
   10208 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10209 	else
   10210 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10211 
   10212 	/*
   10213 	 * 3) Disable PHY wakeup register.
   10214 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10215 	 */
   10216 	/* Set page 769 */
   10217 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10218 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10219 
   10220 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10221 }
   10222 
   10223 /*
   10224  * wm_gmii_hv_readreg:	[mii interface function]
   10225  *
10226  *	Read a PHY register on the HV (PCH) PHY.
10227  * This could be handled by the PHY layer if we didn't have to lock the
10228  * resource ...
   10229  */
   10230 static int
   10231 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10232 {
   10233 	struct wm_softc *sc = device_private(dev);
   10234 	int rv;
   10235 
   10236 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10237 		device_xname(dev), __func__));
   10238 	if (sc->phy.acquire(sc)) {
   10239 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10240 		return 0;
   10241 	}
   10242 
   10243 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10244 	sc->phy.release(sc);
   10245 	return rv;
   10246 }
   10247 
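/*
 * The reg argument encodes both the page (upper bits) and the register
 * number (lower bits); see BM_PHY_REG_PAGE() and BM_PHY_REG_NUM().
 */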
   10248 static int
   10249 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10250 {
   10251 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10252 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10253 	uint16_t val;
   10254 	int rv;
   10255 
   10256 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10257 
   10258 	/* Page 800 works differently than the rest so it has its own func */
   10259 	if (page == BM_WUC_PAGE) {
   10260 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10261 		return val;
   10262 	}
   10263 
   10264 	/*
10265 	 * Pages lower than 768 work differently from the rest, so they
10266 	 * would need their own function (currently unhandled).
   10267 	 */
   10268 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10269 		printf("gmii_hv_readreg!!!\n");
   10270 		return 0;
   10271 	}
   10272 
   10273 	/*
   10274 	 * XXX I21[789] documents say that the SMBus Address register is at
   10275 	 * PHY address 01, Page 0 (not 768), Register 26.
   10276 	 */
   10277 	if (page == HV_INTC_FC_PAGE_START)
   10278 		page = 0;
   10279 
   10280 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10281 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10282 		    page << BME1000_PAGE_SHIFT);
   10283 	}
   10284 
   10285 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10286 	return rv;
   10287 }
   10288 
   10289 /*
   10290  * wm_gmii_hv_writereg:	[mii interface function]
   10291  *
10292  *	Write a PHY register on the HV (PCH) PHY.
10293  * This could be handled by the PHY layer if we didn't have to lock the
10294  * resource ...
   10295  */
   10296 static void
   10297 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10298 {
   10299 	struct wm_softc *sc = device_private(dev);
   10300 
   10301 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10302 		device_xname(dev), __func__));
   10303 
   10304 	if (sc->phy.acquire(sc)) {
   10305 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10306 		return;
   10307 	}
   10308 
   10309 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10310 	sc->phy.release(sc);
   10311 }
   10312 
   10313 static void
   10314 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10315 {
   10316 	struct wm_softc *sc = device_private(dev);
   10317 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10318 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10319 
   10320 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10321 
   10322 	/* Page 800 works differently than the rest so it has its own func */
   10323 	if (page == BM_WUC_PAGE) {
   10324 		uint16_t tmp;
   10325 
   10326 		tmp = val;
   10327 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10328 		return;
   10329 	}
   10330 
   10331 	/*
10332 	 * Pages lower than 768 work differently from the rest, so they
10333 	 * would need their own function (currently unhandled).
   10334 	 */
   10335 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10336 		printf("gmii_hv_writereg!!!\n");
   10337 		return;
   10338 	}
   10339 
   10340 	{
   10341 		/*
   10342 		 * XXX I21[789] documents say that the SMBus Address register
   10343 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10344 		 */
   10345 		if (page == HV_INTC_FC_PAGE_START)
   10346 			page = 0;
   10347 
   10348 		/*
   10349 		 * XXX Workaround MDIO accesses being disabled after entering
   10350 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10351 		 * register is set)
   10352 		 */
   10353 		if (sc->sc_phytype == WMPHY_82578) {
   10354 			struct mii_softc *child;
   10355 
   10356 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10357 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10358 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10359 			    && ((val & (1 << 11)) != 0)) {
   10360 				printf("XXX need workaround\n");
   10361 			}
   10362 		}
   10363 
   10364 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10365 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10366 			    page << BME1000_PAGE_SHIFT);
   10367 		}
   10368 	}
   10369 
   10370 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10371 }
   10372 
   10373 /*
   10374  * wm_gmii_82580_readreg:	[mii interface function]
   10375  *
   10376  *	Read a PHY register on the 82580 and I350.
   10377  * This could be handled by the PHY layer if we didn't have to lock the
10378  * resource ...
   10379  */
   10380 static int
   10381 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10382 {
   10383 	struct wm_softc *sc = device_private(dev);
   10384 	int rv;
   10385 
   10386 	if (sc->phy.acquire(sc) != 0) {
   10387 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10388 		return 0;
   10389 	}
   10390 
   10391 #ifdef DIAGNOSTIC
   10392 	if (reg > MII_ADDRMASK) {
   10393 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10394 		    __func__, sc->sc_phytype, reg);
   10395 		reg &= MII_ADDRMASK;
   10396 	}
   10397 #endif
   10398 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10399 
   10400 	sc->phy.release(sc);
   10401 	return rv;
   10402 }
   10403 
   10404 /*
   10405  * wm_gmii_82580_writereg:	[mii interface function]
   10406  *
   10407  *	Write a PHY register on the 82580 and I350.
   10408  * This could be handled by the PHY layer if we didn't have to lock the
10409  * resource ...
   10410  */
   10411 static void
   10412 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10413 {
   10414 	struct wm_softc *sc = device_private(dev);
   10415 
   10416 	if (sc->phy.acquire(sc) != 0) {
   10417 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10418 		return;
   10419 	}
   10420 
   10421 #ifdef DIAGNOSTIC
   10422 	if (reg > MII_ADDRMASK) {
   10423 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10424 		    __func__, sc->sc_phytype, reg);
   10425 		reg &= MII_ADDRMASK;
   10426 	}
   10427 #endif
   10428 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10429 
   10430 	sc->phy.release(sc);
   10431 }
   10432 
   10433 /*
   10434  * wm_gmii_gs40g_readreg:	[mii interface function]
   10435  *
10436  *	Read a PHY register on the I210 and I211.
10437  * This could be handled by the PHY layer if we didn't have to lock the
10438  * resource ...
   10439  */
   10440 static int
   10441 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10442 {
   10443 	struct wm_softc *sc = device_private(dev);
   10444 	int page, offset;
   10445 	int rv;
   10446 
   10447 	/* Acquire semaphore */
   10448 	if (sc->phy.acquire(sc)) {
   10449 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10450 		return 0;
   10451 	}
   10452 
   10453 	/* Page select */
   10454 	page = reg >> GS40G_PAGE_SHIFT;
   10455 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10456 
   10457 	/* Read reg */
   10458 	offset = reg & GS40G_OFFSET_MASK;
   10459 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10460 
   10461 	sc->phy.release(sc);
   10462 	return rv;
   10463 }
   10464 
   10465 /*
   10466  * wm_gmii_gs40g_writereg:	[mii interface function]
   10467  *
   10468  *	Write a PHY register on the I210 and I211.
   10469  * This could be handled by the PHY layer if we didn't have to lock the
10470  * resource ...
   10471  */
   10472 static void
   10473 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10474 {
   10475 	struct wm_softc *sc = device_private(dev);
   10476 	int page, offset;
   10477 
   10478 	/* Acquire semaphore */
   10479 	if (sc->phy.acquire(sc)) {
   10480 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10481 		return;
   10482 	}
   10483 
   10484 	/* Page select */
   10485 	page = reg >> GS40G_PAGE_SHIFT;
   10486 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10487 
   10488 	/* Write reg */
   10489 	offset = reg & GS40G_OFFSET_MASK;
   10490 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10491 
   10492 	/* Release semaphore */
   10493 	sc->phy.release(sc);
   10494 }
   10495 
   10496 /*
   10497  * wm_gmii_statchg:	[mii interface function]
   10498  *
   10499  *	Callback from MII layer when media changes.
   10500  */
   10501 static void
   10502 wm_gmii_statchg(struct ifnet *ifp)
   10503 {
   10504 	struct wm_softc *sc = ifp->if_softc;
   10505 	struct mii_data *mii = &sc->sc_mii;
   10506 
   10507 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10508 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10509 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10510 
   10511 	/*
   10512 	 * Get flow control negotiation result.
   10513 	 */
   10514 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10515 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10516 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10517 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10518 	}
   10519 
   10520 	if (sc->sc_flowflags & IFM_FLOW) {
   10521 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10522 			sc->sc_ctrl |= CTRL_TFCE;
   10523 			sc->sc_fcrtl |= FCRTL_XONE;
   10524 		}
   10525 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10526 			sc->sc_ctrl |= CTRL_RFCE;
   10527 	}
   10528 
   10529 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10530 		DPRINTF(WM_DEBUG_LINK,
   10531 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10532 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10533 	} else {
   10534 		DPRINTF(WM_DEBUG_LINK,
   10535 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10536 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10537 	}
   10538 
   10539 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10540 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10541 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10542 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10543 	if (sc->sc_type == WM_T_80003) {
   10544 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10545 		case IFM_1000_T:
   10546 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10547 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10548 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10549 			break;
   10550 		default:
   10551 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10552 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10553 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10554 			break;
   10555 		}
   10556 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10557 	}
   10558 }
   10559 
10560 /* Kumeran related (80003, ICH* and PCH*) */
   10561 
   10562 /*
   10563  * wm_kmrn_readreg:
   10564  *
10565  *	Read a Kumeran register
   10566  */
   10567 static int
   10568 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10569 {
   10570 	int rv;
   10571 
   10572 	if (sc->sc_type == WM_T_80003)
   10573 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10574 	else
   10575 		rv = sc->phy.acquire(sc);
   10576 	if (rv != 0) {
   10577 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10578 		    __func__);
   10579 		return rv;
   10580 	}
   10581 
   10582 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10583 
   10584 	if (sc->sc_type == WM_T_80003)
   10585 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10586 	else
   10587 		sc->phy.release(sc);
   10588 
   10589 	return rv;
   10590 }
   10591 
   10592 static int
   10593 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10594 {
   10595 
   10596 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10597 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10598 	    KUMCTRLSTA_REN);
   10599 	CSR_WRITE_FLUSH(sc);
   10600 	delay(2);
   10601 
   10602 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10603 
   10604 	return 0;
   10605 }
   10606 
   10607 /*
   10608  * wm_kmrn_writereg:
   10609  *
10610  *	Write a Kumeran register
   10611  */
   10612 static int
   10613 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10614 {
   10615 	int rv;
   10616 
   10617 	if (sc->sc_type == WM_T_80003)
   10618 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10619 	else
   10620 		rv = sc->phy.acquire(sc);
   10621 	if (rv != 0) {
   10622 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10623 		    __func__);
   10624 		return rv;
   10625 	}
   10626 
   10627 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10628 
   10629 	if (sc->sc_type == WM_T_80003)
   10630 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10631 	else
   10632 		sc->phy.release(sc);
   10633 
   10634 	return rv;
   10635 }
   10636 
   10637 static int
   10638 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10639 {
   10640 
   10641 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10642 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10643 
   10644 	return 0;
   10645 }
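
/*
 * Usage sketch (illustrative only, not part of the driver): Kumeran
 * registers are accessed through the KUMCTRLSTA window, e.g. the
 * half-duplex control register that wm_gmii_statchg() programs:
 *
 *	uint16_t val;
 *
 *	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &val) == 0)
 *		... val holds the KUMCTRLSTA_HD_CTRL_* setting ...
 */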
   10646 
   10647 /* SGMII related */
   10648 
   10649 /*
   10650  * wm_sgmii_uses_mdio
   10651  *
   10652  * Check whether the transaction is to the internal PHY or the external
   10653  * MDIO interface. Return true if it's MDIO.
   10654  */
   10655 static bool
   10656 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10657 {
   10658 	uint32_t reg;
   10659 	bool ismdio = false;
   10660 
   10661 	switch (sc->sc_type) {
   10662 	case WM_T_82575:
   10663 	case WM_T_82576:
   10664 		reg = CSR_READ(sc, WMREG_MDIC);
   10665 		ismdio = ((reg & MDIC_DEST) != 0);
   10666 		break;
   10667 	case WM_T_82580:
   10668 	case WM_T_I350:
   10669 	case WM_T_I354:
   10670 	case WM_T_I210:
   10671 	case WM_T_I211:
   10672 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10673 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10674 		break;
   10675 	default:
   10676 		break;
   10677 	}
   10678 
   10679 	return ismdio;
   10680 }
   10681 
   10682 /*
   10683  * wm_sgmii_readreg:	[mii interface function]
   10684  *
10685  *	Read a PHY register on the SGMII.
10686  * This could be handled by the PHY layer if we didn't have to lock the
10687  * resource ...
   10688  */
   10689 static int
   10690 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10691 {
   10692 	struct wm_softc *sc = device_private(dev);
   10693 	uint32_t i2ccmd;
   10694 	int i, rv;
   10695 
   10696 	if (sc->phy.acquire(sc)) {
   10697 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10698 		return 0;
   10699 	}
   10700 
   10701 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10702 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10703 	    | I2CCMD_OPCODE_READ;
   10704 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10705 
   10706 	/* Poll the ready bit */
   10707 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10708 		delay(50);
   10709 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10710 		if (i2ccmd & I2CCMD_READY)
   10711 			break;
   10712 	}
   10713 	if ((i2ccmd & I2CCMD_READY) == 0)
   10714 		device_printf(dev, "I2CCMD Read did not complete\n");
   10715 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10716 		device_printf(dev, "I2CCMD Error bit set\n");
   10717 
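	/* The data bytes come back swapped on the I2C interface. */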
   10718 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10719 
   10720 	sc->phy.release(sc);
   10721 	return rv;
   10722 }
   10723 
   10724 /*
   10725  * wm_sgmii_writereg:	[mii interface function]
   10726  *
   10727  *	Write a PHY register on the SGMII.
   10728  * This could be handled by the PHY layer if we didn't have to lock the
10729  * resource ...
   10730  */
   10731 static void
   10732 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10733 {
   10734 	struct wm_softc *sc = device_private(dev);
   10735 	uint32_t i2ccmd;
   10736 	int i;
   10737 	int val_swapped;
   10738 
   10739 	if (sc->phy.acquire(sc) != 0) {
   10740 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10741 		return;
   10742 	}
   10743 	/* Swap the data bytes for the I2C interface */
   10744 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10745 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10746 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10747 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10748 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10749 
   10750 	/* Poll the ready bit */
   10751 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10752 		delay(50);
   10753 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10754 		if (i2ccmd & I2CCMD_READY)
   10755 			break;
   10756 	}
   10757 	if ((i2ccmd & I2CCMD_READY) == 0)
   10758 		device_printf(dev, "I2CCMD Write did not complete\n");
   10759 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10760 		device_printf(dev, "I2CCMD Error bit set\n");
   10761 
   10762 	sc->phy.release(sc);
   10763 }
   10764 
   10765 /* TBI related */
   10766 
   10767 /*
   10768  * wm_tbi_mediainit:
   10769  *
   10770  *	Initialize media for use on 1000BASE-X devices.
   10771  */
   10772 static void
   10773 wm_tbi_mediainit(struct wm_softc *sc)
   10774 {
   10775 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10776 	const char *sep = "";
   10777 
   10778 	if (sc->sc_type < WM_T_82543)
   10779 		sc->sc_tipg = TIPG_WM_DFLT;
   10780 	else
   10781 		sc->sc_tipg = TIPG_LG_DFLT;
   10782 
   10783 	sc->sc_tbi_serdes_anegticks = 5;
   10784 
   10785 	/* Initialize our media structures */
   10786 	sc->sc_mii.mii_ifp = ifp;
   10787 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10788 
   10789 	if ((sc->sc_type >= WM_T_82575)
   10790 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10791 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10792 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10793 	else
   10794 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10795 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10796 
   10797 	/*
   10798 	 * SWD Pins:
   10799 	 *
   10800 	 *	0 = Link LED (output)
   10801 	 *	1 = Loss Of Signal (input)
   10802 	 */
   10803 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10804 
   10805 	/* XXX Perhaps this is only for TBI */
   10806 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10807 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10808 
   10809 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10810 		sc->sc_ctrl &= ~CTRL_LRST;
   10811 
   10812 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10813 
   10814 #define	ADD(ss, mm, dd)							\
   10815 do {									\
   10816 	aprint_normal("%s%s", sep, ss);					\
   10817 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10818 	sep = ", ";							\
   10819 } while (/*CONSTCOND*/0)
   10820 
   10821 	aprint_normal_dev(sc->sc_dev, "");
   10822 
   10823 	if (sc->sc_type == WM_T_I354) {
   10824 		uint32_t status;
   10825 
   10826 		status = CSR_READ(sc, WMREG_STATUS);
   10827 		if (((status & STATUS_2P5_SKU) != 0)
   10828 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10829 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
   10830 		} else
10831 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10832 	} else if (sc->sc_type == WM_T_82545) {
   10833 		/* Only 82545 is LX (XXX except SFP) */
   10834 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10835 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10836 	} else {
   10837 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10838 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10839 	}
   10840 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10841 	aprint_normal("\n");
   10842 
   10843 #undef ADD
   10844 
   10845 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10846 }
   10847 
   10848 /*
   10849  * wm_tbi_mediachange:	[ifmedia interface function]
   10850  *
   10851  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10852  */
   10853 static int
   10854 wm_tbi_mediachange(struct ifnet *ifp)
   10855 {
   10856 	struct wm_softc *sc = ifp->if_softc;
   10857 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10858 	uint32_t status;
   10859 	int i;
   10860 
   10861 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10862 		/* XXX need some work for >= 82571 and < 82575 */
   10863 		if (sc->sc_type < WM_T_82575)
   10864 			return 0;
   10865 	}
   10866 
   10867 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10868 	    || (sc->sc_type >= WM_T_82575))
   10869 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10870 
   10871 	sc->sc_ctrl &= ~CTRL_LRST;
   10872 	sc->sc_txcw = TXCW_ANE;
   10873 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10874 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10875 	else if (ife->ifm_media & IFM_FDX)
   10876 		sc->sc_txcw |= TXCW_FD;
   10877 	else
   10878 		sc->sc_txcw |= TXCW_HD;
   10879 
   10880 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10881 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10882 
   10883 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10884 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10885 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10886 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10887 	CSR_WRITE_FLUSH(sc);
   10888 	delay(1000);
   10889 
   10890 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10891 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10892 
   10893 	/*
10894 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
10895 	 * optics detect a signal; on older chips the sense is inverted.
   10896 	 */
   10897 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10898 		/* Have signal; wait for the link to come up. */
   10899 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10900 			delay(10000);
   10901 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10902 				break;
   10903 		}
   10904 
   10905 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10906 			    device_xname(sc->sc_dev),i));
   10907 
   10908 		status = CSR_READ(sc, WMREG_STATUS);
   10909 		DPRINTF(WM_DEBUG_LINK,
   10910 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10911 			device_xname(sc->sc_dev),status, STATUS_LU));
   10912 		if (status & STATUS_LU) {
   10913 			/* Link is up. */
   10914 			DPRINTF(WM_DEBUG_LINK,
   10915 			    ("%s: LINK: set media -> link up %s\n",
   10916 			    device_xname(sc->sc_dev),
   10917 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10918 
   10919 			/*
   10920 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10921 			 * so we should update sc->sc_ctrl
   10922 			 */
   10923 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10924 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10925 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10926 			if (status & STATUS_FD)
   10927 				sc->sc_tctl |=
   10928 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10929 			else
   10930 				sc->sc_tctl |=
   10931 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10932 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10933 				sc->sc_fcrtl |= FCRTL_XONE;
   10934 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10935 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10936 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10937 				      sc->sc_fcrtl);
   10938 			sc->sc_tbi_linkup = 1;
   10939 		} else {
   10940 			if (i == WM_LINKUP_TIMEOUT)
   10941 				wm_check_for_link(sc);
   10942 			/* Link is down. */
   10943 			DPRINTF(WM_DEBUG_LINK,
   10944 			    ("%s: LINK: set media -> link down\n",
   10945 			    device_xname(sc->sc_dev)));
   10946 			sc->sc_tbi_linkup = 0;
   10947 		}
   10948 	} else {
   10949 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10950 		    device_xname(sc->sc_dev)));
   10951 		sc->sc_tbi_linkup = 0;
   10952 	}
   10953 
   10954 	wm_tbi_serdes_set_linkled(sc);
   10955 
   10956 	return 0;
   10957 }
   10958 
   10959 /*
   10960  * wm_tbi_mediastatus:	[ifmedia interface function]
   10961  *
   10962  *	Get the current interface media status on a 1000BASE-X device.
   10963  */
   10964 static void
   10965 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10966 {
   10967 	struct wm_softc *sc = ifp->if_softc;
   10968 	uint32_t ctrl, status;
   10969 
   10970 	ifmr->ifm_status = IFM_AVALID;
   10971 	ifmr->ifm_active = IFM_ETHER;
   10972 
   10973 	status = CSR_READ(sc, WMREG_STATUS);
   10974 	if ((status & STATUS_LU) == 0) {
   10975 		ifmr->ifm_active |= IFM_NONE;
   10976 		return;
   10977 	}
   10978 
   10979 	ifmr->ifm_status |= IFM_ACTIVE;
   10980 	/* Only 82545 is LX */
   10981 	if (sc->sc_type == WM_T_82545)
   10982 		ifmr->ifm_active |= IFM_1000_LX;
   10983 	else
   10984 		ifmr->ifm_active |= IFM_1000_SX;
   10985 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10986 		ifmr->ifm_active |= IFM_FDX;
   10987 	else
   10988 		ifmr->ifm_active |= IFM_HDX;
   10989 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10990 	if (ctrl & CTRL_RFCE)
   10991 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10992 	if (ctrl & CTRL_TFCE)
   10993 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10994 }
   10995 
   10996 /* XXX TBI only */
   10997 static int
   10998 wm_check_for_link(struct wm_softc *sc)
   10999 {
   11000 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11001 	uint32_t rxcw;
   11002 	uint32_t ctrl;
   11003 	uint32_t status;
   11004 	uint32_t sig;
   11005 
   11006 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11007 		/* XXX need some work for >= 82571 */
   11008 		if (sc->sc_type >= WM_T_82571) {
   11009 			sc->sc_tbi_linkup = 1;
   11010 			return 0;
   11011 		}
   11012 	}
   11013 
   11014 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11015 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11016 	status = CSR_READ(sc, WMREG_STATUS);
   11017 
   11018 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11019 
   11020 	DPRINTF(WM_DEBUG_LINK,
   11021 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11022 		device_xname(sc->sc_dev), __func__,
   11023 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11024 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11025 
   11026 	/*
   11027 	 * SWDPIN   LU RXCW
   11028 	 *      0    0    0
   11029 	 *      0    0    1	(should not happen)
   11030 	 *      0    1    0	(should not happen)
   11031 	 *      0    1    1	(should not happen)
   11032 	 *      1    0    0	Disable autonego and force linkup
   11033 	 *      1    0    1	got /C/ but not linkup yet
   11034 	 *      1    1    0	(linkup)
   11035 	 *      1    1    1	If IFM_AUTO, back to autonego
   11036 	 *
   11037 	 */
   11038 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11039 	    && ((status & STATUS_LU) == 0)
   11040 	    && ((rxcw & RXCW_C) == 0)) {
   11041 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11042 			__func__));
   11043 		sc->sc_tbi_linkup = 0;
   11044 		/* Disable auto-negotiation in the TXCW register */
   11045 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11046 
   11047 		/*
   11048 		 * Force link-up and also force full-duplex.
   11049 		 *
11050 		 * NOTE: CTRL will update TFCE and RFCE automatically,
   11051 		 * so we should update sc->sc_ctrl
   11052 		 */
   11053 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11054 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11055 	} else if (((status & STATUS_LU) != 0)
   11056 	    && ((rxcw & RXCW_C) != 0)
   11057 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11058 		sc->sc_tbi_linkup = 1;
   11059 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11060 			__func__));
   11061 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11062 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11063 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11064 	    && ((rxcw & RXCW_C) != 0)) {
   11065 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11066 	} else {
   11067 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11068 			status));
   11069 	}
   11070 
   11071 	return 0;
   11072 }
   11073 
   11074 /*
   11075  * wm_tbi_tick:
   11076  *
   11077  *	Check the link on TBI devices.
   11078  *	This function acts as mii_tick().
   11079  */
   11080 static void
   11081 wm_tbi_tick(struct wm_softc *sc)
   11082 {
   11083 	struct mii_data *mii = &sc->sc_mii;
   11084 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11085 	uint32_t status;
   11086 
   11087 	KASSERT(WM_CORE_LOCKED(sc));
   11088 
   11089 	status = CSR_READ(sc, WMREG_STATUS);
   11090 
   11091 	/* XXX is this needed? */
   11092 	(void)CSR_READ(sc, WMREG_RXCW);
   11093 	(void)CSR_READ(sc, WMREG_CTRL);
   11094 
   11095 	/* set link status */
   11096 	if ((status & STATUS_LU) == 0) {
   11097 		DPRINTF(WM_DEBUG_LINK,
   11098 		    ("%s: LINK: checklink -> down\n",
   11099 			device_xname(sc->sc_dev)));
   11100 		sc->sc_tbi_linkup = 0;
   11101 	} else if (sc->sc_tbi_linkup == 0) {
   11102 		DPRINTF(WM_DEBUG_LINK,
   11103 		    ("%s: LINK: checklink -> up %s\n",
   11104 			device_xname(sc->sc_dev),
   11105 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11106 		sc->sc_tbi_linkup = 1;
   11107 		sc->sc_tbi_serdes_ticks = 0;
   11108 	}
   11109 
   11110 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11111 		goto setled;
   11112 
   11113 	if ((status & STATUS_LU) == 0) {
   11114 		sc->sc_tbi_linkup = 0;
   11115 		/* If the timer expired, retry autonegotiation */
   11116 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11117 		    && (++sc->sc_tbi_serdes_ticks
   11118 			>= sc->sc_tbi_serdes_anegticks)) {
   11119 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11120 			sc->sc_tbi_serdes_ticks = 0;
   11121 			/*
   11122 			 * Reset the link, and let autonegotiation do
   11123 			 * its thing
   11124 			 */
   11125 			sc->sc_ctrl |= CTRL_LRST;
   11126 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11127 			CSR_WRITE_FLUSH(sc);
   11128 			delay(1000);
   11129 			sc->sc_ctrl &= ~CTRL_LRST;
   11130 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11131 			CSR_WRITE_FLUSH(sc);
   11132 			delay(1000);
   11133 			CSR_WRITE(sc, WMREG_TXCW,
   11134 			    sc->sc_txcw & ~TXCW_ANE);
   11135 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11136 		}
   11137 	}
   11138 
   11139 setled:
   11140 	wm_tbi_serdes_set_linkled(sc);
   11141 }
   11142 
   11143 /* SERDES related */
   11144 static void
   11145 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11146 {
   11147 	uint32_t reg;
   11148 
   11149 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11150 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11151 		return;
   11152 
   11153 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11154 	reg |= PCS_CFG_PCS_EN;
   11155 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11156 
   11157 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11158 	reg &= ~CTRL_EXT_SWDPIN(3);
   11159 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11160 	CSR_WRITE_FLUSH(sc);
   11161 }
   11162 
   11163 static int
   11164 wm_serdes_mediachange(struct ifnet *ifp)
   11165 {
   11166 	struct wm_softc *sc = ifp->if_softc;
   11167 	bool pcs_autoneg = true; /* XXX */
   11168 	uint32_t ctrl_ext, pcs_lctl, reg;
   11169 
   11170 	/* XXX Currently, this function is not called on 8257[12] */
   11171 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11172 	    || (sc->sc_type >= WM_T_82575))
   11173 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11174 
   11175 	wm_serdes_power_up_link_82575(sc);
   11176 
   11177 	sc->sc_ctrl |= CTRL_SLU;
   11178 
   11179 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11180 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11181 
   11182 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11183 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11184 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11185 	case CTRL_EXT_LINK_MODE_SGMII:
   11186 		pcs_autoneg = true;
   11187 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11188 		break;
   11189 	case CTRL_EXT_LINK_MODE_1000KX:
   11190 		pcs_autoneg = false;
   11191 		/* FALLTHROUGH */
   11192 	default:
   11193 		if ((sc->sc_type == WM_T_82575)
   11194 		    || (sc->sc_type == WM_T_82576)) {
   11195 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11196 				pcs_autoneg = false;
   11197 		}
   11198 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11199 		    | CTRL_FRCFDX;
   11200 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11201 	}
   11202 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11203 
   11204 	if (pcs_autoneg) {
   11205 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11206 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11207 
   11208 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11209 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11210 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11211 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11212 	} else
   11213 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11214 
   11215 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11216 
   11217 
   11218 	return 0;
   11219 }
   11220 
   11221 static void
   11222 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11223 {
   11224 	struct wm_softc *sc = ifp->if_softc;
   11225 	struct mii_data *mii = &sc->sc_mii;
   11226 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11227 	uint32_t pcs_adv, pcs_lpab, reg;
   11228 
   11229 	ifmr->ifm_status = IFM_AVALID;
   11230 	ifmr->ifm_active = IFM_ETHER;
   11231 
   11232 	/* Check PCS */
   11233 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11234 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11235 		ifmr->ifm_active |= IFM_NONE;
   11236 		sc->sc_tbi_linkup = 0;
   11237 		goto setled;
   11238 	}
   11239 
   11240 	sc->sc_tbi_linkup = 1;
   11241 	ifmr->ifm_status |= IFM_ACTIVE;
   11242 	if (sc->sc_type == WM_T_I354) {
   11243 		uint32_t status;
   11244 
   11245 		status = CSR_READ(sc, WMREG_STATUS);
   11246 		if (((status & STATUS_2P5_SKU) != 0)
   11247 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11248 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11249 		} else
   11250 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11251 	} else {
   11252 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11253 		case PCS_LSTS_SPEED_10:
   11254 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11255 			break;
   11256 		case PCS_LSTS_SPEED_100:
   11257 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11258 			break;
   11259 		case PCS_LSTS_SPEED_1000:
   11260 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11261 			break;
   11262 		default:
   11263 			device_printf(sc->sc_dev, "Unknown speed\n");
   11264 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11265 			break;
   11266 		}
   11267 	}
   11268 	if ((reg & PCS_LSTS_FDX) != 0)
   11269 		ifmr->ifm_active |= IFM_FDX;
   11270 	else
   11271 		ifmr->ifm_active |= IFM_HDX;
   11272 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11273 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11274 		/* Check flow */
   11275 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11276 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11277 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11278 			goto setled;
   11279 		}
   11280 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11281 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11282 		DPRINTF(WM_DEBUG_LINK,
   11283 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11284 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11285 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11286 			mii->mii_media_active |= IFM_FLOW
   11287 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11288 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11289 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11290 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11291 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11292 			mii->mii_media_active |= IFM_FLOW
   11293 			    | IFM_ETH_TXPAUSE;
   11294 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11295 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11296 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11297 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11298 			mii->mii_media_active |= IFM_FLOW
   11299 			    | IFM_ETH_RXPAUSE;
   11300 		}
   11301 	}
   11302 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11303 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11304 setled:
   11305 	wm_tbi_serdes_set_linkled(sc);
   11306 }
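
/*
 * Editor's note: the three-way chain above implements the pause (flow
 * control) resolution of IEEE 802.3 annex 28B.  A standalone sketch of
 * the same decision table (illustrative only, not driver code; the
 * PAUSE_* names are hypothetical):
 */
#if 0
#define PAUSE_SYM	0x1	/* symmetric pause advertised */
#define PAUSE_ASYM	0x2	/* asymmetric pause advertised */
#define PAUSE_TX	0x1	/* resolved: we may send pause frames */
#define PAUSE_RX	0x2	/* resolved: we honor received pause frames */

/* Resolve pause ability from local and link partner advertisements. */
static unsigned int
pause_resolve(unsigned int lcl, unsigned int rem)
{
	if ((lcl & PAUSE_SYM) && (rem & PAUSE_SYM))
		return PAUSE_TX | PAUSE_RX;
	if (!(lcl & PAUSE_SYM) && (lcl & PAUSE_ASYM)
	    && (rem & PAUSE_SYM) && (rem & PAUSE_ASYM))
		return PAUSE_TX;
	if ((lcl & PAUSE_SYM) && (lcl & PAUSE_ASYM)
	    && !(rem & PAUSE_SYM) && (rem & PAUSE_ASYM))
		return PAUSE_RX;
	return 0;
}
#endif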
   11307 
   11308 /*
   11309  * wm_serdes_tick:
   11310  *
   11311  *	Check the link on serdes devices.
   11312  */
   11313 static void
   11314 wm_serdes_tick(struct wm_softc *sc)
   11315 {
   11316 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11317 	struct mii_data *mii = &sc->sc_mii;
   11318 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11319 	uint32_t reg;
   11320 
   11321 	KASSERT(WM_CORE_LOCKED(sc));
   11322 
   11323 	mii->mii_media_status = IFM_AVALID;
   11324 	mii->mii_media_active = IFM_ETHER;
   11325 
   11326 	/* Check PCS */
   11327 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11328 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11329 		mii->mii_media_status |= IFM_ACTIVE;
   11330 		sc->sc_tbi_linkup = 1;
   11331 		sc->sc_tbi_serdes_ticks = 0;
   11332 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11333 		if ((reg & PCS_LSTS_FDX) != 0)
   11334 			mii->mii_media_active |= IFM_FDX;
   11335 		else
   11336 			mii->mii_media_active |= IFM_HDX;
   11337 	} else {
   11338 		mii->mii_media_active |= IFM_NONE;
   11339 		sc->sc_tbi_linkup = 0;
   11340 		/* If the timer expired, retry autonegotiation */
   11341 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11342 		    && (++sc->sc_tbi_serdes_ticks
   11343 			>= sc->sc_tbi_serdes_anegticks)) {
   11344 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11345 			sc->sc_tbi_serdes_ticks = 0;
   11346 			/* XXX */
   11347 			wm_serdes_mediachange(ifp);
   11348 		}
   11349 	}
   11350 
   11351 	wm_tbi_serdes_set_linkled(sc);
   11352 }
   11353 
   11354 /* SFP related */
   11355 
   11356 static int
   11357 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11358 {
   11359 	uint32_t i2ccmd;
   11360 	int i;
   11361 
   11362 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11363 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11364 
   11365 	/* Poll the ready bit */
   11366 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11367 		delay(50);
   11368 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11369 		if (i2ccmd & I2CCMD_READY)
   11370 			break;
   11371 	}
   11372 	if ((i2ccmd & I2CCMD_READY) == 0)
   11373 		return -1;
   11374 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11375 		return -1;
   11376 
   11377 	*data = i2ccmd & 0x00ff;
   11378 
   11379 	return 0;
   11380 }
   11381 
   11382 static uint32_t
   11383 wm_sfp_get_media_type(struct wm_softc *sc)
   11384 {
   11385 	uint32_t ctrl_ext;
   11386 	uint8_t val = 0;
   11387 	int timeout = 3;
   11388 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11389 	int rv = -1;
   11390 
   11391 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11392 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11393 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11394 	CSR_WRITE_FLUSH(sc);
   11395 
   11396 	/* Read SFP module data */
   11397 	while (timeout) {
   11398 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11399 		if (rv == 0)
   11400 			break;
   11401 		delay(100*1000); /* XXX too big */
   11402 		timeout--;
   11403 	}
   11404 	if (rv != 0)
   11405 		goto out;
   11406 	switch (val) {
   11407 	case SFF_SFP_ID_SFF:
   11408 		aprint_normal_dev(sc->sc_dev,
   11409 		    "Module/Connector soldered to board\n");
   11410 		break;
   11411 	case SFF_SFP_ID_SFP:
   11412 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11413 		break;
   11414 	case SFF_SFP_ID_UNKNOWN:
   11415 		goto out;
   11416 	default:
   11417 		break;
   11418 	}
   11419 
   11420 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11421 	if (rv != 0) {
   11422 		goto out;
   11423 	}
   11424 
   11425 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11426 		mediatype = WM_MEDIATYPE_SERDES;
   11427 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11428 		sc->sc_flags |= WM_F_SGMII;
   11429 		mediatype = WM_MEDIATYPE_COPPER;
   11430 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11431 		sc->sc_flags |= WM_F_SGMII;
   11432 		mediatype = WM_MEDIATYPE_SERDES;
   11433 	}
   11434 
   11435 out:
   11436 	/* Restore I2C interface setting */
   11437 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11438 
   11439 	return mediatype;
   11440 }
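
/*
 * Editor's note: the reads above follow the SFF-8472 module EEPROM
 * layout (inferred here from the SFF_* macro names): an identifier
 * byte saying whether the module is an SFP or soldered to the board,
 * then Ethernet compliance code flags.  The flag tests map
 * 1000BASE-SX/LX modules to SERDES, 1000BASE-T modules to copper over
 * SGMII and 100BASE-FX modules to SERDES over SGMII.
 */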
   11441 
   11442 /*
   11443  * NVM related.
   11444  * Microwire, SPI (with or without EERD) and Flash.
   11445  */
   11446 
   11447 /* Both spi and uwire */
   11448 
   11449 /*
   11450  * wm_eeprom_sendbits:
   11451  *
   11452  *	Send a series of bits to the EEPROM.
   11453  */
   11454 static void
   11455 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11456 {
   11457 	uint32_t reg;
   11458 	int x;
   11459 
   11460 	reg = CSR_READ(sc, WMREG_EECD);
   11461 
   11462 	for (x = nbits; x > 0; x--) {
   11463 		if (bits & (1U << (x - 1)))
   11464 			reg |= EECD_DI;
   11465 		else
   11466 			reg &= ~EECD_DI;
   11467 		CSR_WRITE(sc, WMREG_EECD, reg);
   11468 		CSR_WRITE_FLUSH(sc);
   11469 		delay(2);
   11470 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11471 		CSR_WRITE_FLUSH(sc);
   11472 		delay(2);
   11473 		CSR_WRITE(sc, WMREG_EECD, reg);
   11474 		CSR_WRITE_FLUSH(sc);
   11475 		delay(2);
   11476 	}
   11477 }
   11478 
   11479 /*
   11480  * wm_eeprom_recvbits:
   11481  *
   11482  *	Receive a series of bits from the EEPROM.
   11483  */
   11484 static void
   11485 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11486 {
   11487 	uint32_t reg, val;
   11488 	int x;
   11489 
   11490 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11491 
   11492 	val = 0;
   11493 	for (x = nbits; x > 0; x--) {
   11494 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11495 		CSR_WRITE_FLUSH(sc);
   11496 		delay(2);
   11497 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11498 			val |= (1U << (x - 1));
   11499 		CSR_WRITE(sc, WMREG_EECD, reg);
   11500 		CSR_WRITE_FLUSH(sc);
   11501 		delay(2);
   11502 	}
   11503 	*valp = val;
   11504 }
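
/*
 * Editor's note: both helpers above bit-bang the EEPROM's serial bus
 * MSB-first, pulsing SK around every DI/DO bit with CSR_WRITE_FLUSH()
 * and a short settle delay between edges.  A condensed model of the
 * send side (illustrative sketch only, not driver code):
 */
#if 0
/*
 * Emit 'nbits' of 'bits' MSB-first; bit_out() stands in for the
 * DI-write plus SK-pulse sequence done by wm_eeprom_sendbits().
 */
static void
shift_out_msb_first(uint32_t bits, int nbits, void (*bit_out)(int))
{
	int x;

	for (x = nbits; x > 0; x--)
		bit_out((bits >> (x - 1)) & 1);
}
#endif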
   11505 
   11506 /* Microwire */
   11507 
   11508 /*
   11509  * wm_nvm_read_uwire:
   11510  *
   11511  *	Read a word from the EEPROM using the MicroWire protocol.
   11512  */
   11513 static int
   11514 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11515 {
   11516 	uint32_t reg, val;
   11517 	int i;
   11518 
   11519 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11520 		device_xname(sc->sc_dev), __func__));
   11521 
   11522 	if (sc->nvm.acquire(sc) != 0)
   11523 		return -1;
   11524 
   11525 	for (i = 0; i < wordcnt; i++) {
   11526 		/* Clear SK and DI. */
   11527 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11528 		CSR_WRITE(sc, WMREG_EECD, reg);
   11529 
   11530 		/*
   11531 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11532 		 * and Xen.
   11533 		 *
   11534 		 * We use this workaround only for 82540 because qemu's
   11535 		 * e1000 acts as an 82540.
   11536 		 */
   11537 		if (sc->sc_type == WM_T_82540) {
   11538 			reg |= EECD_SK;
   11539 			CSR_WRITE(sc, WMREG_EECD, reg);
   11540 			reg &= ~EECD_SK;
   11541 			CSR_WRITE(sc, WMREG_EECD, reg);
   11542 			CSR_WRITE_FLUSH(sc);
   11543 			delay(2);
   11544 		}
   11545 		/* XXX: end of workaround */
   11546 
   11547 		/* Set CHIP SELECT. */
   11548 		reg |= EECD_CS;
   11549 		CSR_WRITE(sc, WMREG_EECD, reg);
   11550 		CSR_WRITE_FLUSH(sc);
   11551 		delay(2);
   11552 
   11553 		/* Shift in the READ command. */
   11554 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11555 
   11556 		/* Shift in address. */
   11557 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11558 
   11559 		/* Shift out the data. */
   11560 		wm_eeprom_recvbits(sc, &val, 16);
   11561 		data[i] = val & 0xffff;
   11562 
   11563 		/* Clear CHIP SELECT. */
   11564 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11565 		CSR_WRITE(sc, WMREG_EECD, reg);
   11566 		CSR_WRITE_FLUSH(sc);
   11567 		delay(2);
   11568 	}
   11569 
   11570 	sc->nvm.release(sc);
   11571 	return 0;
   11572 }
   11573 
   11574 /* SPI */
   11575 
   11576 /*
   11577  * Set SPI and FLASH related information from the EECD register.
   11578  * For 82541 and 82547, the word size is taken from EEPROM.
   11579  */
   11580 static int
   11581 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11582 {
   11583 	int size;
   11584 	uint32_t reg;
   11585 	uint16_t data;
   11586 
   11587 	reg = CSR_READ(sc, WMREG_EECD);
   11588 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11589 
   11590 	/* Read the size of NVM from EECD by default */
   11591 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11592 	switch (sc->sc_type) {
   11593 	case WM_T_82541:
   11594 	case WM_T_82541_2:
   11595 	case WM_T_82547:
   11596 	case WM_T_82547_2:
   11597 		/* Set dummy value to access EEPROM */
   11598 		sc->sc_nvm_wordsize = 64;
   11599 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11600 		reg = data;
   11601 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11602 		if (size == 0)
   11603 			size = 6; /* 64 word size */
   11604 		else
   11605 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11606 		break;
   11607 	case WM_T_80003:
   11608 	case WM_T_82571:
   11609 	case WM_T_82572:
   11610 	case WM_T_82573: /* SPI case */
   11611 	case WM_T_82574: /* SPI case */
   11612 	case WM_T_82583: /* SPI case */
   11613 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11614 		if (size > 14)
   11615 			size = 14;
   11616 		break;
   11617 	case WM_T_82575:
   11618 	case WM_T_82576:
   11619 	case WM_T_82580:
   11620 	case WM_T_I350:
   11621 	case WM_T_I354:
   11622 	case WM_T_I210:
   11623 	case WM_T_I211:
   11624 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11625 		if (size > 15)
   11626 			size = 15;
   11627 		break;
   11628 	default:
   11629 		aprint_error_dev(sc->sc_dev,
   11630 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11631 		return -1;
   11632 		break;
   11633 	}
   11634 
   11635 	sc->sc_nvm_wordsize = 1 << size;
   11636 
   11637 	return 0;
   11638 }
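
/*
 * Editor's note: worked example of the sizing above, assuming
 * NVM_WORD_SIZE_BASE_SHIFT is 6 as in other e1000-family drivers: an
 * EECD size field of 2 on an 82575 gives size = 2 + 6 = 8, so
 * sc_nvm_wordsize = 1 << 8 = 256 16-bit words (512 bytes).  The clamps
 * only cap the exponent at 14 or 15 (16K or 32K words).
 */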
   11639 
   11640 /*
   11641  * wm_nvm_ready_spi:
   11642  *
   11643  *	Wait for a SPI EEPROM to be ready for commands.
   11644  */
   11645 static int
   11646 wm_nvm_ready_spi(struct wm_softc *sc)
   11647 {
   11648 	uint32_t val;
   11649 	int usec;
   11650 
   11651 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11652 		device_xname(sc->sc_dev), __func__));
   11653 
   11654 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11655 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11656 		wm_eeprom_recvbits(sc, &val, 8);
   11657 		if ((val & SPI_SR_RDY) == 0)
   11658 			break;
   11659 	}
   11660 	if (usec >= SPI_MAX_RETRIES) {
   11661 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11662 		return -1;
   11663 	}
   11664 	return 0;
   11665 }
   11666 
   11667 /*
   11668  * wm_nvm_read_spi:
   11669  *
   11670  *	Read a work from the EEPROM using the SPI protocol.
   11671  *	Read a word from the EEPROM using the SPI protocol.
   11672 static int
   11673 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11674 {
   11675 	uint32_t reg, val;
   11676 	int i;
   11677 	uint8_t opc;
   11678 	int rv = 0;
   11679 
   11680 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11681 		device_xname(sc->sc_dev), __func__));
   11682 
   11683 	if (sc->nvm.acquire(sc) != 0)
   11684 		return -1;
   11685 
   11686 	/* Clear SK and CS. */
   11687 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11688 	CSR_WRITE(sc, WMREG_EECD, reg);
   11689 	CSR_WRITE_FLUSH(sc);
   11690 	delay(2);
   11691 
   11692 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11693 		goto out;
   11694 
   11695 	/* Toggle CS to flush commands. */
   11696 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11697 	CSR_WRITE_FLUSH(sc);
   11698 	delay(2);
   11699 	CSR_WRITE(sc, WMREG_EECD, reg);
   11700 	CSR_WRITE_FLUSH(sc);
   11701 	delay(2);
   11702 
   11703 	opc = SPI_OPC_READ;
   11704 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11705 		opc |= SPI_OPC_A8;
   11706 
   11707 	wm_eeprom_sendbits(sc, opc, 8);
   11708 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11709 
   11710 	for (i = 0; i < wordcnt; i++) {
   11711 		wm_eeprom_recvbits(sc, &val, 16);
   11712 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11713 	}
   11714 
   11715 	/* Raise CS and clear SK. */
   11716 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11717 	CSR_WRITE(sc, WMREG_EECD, reg);
   11718 	CSR_WRITE_FLUSH(sc);
   11719 	delay(2);
   11720 
   11721 out:
   11722 	sc->nvm.release(sc);
   11723 	return rv;
   11724 }
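
/*
 * Editor's note: the byte swap in the loop above converts the MSB-first
 * SPI bit stream into the EEPROM's little-endian word order.  For
 * example, if the wire delivers the bytes 0x34 then 0x12, recvbits
 * yields val == 0x3412 and the stored word becomes 0x1234.
 */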
   11725 
   11726 /* Using with EERD */
   11727 
   11728 static int
   11729 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11730 {
   11731 	uint32_t attempts = 100000;
   11732 	uint32_t i, reg = 0;
   11733 	int32_t done = -1;
   11734 
   11735 	for (i = 0; i < attempts; i++) {
   11736 		reg = CSR_READ(sc, rw);
   11737 
   11738 		if (reg & EERD_DONE) {
   11739 			done = 0;
   11740 			break;
   11741 		}
   11742 		delay(5);
   11743 	}
   11744 
   11745 	return done;
   11746 }
   11747 
   11748 static int
   11749 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11750     uint16_t *data)
   11751 {
   11752 	int i, eerd = 0;
   11753 	int rv = 0;
   11754 
   11755 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11756 		device_xname(sc->sc_dev), __func__));
   11757 
   11758 	if (sc->nvm.acquire(sc) != 0)
   11759 		return -1;
   11760 
   11761 	for (i = 0; i < wordcnt; i++) {
   11762 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11763 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11764 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11765 		if (rv != 0) {
   11766 			aprint_error_dev(sc->sc_dev, "EERD polling failed\n");
   11767 			break;
   11768 		}
   11769 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11770 	}
   11771 
   11772 	sc->nvm.release(sc);
   11773 	return rv;
   11774 }
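
/*
 * Editor's note: the EERD path above replaces bit-banging with one
 * register handshake per word.  A condensed sketch of a single word
 * read (illustrative only; locking and error handling omitted):
 */
#if 0
	CSR_WRITE(sc, WMREG_EERD, (word << EERD_ADDR_SHIFT) | EERD_START);
	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
		delay(5);
	*data = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif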
   11775 
   11776 /* Flash */
   11777 
   11778 static int
   11779 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11780 {
   11781 	uint32_t eecd;
   11782 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11783 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11784 	uint8_t sig_byte = 0;
   11785 
   11786 	switch (sc->sc_type) {
   11787 	case WM_T_PCH_SPT:
   11788 		/*
   11789 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11790 		 * sector valid bits from the NVM.
   11791 		 */
   11792 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11793 		if ((*bank == 0) || (*bank == 1)) {
   11794 			aprint_error_dev(sc->sc_dev,
   11795 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11796 				*bank);
   11797 			return -1;
   11798 		} else {
   11799 			*bank = *bank - 2;
   11800 			return 0;
   11801 		}
   11802 	case WM_T_ICH8:
   11803 	case WM_T_ICH9:
   11804 		eecd = CSR_READ(sc, WMREG_EECD);
   11805 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11806 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11807 			return 0;
   11808 		}
   11809 		/* FALLTHROUGH */
   11810 	default:
   11811 		/* Default to 0 */
   11812 		*bank = 0;
   11813 
   11814 		/* Check bank 0 */
   11815 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11816 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11817 			*bank = 0;
   11818 			return 0;
   11819 		}
   11820 
   11821 		/* Check bank 1 */
   11822 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11823 		    &sig_byte);
   11824 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11825 			*bank = 1;
   11826 			return 0;
   11827 		}
   11828 	}
   11829 
   11830 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11831 		device_xname(sc->sc_dev)));
   11832 	return -1;
   11833 }
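
/*
 * Editor's note: on PCH_SPT the CTRL_EXT field encodes the valid bank
 * as 2 or 3 (hence the "*bank - 2" above).  Older ICH8/ICH9 parts can
 * report it in EECD; everything else is probed by reading the per-bank
 * signature byte at word ICH_NVM_SIG_WORD, whose masked bits must read
 * ICH_NVM_SIG_VALUE for that bank to be considered valid.
 */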
   11834 
   11835 /******************************************************************************
   11836  * This function does initial flash setup so that a new read/write/erase cycle
   11837  * can be started.
   11838  *
   11839  * sc - The pointer to the hw structure
   11840  ****************************************************************************/
   11841 static int32_t
   11842 wm_ich8_cycle_init(struct wm_softc *sc)
   11843 {
   11844 	uint16_t hsfsts;
   11845 	int32_t error = 1;
   11846 	int32_t i     = 0;
   11847 
   11848 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11849 
   11850 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   11851 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11852 		return error;
   11853 	}
   11854 
   11855 	/* Clear FCERR in Hw status by writing 1 */
   11856 	/* Clear DAEL in Hw status by writing a 1 */
   11857 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11858 
   11859 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11860 
   11861 	/*
   11862 	 * Either we should have a hardware SPI cycle-in-progress bit to
   11863 	 * check against in order to start a new cycle, or the FDONE bit
   11864 	 * should be changed in the hardware so that it is 1 after a
   11865 	 * hardware reset, which could then be used to tell whether a cycle
   11866 	 * is in progress or has been completed.  We should also have some
   11867 	 * software semaphore mechanism to guard FDONE or the cycle-in-
   11868 	 * progress bit so that accesses to those bits by two threads are
   11869 	 * serialized, and so that two threads don't start a cycle at once.
   11870 	 */
   11871 
   11872 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11873 		/*
   11874 		 * There is no cycle running at present, so we can start a
   11875 		 * cycle
   11876 		 */
   11877 
   11878 		/* Begin by setting Flash Cycle Done. */
   11879 		hsfsts |= HSFSTS_DONE;
   11880 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11881 		error = 0;
   11882 	} else {
   11883 		/*
   11884 		 * otherwise poll for sometime so the current cycle has a
   11885 		 * chance to end before giving up.
   11886 		 */
   11887 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11888 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11889 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11890 				error = 0;
   11891 				break;
   11892 			}
   11893 			delay(1);
   11894 		}
   11895 		if (error == 0) {
   11896 			/*
   11897 			 * Successful in waiting for previous cycle to timeout,
   11898 			 * now set the Flash Cycle Done.
   11899 			 */
   11900 			hsfsts |= HSFSTS_DONE;
   11901 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11902 		}
   11903 	}
   11904 	return error;
   11905 }
   11906 
   11907 /******************************************************************************
   11908  * This function starts a flash cycle and waits for its completion
   11909  *
   11910  * sc - The pointer to the hw structure
   11911  ****************************************************************************/
   11912 static int32_t
   11913 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11914 {
   11915 	uint16_t hsflctl;
   11916 	uint16_t hsfsts;
   11917 	int32_t error = 1;
   11918 	uint32_t i = 0;
   11919 
   11920 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11921 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11922 	hsflctl |= HSFCTL_GO;
   11923 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11924 
   11925 	/* Wait till FDONE bit is set to 1 */
   11926 	do {
   11927 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11928 		if (hsfsts & HSFSTS_DONE)
   11929 			break;
   11930 		delay(1);
   11931 		i++;
   11932 	} while (i < timeout);
   11933 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11934 		error = 0;
   11935 
   11936 	return error;
   11937 }
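
/*
 * Editor's note: a complete ICH flash access is therefore a three-step
 * dance: wm_ich8_cycle_init() clears stale FCERR/DAEL bits and waits
 * out any in-flight cycle, the caller programs the byte count, cycle
 * type and linear address, and wm_ich8_flash_cycle() sets HSFCTL_GO
 * and polls HSFSTS_DONE until the hardware finishes or times out.
 */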
   11938 
   11939 /******************************************************************************
   11940  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11941  *
   11942  * sc - The pointer to the hw structure
   11943  * index - The index of the byte or word to read.
   11944  * size - Size of data to read, 1=byte 2=word, 4=dword
   11945  * data - Pointer to the word to store the value read.
   11946  *****************************************************************************/
   11947 static int32_t
   11948 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11949     uint32_t size, uint32_t *data)
   11950 {
   11951 	uint16_t hsfsts;
   11952 	uint16_t hsflctl;
   11953 	uint32_t flash_linear_address;
   11954 	uint32_t flash_data = 0;
   11955 	int32_t error = 1;
   11956 	int32_t count = 0;
   11957 
   11958 	if (size < 1 || size > 4 || data == NULL ||
   11959 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11960 		return error;
   11961 
   11962 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11963 	    sc->sc_ich8_flash_base;
   11964 
   11965 	do {
   11966 		delay(1);
   11967 		/* Steps */
   11968 		error = wm_ich8_cycle_init(sc);
   11969 		if (error)
   11970 			break;
   11971 
   11972 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11973 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   11974 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11975 		    & HSFCTL_BCOUNT_MASK;
   11976 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11977 		if (sc->sc_type == WM_T_PCH_SPT) {
   11978 			/*
   11979 			 * In SPT, This register is in Lan memory space, not
   11980 			 * flash. Therefore, only 32 bit access is supported.
   11981 			 */
   11982 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11983 			    (uint32_t)hsflctl);
   11984 		} else
   11985 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11986 
   11987 		/*
   11988 		 * Write the last 24 bits of index into Flash Linear address
   11989 		 * field in Flash Address
   11990 		 */
   11991 		/* TODO: check the index against the size of the flash */
   11992 
   11993 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11994 
   11995 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11996 
   11997 		/*
   11998 		 * Check if FCERR is set to 1.  If it is, clear it and
   11999 		 * retry the whole sequence a few more times; otherwise
   12000 		 * read in (shift in) the Flash Data0 register, least
   12001 		 * significant byte first.
   12002 		 */
   12003 		if (error == 0) {
   12004 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12005 			if (size == 1)
   12006 				*data = (uint8_t)(flash_data & 0x000000FF);
   12007 			else if (size == 2)
   12008 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12009 			else if (size == 4)
   12010 				*data = (uint32_t)flash_data;
   12011 			break;
   12012 		} else {
   12013 			/*
   12014 			 * If we've gotten here, then things are probably
   12015 			 * completely hosed, but if the error condition is
   12016 			 * detected, it won't hurt to give it another try...
   12017 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12018 			 */
   12019 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12020 			if (hsfsts & HSFSTS_ERR) {
   12021 				/* Repeat for some time before giving up. */
   12022 				continue;
   12023 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12024 				break;
   12025 		}
   12026 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12027 
   12028 	return error;
   12029 }
   12030 
   12031 /******************************************************************************
   12032  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12033  *
   12034  * sc - pointer to wm_hw structure
   12035  * index - The index of the byte to read.
   12036  * data - Pointer to a byte to store the value read.
   12037  *****************************************************************************/
   12038 static int32_t
   12039 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12040 {
   12041 	int32_t status;
   12042 	uint32_t word = 0;
   12043 
   12044 	status = wm_read_ich8_data(sc, index, 1, &word);
   12045 	if (status == 0)
   12046 		*data = (uint8_t)word;
   12047 	else
   12048 		*data = 0;
   12049 
   12050 	return status;
   12051 }
   12052 
   12053 /******************************************************************************
   12054  * Reads a word from the NVM using the ICH8 flash access registers.
   12055  *
   12056  * sc - pointer to wm_hw structure
   12057  * index - The starting byte index of the word to read.
   12058  * data - Pointer to a word to store the value read.
   12059  *****************************************************************************/
   12060 static int32_t
   12061 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12062 {
   12063 	int32_t status;
   12064 	uint32_t word = 0;
   12065 
   12066 	status = wm_read_ich8_data(sc, index, 2, &word);
   12067 	if (status == 0)
   12068 		*data = (uint16_t)word;
   12069 	else
   12070 		*data = 0;
   12071 
   12072 	return status;
   12073 }
   12074 
   12075 /******************************************************************************
   12076  * Reads a dword from the NVM using the ICH8 flash access registers.
   12077  *
   12078  * sc - pointer to wm_hw structure
   12079  * index - The starting byte index of the word to read.
   12080  * data - Pointer to a word to store the value read.
   12081  *****************************************************************************/
   12082 static int32_t
   12083 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12084 {
   12085 	int32_t status;
   12086 
   12087 	status = wm_read_ich8_data(sc, index, 4, data);
   12088 	return status;
   12089 }
   12090 
   12091 /******************************************************************************
   12092  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12093  * register.
   12094  *
   12095  * sc - Struct containing variables accessed by shared code
   12096  * offset - offset of word in the EEPROM to read
   12097  * data - word read from the EEPROM
   12098  * words - number of words to read
   12099  *****************************************************************************/
   12100 static int
   12101 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12102 {
   12103 	int32_t  rv = 0;
   12104 	uint32_t flash_bank = 0;
   12105 	uint32_t act_offset = 0;
   12106 	uint32_t bank_offset = 0;
   12107 	uint16_t word = 0;
   12108 	uint16_t i = 0;
   12109 
   12110 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12111 		device_xname(sc->sc_dev), __func__));
   12112 
   12113 	if (sc->nvm.acquire(sc) != 0)
   12114 		return -1;
   12115 
   12116 	/*
   12117 	 * We need to know which is the valid flash bank.  In the event
   12118 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12119 	 * managing flash_bank.  So it cannot be trusted and needs
   12120 	 * to be updated with each read.
   12121 	 */
   12122 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12123 	if (rv) {
   12124 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12125 			device_xname(sc->sc_dev)));
   12126 		flash_bank = 0;
   12127 	}
   12128 
   12129 	/*
   12130 	 * Adjust the offset if we're on bank 1, accounting for the word
   12131 	 * size.
   12132 	 */
   12133 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12134 
   12135 	for (i = 0; i < words; i++) {
   12136 		/* The NVM part needs a byte offset, hence * 2 */
   12137 		act_offset = bank_offset + ((offset + i) * 2);
   12138 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12139 		if (rv) {
   12140 			aprint_error_dev(sc->sc_dev,
   12141 			    "%s: failed to read NVM\n", __func__);
   12142 			break;
   12143 		}
   12144 		data[i] = word;
   12145 	}
   12146 
   12147 	sc->nvm.release(sc);
   12148 	return rv;
   12149 }
   12150 
   12151 /******************************************************************************
   12152  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12153  * register.
   12154  *
   12155  * sc - Struct containing variables accessed by shared code
   12156  * offset - offset of word in the EEPROM to read
   12157  * data - word read from the EEPROM
   12158  * words - number of words to read
   12159  *****************************************************************************/
   12160 static int
   12161 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12162 {
   12163 	int32_t  rv = 0;
   12164 	uint32_t flash_bank = 0;
   12165 	uint32_t act_offset = 0;
   12166 	uint32_t bank_offset = 0;
   12167 	uint32_t dword = 0;
   12168 	uint16_t i = 0;
   12169 
   12170 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12171 		device_xname(sc->sc_dev), __func__));
   12172 
   12173 	if (sc->nvm.acquire(sc) != 0)
   12174 		return -1;
   12175 
   12176 	/*
   12177 	 * We need to know which is the valid flash bank.  In the event
   12178 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12179 	 * managing flash_bank.  So it cannot be trusted and needs
   12180 	 * to be updated with each read.
   12181 	 */
   12182 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12183 	if (rv) {
   12184 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12185 			device_xname(sc->sc_dev)));
   12186 		flash_bank = 0;
   12187 	}
   12188 
   12189 	/*
   12190 	 * Adjust the offset if we're on bank 1, accounting for the word
   12191 	 * size.
   12192 	 */
   12193 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12194 
   12195 	for (i = 0; i < words; i++) {
   12196 		/* The NVM part needs a byte offset, hence * 2 */
   12197 		act_offset = bank_offset + ((offset + i) * 2);
   12198 		/* but we must read dword aligned, so mask ... */
   12199 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12200 		if (rv) {
   12201 			aprint_error_dev(sc->sc_dev,
   12202 			    "%s: failed to read NVM\n", __func__);
   12203 			break;
   12204 		}
   12205 		/* ... and pick out low or high word */
   12206 		if ((act_offset & 0x2) == 0)
   12207 			data[i] = (uint16_t)(dword & 0xFFFF);
   12208 		else
   12209 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12210 	}
   12211 
   12212 	sc->nvm.release(sc);
   12213 	return rv;
   12214 }
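
/*
 * Editor's note: worked example of the alignment handling above.  For
 * word offset 5 in bank 0, act_offset = 10: the driver reads the dword
 * at byte offset 8 (10 & ~0x3) and, because (10 & 0x2) != 0, returns
 * the high 16 bits of that dword.
 */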
   12215 
   12216 /* iNVM */
   12217 
   12218 static int
   12219 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12220 {
   12221 	int32_t  rv = -1;	/* -1 = word not found */
   12222 	uint32_t invm_dword;
   12223 	uint16_t i;
   12224 	uint8_t record_type, word_address;
   12225 
   12226 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12227 		device_xname(sc->sc_dev), __func__));
   12228 
   12229 	for (i = 0; i < INVM_SIZE; i++) {
   12230 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12231 		/* Get record type */
   12232 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12233 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12234 			break;
   12235 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12236 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12237 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12238 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12239 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12240 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12241 			if (word_address == address) {
   12242 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12243 				rv = 0;
   12244 				break;
   12245 			}
   12246 		}
   12247 	}
   12248 
   12249 	return rv;
   12250 }
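
/*
 * Editor's note: each iNVM dword begins with a record type; word-
 * autoload records also carry a word address and 16 bits of payload.
 * In other e1000-family drivers the layout is bits 2:0 record type,
 * bits 15:9 word address and bits 31:16 data, which is presumably what
 * the INVM_DWORD_TO_* macros above extract.
 */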
   12251 
   12252 static int
   12253 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12254 {
   12255 	int rv = 0;
   12256 	int i;
   12257 
   12258 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12259 		device_xname(sc->sc_dev), __func__));
   12260 
   12261 	if (sc->nvm.acquire(sc) != 0)
   12262 		return -1;
   12263 
   12264 	for (i = 0; i < words; i++) {
   12265 		switch (offset + i) {
   12266 		case NVM_OFF_MACADDR:
   12267 		case NVM_OFF_MACADDR1:
   12268 		case NVM_OFF_MACADDR2:
   12269 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12270 			if (rv != 0) {
   12271 				data[i] = 0xffff;
   12272 				rv = -1;
   12273 			}
   12274 			break;
   12275 		case NVM_OFF_CFG2:
   12276 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12277 			if (rv != 0) {
   12278 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12279 				rv = 0;
   12280 			}
   12281 			break;
   12282 		case NVM_OFF_CFG4:
   12283 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12284 			if (rv != 0) {
   12285 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12286 				rv = 0;
   12287 			}
   12288 			break;
   12289 		case NVM_OFF_LED_1_CFG:
   12290 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12291 			if (rv != 0) {
   12292 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12293 				rv = 0;
   12294 			}
   12295 			break;
   12296 		case NVM_OFF_LED_0_2_CFG:
   12297 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12298 			if (rv != 0) {
   12299 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12300 				rv = 0;
   12301 			}
   12302 			break;
   12303 		case NVM_OFF_ID_LED_SETTINGS:
   12304 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12305 			if (rv != 0) {
   12306 				*data = ID_LED_RESERVED_FFFF;
   12307 				rv = 0;
   12308 			}
   12309 			break;
   12310 		default:
   12311 			DPRINTF(WM_DEBUG_NVM,
   12312 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12313 			*data = NVM_RESERVED_WORD;
   12314 			break;
   12315 		}
   12316 	}
   12317 
   12318 	sc->nvm.release(sc);
   12319 	return rv;
   12320 }
   12321 
   12322 /* Lock, detecting NVM type, validate checksum, version and read */
   12323 
   12324 static int
   12325 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12326 {
   12327 	uint32_t eecd = 0;
   12328 
   12329 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12330 	    || sc->sc_type == WM_T_82583) {
   12331 		eecd = CSR_READ(sc, WMREG_EECD);
   12332 
   12333 		/* Isolate bits 15 & 16 */
   12334 		eecd = ((eecd >> 15) & 0x03);
   12335 
   12336 		/* If both bits are set, device is Flash type */
   12337 		if (eecd == 0x03)
   12338 			return 0;
   12339 	}
   12340 	return 1;
   12341 }
   12342 
   12343 static int
   12344 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12345 {
   12346 	uint32_t eec;
   12347 
   12348 	eec = CSR_READ(sc, WMREG_EEC);
   12349 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12350 		return 1;
   12351 
   12352 	return 0;
   12353 }
   12354 
   12355 /*
   12356  * wm_nvm_validate_checksum
   12357  *
   12358  * The sum of the first 64 (16 bit) words must equal NVM_CHECKSUM.
   12359  */
   12360 static int
   12361 wm_nvm_validate_checksum(struct wm_softc *sc)
   12362 {
   12363 	uint16_t checksum;
   12364 	uint16_t eeprom_data;
   12365 #ifdef WM_DEBUG
   12366 	uint16_t csum_wordaddr, valid_checksum;
   12367 #endif
   12368 	int i;
   12369 
   12370 	checksum = 0;
   12371 
   12372 	/* Don't check for I211 */
   12373 	if (sc->sc_type == WM_T_I211)
   12374 		return 0;
   12375 
   12376 #ifdef WM_DEBUG
   12377 	if (sc->sc_type == WM_T_PCH_LPT) {
   12378 		csum_wordaddr = NVM_OFF_COMPAT;
   12379 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12380 	} else {
   12381 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12382 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12383 	}
   12384 
   12385 	/* Dump EEPROM image for debug */
   12386 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12387 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12388 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12389 		/* XXX PCH_SPT? */
   12390 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12391 		if ((eeprom_data & valid_checksum) == 0) {
   12392 			DPRINTF(WM_DEBUG_NVM,
   12393 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12394 				device_xname(sc->sc_dev), eeprom_data,
   12395 				    valid_checksum));
   12396 		}
   12397 	}
   12398 
   12399 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12400 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12401 		for (i = 0; i < NVM_SIZE; i++) {
   12402 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12403 				printf("XXXX ");
   12404 			else
   12405 				printf("%04hx ", eeprom_data);
   12406 			if (i % 8 == 7)
   12407 				printf("\n");
   12408 		}
   12409 	}
   12410 
   12411 #endif /* WM_DEBUG */
   12412 
   12413 	for (i = 0; i < NVM_SIZE; i++) {
   12414 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12415 			return 1;
   12416 		checksum += eeprom_data;
   12417 	}
   12418 
   12419 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12420 #ifdef WM_DEBUG
   12421 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12422 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12423 #endif
   12424 	}
   12425 
   12426 	return 0;
   12427 }
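
/*
 * Editor's note: a standalone model of the checksum rule above
 * (illustrative sketch, assuming the usual Intel NVM_CHECKSUM value of
 * 0xBABA).  The 16-bit sum of words 0x00-0x3f, including the checksum
 * word itself, must come out to NVM_CHECKSUM:
 */
#if 0
#include <stdint.h>

static int
nvm_checksum_ok(const uint16_t words[64])
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < 64; i++)
		sum += words[i];	/* 16-bit wraparound is intended */
	return sum == 0xBABA;
}
#endif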
   12428 
   12429 static void
   12430 wm_nvm_version_invm(struct wm_softc *sc)
   12431 {
   12432 	uint32_t dword;
   12433 
   12434 	/*
   12435 	 * Linux's code to decode the version is very strange, so we don't
   12436 	 * follow that algorithm and just use word 61 as the document
   12437 	 * describes.  Perhaps it's not perfect, though...
   12438 	 *
   12439 	 * Example:
   12440 	 *
   12441 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12442 	 */
   12443 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12444 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12445 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12446 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12447 }
   12448 
   12449 static void
   12450 wm_nvm_version(struct wm_softc *sc)
   12451 {
   12452 	uint16_t major, minor, build, patch;
   12453 	uint16_t uid0, uid1;
   12454 	uint16_t nvm_data;
   12455 	uint16_t off;
   12456 	bool check_version = false;
   12457 	bool check_optionrom = false;
   12458 	bool have_build = false;
   12459 	bool have_uid = true;
   12460 
   12461 	/*
   12462 	 * Version format:
   12463 	 *
   12464 	 * XYYZ
   12465 	 * X0YZ
   12466 	 * X0YY
   12467 	 *
   12468 	 * Example:
   12469 	 *
   12470 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12471 	 *	82571	0x50a6	5.10.6?
   12472 	 *	82572	0x506a	5.6.10?
   12473 	 *	82572EI	0x5069	5.6.9?
   12474 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12475 	 *		0x2013	2.1.3?
   12476 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12477 	 */
   12478 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12479 	switch (sc->sc_type) {
   12480 	case WM_T_82571:
   12481 	case WM_T_82572:
   12482 	case WM_T_82574:
   12483 	case WM_T_82583:
   12484 		check_version = true;
   12485 		check_optionrom = true;
   12486 		have_build = true;
   12487 		break;
   12488 	case WM_T_82575:
   12489 	case WM_T_82576:
   12490 	case WM_T_82580:
   12491 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12492 			check_version = true;
   12493 		break;
   12494 	case WM_T_I211:
   12495 		wm_nvm_version_invm(sc);
   12496 		have_uid = false;
   12497 		goto printver;
   12498 	case WM_T_I210:
   12499 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12500 			wm_nvm_version_invm(sc);
   12501 			have_uid = false;
   12502 			goto printver;
   12503 		}
   12504 		/* FALLTHROUGH */
   12505 	case WM_T_I350:
   12506 	case WM_T_I354:
   12507 		check_version = true;
   12508 		check_optionrom = true;
   12509 		break;
   12510 	default:
   12511 		return;
   12512 	}
   12513 	if (check_version) {
   12514 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12515 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12516 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12517 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12518 			build = nvm_data & NVM_BUILD_MASK;
   12519 			have_build = true;
   12520 		} else
   12521 			minor = nvm_data & 0x00ff;
   12522 
   12523 		/* Convert BCD-coded minor to decimal */
   12524 		minor = (minor / 16) * 10 + (minor % 16);
   12525 		sc->sc_nvm_ver_major = major;
   12526 		sc->sc_nvm_ver_minor = minor;
   12527 
   12528 printver:
   12529 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12530 		    sc->sc_nvm_ver_minor);
   12531 		if (have_build) {
   12532 			sc->sc_nvm_ver_build = build;
   12533 			aprint_verbose(".%d", build);
   12534 		}
   12535 	}
   12536 	if (check_optionrom) {
   12537 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12538 		/* Option ROM Version */
   12539 		if ((off != 0x0000) && (off != 0xffff)) {
   12540 			off += NVM_COMBO_VER_OFF;
   12541 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12542 			wm_nvm_read(sc, off, 1, &uid0);
   12543 			if ((uid0 != 0) && (uid0 != 0xffff)
   12544 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12545 				/* 16bits */
   12546 				major = uid0 >> 8;
   12547 				build = (uid0 << 8) | (uid1 >> 8);
   12548 				patch = uid1 & 0x00ff;
   12549 				aprint_verbose(", option ROM Version %d.%d.%d",
   12550 				    major, build, patch);
   12551 			}
   12552 		}
   12553 	}
   12554 
   12555 	if (have_uid) {
   12556 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12557 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12558 	}
   12559 }
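
/*
 * Editor's note: worked example of the decoding above, assuming the
 * usual field layout (major in the top nibble).  For the 82572EI entry
 * in the table (word 0x5069): major = 5; have_build is set for this
 * family, so minor = 0x06 and build = 9.  The decimal conversion
 * leaves minor at 6, and the version prints as "5.6.9".
 */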
   12560 
   12561 /*
   12562  * wm_nvm_read:
   12563  *
   12564  *	Read data from the serial EEPROM.
   12565  */
   12566 static int
   12567 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12568 {
   12569 	int rv;
   12570 
   12571 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12572 		device_xname(sc->sc_dev), __func__));
   12573 
   12574 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12575 		return -1;
   12576 
   12577 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12578 
   12579 	return rv;
   12580 }
   12581 
   12582 /*
   12583  * Hardware semaphores.
   12584  * Very complex...
   12585  */
   12586 
   12587 static int
   12588 wm_get_null(struct wm_softc *sc)
   12589 {
   12590 
   12591 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12592 		device_xname(sc->sc_dev), __func__));
   12593 	return 0;
   12594 }
   12595 
   12596 static void
   12597 wm_put_null(struct wm_softc *sc)
   12598 {
   12599 
   12600 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12601 		device_xname(sc->sc_dev), __func__));
   12602 	return;
   12603 }
   12604 
   12605 static int
   12606 wm_get_eecd(struct wm_softc *sc)
   12607 {
   12608 	uint32_t reg;
   12609 	int x;
   12610 
   12611 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12612 		device_xname(sc->sc_dev), __func__));
   12613 
   12614 	reg = CSR_READ(sc, WMREG_EECD);
   12615 
   12616 	/* Request EEPROM access. */
   12617 	reg |= EECD_EE_REQ;
   12618 	CSR_WRITE(sc, WMREG_EECD, reg);
   12619 
   12620 	/* ..and wait for it to be granted. */
   12621 	for (x = 0; x < 1000; x++) {
   12622 		reg = CSR_READ(sc, WMREG_EECD);
   12623 		if (reg & EECD_EE_GNT)
   12624 			break;
   12625 		delay(5);
   12626 	}
   12627 	if ((reg & EECD_EE_GNT) == 0) {
   12628 		aprint_error_dev(sc->sc_dev,
   12629 		    "could not acquire EEPROM GNT\n");
   12630 		reg &= ~EECD_EE_REQ;
   12631 		CSR_WRITE(sc, WMREG_EECD, reg);
   12632 		return -1;
   12633 	}
   12634 
   12635 	return 0;
   12636 }
   12637 
   12638 static void
   12639 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12640 {
   12641 
   12642 	*eecd |= EECD_SK;
   12643 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12644 	CSR_WRITE_FLUSH(sc);
   12645 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12646 		delay(1);
   12647 	else
   12648 		delay(50);
   12649 }
   12650 
   12651 static void
   12652 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12653 {
   12654 
   12655 	*eecd &= ~EECD_SK;
   12656 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12657 	CSR_WRITE_FLUSH(sc);
   12658 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12659 		delay(1);
   12660 	else
   12661 		delay(50);
   12662 }
   12663 
   12664 static void
   12665 wm_put_eecd(struct wm_softc *sc)
   12666 {
   12667 	uint32_t reg;
   12668 
   12669 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12670 		device_xname(sc->sc_dev), __func__));
   12671 
   12672 	/* Stop nvm */
   12673 	reg = CSR_READ(sc, WMREG_EECD);
   12674 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12675 		/* Pull CS high */
   12676 		reg |= EECD_CS;
   12677 		wm_nvm_eec_clock_lower(sc, &reg);
   12678 	} else {
   12679 		/* CS on Microwire is active-high */
   12680 		reg &= ~(EECD_CS | EECD_DI);
   12681 		CSR_WRITE(sc, WMREG_EECD, reg);
   12682 		wm_nvm_eec_clock_raise(sc, &reg);
   12683 		wm_nvm_eec_clock_lower(sc, &reg);
   12684 	}
   12685 
   12686 	reg = CSR_READ(sc, WMREG_EECD);
   12687 	reg &= ~EECD_EE_REQ;
   12688 	CSR_WRITE(sc, WMREG_EECD, reg);
   12689 
   12690 	return;
   12691 }
   12692 
   12693 /*
   12694  * Get hardware semaphore.
   12695  * Same as e1000_get_hw_semaphore_generic()
   12696  */
   12697 static int
   12698 wm_get_swsm_semaphore(struct wm_softc *sc)
   12699 {
   12700 	int32_t timeout;
   12701 	uint32_t swsm;
   12702 
   12703 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12704 		device_xname(sc->sc_dev), __func__));
   12705 	KASSERT(sc->sc_nvm_wordsize > 0);
   12706 
   12707 	/* Get the SW semaphore. */
   12708 	timeout = sc->sc_nvm_wordsize + 1;
   12709 	while (timeout) {
   12710 		swsm = CSR_READ(sc, WMREG_SWSM);
   12711 
   12712 		if ((swsm & SWSM_SMBI) == 0)
   12713 			break;
   12714 
   12715 		delay(50);
   12716 		timeout--;
   12717 	}
   12718 
   12719 	if (timeout == 0) {
   12720 		aprint_error_dev(sc->sc_dev,
   12721 		    "could not acquire SWSM SMBI\n");
   12722 		return 1;
   12723 	}
   12724 
   12725 	/* Get the FW semaphore. */
   12726 	timeout = sc->sc_nvm_wordsize + 1;
   12727 	while (timeout) {
   12728 		swsm = CSR_READ(sc, WMREG_SWSM);
   12729 		swsm |= SWSM_SWESMBI;
   12730 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12731 		/* If we managed to set the bit we got the semaphore. */
   12732 		swsm = CSR_READ(sc, WMREG_SWSM);
   12733 		if (swsm & SWSM_SWESMBI)
   12734 			break;
   12735 
   12736 		delay(50);
   12737 		timeout--;
   12738 	}
   12739 
   12740 	if (timeout == 0) {
   12741 		aprint_error_dev(sc->sc_dev,
   12742 		    "could not acquire SWSM SWESMBI\n");
   12743 		/* Release semaphores */
   12744 		wm_put_swsm_semaphore(sc);
   12745 		return 1;
   12746 	}
   12747 	return 0;
   12748 }
   12749 
   12750 /*
   12751  * Put hardware semaphore.
   12752  * Same as e1000_put_hw_semaphore_generic()
   12753  */
   12754 static void
   12755 wm_put_swsm_semaphore(struct wm_softc *sc)
   12756 {
   12757 	uint32_t swsm;
   12758 
   12759 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12760 		device_xname(sc->sc_dev), __func__));
   12761 
   12762 	swsm = CSR_READ(sc, WMREG_SWSM);
   12763 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12764 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12765 }
   12766 
   12767 /*
   12768  * Get SW/FW semaphore.
   12769  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12770  */
   12771 static int
   12772 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12773 {
   12774 	uint32_t swfw_sync;
   12775 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12776 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12777 	int timeout;
   12778 
   12779 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12780 		device_xname(sc->sc_dev), __func__));
   12781 
   12782 	if (sc->sc_type == WM_T_80003)
   12783 		timeout = 50;
   12784 	else
   12785 		timeout = 200;
   12786 
   12787 	while (timeout-- > 0) {
   12788 		if (wm_get_swsm_semaphore(sc)) {
   12789 			aprint_error_dev(sc->sc_dev,
   12790 			    "%s: failed to get semaphore\n",
   12791 			    __func__);
   12792 			return 1;
   12793 		}
   12794 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12795 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12796 			swfw_sync |= swmask;
   12797 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12798 			wm_put_swsm_semaphore(sc);
   12799 			return 0;
   12800 		}
   12801 		wm_put_swsm_semaphore(sc);
   12802 		delay(5000);
   12803 	}
   12804 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12805 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12806 	return 1;
   12807 }
   12808 
   12809 static void
   12810 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12811 {
   12812 	uint32_t swfw_sync;
   12813 
   12814 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12815 		device_xname(sc->sc_dev), __func__));
   12816 
   12817 	while (wm_get_swsm_semaphore(sc) != 0)
   12818 		continue;
   12819 
   12820 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12821 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12822 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12823 
   12824 	wm_put_swsm_semaphore(sc);
   12825 }
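
/*
 * Editor's note: typical pairing of the SW/FW semaphore helpers above
 * (illustrative only; real callers normally go through hooks such as
 * sc->nvm.acquire/release rather than calling these directly):
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) != 0)
		return -1;	/* firmware or another function owns it */
	/* ... access the shared EEPROM/flash resource ... */
	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
#endif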
   12826 
   12827 static int
   12828 wm_get_nvm_80003(struct wm_softc *sc)
   12829 {
   12830 	int rv;
   12831 
   12832 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12833 		device_xname(sc->sc_dev), __func__));
   12834 
   12835 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12836 		aprint_error_dev(sc->sc_dev,
   12837 		    "%s: failed to get semaphore(SWFW)\n",
   12838 		    __func__);
   12839 		return rv;
   12840 	}
   12841 
   12842 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12843 	    && (rv = wm_get_eecd(sc)) != 0) {
   12844 		aprint_error_dev(sc->sc_dev,
   12845 		    "%s: failed to get semaphore(EECD)\n",
   12846 		    __func__);
   12847 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12848 		return rv;
   12849 	}
   12850 
   12851 	return 0;
   12852 }
   12853 
   12854 static void
   12855 wm_put_nvm_80003(struct wm_softc *sc)
   12856 {
   12857 
   12858 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12859 		device_xname(sc->sc_dev), __func__));
   12860 
   12861 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12862 		wm_put_eecd(sc);
   12863 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12864 }
   12865 
   12866 static int
   12867 wm_get_nvm_82571(struct wm_softc *sc)
   12868 {
   12869 	int rv;
   12870 
   12871 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12872 		device_xname(sc->sc_dev), __func__));
   12873 
   12874 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12875 		return rv;
   12876 
   12877 	switch (sc->sc_type) {
   12878 	case WM_T_82573:
   12879 		break;
   12880 	default:
   12881 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12882 			rv = wm_get_eecd(sc);
   12883 		break;
   12884 	}
   12885 
   12886 	if (rv != 0) {
   12887 		aprint_error_dev(sc->sc_dev,
   12888 		    "%s: failed to get semaphore\n",
   12889 		    __func__);
   12890 		wm_put_swsm_semaphore(sc);
   12891 	}
   12892 
   12893 	return rv;
   12894 }
   12895 
   12896 static void
   12897 wm_put_nvm_82571(struct wm_softc *sc)
   12898 {
   12899 
   12900 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12901 		device_xname(sc->sc_dev), __func__));
   12902 
   12903 	switch (sc->sc_type) {
   12904 	case WM_T_82573:
   12905 		break;
   12906 	default:
   12907 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12908 			wm_put_eecd(sc);
   12909 		break;
   12910 	}
   12911 
   12912 	wm_put_swsm_semaphore(sc);
   12913 }
   12914 
   12915 static int
   12916 wm_get_phy_82575(struct wm_softc *sc)
   12917 {
   12918 
   12919 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12920 		device_xname(sc->sc_dev), __func__));
   12921 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12922 }
   12923 
   12924 static void
   12925 wm_put_phy_82575(struct wm_softc *sc)
   12926 {
   12927 
   12928 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12929 		device_xname(sc->sc_dev), __func__));
   12930 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12931 }
   12932 
   12933 static int
   12934 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12935 {
   12936 	uint32_t ext_ctrl;
   12937 	int timeout = 200;
   12938 
   12939 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12940 		device_xname(sc->sc_dev), __func__));
   12941 
   12942 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12943 	for (timeout = 0; timeout < 200; timeout++) {
   12944 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12945 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12946 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12947 
   12948 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12949 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12950 			return 0;
   12951 		delay(5000);
   12952 	}
   12953 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12954 	    device_xname(sc->sc_dev), ext_ctrl);
   12955 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12956 	return 1;
   12957 }
   12958 
   12959 static void
   12960 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12961 {
   12962 	uint32_t ext_ctrl;
   12963 
   12964 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12965 		device_xname(sc->sc_dev), __func__));
   12966 
   12967 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12968 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12969 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12970 
   12971 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12972 }
   12973 
   12974 static int
   12975 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12976 {
   12977 	uint32_t ext_ctrl;
   12978 	int timeout;
   12979 
   12980 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12981 		device_xname(sc->sc_dev), __func__));
   12982 	mutex_enter(sc->sc_ich_phymtx);
   12983 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12984 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12985 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12986 			break;
   12987 		delay(1000);
   12988 	}
   12989 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12990 		printf("%s: SW has already locked the resource\n",
   12991 		    device_xname(sc->sc_dev));
   12992 		goto out;
   12993 	}
   12994 
   12995 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12996 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12997 	for (timeout = 0; timeout < 1000; timeout++) {
   12998 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12999 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13000 			break;
   13001 		delay(1000);
   13002 	}
   13003 	if (timeout >= 1000) {
   13004 		printf("%s: failed to acquire semaphore\n",
   13005 		    device_xname(sc->sc_dev));
   13006 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13007 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13008 		goto out;
   13009 	}
   13010 	return 0;
   13011 
   13012 out:
   13013 	mutex_exit(sc->sc_ich_phymtx);
   13014 	return 1;
   13015 }
   13016 
   13017 static void
   13018 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13019 {
   13020 	uint32_t ext_ctrl;
   13021 
   13022 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13023 		device_xname(sc->sc_dev), __func__));
   13024 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13025 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13026 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13027 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13028 	} else {
   13029 		printf("%s: Semaphore unexpectedly released\n",
   13030 		    device_xname(sc->sc_dev));
   13031 	}
   13032 
   13033 	mutex_exit(sc->sc_ich_phymtx);
   13034 }
   13035 
   13036 static int
   13037 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13038 {
   13039 
   13040 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13041 		device_xname(sc->sc_dev), __func__));
   13042 	mutex_enter(sc->sc_ich_nvmmtx);
   13043 
   13044 	return 0;
   13045 }
   13046 
   13047 static void
   13048 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13049 {
   13050 
   13051 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13052 		device_xname(sc->sc_dev), __func__));
   13053 	mutex_exit(sc->sc_ich_nvmmtx);
   13054 }
   13055 
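/*
 * 82573 hardware semaphore: the same EXTCNFCTR MDIO ownership bit,
 * polled every 2ms for up to WM_MDIO_OWNERSHIP_TIMEOUT tries.  Note
 * that no driver mutex is taken here.
 */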
   13056 static int
   13057 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13058 {
   13059 	int i = 0;
   13060 	uint32_t reg;
   13061 
   13062 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13063 		device_xname(sc->sc_dev), __func__));
   13064 
   13065 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13066 	do {
   13067 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13068 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13069 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13070 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13071 			break;
   13072 		delay(2*1000);
   13073 		i++;
   13074 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13075 
   13076 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13077 		wm_put_hw_semaphore_82573(sc);
   13078 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13079 		    device_xname(sc->sc_dev));
   13080 		return -1;
   13081 	}
   13082 
   13083 	return 0;
   13084 }
   13085 
   13086 static void
   13087 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13088 {
   13089 	uint32_t reg;
   13090 
   13091 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13092 		device_xname(sc->sc_dev), __func__));
   13093 
   13094 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13095 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13096 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13097 }
   13098 
   13099 /*
   13100  * Management mode and power management related subroutines.
   13101  * BMC, AMT, suspend/resume and EEE.
   13102  */
   13103 
   13104 #ifdef WM_WOL
   13105 static int
   13106 wm_check_mng_mode(struct wm_softc *sc)
   13107 {
   13108 	int rv;
   13109 
   13110 	switch (sc->sc_type) {
   13111 	case WM_T_ICH8:
   13112 	case WM_T_ICH9:
   13113 	case WM_T_ICH10:
   13114 	case WM_T_PCH:
   13115 	case WM_T_PCH2:
   13116 	case WM_T_PCH_LPT:
   13117 	case WM_T_PCH_SPT:
   13118 		rv = wm_check_mng_mode_ich8lan(sc);
   13119 		break;
   13120 	case WM_T_82574:
   13121 	case WM_T_82583:
   13122 		rv = wm_check_mng_mode_82574(sc);
   13123 		break;
   13124 	case WM_T_82571:
   13125 	case WM_T_82572:
   13126 	case WM_T_82573:
   13127 	case WM_T_80003:
   13128 		rv = wm_check_mng_mode_generic(sc);
   13129 		break;
   13130 	default:
    13131 		/* Nothing to do */
   13132 		rv = 0;
   13133 		break;
   13134 	}
   13135 
   13136 	return rv;
   13137 }
   13138 
   13139 static int
   13140 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13141 {
   13142 	uint32_t fwsm;
   13143 
   13144 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13145 
   13146 	if (((fwsm & FWSM_FW_VALID) != 0)
   13147 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13148 		return 1;
   13149 
   13150 	return 0;
   13151 }
   13152 
   13153 static int
   13154 wm_check_mng_mode_82574(struct wm_softc *sc)
   13155 {
   13156 	uint16_t data;
   13157 
   13158 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13159 
   13160 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13161 		return 1;
   13162 
   13163 	return 0;
   13164 }
   13165 
   13166 static int
   13167 wm_check_mng_mode_generic(struct wm_softc *sc)
   13168 {
   13169 	uint32_t fwsm;
   13170 
   13171 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13172 
   13173 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13174 		return 1;
   13175 
   13176 	return 0;
   13177 }
   13178 #endif /* WM_WOL */
   13179 
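/*
 * Decide whether management packets should be passed through to the
 * host.  This requires ASF/management firmware to be present and TCO
 * receive to be enabled in MANC; the mode check then depends on the
 * chip family (FWSM where the ARC subsystem is valid, the NVM CFG2
 * word on 82574/82583, plain MANC bits otherwise).  Returns 1 if
 * pass-through should be enabled; wm_get_wakeup() uses this to set
 * WM_F_HAS_MANAGE.
 */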
   13180 static int
   13181 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13182 {
   13183 	uint32_t manc, fwsm, factps;
   13184 
   13185 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13186 		return 0;
   13187 
   13188 	manc = CSR_READ(sc, WMREG_MANC);
   13189 
   13190 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13191 		device_xname(sc->sc_dev), manc));
   13192 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13193 		return 0;
   13194 
   13195 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13196 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13197 		factps = CSR_READ(sc, WMREG_FACTPS);
   13198 		if (((factps & FACTPS_MNGCG) == 0)
   13199 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13200 			return 1;
    13201 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13202 		uint16_t data;
   13203 
   13204 		factps = CSR_READ(sc, WMREG_FACTPS);
   13205 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13206 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13207 			device_xname(sc->sc_dev), factps, data));
   13208 		if (((factps & FACTPS_MNGCG) == 0)
   13209 		    && ((data & NVM_CFG2_MNGM_MASK)
   13210 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13211 			return 1;
   13212 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13213 	    && ((manc & MANC_ASF_EN) == 0))
   13214 		return 1;
   13215 
   13216 	return 0;
   13217 }
   13218 
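/*
 * Check whether firmware currently blocks PHY resets.  On ICH/PCH
 * chips this polls FWSM_RSPCIPHY for up to 30 * 10ms; on
 * 82571/82572/82573/82574/82583/80003 it is a single MANC bit.  Other
 * chips never block the reset.
 */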
   13219 static bool
   13220 wm_phy_resetisblocked(struct wm_softc *sc)
   13221 {
   13222 	bool blocked = false;
   13223 	uint32_t reg;
   13224 	int i = 0;
   13225 
   13226 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13227 		device_xname(sc->sc_dev), __func__));
   13228 
   13229 	switch (sc->sc_type) {
   13230 	case WM_T_ICH8:
   13231 	case WM_T_ICH9:
   13232 	case WM_T_ICH10:
   13233 	case WM_T_PCH:
   13234 	case WM_T_PCH2:
   13235 	case WM_T_PCH_LPT:
   13236 	case WM_T_PCH_SPT:
   13237 		do {
   13238 			reg = CSR_READ(sc, WMREG_FWSM);
   13239 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13240 				blocked = true;
   13241 				delay(10*1000);
   13242 				continue;
   13243 			}
   13244 			blocked = false;
   13245 		} while (blocked && (i++ < 30));
   13246 		return blocked;
   13248 	case WM_T_82571:
   13249 	case WM_T_82572:
   13250 	case WM_T_82573:
   13251 	case WM_T_82574:
   13252 	case WM_T_82583:
   13253 	case WM_T_80003:
   13254 		reg = CSR_READ(sc, WMREG_MANC);
   13255 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13256 			return true;
   13257 		else
   13258 			return false;
   13260 	default:
   13261 		/* no problem */
   13262 		break;
   13263 	}
   13264 
   13265 	return false;
   13266 }
   13267 
   13268 static void
   13269 wm_get_hw_control(struct wm_softc *sc)
   13270 {
   13271 	uint32_t reg;
   13272 
   13273 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13274 		device_xname(sc->sc_dev), __func__));
   13275 
   13276 	if (sc->sc_type == WM_T_82573) {
   13277 		reg = CSR_READ(sc, WMREG_SWSM);
   13278 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13279 	} else if (sc->sc_type >= WM_T_82571) {
   13280 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13281 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13282 	}
   13283 }
   13284 
   13285 static void
   13286 wm_release_hw_control(struct wm_softc *sc)
   13287 {
   13288 	uint32_t reg;
   13289 
   13290 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13291 		device_xname(sc->sc_dev), __func__));
   13292 
   13293 	if (sc->sc_type == WM_T_82573) {
   13294 		reg = CSR_READ(sc, WMREG_SWSM);
   13295 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13296 	} else if (sc->sc_type >= WM_T_82571) {
   13297 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13298 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13299 	}
   13300 }
   13301 
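/*
 * Gate (or ungate) the PHY configuration that the hardware performs
 * automatically on 82579 (PCH2) and newer, so that it does not run
 * concurrently with a driver-initiated PHY setup.
 */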
   13302 static void
   13303 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13304 {
   13305 	uint32_t reg;
   13306 
   13307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13308 		device_xname(sc->sc_dev), __func__));
   13309 
   13310 	if (sc->sc_type < WM_T_PCH2)
   13311 		return;
   13312 
   13313 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13314 
   13315 	if (gate)
   13316 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13317 	else
   13318 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13319 
   13320 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13321 }
   13322 
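/*
 * Switch the PHY from SMBus to PCIe-style MDIO access.  On PCH
 * platforms the PHY may come out of reset in SMBus mode (for use by
 * the ME); if it is not accessible, force/unforce SMBus mode in the
 * MAC and PHY and toggle the LANPHYPC pin to power-cycle the PHY,
 * then reset it.
 */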
   13323 static void
   13324 wm_smbustopci(struct wm_softc *sc)
   13325 {
   13326 	uint32_t fwsm, reg;
   13327 	int rv = 0;
   13328 
   13329 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13330 		device_xname(sc->sc_dev), __func__));
   13331 
   13332 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13333 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13334 
   13335 	/* Disable ULP */
   13336 	wm_ulp_disable(sc);
   13337 
   13338 	/* Acquire PHY semaphore */
   13339 	sc->phy.acquire(sc);
   13340 
   13341 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13342 	switch (sc->sc_type) {
   13343 	case WM_T_PCH_LPT:
   13344 	case WM_T_PCH_SPT:
   13345 		if (wm_phy_is_accessible_pchlan(sc))
   13346 			break;
   13347 
   13348 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13349 		reg |= CTRL_EXT_FORCE_SMBUS;
   13350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13351 #if 0
   13352 		/* XXX Isn't this required??? */
   13353 		CSR_WRITE_FLUSH(sc);
   13354 #endif
   13355 		delay(50 * 1000);
   13356 		/* FALLTHROUGH */
   13357 	case WM_T_PCH2:
   13358 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13359 			break;
   13360 		/* FALLTHROUGH */
   13361 	case WM_T_PCH:
   13362 		if (sc->sc_type == WM_T_PCH)
   13363 			if ((fwsm & FWSM_FW_VALID) != 0)
   13364 				break;
   13365 
   13366 		if (wm_phy_resetisblocked(sc) == true) {
   13367 			printf("XXX reset is blocked(3)\n");
   13368 			break;
   13369 		}
   13370 
   13371 		wm_toggle_lanphypc_pch_lpt(sc);
   13372 
   13373 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13374 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13375 				break;
   13376 
   13377 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13378 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13379 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13380 
   13381 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13382 				break;
   13383 			rv = -1;
   13384 		}
   13385 		break;
   13386 	default:
   13387 		break;
   13388 	}
   13389 
   13390 	/* Release semaphore */
   13391 	sc->phy.release(sc);
   13392 
   13393 	if (rv == 0) {
   13394 		if (wm_phy_resetisblocked(sc)) {
   13395 			printf("XXX reset is blocked(4)\n");
   13396 			goto out;
   13397 		}
   13398 		wm_reset_phy(sc);
   13399 		if (wm_phy_resetisblocked(sc))
   13400 			printf("XXX reset is blocked(4)\n");
   13401 	}
   13402 
   13403 out:
   13404 	/*
   13405 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13406 	 */
   13407 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13408 		delay(10*1000);
   13409 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13410 	}
   13411 }
   13412 
   13413 static void
   13414 wm_init_manageability(struct wm_softc *sc)
   13415 {
   13416 
   13417 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13418 		device_xname(sc->sc_dev), __func__));
   13419 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13420 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13421 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13422 
   13423 		/* Disable hardware interception of ARP */
   13424 		manc &= ~MANC_ARP_EN;
   13425 
   13426 		/* Enable receiving management packets to the host */
   13427 		if (sc->sc_type >= WM_T_82571) {
   13428 			manc |= MANC_EN_MNG2HOST;
    13429 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13430 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13431 		}
   13432 
   13433 		CSR_WRITE(sc, WMREG_MANC, manc);
   13434 	}
   13435 }
   13436 
   13437 static void
   13438 wm_release_manageability(struct wm_softc *sc)
   13439 {
   13440 
   13441 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13442 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13443 
   13444 		manc |= MANC_ARP_EN;
   13445 		if (sc->sc_type >= WM_T_82571)
   13446 			manc &= ~MANC_EN_MNG2HOST;
   13447 
   13448 		CSR_WRITE(sc, WMREG_MANC, manc);
   13449 	}
   13450 }
   13451 
   13452 static void
   13453 wm_get_wakeup(struct wm_softc *sc)
   13454 {
   13455 
   13456 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13457 	switch (sc->sc_type) {
   13458 	case WM_T_82573:
   13459 	case WM_T_82583:
   13460 		sc->sc_flags |= WM_F_HAS_AMT;
   13461 		/* FALLTHROUGH */
   13462 	case WM_T_80003:
   13463 	case WM_T_82575:
   13464 	case WM_T_82576:
   13465 	case WM_T_82580:
   13466 	case WM_T_I350:
   13467 	case WM_T_I354:
   13468 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13469 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13470 		/* FALLTHROUGH */
   13471 	case WM_T_82541:
   13472 	case WM_T_82541_2:
   13473 	case WM_T_82547:
   13474 	case WM_T_82547_2:
   13475 	case WM_T_82571:
   13476 	case WM_T_82572:
   13477 	case WM_T_82574:
   13478 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13479 		break;
   13480 	case WM_T_ICH8:
   13481 	case WM_T_ICH9:
   13482 	case WM_T_ICH10:
   13483 	case WM_T_PCH:
   13484 	case WM_T_PCH2:
   13485 	case WM_T_PCH_LPT:
   13486 	case WM_T_PCH_SPT:
   13487 		sc->sc_flags |= WM_F_HAS_AMT;
   13488 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13489 		break;
   13490 	default:
   13491 		break;
   13492 	}
   13493 
   13494 	/* 1: HAS_MANAGE */
   13495 	if (wm_enable_mng_pass_thru(sc) != 0)
   13496 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13497 
    13498 	/*
    13499 	 * Note that the WOL flags are set after the EEPROM-related reset
    13500 	 * is done.
    13501 	 */
   13502 }
   13503 
   13504 /*
   13505  * Unconfigure Ultra Low Power mode.
   13506  * Only for I217 and newer (see below).
   13507  */
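/*
 * Two paths: if ME firmware is running (FWSM_FW_VALID), ask it to
 * un-configure ULP through the H2ME register and poll
 * FWSM_ULP_CFG_DONE (up to 30 * 10ms); otherwise clear the ULP
 * configuration by hand via the I218_ULP_CONFIG1 PHY register,
 * un-forcing SMBus mode in both the PHY and the MAC along the way.
 */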
   13508 static void
   13509 wm_ulp_disable(struct wm_softc *sc)
   13510 {
   13511 	uint32_t reg;
   13512 	int i = 0;
   13513 
   13514 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13515 		device_xname(sc->sc_dev), __func__));
   13516 	/* Exclude old devices */
   13517 	if ((sc->sc_type < WM_T_PCH_LPT)
   13518 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13519 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13520 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13521 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13522 		return;
   13523 
   13524 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13525 		/* Request ME un-configure ULP mode in the PHY */
   13526 		reg = CSR_READ(sc, WMREG_H2ME);
   13527 		reg &= ~H2ME_ULP;
   13528 		reg |= H2ME_ENFORCE_SETTINGS;
   13529 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13530 
   13531 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13532 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13533 			if (i++ == 30) {
    13534 				printf("%s: timed out\n", __func__);
   13535 				return;
   13536 			}
   13537 			delay(10 * 1000);
   13538 		}
   13539 		reg = CSR_READ(sc, WMREG_H2ME);
   13540 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13541 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13542 
   13543 		return;
   13544 	}
   13545 
   13546 	/* Acquire semaphore */
   13547 	sc->phy.acquire(sc);
   13548 
   13549 	/* Toggle LANPHYPC */
   13550 	wm_toggle_lanphypc_pch_lpt(sc);
   13551 
   13552 	/* Unforce SMBus mode in PHY */
   13553 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13554 	if (reg == 0x0000 || reg == 0xffff) {
   13555 		uint32_t reg2;
   13556 
   13557 		printf("%s: Force SMBus first.\n", __func__);
   13558 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13559 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13560 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13561 		delay(50 * 1000);
   13562 
   13563 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13564 	}
   13565 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13566 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13567 
   13568 	/* Unforce SMBus mode in MAC */
   13569 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13570 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13571 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13572 
   13573 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13574 	reg |= HV_PM_CTRL_K1_ENA;
   13575 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13576 
   13577 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13578 	reg &= ~(I218_ULP_CONFIG1_IND
   13579 	    | I218_ULP_CONFIG1_STICKY_ULP
   13580 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13581 	    | I218_ULP_CONFIG1_WOL_HOST
   13582 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13583 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13584 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13585 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13586 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13587 	reg |= I218_ULP_CONFIG1_START;
   13588 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13589 
   13590 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13591 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13592 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13593 
   13594 	/* Release semaphore */
   13595 	sc->phy.release(sc);
   13596 	wm_gmii_reset(sc);
   13597 	delay(50 * 1000);
   13598 }
   13599 
   13600 /* WOL in the newer chipset interfaces (pchlan) */
   13601 static void
   13602 wm_enable_phy_wakeup(struct wm_softc *sc)
   13603 {
   13604 #if 0
   13605 	uint16_t preg;
   13606 
   13607 	/* Copy MAC RARs to PHY RARs */
   13608 
   13609 	/* Copy MAC MTA to PHY MTA */
   13610 
   13611 	/* Configure PHY Rx Control register */
   13612 
   13613 	/* Enable PHY wakeup in MAC register */
   13614 
   13615 	/* Configure and enable PHY wakeup in PHY registers */
   13616 
   13617 	/* Activate PHY wakeup */
   13618 
   13619 	/* XXX */
   13620 #endif
   13621 }
   13622 
   13623 /* Power down workaround on D3 */
   13624 static void
   13625 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13626 {
   13627 	uint32_t reg;
   13628 	int i;
   13629 
   13630 	for (i = 0; i < 2; i++) {
   13631 		/* Disable link */
   13632 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13633 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13634 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13635 
   13636 		/*
   13637 		 * Call gig speed drop workaround on Gig disable before
   13638 		 * accessing any PHY registers
   13639 		 */
   13640 		if (sc->sc_type == WM_T_ICH8)
   13641 			wm_gig_downshift_workaround_ich8lan(sc);
   13642 
   13643 		/* Write VR power-down enable */
   13644 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13645 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13646 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13647 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13648 
   13649 		/* Read it back and test */
   13650 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13651 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13652 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13653 			break;
   13654 
   13655 		/* Issue PHY reset and repeat at most one more time */
   13656 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13657 	}
   13658 }
   13659 
   13660 static void
   13661 wm_enable_wakeup(struct wm_softc *sc)
   13662 {
   13663 	uint32_t reg, pmreg;
   13664 	pcireg_t pmode;
   13665 
   13666 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13667 		device_xname(sc->sc_dev), __func__));
   13668 
   13669 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13670 		&pmreg, NULL) == 0)
   13671 		return;
   13672 
   13673 	/* Advertise the wakeup capability */
   13674 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13675 	    | CTRL_SWDPIN(3));
   13676 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13677 
   13678 	/* ICH workaround */
   13679 	switch (sc->sc_type) {
   13680 	case WM_T_ICH8:
   13681 	case WM_T_ICH9:
   13682 	case WM_T_ICH10:
   13683 	case WM_T_PCH:
   13684 	case WM_T_PCH2:
   13685 	case WM_T_PCH_LPT:
   13686 	case WM_T_PCH_SPT:
   13687 		/* Disable gig during WOL */
   13688 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13689 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13690 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13691 		if (sc->sc_type == WM_T_PCH)
   13692 			wm_gmii_reset(sc);
   13693 
   13694 		/* Power down workaround */
   13695 		if (sc->sc_phytype == WMPHY_82577) {
   13696 			struct mii_softc *child;
   13697 
   13698 			/* Assume that the PHY is copper */
   13699 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13700 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13701 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13702 				    (768 << 5) | 25, 0x0444); /* magic num */
   13703 		}
   13704 		break;
   13705 	default:
   13706 		break;
   13707 	}
   13708 
   13709 	/* Keep the laser running on fiber adapters */
   13710 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13711 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13712 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13713 		reg |= CTRL_EXT_SWDPIN(3);
   13714 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13715 	}
   13716 
   13717 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13718 #if 0	/* for the multicast packet */
   13719 	reg |= WUFC_MC;
   13720 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13721 #endif
   13722 
   13723 	if (sc->sc_type >= WM_T_PCH)
   13724 		wm_enable_phy_wakeup(sc);
   13725 	else {
   13726 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13727 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13728 	}
   13729 
   13730 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13731 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13732 		|| (sc->sc_type == WM_T_PCH2))
   13733 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13734 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13735 
   13736 	/* Request PME */
   13737 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13738 #if 0
   13739 	/* Disable WOL */
   13740 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13741 #else
   13742 	/* For WOL */
   13743 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13744 #endif
   13745 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13746 }
   13747 
   13748 /* LPLU */
   13749 
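/*
 * Disable Low Power Link Up (LPLU) in the D0 power state.  LPLU lets
 * the PHY bring the link up at the lowest possible speed to save
 * power; the register holding the D0 LPLU bit varies by family (the
 * IGP PHY power management register, PHPM, PHY_CTRL or HV_OEM_BITS).
 */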
   13750 static void
   13751 wm_lplu_d0_disable(struct wm_softc *sc)
   13752 {
   13753 	struct mii_data *mii = &sc->sc_mii;
   13754 	uint32_t reg;
   13755 
   13756 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13757 		device_xname(sc->sc_dev), __func__));
   13758 
   13759 	if (sc->sc_phytype == WMPHY_IFE)
   13760 		return;
   13761 
   13762 	switch (sc->sc_type) {
   13763 	case WM_T_82571:
   13764 	case WM_T_82572:
   13765 	case WM_T_82573:
   13766 	case WM_T_82575:
   13767 	case WM_T_82576:
   13768 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13769 		reg &= ~PMR_D0_LPLU;
   13770 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13771 		break;
   13772 	case WM_T_82580:
   13773 	case WM_T_I350:
   13774 	case WM_T_I210:
   13775 	case WM_T_I211:
   13776 		reg = CSR_READ(sc, WMREG_PHPM);
   13777 		reg &= ~PHPM_D0A_LPLU;
   13778 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13779 		break;
   13780 	case WM_T_82574:
   13781 	case WM_T_82583:
   13782 	case WM_T_ICH8:
   13783 	case WM_T_ICH9:
   13784 	case WM_T_ICH10:
   13785 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13786 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13787 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13788 		CSR_WRITE_FLUSH(sc);
   13789 		break;
   13790 	case WM_T_PCH:
   13791 	case WM_T_PCH2:
   13792 	case WM_T_PCH_LPT:
   13793 	case WM_T_PCH_SPT:
   13794 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13795 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13796 		if (wm_phy_resetisblocked(sc) == false)
   13797 			reg |= HV_OEM_BITS_ANEGNOW;
   13798 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13799 		break;
   13800 	default:
   13801 		break;
   13802 	}
   13803 }
   13804 
   13805 /* EEE */
   13806 
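/*
 * Enable or disable Energy Efficient Ethernet (IEEE 802.3az) on
 * I350-class chips: advertise EEE for 100M/1G in IPCNFG and switch
 * Tx/Rx low power idle (LPI) in EEER according to WM_F_EEE.
 */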
   13807 static void
   13808 wm_set_eee_i350(struct wm_softc *sc)
   13809 {
   13810 	uint32_t ipcnfg, eeer;
   13811 
   13812 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13813 	eeer = CSR_READ(sc, WMREG_EEER);
   13814 
   13815 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13816 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13817 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13818 		    | EEER_LPI_FC);
   13819 	} else {
   13820 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13821 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13822 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13823 		    | EEER_LPI_FC);
   13824 	}
   13825 
   13826 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13827 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13828 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13829 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13830 }
   13831 
   13832 /*
   13833  * Workarounds (mainly PHY related).
   13834  * Basically, PHY's workarounds are in the PHY drivers.
   13835  */
   13836 
   13837 /* Work-around for 82566 Kumeran PCS lock loss */
   13838 static void
   13839 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13840 {
   13841 	struct mii_data *mii = &sc->sc_mii;
   13842 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13843 	int i;
   13844 	int reg;
   13845 
   13846 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13847 		device_xname(sc->sc_dev), __func__));
   13848 
   13849 	/* If the link is not up, do nothing */
   13850 	if ((status & STATUS_LU) == 0)
   13851 		return;
   13852 
    13853 	/* Nothing to do if the link speed is not 1Gbps */
   13854 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13855 		return;
   13856 
   13857 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13858 	for (i = 0; i < 10; i++) {
   13859 		/* read twice */
   13860 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13861 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13862 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13863 			goto out;	/* GOOD! */
   13864 
   13865 		/* Reset the PHY */
   13866 		wm_reset_phy(sc);
   13867 		delay(5*1000);
   13868 	}
   13869 
   13870 	/* Disable GigE link negotiation */
   13871 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13872 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13873 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13874 
   13875 	/*
   13876 	 * Call gig speed drop workaround on Gig disable before accessing
   13877 	 * any PHY registers.
   13878 	 */
   13879 	wm_gig_downshift_workaround_ich8lan(sc);
   13880 
   13881 out:
   13882 	return;
   13883 }
   13884 
   13885 /* WOL from S5 stops working */
   13886 static void
   13887 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13888 {
   13889 	uint16_t kmreg;
   13890 
   13891 	/* Only for igp3 */
   13892 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13893 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13894 			return;
   13895 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13896 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13897 			return;
   13898 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13899 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13900 	}
   13901 }
   13902 
   13903 /*
   13904  * Workaround for pch's PHYs
   13905  * XXX should be moved to new PHY driver?
   13906  */
   13907 static void
   13908 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13909 {
   13910 
   13911 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13912 		device_xname(sc->sc_dev), __func__));
   13913 	KASSERT(sc->sc_type == WM_T_PCH);
   13914 
   13915 	if (sc->sc_phytype == WMPHY_82577)
   13916 		wm_set_mdio_slow_mode_hv(sc);
   13917 
    13918 	/* (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */
    13919 
    13920 	/* (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */
   13921 
   13922 	/* 82578 */
   13923 	if (sc->sc_phytype == WMPHY_82578) {
   13924 		struct mii_softc *child;
   13925 
   13926 		/*
   13927 		 * Return registers to default by doing a soft reset then
   13928 		 * writing 0x3140 to the control register
   13929 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13930 		 */
   13931 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13932 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13933 			PHY_RESET(child);
   13934 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13935 			    0x3140);
   13936 		}
   13937 	}
   13938 
   13939 	/* Select page 0 */
   13940 	sc->phy.acquire(sc);
   13941 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13942 	sc->phy.release(sc);
   13943 
   13944 	/*
   13945 	 * Configure the K1 Si workaround during phy reset assuming there is
   13946 	 * link so that it disables K1 if link is in 1Gbps.
   13947 	 */
   13948 	wm_k1_gig_workaround_hv(sc, 1);
   13949 }
   13950 
   13951 static void
   13952 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13953 {
   13954 
   13955 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13956 		device_xname(sc->sc_dev), __func__));
   13957 	KASSERT(sc->sc_type == WM_T_PCH2);
   13958 
   13959 	wm_set_mdio_slow_mode_hv(sc);
   13960 }
   13961 
   13962 static int
   13963 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13964 {
   13965 	int k1_enable = sc->sc_nvm_k1_enabled;
   13966 
   13967 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13968 		device_xname(sc->sc_dev), __func__));
   13969 
   13970 	if (sc->phy.acquire(sc) != 0)
   13971 		return -1;
   13972 
   13973 	if (link) {
   13974 		k1_enable = 0;
   13975 
   13976 		/* Link stall fix for link up */
   13977 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13978 	} else {
   13979 		/* Link stall fix for link down */
   13980 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13981 	}
   13982 
   13983 	wm_configure_k1_ich8lan(sc, k1_enable);
   13984 	sc->phy.release(sc);
   13985 
   13986 	return 0;
   13987 }
   13988 
   13989 static void
   13990 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13991 {
   13992 	uint32_t reg;
   13993 
   13994 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13995 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13996 	    reg | HV_KMRN_MDIO_SLOW);
   13997 }
   13998 
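/*
 * Enable or disable the K1 power state on the Kumeran interface via
 * the K1_CONFIG register.  While the setting is changed, the MAC
 * speed is briefly forced (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS) and
 * then restored.
 */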
   13999 static void
   14000 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14001 {
   14002 	uint32_t ctrl, ctrl_ext, tmp;
   14003 	uint16_t kmreg;
   14004 	int rv;
   14005 
   14006 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14007 	if (rv != 0)
   14008 		return;
   14009 
   14010 	if (k1_enable)
   14011 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14012 	else
   14013 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14014 
   14015 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14016 	if (rv != 0)
   14017 		return;
   14018 
   14019 	delay(20);
   14020 
   14021 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14022 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14023 
   14024 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14025 	tmp |= CTRL_FRCSPD;
   14026 
   14027 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14028 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14029 	CSR_WRITE_FLUSH(sc);
   14030 	delay(20);
   14031 
   14032 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14033 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14034 	CSR_WRITE_FLUSH(sc);
   14035 	delay(20);
   14036 
   14037 	return;
   14038 }
   14039 
    14040 /* Special case: the 82575 needs manual init ... */
   14041 static void
   14042 wm_reset_init_script_82575(struct wm_softc *sc)
   14043 {
    14044 	/*
    14045 	 * Remark: this is untested code - we have no board without EEPROM;
    14046 	 * it is the same setup as mentioned in the FreeBSD i82575 driver.
    14047 	 */
   14048 
   14049 	/* SerDes configuration via SERDESCTRL */
   14050 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14051 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14052 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14053 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14054 
   14055 	/* CCM configuration via CCMCTL register */
   14056 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14057 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14058 
   14059 	/* PCIe lanes configuration */
   14060 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14061 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14062 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14063 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14064 
   14065 	/* PCIe PLL Configuration */
   14066 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14067 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14068 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14069 }
   14070 
   14071 static void
   14072 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14073 {
   14074 	uint32_t reg;
   14075 	uint16_t nvmword;
   14076 	int rv;
   14077 
   14078 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14079 		return;
   14080 
   14081 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14082 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14083 	if (rv != 0) {
   14084 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14085 		    __func__);
   14086 		return;
   14087 	}
   14088 
   14089 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14090 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14091 		reg |= MDICNFG_DEST;
   14092 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14093 		reg |= MDICNFG_COM_MDIO;
   14094 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14095 }
   14096 
   14097 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14098 
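/*
 * Check that the PCH PHY answers on MDIO: read the PHY ID registers up
 * to twice; if they are invalid, retry in MDIO slow mode (pre-LPT
 * only).  On LPT/SPT, un-force SMBus mode once the PHY is known to be
 * accessible and the ME is not active.  Called with the PHY semaphore
 * held.
 */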
   14099 static bool
   14100 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14101 {
   14102 	int i;
   14103 	uint32_t reg;
   14104 	uint16_t id1, id2;
   14105 
   14106 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14107 		device_xname(sc->sc_dev), __func__));
   14108 	id1 = id2 = 0xffff;
   14109 	for (i = 0; i < 2; i++) {
   14110 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14111 		if (MII_INVALIDID(id1))
   14112 			continue;
   14113 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14114 		if (MII_INVALIDID(id2))
   14115 			continue;
   14116 		break;
   14117 	}
    14118 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    14119 		goto out;
   14121 
   14122 	if (sc->sc_type < WM_T_PCH_LPT) {
   14123 		sc->phy.release(sc);
   14124 		wm_set_mdio_slow_mode_hv(sc);
   14125 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14126 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14127 		sc->phy.acquire(sc);
   14128 	}
   14129 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14130 		printf("XXX return with false\n");
   14131 		return false;
   14132 	}
   14133 out:
   14134 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14135 		/* Only unforce SMBus if ME is not active */
   14136 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14137 			/* Unforce SMBus mode in PHY */
   14138 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14139 			    CV_SMB_CTRL);
   14140 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14141 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14142 			    CV_SMB_CTRL, reg);
   14143 
   14144 			/* Unforce SMBus mode in MAC */
   14145 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14146 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14147 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14148 		}
   14149 	}
   14150 	return true;
   14151 }
   14152 
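/*
 * Toggle the LANPHYPC pin to force a PHY power cycle: program the PHY
 * config counter to 50ms, drive the pin low via CTRL_LANPHYPC_OVERRIDE
 * with CTRL_LANPHYPC_VALUE cleared, then release the override and wait
 * for the PHY to come back (a fixed 50ms before PCH_LPT; CTRL_EXT_LPCD
 * polling plus 30ms on PCH_LPT and newer).
 */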
   14153 static void
   14154 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14155 {
   14156 	uint32_t reg;
   14157 	int i;
   14158 
   14159 	/* Set PHY Config Counter to 50msec */
   14160 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14161 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14162 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14163 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14164 
   14165 	/* Toggle LANPHYPC */
   14166 	reg = CSR_READ(sc, WMREG_CTRL);
   14167 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14168 	reg &= ~CTRL_LANPHYPC_VALUE;
   14169 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14170 	CSR_WRITE_FLUSH(sc);
   14171 	delay(1000);
   14172 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14173 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14174 	CSR_WRITE_FLUSH(sc);
   14175 
   14176 	if (sc->sc_type < WM_T_PCH_LPT)
   14177 		delay(50 * 1000);
   14178 	else {
   14179 		i = 20;
   14180 
   14181 		do {
   14182 			delay(5 * 1000);
   14183 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14184 		    && i--);
   14185 
   14186 		delay(30 * 1000);
   14187 	}
   14188 }
   14189 
   14190 static int
   14191 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14192 {
   14193 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14194 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14195 	uint32_t rxa;
   14196 	uint16_t scale = 0, lat_enc = 0;
   14197 	int32_t obff_hwm = 0;
   14198 	int64_t lat_ns, value;
   14199 
   14200 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14201 		device_xname(sc->sc_dev), __func__));
   14202 
   14203 	if (link) {
   14204 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14205 		uint32_t status;
   14206 		uint16_t speed;
   14207 		pcireg_t preg;
   14208 
   14209 		status = CSR_READ(sc, WMREG_STATUS);
   14210 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14211 		case STATUS_SPEED_10:
   14212 			speed = 10;
   14213 			break;
   14214 		case STATUS_SPEED_100:
   14215 			speed = 100;
   14216 			break;
   14217 		case STATUS_SPEED_1000:
   14218 			speed = 1000;
   14219 			break;
   14220 		default:
   14221 			device_printf(sc->sc_dev, "Unknown speed "
   14222 			    "(status = %08x)\n", status);
   14223 			return -1;
   14224 		}
   14225 
   14226 		/* Rx Packet Buffer Allocation size (KB) */
   14227 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14228 
   14229 		/*
   14230 		 * Determine the maximum latency tolerated by the device.
   14231 		 *
   14232 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14233 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14234 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14235 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14236 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14237 		 */
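		/*
		 * lat_ns below is the time (in ns) needed to drain the Rx
		 * buffer, less two maximum-sized frames, at the current
		 * link speed.  Worked encoding example (illustrative):
		 * lat_ns = 34000 takes two divide-by-32 passes through the
		 * while loop (34000 -> 1063 -> 34), giving scale = 2 and
		 * value = 34, i.e. an encoded latency of 34 * 2^10 ns =
		 * 34816 ns.
		 */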
   14238 		lat_ns = ((int64_t)rxa * 1024 -
   14239 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14240 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14241 		if (lat_ns < 0)
   14242 			lat_ns = 0;
   14243 		else
   14244 			lat_ns /= speed;
   14245 		value = lat_ns;
   14246 
   14247 		while (value > LTRV_VALUE) {
    14248 			scale++;
   14249 			value = howmany(value, __BIT(5));
   14250 		}
   14251 		if (scale > LTRV_SCALE_MAX) {
   14252 			printf("%s: Invalid LTR latency scale %d\n",
   14253 			    device_xname(sc->sc_dev), scale);
   14254 			return -1;
   14255 		}
   14256 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14257 
   14258 		/* Determine the maximum latency tolerated by the platform */
   14259 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14260 		    WM_PCI_LTR_CAP_LPT);
   14261 		max_snoop = preg & 0xffff;
   14262 		max_nosnoop = preg >> 16;
   14263 
   14264 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14265 
   14266 		if (lat_enc > max_ltr_enc) {
   14267 			lat_enc = max_ltr_enc;
   14268 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14269 			    * PCI_LTR_SCALETONS(
   14270 				    __SHIFTOUT(lat_enc,
   14271 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14272 		}
   14273 
   14274 		if (lat_ns) {
   14275 			lat_ns *= speed * 1000;
   14276 			lat_ns /= 8;
   14277 			lat_ns /= 1000000000;
   14278 			obff_hwm = (int32_t)(rxa - lat_ns);
   14279 		}
   14280 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    14281 			device_printf(sc->sc_dev, "Invalid high water mark %d "
   14282 			    "(rxa = %d, lat_ns = %d)\n",
   14283 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14284 			return -1;
   14285 		}
   14286 	}
   14287 	/* Snoop and No-Snoop latencies the same */
   14288 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14289 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14290 
   14291 	/* Set OBFF high water mark */
   14292 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14293 	reg |= obff_hwm;
   14294 	CSR_WRITE(sc, WMREG_SVT, reg);
   14295 
   14296 	/* Enable OBFF */
   14297 	reg = CSR_READ(sc, WMREG_SVCR);
   14298 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14299 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14300 
   14301 	return 0;
   14302 }
   14303 
   14304 /*
   14305  * I210 Errata 25 and I211 Errata 10
   14306  * Slow System Clock.
   14307  */
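/*
 * If the internal PHY's PLL reads back as unconfigured after power-up,
 * reset the PHY and bounce the chip through D3/D0 with a patched iNVM
 * autoload word (INVM_PLL_WO_VAL), retrying up to WM_MAX_PLL_TRIES
 * times.  The WUC and MDICNFG registers are saved and restored around
 * the sequence.
 */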
   14308 static void
   14309 wm_pll_workaround_i210(struct wm_softc *sc)
   14310 {
   14311 	uint32_t mdicnfg, wuc;
   14312 	uint32_t reg;
   14313 	pcireg_t pcireg;
   14314 	uint32_t pmreg;
   14315 	uint16_t nvmword, tmp_nvmword;
   14316 	int phyval;
   14317 	bool wa_done = false;
   14318 	int i;
   14319 
   14320 	/* Save WUC and MDICNFG registers */
   14321 	wuc = CSR_READ(sc, WMREG_WUC);
   14322 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14323 
   14324 	reg = mdicnfg & ~MDICNFG_DEST;
   14325 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14326 
   14327 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14328 		nvmword = INVM_DEFAULT_AL;
   14329 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14330 
   14331 	/* Get Power Management cap offset */
   14332 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14333 		&pmreg, NULL) == 0)
   14334 		return;
   14335 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14336 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14337 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14338 
   14339 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14340 			break; /* OK */
   14341 		}
   14342 
   14343 		wa_done = true;
   14344 		/* Directly reset the internal PHY */
   14345 		reg = CSR_READ(sc, WMREG_CTRL);
   14346 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14347 
   14348 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14349 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14350 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14351 
   14352 		CSR_WRITE(sc, WMREG_WUC, 0);
   14353 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14354 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14355 
   14356 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14357 		    pmreg + PCI_PMCSR);
   14358 		pcireg |= PCI_PMCSR_STATE_D3;
   14359 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14360 		    pmreg + PCI_PMCSR, pcireg);
   14361 		delay(1000);
   14362 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14363 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14364 		    pmreg + PCI_PMCSR, pcireg);
   14365 
   14366 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14367 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14368 
   14369 		/* Restore WUC register */
   14370 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14371 	}
   14372 
   14373 	/* Restore MDICNFG setting */
   14374 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14375 	if (wa_done)
   14376 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14377 }
   14378 
   14379 static void
   14380 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14381 {
   14382 	uint32_t reg;
   14383 
   14384 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14385 		device_xname(sc->sc_dev), __func__));
   14386 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14387 
   14388 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14389 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14390 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14391 
   14392 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14393 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14394 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14395 }
   14396