      1 /*	$NetBSD: if_wm.c,v 1.601 2018/11/22 15:09:46 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy-Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.601 2018/11/22 15:09:46 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <net/rss_config.h>
    120 
    121 #include <netinet/in.h>			/* XXX for struct ip */
    122 #include <netinet/in_systm.h>		/* XXX for struct ip */
    123 #include <netinet/ip.h>			/* XXX for struct ip */
    124 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    125 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    126 
    127 #include <sys/bus.h>
    128 #include <sys/intr.h>
    129 #include <machine/endian.h>
    130 
    131 #include <dev/mii/mii.h>
    132 #include <dev/mii/miivar.h>
    133 #include <dev/mii/miidevs.h>
    134 #include <dev/mii/mii_bitbang.h>
    135 #include <dev/mii/ikphyreg.h>
    136 #include <dev/mii/igphyreg.h>
    137 #include <dev/mii/igphyvar.h>
    138 #include <dev/mii/inbmphyreg.h>
    139 #include <dev/mii/ihphyreg.h>
    140 
    141 #include <dev/pci/pcireg.h>
    142 #include <dev/pci/pcivar.h>
    143 #include <dev/pci/pcidevs.h>
    144 
    145 #include <dev/pci/if_wmreg.h>
    146 #include <dev/pci/if_wmvar.h>
    147 
    148 #ifdef WM_DEBUG
    149 #define	WM_DEBUG_LINK		__BIT(0)
    150 #define	WM_DEBUG_TX		__BIT(1)
    151 #define	WM_DEBUG_RX		__BIT(2)
    152 #define	WM_DEBUG_GMII		__BIT(3)
    153 #define	WM_DEBUG_MANAGE		__BIT(4)
    154 #define	WM_DEBUG_NVM		__BIT(5)
    155 #define	WM_DEBUG_INIT		__BIT(6)
    156 #define	WM_DEBUG_LOCK		__BIT(7)
    157 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    158     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    159 
    160 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    161 #else
    162 #define	DPRINTF(x, y)	/* nothing */
    163 #endif /* WM_DEBUG */
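
/*
 * Usage sketch (illustrative, not a call site from this file): DPRINTF()
 * takes a debug-class mask and a parenthesized printf(9) argument list,
 * e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: LINK: media -> link up\n",
 *	    device_xname(sc->sc_dev)));
 *
 * and prints only when the matching bit is set in wm_debug.
 */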
    164 
    165 #ifdef NET_MPSAFE
    166 #define WM_MPSAFE	1
    167 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    168 #else
    169 #define CALLOUT_FLAGS	0
    170 #endif
    171 
    172 /*
    173  * The maximum number of interrupts this device driver uses.
    174  */
    175 #define WM_MAX_NQUEUEINTR	16
    176 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
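
/*
 * Worked example (hypothetical 4-queue adapter): MSI-X vectors 0..3 serve
 * the Tx/Rx queue pairs and one more vector serves link interrupts, five
 * vectors in all; the ceiling is WM_MAX_NQUEUEINTR + 1 == 17 vectors.
 */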
    177 
    178 #ifndef WM_DISABLE_MSI
    179 #define	WM_DISABLE_MSI 0
    180 #endif
    181 #ifndef WM_DISABLE_MSIX
    182 #define	WM_DISABLE_MSIX 0
    183 #endif
    184 
    185 int wm_disable_msi = WM_DISABLE_MSI;
    186 int wm_disable_msix = WM_DISABLE_MSIX;
    187 
    188 #ifndef WM_WATCHDOG_TIMEOUT
    189 #define WM_WATCHDOG_TIMEOUT 5
    190 #endif
    191 static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;
    192 
    193 /*
    194  * Transmit descriptor list size.  Due to errata, we can only have
    195  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    196  * on >= 82544. We tell the upper layers that they can queue a lot
    197  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    198  * of them at a time.
    199  *
    200  * We allow up to 64 DMA segments per packet.  Pathological packet
    201  * chains containing many small mbufs have been observed in zero-copy
    202  * situations with jumbo frames. If a mbuf chain has more than 64 DMA segments,
    203  * m_defrag() is called to reduce it.
    204  */
    205 #define	WM_NTXSEGS		64
    206 #define	WM_IFQUEUELEN		256
    207 #define	WM_TXQUEUELEN_MAX	64
    208 #define	WM_TXQUEUELEN_MAX_82547	16
    209 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    210 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    211 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    212 #define	WM_NTXDESC_82542	256
    213 #define	WM_NTXDESC_82544	4096
    214 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    215 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    216 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    217 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    218 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
    219 
    220 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    221 
    222 #define	WM_TXINTERQSIZE		256
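
/*
 * Illustrative index arithmetic: WM_NTXDESC() is a power of two, so the
 * ring wraps with a mask instead of a modulo.  With 4096 descriptors:
 *
 *	WM_NEXTTX(txq, 4094) == 4095
 *	WM_NEXTTX(txq, 4095) == (4095 + 1) & 4095 == 0
 */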
    223 
    224 #ifndef WM_TX_PROCESS_LIMIT_DEFAULT
    225 #define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
    226 #endif
    227 #ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
    228 #define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
    229 #endif
    230 
    231 /*
    232  * Receive descriptor list size.  We have one Rx buffer for normal-
    233  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    234  * packet.  We allocate 256 receive descriptors, each with a 2k
    235  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    236  */
    237 #define	WM_NRXDESC		256
    238 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    239 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    240 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
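
/*
 * Worked example for the sizing above: a 9018-byte jumbo frame split into
 * MCLBYTES (2k) buffers takes howmany(9018, 2048) == 5 descriptors, and
 * 256 / 5 == 51, which is the "room for 50 jumbo packets" quoted above.
 */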
    241 
    242 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    243 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    244 #endif
    245 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    246 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    247 #endif
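
/*
 * These defaults can be overridden at build time; assuming the options
 * are declared for opt_if_wm.h by the config framework, a kernel config
 * could carry (values below are examples only):
 *
 *	options 	WM_RX_PROCESS_LIMIT_DEFAULT=200
 *	options 	WM_RX_INTR_PROCESS_LIMIT_DEFAULT=16
 */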
    248 
    249 typedef union txdescs {
    250 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    251 	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
    252 } txdescs_t;
    253 
    254 typedef union rxdescs {
    255 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    256 	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    257 	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    258 } rxdescs_t;
    259 
    260 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    261 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    262 
    263 /*
    264  * Software state for transmit jobs.
    265  */
    266 struct wm_txsoft {
    267 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    268 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    269 	int txs_firstdesc;		/* first descriptor in packet */
    270 	int txs_lastdesc;		/* last descriptor in packet */
    271 	int txs_ndesc;			/* # of descriptors used */
    272 };
    273 
    274 /*
    275  * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
    276  * buffer and a DMA map. For packets which fill more than one buffer, we chain
    277  * them together.
    278  */
    279 struct wm_rxsoft {
    280 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    281 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    282 };
    283 
    284 #define WM_LINKUP_TIMEOUT	50
    285 
    286 static uint16_t swfwphysem[] = {
    287 	SWFW_PHY0_SM,
    288 	SWFW_PHY1_SM,
    289 	SWFW_PHY2_SM,
    290 	SWFW_PHY3_SM
    291 };
    292 
    293 static const uint32_t wm_82580_rxpbs_table[] = {
    294 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    295 };
    296 
    297 struct wm_softc;
    298 
    299 #ifdef WM_EVENT_COUNTERS
    300 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    301 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    302 	struct evcnt qname##_ev_##evname;
    303 
    304 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    305 	do {								\
    306 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    307 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    308 		    "%s%02d%s", #qname, (qnum), #evname);		\
    309 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    310 		    (evtype), NULL, (xname),				\
    311 		    (q)->qname##_##evname##_evcnt_name);		\
    312 	} while (0)
    313 
    314 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    315 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    316 
    317 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    318 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    319 
    320 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    321 	evcnt_detach(&(q)->qname##_ev_##evname);
    322 #endif /* WM_EVENT_COUNTERS */
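
/*
 * Illustrative expansion: WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, q, 0, xname)
 * snprintf()s "txq00pcqdrop" into q->txq_pcqdrop_evcnt_name and attaches
 * q->txq_ev_pcqdrop under that name.  The buffer size in WM_Q_EVCNT_DEFINE
 * relies on "##" not being pasted inside a string literal, so
 * sizeof("qname##XX##evname") is simply the 17 characters of that literal
 * plus the NUL, which is enough for names like "txq00toomanyseg".
 */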
    323 
    324 struct wm_txqueue {
    325 	kmutex_t *txq_lock;		/* lock for tx operations */
    326 
    327 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    328 
    329 	/* Software state for the transmit descriptors. */
    330 	int txq_num;			/* must be a power of two */
    331 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    332 
    333 	/* TX control data structures. */
    334 	int txq_ndesc;			/* must be a power of two */
    335 	size_t txq_descsize;		/* size of a Tx descriptor */
    336 	txdescs_t *txq_descs_u;
    337 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    338 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    339 	int txq_desc_rseg;		/* real number of control segments */
    340 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    341 #define	txq_descs	txq_descs_u->sctxu_txdescs
    342 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    343 
    344 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    345 
    346 	int txq_free;			/* number of free Tx descriptors */
    347 	int txq_next;			/* next ready Tx descriptor */
    348 
    349 	int txq_sfree;			/* number of free Tx jobs */
    350 	int txq_snext;			/* next free Tx job */
    351 	int txq_sdirty;			/* dirty Tx jobs */
    352 
    353 	/* These 4 variables are used only on the 82547. */
    354 	int txq_fifo_size;		/* Tx FIFO size */
    355 	int txq_fifo_head;		/* current head of FIFO */
    356 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    357 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    358 
    359 	/*
    360 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    361 	 * CPUs. This queue mediates between them without blocking (sketch below).
    362 	 */
    363 	pcq_t *txq_interq;
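
	/*
	 * Hedged sketch of the intended pcq(9) usage.  Producer side, on
	 * any CPU:
	 *
	 *	if (!pcq_put(txq->txq_interq, m))
	 *		m_freem(m);		<- queue full: drop
	 *
	 * Consumer side, run by the queue owner with txq_lock held:
	 *
	 *	while ((m = pcq_get(txq->txq_interq)) != NULL)
	 *		... load m onto the hardware ring ...
	 */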
    364 
    365 	/*
    366 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    367 	 * to manage the Tx H/W queue's busy flag.
    368 	 */
    369 	int txq_flags;			/* flags for H/W queue, see below */
    370 #define	WM_TXQ_NO_SPACE	0x1
    371 
    372 	bool txq_stopping;
    373 
    374 	bool txq_sending;
    375 	time_t txq_lastsent;
    376 
    377 	uint32_t txq_packets;		/* for AIM */
    378 	uint32_t txq_bytes;		/* for AIM */
    379 #ifdef WM_EVENT_COUNTERS
    380 	/* TX event counters */
    381 	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
    382 	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
    383 	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
    384 	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
    385 	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
    386 					    /* XXX not used? */
    387 
    388 	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
    389 	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
    390 	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
    391 	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
    392 	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
    393 	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
    394 	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
    395 	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
    396 					    /* other than toomanyseg */
    397 
    398 	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
    399 	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
    400 	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */
    401 
    402 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    403 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    404 #endif /* WM_EVENT_COUNTERS */
    405 };
    406 
    407 struct wm_rxqueue {
    408 	kmutex_t *rxq_lock;		/* lock for rx operations */
    409 
    410 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    411 
    412 	/* Software state for the receive descriptors. */
    413 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    414 
    415 	/* RX control data structures. */
    416 	int rxq_ndesc;			/* must be a power of two */
    417 	size_t rxq_descsize;		/* size of an Rx descriptor */
    418 	rxdescs_t *rxq_descs_u;
    419 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    420 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    421 	int rxq_desc_rseg;		/* real number of control segments */
    422 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    423 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    424 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    425 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    426 
    427 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    428 
    429 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    430 	int rxq_discard;
    431 	int rxq_len;
    432 	struct mbuf *rxq_head;
    433 	struct mbuf *rxq_tail;
    434 	struct mbuf **rxq_tailp;
    435 
    436 	bool rxq_stopping;
    437 
    438 	uint32_t rxq_packets;		/* for AIM */
    439 	uint32_t rxq_bytes;		/* for AIM */
    440 #ifdef WM_EVENT_COUNTERS
    441 	/* RX event counters */
    442 	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
    443 	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */
    444 
    445 	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
    446 	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
    447 #endif
    448 };
    449 
    450 struct wm_queue {
    451 	int wmq_id;			/* index of TX/RX queues */
    452 	int wmq_intr_idx;		/* index of MSI-X tables */
    453 
    454 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    455 	bool wmq_set_itr;
    456 
    457 	struct wm_txqueue wmq_txq;
    458 	struct wm_rxqueue wmq_rxq;
    459 
    460 	void *wmq_si;
    461 };
    462 
    463 struct wm_phyop {
    464 	int (*acquire)(struct wm_softc *);
    465 	void (*release)(struct wm_softc *);
    466 	int (*readreg_locked)(device_t, int, int, uint16_t *);
    467 	int (*writereg_locked)(device_t, int, int, uint16_t);
    468 	int reset_delay_us;
    469 };
    470 
    471 struct wm_nvmop {
    472 	int (*acquire)(struct wm_softc *);
    473 	void (*release)(struct wm_softc *);
    474 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    475 };
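
/*
 * The ops vectors above are filled in once at attach time with the
 * chip-specific routines.  A hedged sketch of the calling pattern:
 *
 *	if (sc->phy.acquire(sc) != 0)
 *		return;
 *	sc->phy.readreg_locked(dev, phy, reg, &val);
 *	sc->phy.release(sc);
 */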
    476 
    477 /*
    478  * Software state per device.
    479  */
    480 struct wm_softc {
    481 	device_t sc_dev;		/* generic device information */
    482 	bus_space_tag_t sc_st;		/* bus space tag */
    483 	bus_space_handle_t sc_sh;	/* bus space handle */
    484 	bus_size_t sc_ss;		/* bus space size */
    485 	bus_space_tag_t sc_iot;		/* I/O space tag */
    486 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    487 	bus_size_t sc_ios;		/* I/O space size */
    488 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    489 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    490 	bus_size_t sc_flashs;		/* flash registers space size */
    491 	off_t sc_flashreg_offset;	/*
    492 					 * offset to flash registers from
    493 					 * start of BAR
    494 					 */
    495 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    496 
    497 	struct ethercom sc_ethercom;	/* ethernet common data */
    498 	struct mii_data sc_mii;		/* MII/media information */
    499 
    500 	pci_chipset_tag_t sc_pc;
    501 	pcitag_t sc_pcitag;
    502 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    503 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    504 
    505 	uint16_t sc_pcidevid;		/* PCI device ID */
    506 	wm_chip_type sc_type;		/* MAC type */
    507 	int sc_rev;			/* MAC revision */
    508 	wm_phy_type sc_phytype;		/* PHY type */
    509 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    510 #define	WM_MEDIATYPE_UNKNOWN		0x00
    511 #define	WM_MEDIATYPE_FIBER		0x01
    512 #define	WM_MEDIATYPE_COPPER		0x02
    513 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    514 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    515 	int sc_flags;			/* flags; see below */
    516 	int sc_if_flags;		/* last if_flags */
    517 	int sc_flowflags;		/* 802.3x flow control flags */
    518 	int sc_align_tweak;
    519 
    520 	void *sc_ihs[WM_MAX_NINTR];	/*
    521 					 * interrupt cookie.
    522 					 * - legacy and msi use sc_ihs[0] only
    523 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    524 					 */
    525 	pci_intr_handle_t *sc_intrs;	/*
    526 					 * legacy and msi use sc_intrs[0] only
    527 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    528 					 */
    529 	int sc_nintrs;			/* number of interrupts */
    530 
    531 	int sc_link_intr_idx;		/* index of MSI-X tables */
    532 
    533 	callout_t sc_tick_ch;		/* tick callout */
    534 	bool sc_core_stopping;
    535 
    536 	int sc_nvm_ver_major;
    537 	int sc_nvm_ver_minor;
    538 	int sc_nvm_ver_build;
    539 	int sc_nvm_addrbits;		/* NVM address bits */
    540 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    541 	int sc_ich8_flash_base;
    542 	int sc_ich8_flash_bank_size;
    543 	int sc_nvm_k1_enabled;
    544 
    545 	int sc_nqueues;
    546 	struct wm_queue *sc_queue;
    547 	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
    548 	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
    549 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    550 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    551 
    552 	int sc_affinity_offset;
    553 
    554 #ifdef WM_EVENT_COUNTERS
    555 	/* Event counters. */
    556 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    557 
    558 	/* WM_T_82542_2_1 only */
    559 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    560 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    561 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    562 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    563 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    564 #endif /* WM_EVENT_COUNTERS */
    565 
    566 	/* This variable is used only on the 82547. */
    567 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    568 
    569 	uint32_t sc_ctrl;		/* prototype CTRL register */
    570 #if 0
    571 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    572 #endif
    573 	uint32_t sc_icr;		/* prototype interrupt bits */
    574 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    575 	uint32_t sc_tctl;		/* prototype TCTL register */
    576 	uint32_t sc_rctl;		/* prototype RCTL register */
    577 	uint32_t sc_txcw;		/* prototype TXCW register */
    578 	uint32_t sc_tipg;		/* prototype TIPG register */
    579 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    580 	uint32_t sc_pba;		/* prototype PBA register */
    581 
    582 	int sc_tbi_linkup;		/* TBI link status */
    583 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    584 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    585 
    586 	int sc_mchash_type;		/* multicast filter offset */
    587 
    588 	krndsource_t rnd_source;	/* random source */
    589 
    590 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    591 
    592 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    593 	kmutex_t *sc_ich_phymtx;	/*
    594 					 * 82574/82583/ICH/PCH specific PHY
    595 					 * mutex. For 82574/82583, the mutex
    596 					 * is used for both PHY and NVM.
    597 					 */
    598 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    599 
    600 	struct wm_phyop phy;
    601 	struct wm_nvmop nvm;
    602 };
    603 
    604 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    605 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    606 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
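
/*
 * Illustrative use: the macros degrade to no-ops when no core lock was
 * created, so callers can write unconditionally
 *
 *	WM_CORE_LOCK(sc);
 *	... touch softc state ...
 *	WM_CORE_UNLOCK(sc);
 *
 * and helpers may assert KASSERT(WM_CORE_LOCKED(sc)) in either mode.
 */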
    607 
    608 #define	WM_RXCHAIN_RESET(rxq)						\
    609 do {									\
    610 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    611 	*(rxq)->rxq_tailp = NULL;					\
    612 	(rxq)->rxq_len = 0;						\
    613 } while (/*CONSTCOND*/0)
    614 
    615 #define	WM_RXCHAIN_LINK(rxq, m)						\
    616 do {									\
    617 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    618 	(rxq)->rxq_tailp = &(m)->m_next;				\
    619 } while (/*CONSTCOND*/0)
    620 
    621 #ifdef WM_EVENT_COUNTERS
    622 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    623 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    624 
    625 #define WM_Q_EVCNT_INCR(qname, evname)			\
    626 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    627 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    628 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    629 #else /* !WM_EVENT_COUNTERS */
    630 #define	WM_EVCNT_INCR(ev)	/* nothing */
    631 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    632 
    633 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    634 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    635 #endif /* !WM_EVENT_COUNTERS */
    636 
    637 #define	CSR_READ(sc, reg)						\
    638 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    639 #define	CSR_WRITE(sc, reg, val)						\
    640 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    641 #define	CSR_WRITE_FLUSH(sc)						\
    642 	(void) CSR_READ((sc), WMREG_STATUS)
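
/*
 * PCI writes are posted, so a write may not reach the device until a read
 * comes back.  Illustrative "write, flush, wait" sequence (the CTRL_RST
 * write is an example only):
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);		<- STATUS read forces the post out
 *	delay(10000);
 */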
    643 
    644 #define ICH8_FLASH_READ32(sc, reg)					\
    645 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    646 	    (reg) + sc->sc_flashreg_offset)
    647 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    648 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    649 	    (reg) + sc->sc_flashreg_offset, (data))
    650 
    651 #define ICH8_FLASH_READ16(sc, reg)					\
    652 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    653 	    (reg) + sc->sc_flashreg_offset)
    654 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    655 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    656 	    (reg) + sc->sc_flashreg_offset, (data))
    657 
    658 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    659 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    660 
    661 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    662 #define	WM_CDTXADDR_HI(txq, x)						\
    663 	(sizeof(bus_addr_t) == 8 ?					\
    664 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    665 
    666 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    667 #define	WM_CDRXADDR_HI(rxq, x)						\
    668 	(sizeof(bus_addr_t) == 8 ?					\
    669 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
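
/*
 * Worked example: with an 8-byte bus_addr_t and a descriptor address of
 * 0x123456000, WM_CDTXADDR_LO() yields 0x23456000 and WM_CDTXADDR_HI()
 * yields 0x1.  On 32-bit bus_addr_t builds the high half is the constant
 * 0 and the 32-bit shift is never evaluated.
 */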
    670 
    671 /*
    672  * Register read/write functions,
    673  * other than CSR_{READ|WRITE}().
    674  */
    675 #if 0
    676 static inline uint32_t wm_io_read(struct wm_softc *, int);
    677 #endif
    678 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    679 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    680     uint32_t, uint32_t);
    681 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    682 
    683 /*
    684  * Descriptor sync/init functions.
    685  */
    686 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    687 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    688 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    689 
    690 /*
    691  * Device driver interface functions and commonly used functions.
    692  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    693  */
    694 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    695 static int	wm_match(device_t, cfdata_t, void *);
    696 static void	wm_attach(device_t, device_t, void *);
    697 static int	wm_detach(device_t, int);
    698 static bool	wm_suspend(device_t, const pmf_qual_t *);
    699 static bool	wm_resume(device_t, const pmf_qual_t *);
    700 static void	wm_watchdog(struct ifnet *);
    701 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    702     uint16_t *);
    703 static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    704     uint16_t *);
    705 static void	wm_tick(void *);
    706 static int	wm_ifflags_cb(struct ethercom *);
    707 static int	wm_ioctl(struct ifnet *, u_long, void *);
    708 /* MAC address related */
    709 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    710 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    711 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    712 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    713 static void	wm_set_filter(struct wm_softc *);
    714 /* Reset and init related */
    715 static void	wm_set_vlan(struct wm_softc *);
    716 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    717 static void	wm_get_auto_rd_done(struct wm_softc *);
    718 static void	wm_lan_init_done(struct wm_softc *);
    719 static void	wm_get_cfg_done(struct wm_softc *);
    720 static void	wm_phy_post_reset(struct wm_softc *);
    721 static int	wm_write_smbus_addr(struct wm_softc *);
    722 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    723 static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
    724 static void	wm_initialize_hardware_bits(struct wm_softc *);
    725 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    726 static void	wm_reset_phy(struct wm_softc *);
    727 static void	wm_flush_desc_rings(struct wm_softc *);
    728 static void	wm_reset(struct wm_softc *);
    729 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    730 static void	wm_rxdrain(struct wm_rxqueue *);
    731 static void	wm_init_rss(struct wm_softc *);
    732 static void	wm_adjust_qnum(struct wm_softc *, int);
    733 static inline bool	wm_is_using_msix(struct wm_softc *);
    734 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    735 static int	wm_softint_establish(struct wm_softc *, int, int);
    736 static int	wm_setup_legacy(struct wm_softc *);
    737 static int	wm_setup_msix(struct wm_softc *);
    738 static int	wm_init(struct ifnet *);
    739 static int	wm_init_locked(struct ifnet *);
    740 static void	wm_unset_stopping_flags(struct wm_softc *);
    741 static void	wm_set_stopping_flags(struct wm_softc *);
    742 static void	wm_stop(struct ifnet *, int);
    743 static void	wm_stop_locked(struct ifnet *, int);
    744 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    745 static void	wm_82547_txfifo_stall(void *);
    746 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    747 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    748 /* DMA related */
    749 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    750 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    751 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    752 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    753     struct wm_txqueue *);
    754 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    755 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    756 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    757     struct wm_rxqueue *);
    758 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    759 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    761 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    762 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    763 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    764 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    765     struct wm_txqueue *);
    766 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    767     struct wm_rxqueue *);
    768 static int	wm_alloc_txrx_queues(struct wm_softc *);
    769 static void	wm_free_txrx_queues(struct wm_softc *);
    770 static int	wm_init_txrx_queues(struct wm_softc *);
    771 /* Start */
    772 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    773     struct wm_txsoft *, uint32_t *, uint8_t *);
    774 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    775 static void	wm_start(struct ifnet *);
    776 static void	wm_start_locked(struct ifnet *);
    777 static int	wm_transmit(struct ifnet *, struct mbuf *);
    778 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    779 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    780     bool);
    781 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    782     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    783 static void	wm_nq_start(struct ifnet *);
    784 static void	wm_nq_start_locked(struct ifnet *);
    785 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    786 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    787 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    788     bool);
    789 static void	wm_deferred_start_locked(struct wm_txqueue *);
    790 static void	wm_handle_queue(void *);
    791 /* Interrupt */
    792 static bool	wm_txeof(struct wm_txqueue *, u_int);
    793 static bool	wm_rxeof(struct wm_rxqueue *, u_int);
    794 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    795 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    796 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    797 static void	wm_linkintr(struct wm_softc *, uint32_t);
    798 static int	wm_intr_legacy(void *);
    799 static inline void	wm_txrxintr_disable(struct wm_queue *);
    800 static inline void	wm_txrxintr_enable(struct wm_queue *);
    801 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    802 static int	wm_txrxintr_msix(void *);
    803 static int	wm_linkintr_msix(void *);
    804 
    805 /*
    806  * Media related.
    807  * GMII, SGMII, TBI, SERDES and SFP.
    808  */
    809 /* Common */
    810 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    811 /* GMII related */
    812 static void	wm_gmii_reset(struct wm_softc *);
    813 static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
    814 static int	wm_get_phy_id_82575(struct wm_softc *);
    815 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    816 static int	wm_gmii_mediachange(struct ifnet *);
    817 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    818 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    819 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    820 static int	wm_gmii_i82543_readreg(device_t, int, int);
    821 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    822 static int	wm_gmii_mdic_readreg(device_t, int, int);
    823 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    824 static int	wm_gmii_i82544_readreg(device_t, int, int);
    825 static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
    826 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    827 static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
    828 static int	wm_gmii_i80003_readreg(device_t, int, int);
    829 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    830 static int	wm_gmii_bm_readreg(device_t, int, int);
    831 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    832 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    833 static int	wm_gmii_hv_readreg(device_t, int, int);
    834 static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
    835 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    836 static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
    837 static int	wm_gmii_82580_readreg(device_t, int, int);
    838 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    839 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    840 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    841 static void	wm_gmii_statchg(struct ifnet *);
    842 /*
    843  * Kumeran related (80003, ICH* and PCH*).
    844  * These functions are not for accessing MII registers but for accessing
    845  * Kumeran-specific registers.
    846  */
    847 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    848 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    849 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    850 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    851 /* SGMII */
    852 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    853 static int	wm_sgmii_readreg(device_t, int, int);
    854 static void	wm_sgmii_writereg(device_t, int, int, int);
    855 /* TBI related */
    856 static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
    857 static void	wm_tbi_mediainit(struct wm_softc *);
    858 static int	wm_tbi_mediachange(struct ifnet *);
    859 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    860 static int	wm_check_for_link(struct wm_softc *);
    861 static void	wm_tbi_tick(struct wm_softc *);
    862 /* SERDES related */
    863 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    864 static int	wm_serdes_mediachange(struct ifnet *);
    865 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    866 static void	wm_serdes_tick(struct wm_softc *);
    867 /* SFP related */
    868 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    869 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    870 
    871 /*
    872  * NVM related.
    873  * Microwire, SPI (w/wo EERD) and Flash.
    874  */
    875 /* Misc functions */
    876 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    877 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    878 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    879 /* Microwire */
    880 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    881 /* SPI */
    882 static int	wm_nvm_ready_spi(struct wm_softc *);
    883 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    884 /* Using with EERD */
    885 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    886 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    887 /* Flash */
    888 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    889     unsigned int *);
    890 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    891 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    892 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    893     uint32_t *);
    894 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    895 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    896 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    897 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    898 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    899 /* iNVM */
    900 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    901 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    902 /* Lock, detecting NVM type, validate checksum and read */
    903 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    904 static int	wm_nvm_flash_presence_i210(struct wm_softc *);
    905 static int	wm_nvm_validate_checksum(struct wm_softc *);
    906 static void	wm_nvm_version_invm(struct wm_softc *);
    907 static void	wm_nvm_version(struct wm_softc *);
    908 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    909 
    910 /*
    911  * Hardware semaphores.
    912  * Very complex...
    913  */
    914 static int	wm_get_null(struct wm_softc *);
    915 static void	wm_put_null(struct wm_softc *);
    916 static int	wm_get_eecd(struct wm_softc *);
    917 static void	wm_put_eecd(struct wm_softc *);
    918 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    919 static void	wm_put_swsm_semaphore(struct wm_softc *);
    920 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    921 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    922 static int	wm_get_nvm_80003(struct wm_softc *);
    923 static void	wm_put_nvm_80003(struct wm_softc *);
    924 static int	wm_get_nvm_82571(struct wm_softc *);
    925 static void	wm_put_nvm_82571(struct wm_softc *);
    926 static int	wm_get_phy_82575(struct wm_softc *);
    927 static void	wm_put_phy_82575(struct wm_softc *);
    928 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    929 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    930 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    931 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    932 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    933 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    934 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    935 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    936 
    937 /*
    938  * Management mode and power management related subroutines.
    939  * BMC, AMT, suspend/resume and EEE.
    940  */
    941 #if 0
    942 static int	wm_check_mng_mode(struct wm_softc *);
    943 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    944 static int	wm_check_mng_mode_82574(struct wm_softc *);
    945 static int	wm_check_mng_mode_generic(struct wm_softc *);
    946 #endif
    947 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    948 static bool	wm_phy_resetisblocked(struct wm_softc *);
    949 static void	wm_get_hw_control(struct wm_softc *);
    950 static void	wm_release_hw_control(struct wm_softc *);
    951 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    952 static void	wm_smbustopci(struct wm_softc *);
    953 static void	wm_init_manageability(struct wm_softc *);
    954 static void	wm_release_manageability(struct wm_softc *);
    955 static void	wm_get_wakeup(struct wm_softc *);
    956 static int	wm_ulp_disable(struct wm_softc *);
    957 static void	wm_enable_phy_wakeup(struct wm_softc *);
    958 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    959 static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
    960 static void	wm_enable_wakeup(struct wm_softc *);
    961 static void	wm_disable_aspm(struct wm_softc *);
    962 /* LPLU (Low Power Link Up) */
    963 static void	wm_lplu_d0_disable(struct wm_softc *);
    964 /* EEE */
    965 static void	wm_set_eee_i350(struct wm_softc *);
    966 
    967 /*
    968  * Workarounds (mainly PHY related).
    969  * Basically, the PHY workarounds live in the PHY drivers.
    970  */
    971 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    972 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    973 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    974 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    975 static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
    976 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    977 static int	wm_k1_workaround_lv(struct wm_softc *);
    978 static int	wm_link_stall_workaround_hv(struct wm_softc *);
    979 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    980 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    981 static void	wm_reset_init_script_82575(struct wm_softc *);
    982 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    983 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    984 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    985 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    986 static void	wm_pll_workaround_i210(struct wm_softc *);
    987 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    988 
    989 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    990     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    991 
    992 /*
    993  * Devices supported by this driver.
    994  */
    995 static const struct wm_product {
    996 	pci_vendor_id_t		wmp_vendor;
    997 	pci_product_id_t	wmp_product;
    998 	const char		*wmp_name;
    999 	wm_chip_type		wmp_type;
   1000 	uint32_t		wmp_flags;
   1001 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
   1002 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
   1003 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
   1004 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
   1005 #define WMP_MEDIATYPE(x)	((x) & 0x03)
   1006 } wm_products[] = {
   1007 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
   1008 	  "Intel i82542 1000BASE-X Ethernet",
   1009 	  WM_T_82542_2_1,	WMP_F_FIBER },
   1010 
   1011 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
   1012 	  "Intel i82543GC 1000BASE-X Ethernet",
   1013 	  WM_T_82543,		WMP_F_FIBER },
   1014 
   1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
   1016 	  "Intel i82543GC 1000BASE-T Ethernet",
   1017 	  WM_T_82543,		WMP_F_COPPER },
   1018 
   1019 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
   1020 	  "Intel i82544EI 1000BASE-T Ethernet",
   1021 	  WM_T_82544,		WMP_F_COPPER },
   1022 
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
   1024 	  "Intel i82544EI 1000BASE-X Ethernet",
   1025 	  WM_T_82544,		WMP_F_FIBER },
   1026 
   1027 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
   1028 	  "Intel i82544GC 1000BASE-T Ethernet",
   1029 	  WM_T_82544,		WMP_F_COPPER },
   1030 
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
   1032 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
   1033 	  WM_T_82544,		WMP_F_COPPER },
   1034 
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
   1036 	  "Intel i82540EM 1000BASE-T Ethernet",
   1037 	  WM_T_82540,		WMP_F_COPPER },
   1038 
   1039 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
   1040 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1041 	  WM_T_82540,		WMP_F_COPPER },
   1042 
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1044 	  "Intel i82540EP 1000BASE-T Ethernet",
   1045 	  WM_T_82540,		WMP_F_COPPER },
   1046 
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1048 	  "Intel i82540EP 1000BASE-T Ethernet",
   1049 	  WM_T_82540,		WMP_F_COPPER },
   1050 
   1051 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1052 	  "Intel i82540EP 1000BASE-T Ethernet",
   1053 	  WM_T_82540,		WMP_F_COPPER },
   1054 
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1056 	  "Intel i82545EM 1000BASE-T Ethernet",
   1057 	  WM_T_82545,		WMP_F_COPPER },
   1058 
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1060 	  "Intel i82545GM 1000BASE-T Ethernet",
   1061 	  WM_T_82545_3,		WMP_F_COPPER },
   1062 
   1063 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1064 	  "Intel i82545GM 1000BASE-X Ethernet",
   1065 	  WM_T_82545_3,		WMP_F_FIBER },
   1066 
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1068 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1069 	  WM_T_82545_3,		WMP_F_SERDES },
   1070 
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1072 	  "Intel i82546EB 1000BASE-T Ethernet",
   1073 	  WM_T_82546,		WMP_F_COPPER },
   1074 
   1075 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1076 	  "Intel i82546EB 1000BASE-T Ethernet",
   1077 	  WM_T_82546,		WMP_F_COPPER },
   1078 
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1080 	  "Intel i82545EM 1000BASE-X Ethernet",
   1081 	  WM_T_82545,		WMP_F_FIBER },
   1082 
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1084 	  "Intel i82546EB 1000BASE-X Ethernet",
   1085 	  WM_T_82546,		WMP_F_FIBER },
   1086 
   1087 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1088 	  "Intel i82546GB 1000BASE-T Ethernet",
   1089 	  WM_T_82546_3,		WMP_F_COPPER },
   1090 
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1092 	  "Intel i82546GB 1000BASE-X Ethernet",
   1093 	  WM_T_82546_3,		WMP_F_FIBER },
   1094 
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1096 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1097 	  WM_T_82546_3,		WMP_F_SERDES },
   1098 
   1099 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1100 	  "i82546GB quad-port Gigabit Ethernet",
   1101 	  WM_T_82546_3,		WMP_F_COPPER },
   1102 
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1104 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1105 	  WM_T_82546_3,		WMP_F_COPPER },
   1106 
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1108 	  "Intel PRO/1000MT (82546GB)",
   1109 	  WM_T_82546_3,		WMP_F_COPPER },
   1110 
   1111 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1112 	  "Intel i82541EI 1000BASE-T Ethernet",
   1113 	  WM_T_82541,		WMP_F_COPPER },
   1114 
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1116 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1117 	  WM_T_82541,		WMP_F_COPPER },
   1118 
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1120 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1121 	  WM_T_82541,		WMP_F_COPPER },
   1122 
   1123 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1124 	  "Intel i82541ER 1000BASE-T Ethernet",
   1125 	  WM_T_82541_2,		WMP_F_COPPER },
   1126 
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1128 	  "Intel i82541GI 1000BASE-T Ethernet",
   1129 	  WM_T_82541_2,		WMP_F_COPPER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1132 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1133 	  WM_T_82541_2,		WMP_F_COPPER },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1136 	  "Intel i82541PI 1000BASE-T Ethernet",
   1137 	  WM_T_82541_2,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1140 	  "Intel i82547EI 1000BASE-T Ethernet",
   1141 	  WM_T_82547,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1144 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1145 	  WM_T_82547,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1148 	  "Intel i82547GI 1000BASE-T Ethernet",
   1149 	  WM_T_82547_2,		WMP_F_COPPER },
   1150 
   1151 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1152 	  "Intel PRO/1000 PT (82571EB)",
   1153 	  WM_T_82571,		WMP_F_COPPER },
   1154 
   1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1156 	  "Intel PRO/1000 PF (82571EB)",
   1157 	  WM_T_82571,		WMP_F_FIBER },
   1158 
   1159 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1160 	  "Intel PRO/1000 PB (82571EB)",
   1161 	  WM_T_82571,		WMP_F_SERDES },
   1162 
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1164 	  "Intel PRO/1000 QT (82571EB)",
   1165 	  WM_T_82571,		WMP_F_COPPER },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1168 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1169 	  WM_T_82571,		WMP_F_COPPER, },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1172 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1173 	  WM_T_82571,		WMP_F_COPPER, },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1176 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1177 	  WM_T_82571,		WMP_F_SERDES, },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1180 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1181 	  WM_T_82571,		WMP_F_SERDES, },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1184 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1185 	  WM_T_82571,		WMP_F_FIBER, },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1188 	  "Intel i82572EI 1000baseT Ethernet",
   1189 	  WM_T_82572,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1192 	  "Intel i82572EI 1000baseX Ethernet",
   1193 	  WM_T_82572,		WMP_F_FIBER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1196 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1197 	  WM_T_82572,		WMP_F_SERDES },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1200 	  "Intel i82572EI 1000baseT Ethernet",
   1201 	  WM_T_82572,		WMP_F_COPPER },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1204 	  "Intel i82573E",
   1205 	  WM_T_82573,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1208 	  "Intel i82573E IAMT",
   1209 	  WM_T_82573,		WMP_F_COPPER },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1212 	  "Intel i82573L Gigabit Ethernet",
   1213 	  WM_T_82573,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1216 	  "Intel i82574L",
   1217 	  WM_T_82574,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1220 	  "Intel i82574L",
   1221 	  WM_T_82574,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1224 	  "Intel i82583V",
   1225 	  WM_T_82583,		WMP_F_COPPER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1228 	  "i80003 dual 1000baseT Ethernet",
   1229 	  WM_T_80003,		WMP_F_COPPER },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1232 	  "i80003 dual 1000baseX Ethernet",
   1233 	  WM_T_80003,		WMP_F_COPPER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1236 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1237 	  WM_T_80003,		WMP_F_SERDES },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1240 	  "Intel i80003 1000baseT Ethernet",
   1241 	  WM_T_80003,		WMP_F_COPPER },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1244 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1245 	  WM_T_80003,		WMP_F_SERDES },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1248 	  "Intel i82801H (M_AMT) LAN Controller",
   1249 	  WM_T_ICH8,		WMP_F_COPPER },
   1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1251 	  "Intel i82801H (AMT) LAN Controller",
   1252 	  WM_T_ICH8,		WMP_F_COPPER },
   1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1254 	  "Intel i82801H LAN Controller",
   1255 	  WM_T_ICH8,		WMP_F_COPPER },
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1257 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1258 	  WM_T_ICH8,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1260 	  "Intel i82801H (M) LAN Controller",
   1261 	  WM_T_ICH8,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1263 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1264 	  WM_T_ICH8,		WMP_F_COPPER },
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1266 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1267 	  WM_T_ICH8,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1269 	  "82567V-3 LAN Controller",
   1270 	  WM_T_ICH8,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1272 	  "82801I (AMT) LAN Controller",
   1273 	  WM_T_ICH9,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1275 	  "82801I 10/100 LAN Controller",
   1276 	  WM_T_ICH9,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1278 	  "82801I (G) 10/100 LAN Controller",
   1279 	  WM_T_ICH9,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1281 	  "82801I (GT) 10/100 LAN Controller",
   1282 	  WM_T_ICH9,		WMP_F_COPPER },
   1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1284 	  "82801I (C) LAN Controller",
   1285 	  WM_T_ICH9,		WMP_F_COPPER },
   1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1287 	  "82801I mobile LAN Controller",
   1288 	  WM_T_ICH9,		WMP_F_COPPER },
   1289 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1290 	  "82801I mobile (V) LAN Controller",
   1291 	  WM_T_ICH9,		WMP_F_COPPER },
   1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1293 	  "82801I mobile (AMT) LAN Controller",
   1294 	  WM_T_ICH9,		WMP_F_COPPER },
   1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1296 	  "82567LM-4 LAN Controller",
   1297 	  WM_T_ICH9,		WMP_F_COPPER },
   1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1299 	  "82567LM-2 LAN Controller",
   1300 	  WM_T_ICH10,		WMP_F_COPPER },
   1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1302 	  "82567LF-2 LAN Controller",
   1303 	  WM_T_ICH10,		WMP_F_COPPER },
   1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1305 	  "82567LM-3 LAN Controller",
   1306 	  WM_T_ICH10,		WMP_F_COPPER },
   1307 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1308 	  "82567LF-3 LAN Controller",
   1309 	  WM_T_ICH10,		WMP_F_COPPER },
   1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1311 	  "82567V-2 LAN Controller",
   1312 	  WM_T_ICH10,		WMP_F_COPPER },
   1313 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1314 	  "82567V-3? LAN Controller",
   1315 	  WM_T_ICH10,		WMP_F_COPPER },
   1316 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1317 	  "HANKSVILLE LAN Controller",
   1318 	  WM_T_ICH10,		WMP_F_COPPER },
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1320 	  "PCH LAN (82577LM) Controller",
   1321 	  WM_T_PCH,		WMP_F_COPPER },
   1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1323 	  "PCH LAN (82577LC) Controller",
   1324 	  WM_T_PCH,		WMP_F_COPPER },
   1325 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1326 	  "PCH LAN (82578DM) Controller",
   1327 	  WM_T_PCH,		WMP_F_COPPER },
   1328 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1329 	  "PCH LAN (82578DC) Controller",
   1330 	  WM_T_PCH,		WMP_F_COPPER },
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1332 	  "PCH2 LAN (82579LM) Controller",
   1333 	  WM_T_PCH2,		WMP_F_COPPER },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1335 	  "PCH2 LAN (82579V) Controller",
   1336 	  WM_T_PCH2,		WMP_F_COPPER },
   1337 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1338 	  "82575EB dual-1000baseT Ethernet",
   1339 	  WM_T_82575,		WMP_F_COPPER },
   1340 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1341 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1342 	  WM_T_82575,		WMP_F_SERDES },
   1343 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1344 	  "82575GB quad-1000baseT Ethernet",
   1345 	  WM_T_82575,		WMP_F_COPPER },
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1347 	  "82575GB quad-1000baseT Ethernet (PM)",
   1348 	  WM_T_82575,		WMP_F_COPPER },
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1350 	  "82576 1000BaseT Ethernet",
   1351 	  WM_T_82576,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1353 	  "82576 1000BaseX Ethernet",
   1354 	  WM_T_82576,		WMP_F_FIBER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1357 	  "82576 gigabit Ethernet (SERDES)",
   1358 	  WM_T_82576,		WMP_F_SERDES },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1361 	  "82576 quad-1000BaseT Ethernet",
   1362 	  WM_T_82576,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1365 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1366 	  WM_T_82576,		WMP_F_COPPER },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1369 	  "82576 gigabit Ethernet",
   1370 	  WM_T_82576,		WMP_F_COPPER },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1373 	  "82576 gigabit Ethernet (SERDES)",
   1374 	  WM_T_82576,		WMP_F_SERDES },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1376 	  "82576 quad-gigabit Ethernet (SERDES)",
   1377 	  WM_T_82576,		WMP_F_SERDES },
   1378 
   1379 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1380 	  "82580 1000BaseT Ethernet",
   1381 	  WM_T_82580,		WMP_F_COPPER },
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1383 	  "82580 1000BaseX Ethernet",
   1384 	  WM_T_82580,		WMP_F_FIBER },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1387 	  "82580 1000BaseT Ethernet (SERDES)",
   1388 	  WM_T_82580,		WMP_F_SERDES },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1391 	  "82580 gigabit Ethernet (SGMII)",
   1392 	  WM_T_82580,		WMP_F_COPPER },
   1393 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1394 	  "82580 dual-1000BaseT Ethernet",
   1395 	  WM_T_82580,		WMP_F_COPPER },
   1396 
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1398 	  "82580 quad-1000BaseX Ethernet",
   1399 	  WM_T_82580,		WMP_F_FIBER },
   1400 
   1401 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1402 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1403 	  WM_T_82580,		WMP_F_COPPER },
   1404 
   1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1406 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1407 	  WM_T_82580,		WMP_F_SERDES },
   1408 
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1410 	  "DH89XXCC 1000BASE-KX Ethernet",
   1411 	  WM_T_82580,		WMP_F_SERDES },
   1412 
   1413 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1414 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1415 	  WM_T_82580,		WMP_F_SERDES },
   1416 
   1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1418 	  "I350 Gigabit Network Connection",
   1419 	  WM_T_I350,		WMP_F_COPPER },
   1420 
   1421 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1422 	  "I350 Gigabit Fiber Network Connection",
   1423 	  WM_T_I350,		WMP_F_FIBER },
   1424 
   1425 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1426 	  "I350 Gigabit Backplane Connection",
   1427 	  WM_T_I350,		WMP_F_SERDES },
   1428 
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1430 	  "I350 Quad Port Gigabit Ethernet",
   1431 	  WM_T_I350,		WMP_F_SERDES },
   1432 
   1433 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1434 	  "I350 Gigabit Connection",
   1435 	  WM_T_I350,		WMP_F_COPPER },
   1436 
   1437 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1438 	  "I354 Gigabit Ethernet (KX)",
   1439 	  WM_T_I354,		WMP_F_SERDES },
   1440 
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1442 	  "I354 Gigabit Ethernet (SGMII)",
   1443 	  WM_T_I354,		WMP_F_COPPER },
   1444 
   1445 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1446 	  "I354 Gigabit Ethernet (2.5G)",
   1447 	  WM_T_I354,		WMP_F_COPPER },
   1448 
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1450 	  "I210-T1 Ethernet Server Adapter",
   1451 	  WM_T_I210,		WMP_F_COPPER },
   1452 
   1453 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1454 	  "I210 Ethernet (Copper OEM)",
   1455 	  WM_T_I210,		WMP_F_COPPER },
   1456 
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1458 	  "I210 Ethernet (Copper IT)",
   1459 	  WM_T_I210,		WMP_F_COPPER },
   1460 
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1462 	  "I210 Ethernet (FLASH less)",
   1463 	  WM_T_I210,		WMP_F_COPPER },
   1464 
   1465 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1466 	  "I210 Gigabit Ethernet (Fiber)",
   1467 	  WM_T_I210,		WMP_F_FIBER },
   1468 
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1470 	  "I210 Gigabit Ethernet (SERDES)",
   1471 	  WM_T_I210,		WMP_F_SERDES },
   1472 
   1473 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1474 	  "I210 Gigabit Ethernet (FLASH less)",
   1475 	  WM_T_I210,		WMP_F_SERDES },
   1476 
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1478 	  "I210 Gigabit Ethernet (SGMII)",
   1479 	  WM_T_I210,		WMP_F_COPPER },
   1480 
   1481 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1482 	  "I211 Ethernet (COPPER)",
   1483 	  WM_T_I211,		WMP_F_COPPER },
   1484 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1485 	  "I217 V Ethernet Connection",
   1486 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1487 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1488 	  "I217 LM Ethernet Connection",
   1489 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1490 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1491 	  "I218 V Ethernet Connection",
   1492 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1493 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1494 	  "I218 V Ethernet Connection",
   1495 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1496 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1497 	  "I218 V Ethernet Connection",
   1498 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1499 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1500 	  "I218 LM Ethernet Connection",
   1501 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1502 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1503 	  "I218 LM Ethernet Connection",
   1504 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1505 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1506 	  "I218 LM Ethernet Connection",
   1507 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1508 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1509 	  "I219 V Ethernet Connection",
   1510 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1511 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1512 	  "I219 V Ethernet Connection",
   1513 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1514 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1515 	  "I219 V Ethernet Connection",
   1516 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1517 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1518 	  "I219 V Ethernet Connection",
   1519 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1520 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1521 	  "I219 LM Ethernet Connection",
   1522 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1523 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1524 	  "I219 LM Ethernet Connection",
   1525 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1526 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1527 	  "I219 LM Ethernet Connection",
   1528 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1529 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1530 	  "I219 LM Ethernet Connection",
   1531 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1532 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1533 	  "I219 LM Ethernet Connection",
   1534 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1535 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1536 	  "I219 V Ethernet Connection",
   1537 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1538 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1539 	  "I219 V Ethernet Connection",
   1540 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1541 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1542 	  "I219 LM Ethernet Connection",
   1543 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1544 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1545 	  "I219 LM Ethernet Connection",
   1546 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1547 	{ 0,			0,
   1548 	  NULL,
   1549 	  0,			0 },
   1550 };
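         /*
          * The all-zero entry above terminates the table: wm_lookup()
          * below stops scanning at the first entry whose wmp_name is NULL.
          */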
   1551 
   1552 /*
   1553  * Register read/write functions.
   1554  * Other than CSR_{READ|WRITE}().
   1555  */
   1556 
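         /*
          * On chips with an I/O BAR, device registers can also be reached
          * indirectly through the IOADDR/IODATA register pair at offsets 0
          * and 4 of that BAR: write the register offset to IOADDR, then
          * read or write the value through IODATA, as the two helpers
          * below do.
          */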
   1557 #if 0 /* Not currently used */
   1558 static inline uint32_t
   1559 wm_io_read(struct wm_softc *sc, int reg)
   1560 {
   1561 
   1562 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1563 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1564 }
   1565 #endif
   1566 
   1567 static inline void
   1568 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1569 {
   1570 
   1571 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1572 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1573 }
   1574 
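         /*
          * Write one byte-wide field of an 82575 SCTL-style control
          * register: the data byte and its offset are packed into a single
          * register write, and the hardware sets SCTL_CTL_READY once it
          * has latched the value.  We poll for that in 5us steps, up to
          * SCTL_CTL_POLL_TIMEOUT iterations, and warn if it never appears.
          */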
   1575 static inline void
   1576 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1577     uint32_t data)
   1578 {
   1579 	uint32_t regval;
   1580 	int i;
   1581 
   1582 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1583 
   1584 	CSR_WRITE(sc, reg, regval);
   1585 
   1586 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1587 		delay(5);
   1588 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1589 			break;
   1590 	}
   1591 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1592 		aprint_error("%s: WARNING:"
   1593 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1594 		    device_xname(sc->sc_dev), reg);
   1595 	}
   1596 }
   1597 
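         /*
          * Store a DMA address into the two little-endian 32-bit halves
          * of a wiseman descriptor address; with a 32-bit bus_addr_t the
          * high half is simply zero.
          */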
   1598 static inline void
   1599 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1600 {
   1601 	wa->wa_low = htole32(v & 0xffffffffU);
   1602 	if (sizeof(bus_addr_t) == 8)
   1603 		wa->wa_high = htole32((uint64_t) v >> 32);
   1604 	else
   1605 		wa->wa_high = 0;
   1606 }
   1607 
   1608 /*
   1609  * Descriptor sync/init functions.
   1610  */
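         /*
          * A sync request may wrap around the end of the Tx ring, in which
          * case it is split into two bus_dmamap_sync() calls.  For example,
          * on a 256-descriptor ring, start = 250 and num = 10 sync
          * descriptors [250, 256) first and then [0, 4).
          */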
   1611 static inline void
   1612 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1613 {
   1614 	struct wm_softc *sc = txq->txq_sc;
   1615 
   1616 	/* If it will wrap around, sync to the end of the ring. */
   1617 	if ((start + num) > WM_NTXDESC(txq)) {
   1618 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1619 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1620 		    (WM_NTXDESC(txq) - start), ops);
   1621 		num -= (WM_NTXDESC(txq) - start);
   1622 		start = 0;
   1623 	}
   1624 
   1625 	/* Now sync whatever is left. */
   1626 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1627 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1628 }
   1629 
   1630 static inline void
   1631 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1632 {
   1633 	struct wm_softc *sc = rxq->rxq_sc;
   1634 
   1635 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1636 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1637 }
   1638 
   1639 static inline void
   1640 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1641 {
   1642 	struct wm_softc *sc = rxq->rxq_sc;
   1643 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1644 	struct mbuf *m = rxs->rxs_mbuf;
   1645 
   1646 	/*
   1647 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1648 	 * so that the payload after the Ethernet header is aligned
   1649 	 * to a 4-byte boundary.
    1650 	 *
   1651 	 * XXX BRAINDAMAGE ALERT!
   1652 	 * The stupid chip uses the same size for every buffer, which
   1653 	 * is set in the Receive Control register.  We are using the 2K
   1654 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1655 	 * reason, we can't "scoot" packets longer than the standard
   1656 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1657 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1658 	 * the upper layer copy the headers.
   1659 	 */
   1660 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1661 
   1662 	if (sc->sc_type == WM_T_82574) {
   1663 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1664 		rxd->erx_data.erxd_addr =
   1665 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1666 		rxd->erx_data.erxd_dd = 0;
   1667 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1668 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1669 
   1670 		rxd->nqrx_data.nrxd_paddr =
   1671 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1672 		/* Currently, split header is not supported. */
   1673 		rxd->nqrx_data.nrxd_haddr = 0;
   1674 	} else {
   1675 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1676 
   1677 		wm_set_dma_addr(&rxd->wrx_addr,
   1678 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1679 		rxd->wrx_len = 0;
   1680 		rxd->wrx_cksum = 0;
   1681 		rxd->wrx_status = 0;
   1682 		rxd->wrx_errors = 0;
   1683 		rxd->wrx_special = 0;
   1684 	}
   1685 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1686 
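         	/*
         	 * Move the receive descriptor tail to this slot so the
         	 * hardware knows this descriptor is ready to be filled.
         	 */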
   1687 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1688 }
   1689 
   1690 /*
   1691  * Device driver interface functions and commonly used functions.
   1692  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1693  */
   1694 
   1695 /* Lookup supported device table */
   1696 static const struct wm_product *
   1697 wm_lookup(const struct pci_attach_args *pa)
   1698 {
   1699 	const struct wm_product *wmp;
   1700 
   1701 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1702 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1703 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1704 			return wmp;
   1705 	}
   1706 	return NULL;
   1707 }
   1708 
   1709 /* The match function (ca_match) */
   1710 static int
   1711 wm_match(device_t parent, cfdata_t cf, void *aux)
   1712 {
   1713 	struct pci_attach_args *pa = aux;
   1714 
   1715 	if (wm_lookup(pa) != NULL)
   1716 		return 1;
   1717 
   1718 	return 0;
   1719 }
   1720 
   1721 /* The attach function (ca_attach) */
   1722 static void
   1723 wm_attach(device_t parent, device_t self, void *aux)
   1724 {
   1725 	struct wm_softc *sc = device_private(self);
   1726 	struct pci_attach_args *pa = aux;
   1727 	prop_dictionary_t dict;
   1728 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1729 	pci_chipset_tag_t pc = pa->pa_pc;
   1730 	int counts[PCI_INTR_TYPE_SIZE];
   1731 	pci_intr_type_t max_type;
   1732 	const char *eetype, *xname;
   1733 	bus_space_tag_t memt;
   1734 	bus_space_handle_t memh;
   1735 	bus_size_t memsize;
   1736 	int memh_valid;
   1737 	int i, error;
   1738 	const struct wm_product *wmp;
   1739 	prop_data_t ea;
   1740 	prop_number_t pn;
   1741 	uint8_t enaddr[ETHER_ADDR_LEN];
   1742 	char buf[256];
   1743 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1744 	pcireg_t preg, memtype;
   1745 	uint16_t eeprom_data, apme_mask;
   1746 	bool force_clear_smbi;
   1747 	uint32_t link_mode;
   1748 	uint32_t reg;
   1749 
   1750 	sc->sc_dev = self;
   1751 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1752 	sc->sc_core_stopping = false;
   1753 
   1754 	wmp = wm_lookup(pa);
   1755 #ifdef DIAGNOSTIC
   1756 	if (wmp == NULL) {
   1757 		printf("\n");
   1758 		panic("wm_attach: impossible");
   1759 	}
   1760 #endif
   1761 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1762 
   1763 	sc->sc_pc = pa->pa_pc;
   1764 	sc->sc_pcitag = pa->pa_tag;
   1765 
   1766 	if (pci_dma64_available(pa))
   1767 		sc->sc_dmat = pa->pa_dmat64;
   1768 	else
   1769 		sc->sc_dmat = pa->pa_dmat;
   1770 
   1771 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1772 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1773 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1774 
   1775 	sc->sc_type = wmp->wmp_type;
   1776 
   1777 	/* Set default function pointers */
   1778 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1779 	sc->phy.release = sc->nvm.release = wm_put_null;
   1780 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1781 
   1782 	if (sc->sc_type < WM_T_82543) {
   1783 		if (sc->sc_rev < 2) {
   1784 			aprint_error_dev(sc->sc_dev,
   1785 			    "i82542 must be at least rev. 2\n");
   1786 			return;
   1787 		}
   1788 		if (sc->sc_rev < 3)
   1789 			sc->sc_type = WM_T_82542_2_0;
   1790 	}
   1791 
   1792 	/*
   1793 	 * Disable MSI for Errata:
   1794 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1795 	 *
   1796 	 *  82544: Errata 25
   1797 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1798 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1799 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1800 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1801 	 *
   1802 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1803 	 *
   1804 	 *  82571 & 82572: Errata 63
   1805 	 */
   1806 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1807 	    || (sc->sc_type == WM_T_82572))
   1808 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1809 
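         	/*
         	 * The 82575 and newer chips listed below use the newer
         	 * ("advanced") descriptor formats; WM_F_NEWQUEUE selects the
         	 * nq Tx/Rx paths throughout the driver.
         	 */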
   1810 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1811 	    || (sc->sc_type == WM_T_82580)
   1812 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1813 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1814 		sc->sc_flags |= WM_F_NEWQUEUE;
   1815 
   1816 	/* Set device properties (mactype) */
   1817 	dict = device_properties(sc->sc_dev);
   1818 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1819 
   1820 	/*
    1821 	 * Map the device.  All devices support memory-mapped access,
   1822 	 * and it is really required for normal operation.
   1823 	 */
   1824 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1825 	switch (memtype) {
   1826 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1827 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1828 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1829 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1830 		break;
   1831 	default:
   1832 		memh_valid = 0;
   1833 		break;
   1834 	}
   1835 
   1836 	if (memh_valid) {
   1837 		sc->sc_st = memt;
   1838 		sc->sc_sh = memh;
   1839 		sc->sc_ss = memsize;
   1840 	} else {
   1841 		aprint_error_dev(sc->sc_dev,
   1842 		    "unable to map device registers\n");
   1843 		return;
   1844 	}
   1845 
   1846 	/*
   1847 	 * In addition, i82544 and later support I/O mapped indirect
   1848 	 * register access.  It is not desirable (nor supported in
   1849 	 * this driver) to use it for normal operation, though it is
   1850 	 * required to work around bugs in some chip versions.
   1851 	 */
   1852 	if (sc->sc_type >= WM_T_82544) {
   1853 		/* First we have to find the I/O BAR. */
   1854 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1855 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1856 			if (memtype == PCI_MAPREG_TYPE_IO)
   1857 				break;
   1858 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1859 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1860 				i += 4;	/* skip high bits, too */
   1861 		}
   1862 		if (i < PCI_MAPREG_END) {
   1863 			/*
    1864 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1865 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1866 			 * That's no problem, because newer chips don't have
    1867 			 * this bug.
    1868 			 *
    1869 			 * The i8254x apparently doesn't respond when the
    1870 			 * I/O BAR is 0, which looks as if it hasn't been
    1871 			 * configured.
   1872 			 */
   1873 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1874 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1875 				aprint_error_dev(sc->sc_dev,
   1876 				    "WARNING: I/O BAR at zero.\n");
   1877 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1878 					0, &sc->sc_iot, &sc->sc_ioh,
   1879 					NULL, &sc->sc_ios) == 0) {
   1880 				sc->sc_flags |= WM_F_IOH_VALID;
   1881 			} else
   1882 				aprint_error_dev(sc->sc_dev,
   1883 				    "WARNING: unable to map I/O space\n");
   1884 		}
   1885 
   1886 	}
   1887 
   1888 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1889 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1890 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1891 	if (sc->sc_type < WM_T_82542_2_1)
   1892 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1893 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1894 
   1895 	/* power up chip */
   1896 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1897 	    && error != EOPNOTSUPP) {
   1898 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1899 		return;
   1900 	}
   1901 
   1902 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1903 	/*
    1904 	 * Don't use MSI-X if we can use only one queue, to save
    1905 	 * interrupt resources.
   1906 	 */
   1907 	if (sc->sc_nqueues > 1) {
   1908 		max_type = PCI_INTR_TYPE_MSIX;
   1909 		/*
    1910 		 * The 82583 has an MSI-X capability in its PCI
    1911 		 * configuration space but doesn't actually support it;
    1912 		 * at least the documentation says nothing about MSI-X.
   1913 		 */
   1914 		counts[PCI_INTR_TYPE_MSIX]
   1915 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1916 	} else {
   1917 		max_type = PCI_INTR_TYPE_MSI;
   1918 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1919 	}
   1920 
   1921 	/* Allocation settings */
   1922 	counts[PCI_INTR_TYPE_MSI] = 1;
   1923 	counts[PCI_INTR_TYPE_INTX] = 1;
   1924 	/* overridden by disable flags */
   1925 	if (wm_disable_msi != 0) {
   1926 		counts[PCI_INTR_TYPE_MSI] = 0;
   1927 		if (wm_disable_msix != 0) {
   1928 			max_type = PCI_INTR_TYPE_INTX;
   1929 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1930 		}
   1931 	} else if (wm_disable_msix != 0) {
   1932 		max_type = PCI_INTR_TYPE_MSI;
   1933 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1934 	}
   1935 
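         	/*
         	 * Try interrupt types from max_type downward: if MSI-X setup
         	 * fails, release the vectors and retry with MSI; if MSI setup
         	 * fails, retry once more with INTx.
         	 */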
   1936 alloc_retry:
   1937 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1938 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1939 		return;
   1940 	}
   1941 
   1942 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1943 		error = wm_setup_msix(sc);
   1944 		if (error) {
   1945 			pci_intr_release(pc, sc->sc_intrs,
   1946 			    counts[PCI_INTR_TYPE_MSIX]);
   1947 
   1948 			/* Setup for MSI: Disable MSI-X */
   1949 			max_type = PCI_INTR_TYPE_MSI;
   1950 			counts[PCI_INTR_TYPE_MSI] = 1;
   1951 			counts[PCI_INTR_TYPE_INTX] = 1;
   1952 			goto alloc_retry;
   1953 		}
   1954 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1955 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1956 		error = wm_setup_legacy(sc);
   1957 		if (error) {
   1958 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1959 			    counts[PCI_INTR_TYPE_MSI]);
   1960 
   1961 			/* The next try is for INTx: Disable MSI */
   1962 			max_type = PCI_INTR_TYPE_INTX;
   1963 			counts[PCI_INTR_TYPE_INTX] = 1;
   1964 			goto alloc_retry;
   1965 		}
   1966 	} else {
   1967 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1968 		error = wm_setup_legacy(sc);
   1969 		if (error) {
   1970 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1971 			    counts[PCI_INTR_TYPE_INTX]);
   1972 			return;
   1973 		}
   1974 	}
   1975 
   1976 	/*
   1977 	 * Check the function ID (unit number of the chip).
   1978 	 */
   1979 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1980 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1981 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1982 	    || (sc->sc_type == WM_T_82580)
   1983 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1984 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1985 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1986 	else
   1987 		sc->sc_funcid = 0;
   1988 
   1989 	/*
   1990 	 * Determine a few things about the bus we're connected to.
   1991 	 */
   1992 	if (sc->sc_type < WM_T_82543) {
   1993 		/* We don't really know the bus characteristics here. */
   1994 		sc->sc_bus_speed = 33;
   1995 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1996 		/*
    1997 		 * CSA (Communication Streaming Architecture) is about as
    1998 		 * fast as a 32-bit 66MHz PCI bus.
   1999 		 */
   2000 		sc->sc_flags |= WM_F_CSA;
   2001 		sc->sc_bus_speed = 66;
   2002 		aprint_verbose_dev(sc->sc_dev,
   2003 		    "Communication Streaming Architecture\n");
   2004 		if (sc->sc_type == WM_T_82547) {
   2005 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2006 			callout_setfunc(&sc->sc_txfifo_ch,
   2007 			    wm_82547_txfifo_stall, sc);
   2008 			aprint_verbose_dev(sc->sc_dev,
   2009 			    "using 82547 Tx FIFO stall work-around\n");
   2010 		}
   2011 	} else if (sc->sc_type >= WM_T_82571) {
   2012 		sc->sc_flags |= WM_F_PCIE;
   2013 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2014 		    && (sc->sc_type != WM_T_ICH10)
   2015 		    && (sc->sc_type != WM_T_PCH)
   2016 		    && (sc->sc_type != WM_T_PCH2)
   2017 		    && (sc->sc_type != WM_T_PCH_LPT)
   2018 		    && (sc->sc_type != WM_T_PCH_SPT)
   2019 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2020 			/* ICH* and PCH* have no PCIe capability registers */
   2021 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2022 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2023 				NULL) == 0)
   2024 				aprint_error_dev(sc->sc_dev,
   2025 				    "unable to find PCIe capability\n");
   2026 		}
   2027 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2028 	} else {
   2029 		reg = CSR_READ(sc, WMREG_STATUS);
   2030 		if (reg & STATUS_BUS64)
   2031 			sc->sc_flags |= WM_F_BUS64;
   2032 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2033 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2034 
   2035 			sc->sc_flags |= WM_F_PCIX;
   2036 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2037 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2038 				aprint_error_dev(sc->sc_dev,
   2039 				    "unable to find PCIX capability\n");
   2040 			else if (sc->sc_type != WM_T_82545_3 &&
   2041 				 sc->sc_type != WM_T_82546_3) {
   2042 				/*
   2043 				 * Work around a problem caused by the BIOS
   2044 				 * setting the max memory read byte count
   2045 				 * incorrectly.
   2046 				 */
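         				/*
         				 * MMRBC is encoded as (512 << n)
         				 * bytes, so e.g. bytecnt == 3 means
         				 * 4096 bytes; if the status register
         				 * only allows maxb == 2 (2048 bytes),
         				 * the command field is clamped down
         				 * to match.
         				 */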
   2047 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2048 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2049 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2050 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2051 
   2052 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2053 				    PCIX_CMD_BYTECNT_SHIFT;
   2054 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2055 				    PCIX_STATUS_MAXB_SHIFT;
   2056 				if (bytecnt > maxb) {
   2057 					aprint_verbose_dev(sc->sc_dev,
   2058 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2059 					    512 << bytecnt, 512 << maxb);
   2060 					pcix_cmd = (pcix_cmd &
   2061 					    ~PCIX_CMD_BYTECNT_MASK) |
   2062 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2063 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2064 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2065 					    pcix_cmd);
   2066 				}
   2067 			}
   2068 		}
   2069 		/*
   2070 		 * The quad port adapter is special; it has a PCIX-PCIX
   2071 		 * bridge on the board, and can run the secondary bus at
   2072 		 * a higher speed.
   2073 		 */
   2074 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2075 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2076 								      : 66;
   2077 		} else if (sc->sc_flags & WM_F_PCIX) {
   2078 			switch (reg & STATUS_PCIXSPD_MASK) {
   2079 			case STATUS_PCIXSPD_50_66:
   2080 				sc->sc_bus_speed = 66;
   2081 				break;
   2082 			case STATUS_PCIXSPD_66_100:
   2083 				sc->sc_bus_speed = 100;
   2084 				break;
   2085 			case STATUS_PCIXSPD_100_133:
   2086 				sc->sc_bus_speed = 133;
   2087 				break;
   2088 			default:
   2089 				aprint_error_dev(sc->sc_dev,
   2090 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2091 				    reg & STATUS_PCIXSPD_MASK);
   2092 				sc->sc_bus_speed = 66;
   2093 				break;
   2094 			}
   2095 		} else
   2096 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2097 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2098 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2099 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2100 	}
   2101 
    2102 	/* Disable ASPM L0s and/or L1 as a workaround */
   2103 	wm_disable_aspm(sc);
   2104 
   2105 	/* clear interesting stat counters */
   2106 	CSR_READ(sc, WMREG_COLC);
   2107 	CSR_READ(sc, WMREG_RXERRC);
   2108 
   2109 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2110 	    || (sc->sc_type >= WM_T_ICH8))
   2111 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2112 	if (sc->sc_type >= WM_T_ICH8)
   2113 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2114 
   2115 	/* Set PHY, NVM mutex related stuff */
   2116 	switch (sc->sc_type) {
   2117 	case WM_T_82542_2_0:
   2118 	case WM_T_82542_2_1:
   2119 	case WM_T_82543:
   2120 	case WM_T_82544:
   2121 		/* Microwire */
   2122 		sc->nvm.read = wm_nvm_read_uwire;
   2123 		sc->sc_nvm_wordsize = 64;
   2124 		sc->sc_nvm_addrbits = 6;
   2125 		break;
   2126 	case WM_T_82540:
   2127 	case WM_T_82545:
   2128 	case WM_T_82545_3:
   2129 	case WM_T_82546:
   2130 	case WM_T_82546_3:
   2131 		/* Microwire */
   2132 		sc->nvm.read = wm_nvm_read_uwire;
   2133 		reg = CSR_READ(sc, WMREG_EECD);
   2134 		if (reg & EECD_EE_SIZE) {
   2135 			sc->sc_nvm_wordsize = 256;
   2136 			sc->sc_nvm_addrbits = 8;
   2137 		} else {
   2138 			sc->sc_nvm_wordsize = 64;
   2139 			sc->sc_nvm_addrbits = 6;
   2140 		}
   2141 		sc->sc_flags |= WM_F_LOCK_EECD;
   2142 		sc->nvm.acquire = wm_get_eecd;
   2143 		sc->nvm.release = wm_put_eecd;
   2144 		break;
   2145 	case WM_T_82541:
   2146 	case WM_T_82541_2:
   2147 	case WM_T_82547:
   2148 	case WM_T_82547_2:
   2149 		reg = CSR_READ(sc, WMREG_EECD);
   2150 		/*
    2151 		 * wm_nvm_set_addrbits_size_eecd() accesses SPI only on the
    2152 		 * 8254[17], so set the flags and functions before calling it.
   2153 		 */
   2154 		sc->sc_flags |= WM_F_LOCK_EECD;
   2155 		sc->nvm.acquire = wm_get_eecd;
   2156 		sc->nvm.release = wm_put_eecd;
   2157 		if (reg & EECD_EE_TYPE) {
   2158 			/* SPI */
   2159 			sc->nvm.read = wm_nvm_read_spi;
   2160 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2161 			wm_nvm_set_addrbits_size_eecd(sc);
   2162 		} else {
   2163 			/* Microwire */
   2164 			sc->nvm.read = wm_nvm_read_uwire;
   2165 			if ((reg & EECD_EE_ABITS) != 0) {
   2166 				sc->sc_nvm_wordsize = 256;
   2167 				sc->sc_nvm_addrbits = 8;
   2168 			} else {
   2169 				sc->sc_nvm_wordsize = 64;
   2170 				sc->sc_nvm_addrbits = 6;
   2171 			}
   2172 		}
   2173 		break;
   2174 	case WM_T_82571:
   2175 	case WM_T_82572:
   2176 		/* SPI */
   2177 		sc->nvm.read = wm_nvm_read_eerd;
    2178 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2179 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2180 		wm_nvm_set_addrbits_size_eecd(sc);
   2181 		sc->phy.acquire = wm_get_swsm_semaphore;
   2182 		sc->phy.release = wm_put_swsm_semaphore;
   2183 		sc->nvm.acquire = wm_get_nvm_82571;
   2184 		sc->nvm.release = wm_put_nvm_82571;
   2185 		break;
   2186 	case WM_T_82573:
   2187 	case WM_T_82574:
   2188 	case WM_T_82583:
   2189 		sc->nvm.read = wm_nvm_read_eerd;
    2190 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2191 		if (sc->sc_type == WM_T_82573) {
   2192 			sc->phy.acquire = wm_get_swsm_semaphore;
   2193 			sc->phy.release = wm_put_swsm_semaphore;
   2194 			sc->nvm.acquire = wm_get_nvm_82571;
   2195 			sc->nvm.release = wm_put_nvm_82571;
   2196 		} else {
   2197 			/* Both PHY and NVM use the same semaphore. */
   2198 			sc->phy.acquire = sc->nvm.acquire
   2199 			    = wm_get_swfwhw_semaphore;
   2200 			sc->phy.release = sc->nvm.release
   2201 			    = wm_put_swfwhw_semaphore;
   2202 		}
   2203 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2204 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2205 			sc->sc_nvm_wordsize = 2048;
   2206 		} else {
   2207 			/* SPI */
   2208 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2209 			wm_nvm_set_addrbits_size_eecd(sc);
   2210 		}
   2211 		break;
   2212 	case WM_T_82575:
   2213 	case WM_T_82576:
   2214 	case WM_T_82580:
   2215 	case WM_T_I350:
   2216 	case WM_T_I354:
   2217 	case WM_T_80003:
   2218 		/* SPI */
   2219 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2220 		wm_nvm_set_addrbits_size_eecd(sc);
   2221 		if ((sc->sc_type == WM_T_80003)
   2222 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2223 			sc->nvm.read = wm_nvm_read_eerd;
   2224 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2225 		} else {
   2226 			sc->nvm.read = wm_nvm_read_spi;
   2227 			sc->sc_flags |= WM_F_LOCK_EECD;
   2228 		}
   2229 		sc->phy.acquire = wm_get_phy_82575;
   2230 		sc->phy.release = wm_put_phy_82575;
   2231 		sc->nvm.acquire = wm_get_nvm_80003;
   2232 		sc->nvm.release = wm_put_nvm_80003;
   2233 		break;
   2234 	case WM_T_ICH8:
   2235 	case WM_T_ICH9:
   2236 	case WM_T_ICH10:
   2237 	case WM_T_PCH:
   2238 	case WM_T_PCH2:
   2239 	case WM_T_PCH_LPT:
   2240 		sc->nvm.read = wm_nvm_read_ich8;
   2241 		/* FLASH */
   2242 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2243 		sc->sc_nvm_wordsize = 2048;
   2244 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2245 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2246 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2247 			aprint_error_dev(sc->sc_dev,
   2248 			    "can't map FLASH registers\n");
   2249 			goto out;
   2250 		}
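         		/*
         		 * GFPREG describes the flash region in units of
         		 * ICH_FLASH_SECTOR_SIZE sectors: the low bits hold the
         		 * first sector, bits 16 and up the last one.  The bank
         		 * size computed below is the whole region in bytes,
         		 * converted to 16-bit words and halved because the
         		 * region holds two banks.
         		 */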
   2251 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2252 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2253 		    ICH_FLASH_SECTOR_SIZE;
   2254 		sc->sc_ich8_flash_bank_size =
   2255 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2256 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2257 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2258 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2259 		sc->sc_flashreg_offset = 0;
   2260 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2261 		sc->phy.release = wm_put_swflag_ich8lan;
   2262 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2263 		sc->nvm.release = wm_put_nvm_ich8lan;
   2264 		break;
   2265 	case WM_T_PCH_SPT:
   2266 	case WM_T_PCH_CNP:
   2267 		sc->nvm.read = wm_nvm_read_spt;
   2268 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2269 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2270 		sc->sc_flasht = sc->sc_st;
   2271 		sc->sc_flashh = sc->sc_sh;
   2272 		sc->sc_ich8_flash_base = 0;
   2273 		sc->sc_nvm_wordsize =
   2274 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2275 		    * NVM_SIZE_MULTIPLIER;
    2276 		/* That's the size in bytes; we want words */
   2277 		sc->sc_nvm_wordsize /= 2;
   2278 		/* assume 2 banks */
   2279 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2280 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2281 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2282 		sc->phy.release = wm_put_swflag_ich8lan;
   2283 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2284 		sc->nvm.release = wm_put_nvm_ich8lan;
   2285 		break;
   2286 	case WM_T_I210:
   2287 	case WM_T_I211:
    2288 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2289 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2290 		if (wm_nvm_flash_presence_i210(sc)) {
   2291 			sc->nvm.read = wm_nvm_read_eerd;
   2292 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2293 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2294 			wm_nvm_set_addrbits_size_eecd(sc);
   2295 		} else {
   2296 			sc->nvm.read = wm_nvm_read_invm;
   2297 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2298 			sc->sc_nvm_wordsize = INVM_SIZE;
   2299 		}
   2300 		sc->phy.acquire = wm_get_phy_82575;
   2301 		sc->phy.release = wm_put_phy_82575;
   2302 		sc->nvm.acquire = wm_get_nvm_80003;
   2303 		sc->nvm.release = wm_put_nvm_80003;
   2304 		break;
   2305 	default:
   2306 		break;
   2307 	}
   2308 
   2309 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2310 	switch (sc->sc_type) {
   2311 	case WM_T_82571:
   2312 	case WM_T_82572:
   2313 		reg = CSR_READ(sc, WMREG_SWSM2);
   2314 		if ((reg & SWSM2_LOCK) == 0) {
   2315 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2316 			force_clear_smbi = true;
   2317 		} else
   2318 			force_clear_smbi = false;
   2319 		break;
   2320 	case WM_T_82573:
   2321 	case WM_T_82574:
   2322 	case WM_T_82583:
   2323 		force_clear_smbi = true;
   2324 		break;
   2325 	default:
   2326 		force_clear_smbi = false;
   2327 		break;
   2328 	}
   2329 	if (force_clear_smbi) {
   2330 		reg = CSR_READ(sc, WMREG_SWSM);
   2331 		if ((reg & SWSM_SMBI) != 0)
   2332 			aprint_error_dev(sc->sc_dev,
   2333 			    "Please update the Bootagent\n");
   2334 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2335 	}
   2336 
   2337 	/*
    2338 	 * Defer printing the EEPROM type until after verifying the checksum.
   2339 	 * This allows the EEPROM type to be printed correctly in the case
   2340 	 * that no EEPROM is attached.
   2341 	 */
   2342 	/*
   2343 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2344 	 * this for later, so we can fail future reads from the EEPROM.
   2345 	 */
   2346 	if (wm_nvm_validate_checksum(sc)) {
   2347 		/*
    2348 		 * Try again, because some PCI-e parts fail the first
    2349 		 * check due to the link being in a sleep state.
   2350 		 */
   2351 		if (wm_nvm_validate_checksum(sc))
   2352 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2353 	}
   2354 
   2355 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2356 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2357 	else {
   2358 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2359 		    sc->sc_nvm_wordsize);
   2360 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2361 			aprint_verbose("iNVM");
   2362 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2363 			aprint_verbose("FLASH(HW)");
   2364 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2365 			aprint_verbose("FLASH");
   2366 		else {
   2367 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2368 				eetype = "SPI";
   2369 			else
   2370 				eetype = "MicroWire";
   2371 			aprint_verbose("(%d address bits) %s EEPROM",
   2372 			    sc->sc_nvm_addrbits, eetype);
   2373 		}
   2374 	}
   2375 	wm_nvm_version(sc);
   2376 	aprint_verbose("\n");
   2377 
   2378 	/*
    2379 	 * XXX This is the first call of wm_gmii_setup_phytype. The
    2380 	 * result might be incorrect.
   2381 	 */
   2382 	wm_gmii_setup_phytype(sc, 0, 0);
   2383 
   2384 	/* Reset the chip to a known state. */
   2385 	wm_reset(sc);
   2386 
   2387 	/*
   2388 	 * Check for I21[01] PLL workaround.
   2389 	 *
   2390 	 * Three cases:
   2391 	 * a) Chip is I211.
   2392 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2393 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2394 	 */
   2395 	if (sc->sc_type == WM_T_I211)
   2396 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2397 	if (sc->sc_type == WM_T_I210) {
   2398 		if (!wm_nvm_flash_presence_i210(sc))
   2399 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2400 		else if ((sc->sc_nvm_ver_major < 3)
   2401 		    || ((sc->sc_nvm_ver_major == 3)
   2402 			&& (sc->sc_nvm_ver_minor < 25))) {
   2403 			aprint_verbose_dev(sc->sc_dev,
   2404 			    "ROM image version %d.%d is older than 3.25\n",
   2405 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2406 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2407 		}
   2408 	}
   2409 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2410 		wm_pll_workaround_i210(sc);
   2411 
   2412 	wm_get_wakeup(sc);
   2413 
   2414 	/* Non-AMT based hardware can now take control from firmware */
   2415 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2416 		wm_get_hw_control(sc);
   2417 
   2418 	/*
   2419 	 * Read the Ethernet address from the EEPROM, if not first found
   2420 	 * in device properties.
   2421 	 */
   2422 	ea = prop_dictionary_get(dict, "mac-address");
   2423 	if (ea != NULL) {
   2424 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2425 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2426 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2427 	} else {
   2428 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2429 			aprint_error_dev(sc->sc_dev,
   2430 			    "unable to read Ethernet address\n");
   2431 			goto out;
   2432 		}
   2433 	}
   2434 
   2435 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2436 	    ether_sprintf(enaddr));
   2437 
   2438 	/*
   2439 	 * Read the config info from the EEPROM, and set up various
   2440 	 * bits in the control registers based on their contents.
   2441 	 */
   2442 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2443 	if (pn != NULL) {
   2444 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2445 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2446 	} else {
   2447 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2448 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2449 			goto out;
   2450 		}
   2451 	}
   2452 
   2453 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2454 	if (pn != NULL) {
   2455 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2456 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2457 	} else {
   2458 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2459 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2460 			goto out;
   2461 		}
   2462 	}
   2463 
   2464 	/* check for WM_F_WOL */
   2465 	switch (sc->sc_type) {
   2466 	case WM_T_82542_2_0:
   2467 	case WM_T_82542_2_1:
   2468 	case WM_T_82543:
   2469 		/* dummy? */
   2470 		eeprom_data = 0;
   2471 		apme_mask = NVM_CFG3_APME;
   2472 		break;
   2473 	case WM_T_82544:
   2474 		apme_mask = NVM_CFG2_82544_APM_EN;
   2475 		eeprom_data = cfg2;
   2476 		break;
   2477 	case WM_T_82546:
   2478 	case WM_T_82546_3:
   2479 	case WM_T_82571:
   2480 	case WM_T_82572:
   2481 	case WM_T_82573:
   2482 	case WM_T_82574:
   2483 	case WM_T_82583:
   2484 	case WM_T_80003:
   2485 	default:
   2486 		apme_mask = NVM_CFG3_APME;
   2487 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2488 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2489 		break;
   2490 	case WM_T_82575:
   2491 	case WM_T_82576:
   2492 	case WM_T_82580:
   2493 	case WM_T_I350:
   2494 	case WM_T_I354: /* XXX ok? */
   2495 	case WM_T_ICH8:
   2496 	case WM_T_ICH9:
   2497 	case WM_T_ICH10:
   2498 	case WM_T_PCH:
   2499 	case WM_T_PCH2:
   2500 	case WM_T_PCH_LPT:
   2501 	case WM_T_PCH_SPT:
   2502 	case WM_T_PCH_CNP:
   2503 		/* XXX The funcid should be checked on some devices */
   2504 		apme_mask = WUC_APME;
   2505 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2506 		break;
   2507 	}
   2508 
    2509 	/* Check for the WM_F_WOL flag after setting up the EEPROM data above */
   2510 	if ((eeprom_data & apme_mask) != 0)
   2511 		sc->sc_flags |= WM_F_WOL;
   2512 
   2513 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2514 		/* Check NVM for autonegotiation */
   2515 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2516 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2517 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2518 		}
   2519 	}
   2520 
   2521 	/*
    2522 	 * XXX need special handling for some multiple-port cards
    2523 	 * to disable a particular port.
   2524 	 */
   2525 
   2526 	if (sc->sc_type >= WM_T_82544) {
   2527 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2528 		if (pn != NULL) {
   2529 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2530 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2531 		} else {
   2532 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2533 				aprint_error_dev(sc->sc_dev,
   2534 				    "unable to read SWDPIN\n");
   2535 				goto out;
   2536 			}
   2537 		}
   2538 	}
   2539 
   2540 	if (cfg1 & NVM_CFG1_ILOS)
   2541 		sc->sc_ctrl |= CTRL_ILOS;
   2542 
   2543 	/*
   2544 	 * XXX
    2545 	 * This code isn't correct because pins 2 and 3 are located
    2546 	 * in different positions on newer chips. Check all datasheets.
    2547 	 *
    2548 	 * Until this problem is resolved, only do this on chips up to the 82580.
   2549 	 */
   2550 	if (sc->sc_type <= WM_T_82580) {
   2551 		if (sc->sc_type >= WM_T_82544) {
   2552 			sc->sc_ctrl |=
   2553 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2554 			    CTRL_SWDPIO_SHIFT;
   2555 			sc->sc_ctrl |=
   2556 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2557 			    CTRL_SWDPINS_SHIFT;
   2558 		} else {
   2559 			sc->sc_ctrl |=
   2560 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2561 			    CTRL_SWDPIO_SHIFT;
   2562 		}
   2563 	}
   2564 
   2565 	/* XXX For other than 82580? */
   2566 	if (sc->sc_type == WM_T_82580) {
   2567 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2568 		if (nvmword & __BIT(13))
   2569 			sc->sc_ctrl |= CTRL_ILOS;
   2570 	}
   2571 
   2572 #if 0
   2573 	if (sc->sc_type >= WM_T_82544) {
   2574 		if (cfg1 & NVM_CFG1_IPS0)
   2575 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2576 		if (cfg1 & NVM_CFG1_IPS1)
   2577 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2578 		sc->sc_ctrl_ext |=
   2579 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2580 		    CTRL_EXT_SWDPIO_SHIFT;
   2581 		sc->sc_ctrl_ext |=
   2582 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2583 		    CTRL_EXT_SWDPINS_SHIFT;
   2584 	} else {
   2585 		sc->sc_ctrl_ext |=
   2586 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2587 		    CTRL_EXT_SWDPIO_SHIFT;
   2588 	}
   2589 #endif
   2590 
   2591 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2592 #if 0
   2593 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2594 #endif
   2595 
   2596 	if (sc->sc_type == WM_T_PCH) {
   2597 		uint16_t val;
   2598 
   2599 		/* Save the NVM K1 bit setting */
   2600 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2601 
   2602 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2603 			sc->sc_nvm_k1_enabled = 1;
   2604 		else
   2605 			sc->sc_nvm_k1_enabled = 0;
   2606 	}
   2607 
    2608 	/* Determine if we're in GMII, TBI, SERDES or SGMII mode */
   2609 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2610 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2611 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2612 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2613 	    || sc->sc_type == WM_T_82573
   2614 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2615 		/* Copper only */
   2616 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2617 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2618 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2619 	    || (sc->sc_type == WM_T_I211)) {
   2620 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2621 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2622 		switch (link_mode) {
   2623 		case CTRL_EXT_LINK_MODE_1000KX:
   2624 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2625 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2626 			break;
   2627 		case CTRL_EXT_LINK_MODE_SGMII:
   2628 			if (wm_sgmii_uses_mdio(sc)) {
   2629 				aprint_verbose_dev(sc->sc_dev,
   2630 				    "SGMII(MDIO)\n");
   2631 				sc->sc_flags |= WM_F_SGMII;
   2632 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2633 				break;
   2634 			}
   2635 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2636 			/*FALLTHROUGH*/
   2637 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2638 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2639 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2640 				if (link_mode
   2641 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2642 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2643 					sc->sc_flags |= WM_F_SGMII;
   2644 				} else {
   2645 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2646 					aprint_verbose_dev(sc->sc_dev,
   2647 					    "SERDES\n");
   2648 				}
   2649 				break;
   2650 			}
   2651 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2652 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2653 
   2654 			/* Change current link mode setting */
   2655 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2656 			switch (sc->sc_mediatype) {
   2657 			case WM_MEDIATYPE_COPPER:
   2658 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2659 				break;
   2660 			case WM_MEDIATYPE_SERDES:
   2661 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2662 				break;
   2663 			default:
   2664 				break;
   2665 			}
   2666 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2667 			break;
   2668 		case CTRL_EXT_LINK_MODE_GMII:
   2669 		default:
   2670 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2671 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2672 			break;
   2673 		}
   2674 
    2676 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2677 			reg |= CTRL_EXT_I2C_ENA;
    2678 		else
    2679 			reg &= ~CTRL_EXT_I2C_ENA;
   2680 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2681 	} else if (sc->sc_type < WM_T_82543 ||
   2682 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2683 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2684 			aprint_error_dev(sc->sc_dev,
   2685 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2686 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2687 		}
   2688 	} else {
   2689 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2690 			aprint_error_dev(sc->sc_dev,
   2691 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2692 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2693 		}
   2694 	}
   2695 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2696 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2697 
   2698 	/* Set device properties (macflags) */
   2699 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2700 
   2701 	/* Initialize the media structures accordingly. */
   2702 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2703 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2704 	else
   2705 		wm_tbi_mediainit(sc); /* All others */
   2706 
   2707 	ifp = &sc->sc_ethercom.ec_if;
   2708 	xname = device_xname(sc->sc_dev);
   2709 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2710 	ifp->if_softc = sc;
   2711 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2712 #ifdef WM_MPSAFE
   2713 	ifp->if_extflags = IFEF_MPSAFE;
   2714 #endif
   2715 	ifp->if_ioctl = wm_ioctl;
   2716 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2717 		ifp->if_start = wm_nq_start;
   2718 		/*
    2719 		 * When the number of CPUs is one and the controller can use
    2720 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2721 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2722 		 * other for link status changes.
    2723 		 * In this situation, wm_nq_transmit() is disadvantageous
    2724 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2725 		 */
   2726 		if (wm_is_using_multiqueue(sc))
   2727 			ifp->if_transmit = wm_nq_transmit;
   2728 	} else {
   2729 		ifp->if_start = wm_start;
   2730 		/*
    2731 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2732 		 */
   2733 		if (wm_is_using_multiqueue(sc))
   2734 			ifp->if_transmit = wm_transmit;
   2735 	}
    2736 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as the watchdog. */
   2737 	ifp->if_init = wm_init;
   2738 	ifp->if_stop = wm_stop;
   2739 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2740 	IFQ_SET_READY(&ifp->if_snd);
   2741 
   2742 	/* Check for jumbo frame */
   2743 	switch (sc->sc_type) {
   2744 	case WM_T_82573:
   2745 		/* XXX limited to 9234 if ASPM is disabled */
   2746 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2747 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2748 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2749 		break;
   2750 	case WM_T_82571:
   2751 	case WM_T_82572:
   2752 	case WM_T_82574:
   2753 	case WM_T_82583:
   2754 	case WM_T_82575:
   2755 	case WM_T_82576:
   2756 	case WM_T_82580:
   2757 	case WM_T_I350:
   2758 	case WM_T_I354:
   2759 	case WM_T_I210:
   2760 	case WM_T_I211:
   2761 	case WM_T_80003:
   2762 	case WM_T_ICH9:
   2763 	case WM_T_ICH10:
   2764 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2765 	case WM_T_PCH_LPT:
   2766 	case WM_T_PCH_SPT:
   2767 	case WM_T_PCH_CNP:
   2768 		/* XXX limited to 9234 */
   2769 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2770 		break;
   2771 	case WM_T_PCH:
   2772 		/* XXX limited to 4096 */
   2773 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2774 		break;
   2775 	case WM_T_82542_2_0:
   2776 	case WM_T_82542_2_1:
   2777 	case WM_T_ICH8:
   2778 		/* No support for jumbo frame */
   2779 		break;
   2780 	default:
   2781 		/* ETHER_MAX_LEN_JUMBO */
   2782 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2783 		break;
   2784 	}
   2785 
    2786 	/* If we're an i82543 or greater, we can support VLANs. */
   2787 	if (sc->sc_type >= WM_T_82543)
   2788 		sc->sc_ethercom.ec_capabilities |=
   2789 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2790 
   2791 	/*
    2792 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2793 	 * on i82543 and later.
   2794 	 */
   2795 	if (sc->sc_type >= WM_T_82543) {
   2796 		ifp->if_capabilities |=
   2797 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2798 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2799 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2800 		    IFCAP_CSUM_TCPv6_Tx |
   2801 		    IFCAP_CSUM_UDPv6_Tx;
   2802 	}
   2803 
   2804 	/*
    2805 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2806 	 *
   2807 	 *	82541GI (8086:1076) ... no
   2808 	 *	82572EI (8086:10b9) ... yes
   2809 	 */
   2810 	if (sc->sc_type >= WM_T_82571) {
   2811 		ifp->if_capabilities |=
   2812 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2813 	}
   2814 
   2815 	/*
    2816 	 * If we're an i82544 or greater (except i82547), we can do
   2817 	 * TCP segmentation offload.
   2818 	 */
   2819 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2820 		ifp->if_capabilities |= IFCAP_TSOv4;
   2821 	}
   2822 
   2823 	if (sc->sc_type >= WM_T_82571) {
   2824 		ifp->if_capabilities |= IFCAP_TSOv6;
   2825 	}
   2826 
   2827 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2828 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2829 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2830 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2831 
   2832 #ifdef WM_MPSAFE
   2833 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2834 #else
   2835 	sc->sc_core_lock = NULL;
   2836 #endif
   2837 
   2838 	/* Attach the interface. */
   2839 	error = if_initialize(ifp);
   2840 	if (error != 0) {
   2841 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2842 		    error);
   2843 		return; /* Error */
   2844 	}
   2845 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2846 	ether_ifattach(ifp, enaddr);
   2847 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2848 	if_register(ifp);
   2849 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2850 	    RND_FLAG_DEFAULT);
   2851 
   2852 #ifdef WM_EVENT_COUNTERS
   2853 	/* Attach event counters. */
   2854 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2855 	    NULL, xname, "linkintr");
   2856 
   2857 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "tx_xoff");
   2859 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2860 	    NULL, xname, "tx_xon");
   2861 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2862 	    NULL, xname, "rx_xoff");
   2863 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2864 	    NULL, xname, "rx_xon");
   2865 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2866 	    NULL, xname, "rx_macctl");
   2867 #endif /* WM_EVENT_COUNTERS */
   2868 
   2869 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2870 		pmf_class_network_register(self, ifp);
   2871 	else
   2872 		aprint_error_dev(self, "couldn't establish power handler\n");
   2873 
   2874 	sc->sc_flags |= WM_F_ATTACHED;
   2875  out:
   2876 	return;
   2877 }
   2878 
   2879 /* The detach function (ca_detach) */
   2880 static int
   2881 wm_detach(device_t self, int flags __unused)
   2882 {
   2883 	struct wm_softc *sc = device_private(self);
   2884 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2885 	int i;
   2886 
   2887 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2888 		return 0;
   2889 
   2890 	/* Stop the interface. Callouts are stopped in it. */
   2891 	wm_stop(ifp, 1);
   2892 
   2893 	pmf_device_deregister(self);
   2894 
   2895 #ifdef WM_EVENT_COUNTERS
   2896 	evcnt_detach(&sc->sc_ev_linkintr);
   2897 
   2898 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2899 	evcnt_detach(&sc->sc_ev_tx_xon);
   2900 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2901 	evcnt_detach(&sc->sc_ev_rx_xon);
   2902 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2903 #endif /* WM_EVENT_COUNTERS */
   2904 
   2905 	/* Tell the firmware about the release */
   2906 	WM_CORE_LOCK(sc);
   2907 	wm_release_manageability(sc);
   2908 	wm_release_hw_control(sc);
   2909 	wm_enable_wakeup(sc);
   2910 	WM_CORE_UNLOCK(sc);
   2911 
   2912 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2913 
   2914 	/* Delete all remaining media. */
   2915 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2916 
   2917 	ether_ifdetach(ifp);
   2918 	if_detach(ifp);
   2919 	if_percpuq_destroy(sc->sc_ipq);
   2920 
   2921 	/* Unload RX dmamaps and free mbufs */
   2922 	for (i = 0; i < sc->sc_nqueues; i++) {
   2923 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2924 		mutex_enter(rxq->rxq_lock);
   2925 		wm_rxdrain(rxq);
   2926 		mutex_exit(rxq->rxq_lock);
   2927 	}
   2928 	/* Must unlock here */
   2929 
   2930 	/* Disestablish the interrupt handler */
   2931 	for (i = 0; i < sc->sc_nintrs; i++) {
   2932 		if (sc->sc_ihs[i] != NULL) {
   2933 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2934 			sc->sc_ihs[i] = NULL;
   2935 		}
   2936 	}
   2937 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2938 
   2939 	wm_free_txrx_queues(sc);
   2940 
   2941 	/* Unmap the registers */
   2942 	if (sc->sc_ss) {
   2943 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2944 		sc->sc_ss = 0;
   2945 	}
   2946 	if (sc->sc_ios) {
   2947 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2948 		sc->sc_ios = 0;
   2949 	}
   2950 	if (sc->sc_flashs) {
   2951 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2952 		sc->sc_flashs = 0;
   2953 	}
   2954 
   2955 	if (sc->sc_core_lock)
   2956 		mutex_obj_free(sc->sc_core_lock);
   2957 	if (sc->sc_ich_phymtx)
   2958 		mutex_obj_free(sc->sc_ich_phymtx);
   2959 	if (sc->sc_ich_nvmmtx)
   2960 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2961 
   2962 	return 0;
   2963 }
   2964 
   2965 static bool
   2966 wm_suspend(device_t self, const pmf_qual_t *qual)
   2967 {
   2968 	struct wm_softc *sc = device_private(self);
   2969 
   2970 	wm_release_manageability(sc);
   2971 	wm_release_hw_control(sc);
   2972 	wm_enable_wakeup(sc);
   2973 
   2974 	return true;
   2975 }
   2976 
   2977 static bool
   2978 wm_resume(device_t self, const pmf_qual_t *qual)
   2979 {
   2980 	struct wm_softc *sc = device_private(self);
   2981 
   2982 	/* Disable ASPM L0s and/or L1 for workaround */
   2983 	wm_disable_aspm(sc);
   2984 	wm_init_manageability(sc);
   2985 
   2986 	return true;
   2987 }
   2988 
   2989 /*
   2990  * wm_watchdog:		[ifnet interface function]
   2991  *
   2992  *	Watchdog timer handler.
   2993  */
   2994 static void
   2995 wm_watchdog(struct ifnet *ifp)
   2996 {
   2997 	int qid;
   2998 	struct wm_softc *sc = ifp->if_softc;
    2999 	uint16_t hang_queue = 0; /* The max queue count in wm(4) is 16 (82576). */
   3000 
   3001 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3002 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3003 
   3004 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3005 	}
   3006 
   3007 	/*
    3008 	 * If any queue has hung, reset the interface.
   3009 	 */
   3010 	if (hang_queue != 0) {
   3011 		(void) wm_init(ifp);
   3012 
   3013 		/*
    3014 		 * Some upper-layer processing, e.g. ALTQ or one-CPU systems,
    3015 		 * still calls ifp->if_start() directly.
   3016 		 */
   3017 		/* Try to get more packets going. */
   3018 		ifp->if_start(ifp);
   3019 	}
   3020 }
   3021 
   3022 
   3023 static void
   3024 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3025 {
   3026 
   3027 	mutex_enter(txq->txq_lock);
   3028 	if (txq->txq_sending &&
   3029 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3030 		wm_watchdog_txq_locked(ifp, txq, hang);
   3031 	}
   3032 	mutex_exit(txq->txq_lock);
   3033 }
   3034 
   3035 static void
   3036 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3037     uint16_t *hang)
   3038 {
   3039 	struct wm_softc *sc = ifp->if_softc;
   3040 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3041 
   3042 	KASSERT(mutex_owned(txq->txq_lock));
   3043 
   3044 	/*
   3045 	 * Since we're using delayed interrupts, sweep up
   3046 	 * before we report an error.
   3047 	 */
   3048 	wm_txeof(txq, UINT_MAX);
   3049 
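         	/* Record a hung queue in the bitmask, e.g. bit 2 for queue 2. */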
   3050 	if (txq->txq_sending)
   3051 		*hang |= __BIT(wmq->wmq_id);
   3052 
   3053 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3054 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3055 		    device_xname(sc->sc_dev));
   3056 	} else {
   3057 #ifdef WM_DEBUG
   3058 		int i, j;
   3059 		struct wm_txsoft *txs;
   3060 #endif
   3061 		log(LOG_ERR,
   3062 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3063 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3064 		    txq->txq_next);
   3065 		ifp->if_oerrors++;
   3066 #ifdef WM_DEBUG
   3067 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3068 		    i = WM_NEXTTXS(txq, i)) {
   3069 		    txs = &txq->txq_soft[i];
   3070 		    printf("txs %d tx %d -> %d\n",
   3071 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3072 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3073 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3074 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3075 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3076 				    printf("\t %#08x%08x\n",
   3077 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3078 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3079 			    } else {
   3080 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3081 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3082 					txq->txq_descs[j].wtx_addr.wa_low);
   3083 				    printf("\t %#04x%02x%02x%08x\n",
   3084 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3085 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3086 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3087 					txq->txq_descs[j].wtx_cmdlen);
   3088 			    }
   3089 			if (j == txs->txs_lastdesc)
   3090 				break;
   3091 			}
   3092 		}
   3093 #endif
   3094 	}
   3095 }
   3096 
   3097 /*
   3098  * wm_tick:
   3099  *
   3100  *	One second timer, used to check link status, sweep up
   3101  *	completed transmit jobs, etc.
   3102  */
   3103 static void
   3104 wm_tick(void *arg)
   3105 {
   3106 	struct wm_softc *sc = arg;
   3107 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3108 #ifndef WM_MPSAFE
   3109 	int s = splnet();
   3110 #endif
   3111 
   3112 	WM_CORE_LOCK(sc);
   3113 
   3114 	if (sc->sc_core_stopping) {
   3115 		WM_CORE_UNLOCK(sc);
   3116 #ifndef WM_MPSAFE
   3117 		splx(s);
   3118 #endif
   3119 		return;
   3120 	}
   3121 
   3122 	if (sc->sc_type >= WM_T_82542_2_1) {
   3123 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3124 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3125 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3126 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3127 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3128 	}
   3129 
   3130 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3131 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3132 	    + CSR_READ(sc, WMREG_CRCERRS)
   3133 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3134 	    + CSR_READ(sc, WMREG_SYMERRC)
   3135 	    + CSR_READ(sc, WMREG_RXERRC)
   3136 	    + CSR_READ(sc, WMREG_SEC)
   3137 	    + CSR_READ(sc, WMREG_CEXTERR)
   3138 	    + CSR_READ(sc, WMREG_RLEC);
   3139 	/*
    3140 	 * WMREG_RNBC is incremented when there are no available buffers
    3141 	 * in host memory. It does not count dropped packets, because the
    3142 	 * ethernet controller can still receive packets in that case if
    3143 	 * there is space in the PHY's FIFO.
    3144 	 *
    3145 	 * If you want to track WMREG_RNBC, use your own EVCNT instead of
    3146 	 * if_iqdrops.
   3147 	 */
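         	/*
         	 * A minimal sketch (hypothetical; sc_ev_rnbc is not a field
         	 * of this driver) of counting RNBC with a private EVCNT:
         	 *
         	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc,
         	 *	    CSR_READ(sc, WMREG_RNBC));
         	 */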
   3148 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3149 
   3150 	if (sc->sc_flags & WM_F_HAS_MII)
   3151 		mii_tick(&sc->sc_mii);
   3152 	else if ((sc->sc_type >= WM_T_82575)
   3153 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3154 		wm_serdes_tick(sc);
   3155 	else
   3156 		wm_tbi_tick(sc);
   3157 
   3158 	WM_CORE_UNLOCK(sc);
   3159 
   3160 	wm_watchdog(ifp);
   3161 
   3162 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3163 }
   3164 
   3165 static int
   3166 wm_ifflags_cb(struct ethercom *ec)
   3167 {
   3168 	struct ifnet *ifp = &ec->ec_if;
   3169 	struct wm_softc *sc = ifp->if_softc;
   3170 	int rc = 0;
   3171 
   3172 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3173 		device_xname(sc->sc_dev), __func__));
   3174 
   3175 	WM_CORE_LOCK(sc);
   3176 
   3177 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3178 	sc->sc_if_flags = ifp->if_flags;
   3179 
   3180 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3181 		rc = ENETRESET;
   3182 		goto out;
   3183 	}
   3184 
   3185 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3186 		wm_set_filter(sc);
   3187 
   3188 	wm_set_vlan(sc);
   3189 
   3190 out:
   3191 	WM_CORE_UNLOCK(sc);
   3192 
   3193 	return rc;
   3194 }
   3195 
   3196 /*
   3197  * wm_ioctl:		[ifnet interface function]
   3198  *
   3199  *	Handle control requests from the operator.
   3200  */
   3201 static int
   3202 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3203 {
   3204 	struct wm_softc *sc = ifp->if_softc;
   3205 	struct ifreq *ifr = (struct ifreq *) data;
   3206 	struct ifaddr *ifa = (struct ifaddr *)data;
   3207 	struct sockaddr_dl *sdl;
   3208 	int s, error;
   3209 
   3210 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3211 		device_xname(sc->sc_dev), __func__));
   3212 
   3213 #ifndef WM_MPSAFE
   3214 	s = splnet();
   3215 #endif
   3216 	switch (cmd) {
   3217 	case SIOCSIFMEDIA:
   3218 	case SIOCGIFMEDIA:
   3219 		WM_CORE_LOCK(sc);
   3220 		/* Flow control requires full-duplex mode. */
   3221 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3222 		    (ifr->ifr_media & IFM_FDX) == 0)
   3223 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3224 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3225 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3226 				/* We can do both TXPAUSE and RXPAUSE. */
   3227 				ifr->ifr_media |=
   3228 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3229 			}
   3230 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3231 		}
   3232 		WM_CORE_UNLOCK(sc);
   3233 #ifdef WM_MPSAFE
   3234 		s = splnet();
   3235 #endif
   3236 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3237 #ifdef WM_MPSAFE
   3238 		splx(s);
   3239 #endif
   3240 		break;
   3241 	case SIOCINITIFADDR:
   3242 		WM_CORE_LOCK(sc);
   3243 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3244 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3245 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3246 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3247 			/* unicast address is first multicast entry */
   3248 			wm_set_filter(sc);
   3249 			error = 0;
   3250 			WM_CORE_UNLOCK(sc);
   3251 			break;
   3252 		}
   3253 		WM_CORE_UNLOCK(sc);
   3254 		/*FALLTHROUGH*/
   3255 	default:
   3256 #ifdef WM_MPSAFE
   3257 		s = splnet();
   3258 #endif
   3259 		/* It may call wm_start, so unlock here */
   3260 		error = ether_ioctl(ifp, cmd, data);
   3261 #ifdef WM_MPSAFE
   3262 		splx(s);
   3263 #endif
   3264 		if (error != ENETRESET)
   3265 			break;
   3266 
   3267 		error = 0;
   3268 
   3269 		if (cmd == SIOCSIFCAP)
   3270 			error = (*ifp->if_init)(ifp);
   3271 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3272 			;
   3273 		else if (ifp->if_flags & IFF_RUNNING) {
   3274 			/*
   3275 			 * Multicast list has changed; set the hardware filter
   3276 			 * accordingly.
   3277 			 */
   3278 			WM_CORE_LOCK(sc);
   3279 			wm_set_filter(sc);
   3280 			WM_CORE_UNLOCK(sc);
   3281 		}
   3282 		break;
   3283 	}
   3284 
   3285 #ifndef WM_MPSAFE
   3286 	splx(s);
   3287 #endif
   3288 	return error;
   3289 }
   3290 
   3291 /* MAC address related */
   3292 
   3293 /*
    3294  * Get the offset of the MAC address and return it.
    3295  * If an error occurs, use offset 0.
   3296  */
   3297 static uint16_t
   3298 wm_check_alt_mac_addr(struct wm_softc *sc)
   3299 {
   3300 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3301 	uint16_t offset = NVM_OFF_MACADDR;
   3302 
   3303 	/* Try to read alternative MAC address pointer */
   3304 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3305 		return 0;
   3306 
    3307 	/* Check whether the pointer is valid. */
   3308 	if ((offset == 0x0000) || (offset == 0xffff))
   3309 		return 0;
   3310 
   3311 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3312 	/*
    3313 	 * Check whether the alternative MAC address is valid. Some
    3314 	 * cards have a non-0xffff pointer but don't actually use an
    3315 	 * alternative MAC address.
    3316 	 *
    3317 	 * A valid address must have the multicast/broadcast bit clear.
   3318 	 */
   3319 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3320 		if (((myea[0] & 0xff) & 0x01) == 0)
   3321 			return offset; /* Found */
   3322 
   3323 	/* Not found */
   3324 	return 0;
   3325 }
   3326 
   3327 static int
   3328 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3329 {
   3330 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3331 	uint16_t offset = NVM_OFF_MACADDR;
   3332 	int do_invert = 0;
   3333 
   3334 	switch (sc->sc_type) {
   3335 	case WM_T_82580:
   3336 	case WM_T_I350:
   3337 	case WM_T_I354:
   3338 		/* EEPROM Top Level Partitioning */
   3339 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3340 		break;
   3341 	case WM_T_82571:
   3342 	case WM_T_82575:
   3343 	case WM_T_82576:
   3344 	case WM_T_80003:
   3345 	case WM_T_I210:
   3346 	case WM_T_I211:
   3347 		offset = wm_check_alt_mac_addr(sc);
   3348 		if (offset == 0)
   3349 			if ((sc->sc_funcid & 0x01) == 1)
   3350 				do_invert = 1;
   3351 		break;
   3352 	default:
   3353 		if ((sc->sc_funcid & 0x01) == 1)
   3354 			do_invert = 1;
   3355 		break;
   3356 	}
   3357 
   3358 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3359 		goto bad;
   3360 
   3361 	enaddr[0] = myea[0] & 0xff;
   3362 	enaddr[1] = myea[0] >> 8;
   3363 	enaddr[2] = myea[1] & 0xff;
   3364 	enaddr[3] = myea[1] >> 8;
   3365 	enaddr[4] = myea[2] & 0xff;
   3366 	enaddr[5] = myea[2] >> 8;
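         	/*
         	 * Illustrative example: NVM words { 0x1100, 0x3322, 0x5544 }
         	 * unpack to 00:11:22:33:44:55 (the low byte of each word is
         	 * the earlier address octet).
         	 */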
   3367 
   3368 	/*
   3369 	 * Toggle the LSB of the MAC address on the second port
   3370 	 * of some dual port cards.
   3371 	 */
   3372 	if (do_invert != 0)
   3373 		enaddr[5] ^= 1;
   3374 
   3375 	return 0;
   3376 
   3377  bad:
   3378 	return -1;
   3379 }
   3380 
   3381 /*
   3382  * wm_set_ral:
   3383  *
    3384  *	Set an entry in the receive address list.
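          *
          *	For example (illustrative): enaddr 00:11:22:33:44:55 packs
          *	to ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.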
   3385  */
   3386 static void
   3387 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3388 {
   3389 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3390 	uint32_t wlock_mac;
   3391 	int rv;
   3392 
   3393 	if (enaddr != NULL) {
   3394 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3395 		    (enaddr[3] << 24);
   3396 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3397 		ral_hi |= RAL_AV;
   3398 	} else {
   3399 		ral_lo = 0;
   3400 		ral_hi = 0;
   3401 	}
   3402 
   3403 	switch (sc->sc_type) {
   3404 	case WM_T_82542_2_0:
   3405 	case WM_T_82542_2_1:
   3406 	case WM_T_82543:
   3407 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3408 		CSR_WRITE_FLUSH(sc);
   3409 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3410 		CSR_WRITE_FLUSH(sc);
   3411 		break;
   3412 	case WM_T_PCH2:
   3413 	case WM_T_PCH_LPT:
   3414 	case WM_T_PCH_SPT:
   3415 	case WM_T_PCH_CNP:
   3416 		if (idx == 0) {
   3417 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3418 			CSR_WRITE_FLUSH(sc);
   3419 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3420 			CSR_WRITE_FLUSH(sc);
   3421 			return;
   3422 		}
   3423 		if (sc->sc_type != WM_T_PCH2) {
   3424 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3425 			    FWSM_WLOCK_MAC);
   3426 			addrl = WMREG_SHRAL(idx - 1);
   3427 			addrh = WMREG_SHRAH(idx - 1);
   3428 		} else {
   3429 			wlock_mac = 0;
   3430 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3431 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3432 		}
   3433 
   3434 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3435 			rv = wm_get_swflag_ich8lan(sc);
   3436 			if (rv != 0)
   3437 				return;
   3438 			CSR_WRITE(sc, addrl, ral_lo);
   3439 			CSR_WRITE_FLUSH(sc);
   3440 			CSR_WRITE(sc, addrh, ral_hi);
   3441 			CSR_WRITE_FLUSH(sc);
   3442 			wm_put_swflag_ich8lan(sc);
   3443 		}
   3444 
   3445 		break;
   3446 	default:
   3447 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3448 		CSR_WRITE_FLUSH(sc);
   3449 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3450 		CSR_WRITE_FLUSH(sc);
   3451 		break;
   3452 	}
   3453 }
   3454 
   3455 /*
   3456  * wm_mchash:
   3457  *
   3458  *	Compute the hash of the multicast address for the 4096-bit
    3459  *	multicast filter (1024-bit on the ICH/PCH variants).
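          *
          *	Worked example (illustrative, assuming sc_mchash_type == 0
          *	on a non-ICH/PCH chip): for 01:00:5e:00:00:01, enaddr[4] is
          *	0x00 and enaddr[5] is 0x01, so hash = (0x00 >> 4) |
          *	(0x01 << 4) = 16; wm_set_filter() then sets bit 16 of
          *	multicast table word 0.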
   3460  */
   3461 static uint32_t
   3462 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3463 {
   3464 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3465 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3466 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3467 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3468 	uint32_t hash;
   3469 
   3470 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3471 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3472 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3473 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3474 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3475 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3476 		return (hash & 0x3ff);
   3477 	}
   3478 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3479 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3480 
   3481 	return (hash & 0xfff);
   3482 }
   3483 
   3484 /*
   3485  * wm_set_filter:
   3486  *
   3487  *	Set up the receive filter.
   3488  */
   3489 static void
   3490 wm_set_filter(struct wm_softc *sc)
   3491 {
   3492 	struct ethercom *ec = &sc->sc_ethercom;
   3493 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3494 	struct ether_multi *enm;
   3495 	struct ether_multistep step;
   3496 	bus_addr_t mta_reg;
   3497 	uint32_t hash, reg, bit;
   3498 	int i, size, ralmax;
   3499 
   3500 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3501 		device_xname(sc->sc_dev), __func__));
   3502 
   3503 	if (sc->sc_type >= WM_T_82544)
   3504 		mta_reg = WMREG_CORDOVA_MTA;
   3505 	else
   3506 		mta_reg = WMREG_MTA;
   3507 
   3508 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3509 
   3510 	if (ifp->if_flags & IFF_BROADCAST)
   3511 		sc->sc_rctl |= RCTL_BAM;
   3512 	if (ifp->if_flags & IFF_PROMISC) {
   3513 		sc->sc_rctl |= RCTL_UPE;
   3514 		goto allmulti;
   3515 	}
   3516 
   3517 	/*
   3518 	 * Set the station address in the first RAL slot, and
   3519 	 * clear the remaining slots.
   3520 	 */
   3521 	if (sc->sc_type == WM_T_ICH8)
    3522 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3523 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3524 	    || (sc->sc_type == WM_T_PCH))
   3525 		size = WM_RAL_TABSIZE_ICH8;
   3526 	else if (sc->sc_type == WM_T_PCH2)
   3527 		size = WM_RAL_TABSIZE_PCH2;
   3528 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3529 	    || (sc->sc_type == WM_T_PCH_CNP))
   3530 		size = WM_RAL_TABSIZE_PCH_LPT;
   3531 	else if (sc->sc_type == WM_T_82575)
   3532 		size = WM_RAL_TABSIZE_82575;
   3533 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3534 		size = WM_RAL_TABSIZE_82576;
   3535 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3536 		size = WM_RAL_TABSIZE_I350;
   3537 	else
   3538 		size = WM_RAL_TABSIZE;
   3539 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3540 
   3541 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3542 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3543 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3544 		switch (i) {
   3545 		case 0:
   3546 			/* We can use all entries */
   3547 			ralmax = size;
   3548 			break;
   3549 		case 1:
   3550 			/* Only RAR[0] */
   3551 			ralmax = 1;
   3552 			break;
   3553 		default:
   3554 			/* available SHRA + RAR[0] */
   3555 			ralmax = i + 1;
   3556 		}
   3557 	} else
   3558 		ralmax = size;
   3559 	for (i = 1; i < size; i++) {
   3560 		if (i < ralmax)
   3561 			wm_set_ral(sc, NULL, i);
   3562 	}
   3563 
   3564 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3565 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3566 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3567 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3568 		size = WM_ICH8_MC_TABSIZE;
   3569 	else
   3570 		size = WM_MC_TABSIZE;
   3571 	/* Clear out the multicast table. */
   3572 	for (i = 0; i < size; i++) {
   3573 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3574 		CSR_WRITE_FLUSH(sc);
   3575 	}
   3576 
   3577 	ETHER_LOCK(ec);
   3578 	ETHER_FIRST_MULTI(step, ec, enm);
   3579 	while (enm != NULL) {
   3580 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3581 			ETHER_UNLOCK(ec);
   3582 			/*
   3583 			 * We must listen to a range of multicast addresses.
   3584 			 * For now, just accept all multicasts, rather than
   3585 			 * trying to set only those filter bits needed to match
   3586 			 * the range.  (At this time, the only use of address
   3587 			 * ranges is for IP multicast routing, for which the
   3588 			 * range is big enough to require all bits set.)
   3589 			 */
   3590 			goto allmulti;
   3591 		}
   3592 
   3593 		hash = wm_mchash(sc, enm->enm_addrlo);
   3594 
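         		/*
         		 * The upper bits of the hash select a 32-bit word of
         		 * the multicast table (reg) and the low 5 bits select
         		 * a bit within that word.
         		 */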
   3595 		reg = (hash >> 5);
   3596 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3597 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3598 		    || (sc->sc_type == WM_T_PCH2)
   3599 		    || (sc->sc_type == WM_T_PCH_LPT)
   3600 		    || (sc->sc_type == WM_T_PCH_SPT)
   3601 		    || (sc->sc_type == WM_T_PCH_CNP))
   3602 			reg &= 0x1f;
   3603 		else
   3604 			reg &= 0x7f;
   3605 		bit = hash & 0x1f;
   3606 
   3607 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3608 		hash |= 1U << bit;
   3609 
   3610 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3611 			/*
   3612 			 * 82544 Errata 9: Certain register cannot be written
   3613 			 * with particular alignments in PCI-X bus operation
   3614 			 * (FCAH, MTA and VFTA).
   3615 			 */
   3616 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3617 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3618 			CSR_WRITE_FLUSH(sc);
   3619 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3620 			CSR_WRITE_FLUSH(sc);
   3621 		} else {
   3622 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3623 			CSR_WRITE_FLUSH(sc);
   3624 		}
   3625 
   3626 		ETHER_NEXT_MULTI(step, enm);
   3627 	}
   3628 	ETHER_UNLOCK(ec);
   3629 
   3630 	ifp->if_flags &= ~IFF_ALLMULTI;
   3631 	goto setit;
   3632 
   3633  allmulti:
   3634 	ifp->if_flags |= IFF_ALLMULTI;
   3635 	sc->sc_rctl |= RCTL_MPE;
   3636 
   3637  setit:
   3638 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3639 }
   3640 
   3641 /* Reset and init related */
   3642 
   3643 static void
   3644 wm_set_vlan(struct wm_softc *sc)
   3645 {
   3646 
   3647 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3648 		device_xname(sc->sc_dev), __func__));
   3649 
   3650 	/* Deal with VLAN enables. */
   3651 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3652 		sc->sc_ctrl |= CTRL_VME;
   3653 	else
   3654 		sc->sc_ctrl &= ~CTRL_VME;
   3655 
   3656 	/* Write the control registers. */
   3657 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3658 }
   3659 
   3660 static void
   3661 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3662 {
   3663 	uint32_t gcr;
   3664 	pcireg_t ctrl2;
   3665 
   3666 	gcr = CSR_READ(sc, WMREG_GCR);
   3667 
   3668 	/* Only take action if timeout value is defaulted to 0 */
   3669 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3670 		goto out;
   3671 
   3672 	if ((gcr & GCR_CAP_VER2) == 0) {
   3673 		gcr |= GCR_CMPL_TMOUT_10MS;
   3674 		goto out;
   3675 	}
   3676 
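         	/* Capability version 2: set a 16ms completion timeout via DCSR2. */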
   3677 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3678 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3679 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3680 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3681 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3682 
   3683 out:
   3684 	/* Disable completion timeout resend */
   3685 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3686 
   3687 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3688 }
   3689 
   3690 void
   3691 wm_get_auto_rd_done(struct wm_softc *sc)
   3692 {
   3693 	int i;
   3694 
    3695 	/* Wait for eeprom to reload */
   3696 	switch (sc->sc_type) {
   3697 	case WM_T_82571:
   3698 	case WM_T_82572:
   3699 	case WM_T_82573:
   3700 	case WM_T_82574:
   3701 	case WM_T_82583:
   3702 	case WM_T_82575:
   3703 	case WM_T_82576:
   3704 	case WM_T_82580:
   3705 	case WM_T_I350:
   3706 	case WM_T_I354:
   3707 	case WM_T_I210:
   3708 	case WM_T_I211:
   3709 	case WM_T_80003:
   3710 	case WM_T_ICH8:
   3711 	case WM_T_ICH9:
   3712 		for (i = 0; i < 10; i++) {
   3713 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3714 				break;
   3715 			delay(1000);
   3716 		}
   3717 		if (i == 10) {
   3718 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3719 			    "complete\n", device_xname(sc->sc_dev));
   3720 		}
   3721 		break;
   3722 	default:
   3723 		break;
   3724 	}
   3725 }
   3726 
   3727 void
   3728 wm_lan_init_done(struct wm_softc *sc)
   3729 {
   3730 	uint32_t reg = 0;
   3731 	int i;
   3732 
   3733 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3734 		device_xname(sc->sc_dev), __func__));
   3735 
   3736 	/* Wait for eeprom to reload */
   3737 	switch (sc->sc_type) {
   3738 	case WM_T_ICH10:
   3739 	case WM_T_PCH:
   3740 	case WM_T_PCH2:
   3741 	case WM_T_PCH_LPT:
   3742 	case WM_T_PCH_SPT:
   3743 	case WM_T_PCH_CNP:
   3744 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3745 			reg = CSR_READ(sc, WMREG_STATUS);
   3746 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3747 				break;
   3748 			delay(100);
   3749 		}
   3750 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3751 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3752 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3753 		}
   3754 		break;
   3755 	default:
   3756 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3757 		    __func__);
   3758 		break;
   3759 	}
   3760 
   3761 	reg &= ~STATUS_LAN_INIT_DONE;
   3762 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3763 }
   3764 
   3765 void
   3766 wm_get_cfg_done(struct wm_softc *sc)
   3767 {
   3768 	int mask;
   3769 	uint32_t reg;
   3770 	int i;
   3771 
   3772 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3773 		device_xname(sc->sc_dev), __func__));
   3774 
   3775 	/* Wait for eeprom to reload */
   3776 	switch (sc->sc_type) {
   3777 	case WM_T_82542_2_0:
   3778 	case WM_T_82542_2_1:
   3779 		/* null */
   3780 		break;
   3781 	case WM_T_82543:
   3782 	case WM_T_82544:
   3783 	case WM_T_82540:
   3784 	case WM_T_82545:
   3785 	case WM_T_82545_3:
   3786 	case WM_T_82546:
   3787 	case WM_T_82546_3:
   3788 	case WM_T_82541:
   3789 	case WM_T_82541_2:
   3790 	case WM_T_82547:
   3791 	case WM_T_82547_2:
   3792 	case WM_T_82573:
   3793 	case WM_T_82574:
   3794 	case WM_T_82583:
   3795 		/* generic */
   3796 		delay(10*1000);
   3797 		break;
   3798 	case WM_T_80003:
   3799 	case WM_T_82571:
   3800 	case WM_T_82572:
   3801 	case WM_T_82575:
   3802 	case WM_T_82576:
   3803 	case WM_T_82580:
   3804 	case WM_T_I350:
   3805 	case WM_T_I354:
   3806 	case WM_T_I210:
   3807 	case WM_T_I211:
   3808 		if (sc->sc_type == WM_T_82571) {
   3809 			/* Only 82571 shares port 0 */
   3810 			mask = EEMNGCTL_CFGDONE_0;
   3811 		} else
   3812 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3813 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3814 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3815 				break;
   3816 			delay(1000);
   3817 		}
   3818 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3819 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3820 				device_xname(sc->sc_dev), __func__));
   3821 		}
   3822 		break;
   3823 	case WM_T_ICH8:
   3824 	case WM_T_ICH9:
   3825 	case WM_T_ICH10:
   3826 	case WM_T_PCH:
   3827 	case WM_T_PCH2:
   3828 	case WM_T_PCH_LPT:
   3829 	case WM_T_PCH_SPT:
   3830 	case WM_T_PCH_CNP:
   3831 		delay(10*1000);
   3832 		if (sc->sc_type >= WM_T_ICH10)
   3833 			wm_lan_init_done(sc);
   3834 		else
   3835 			wm_get_auto_rd_done(sc);
   3836 
   3837 		/* Clear PHY Reset Asserted bit */
   3838 		reg = CSR_READ(sc, WMREG_STATUS);
   3839 		if ((reg & STATUS_PHYRA) != 0)
   3840 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3841 		break;
   3842 	default:
   3843 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3844 		    __func__);
   3845 		break;
   3846 	}
   3847 }
   3848 
   3849 void
   3850 wm_phy_post_reset(struct wm_softc *sc)
   3851 {
   3852 	uint32_t reg;
   3853 
   3854 	/* This function is only for ICH8 and newer. */
   3855 	if (sc->sc_type < WM_T_ICH8)
   3856 		return;
   3857 
   3858 	if (wm_phy_resetisblocked(sc)) {
   3859 		/* XXX */
   3860 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3861 		return;
   3862 	}
   3863 
   3864 	/* Allow time for h/w to get to quiescent state after reset */
   3865 	delay(10*1000);
   3866 
   3867 	/* Perform any necessary post-reset workarounds */
   3868 	if (sc->sc_type == WM_T_PCH)
   3869 		wm_hv_phy_workaround_ich8lan(sc);
   3870 	else if (sc->sc_type == WM_T_PCH2)
   3871 		wm_lv_phy_workaround_ich8lan(sc);
   3872 
   3873 	/* Clear the host wakeup bit after lcd reset */
   3874 	if (sc->sc_type >= WM_T_PCH) {
   3875 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3876 		    BM_PORT_GEN_CFG);
   3877 		reg &= ~BM_WUC_HOST_WU_BIT;
   3878 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3879 		    BM_PORT_GEN_CFG, reg);
   3880 	}
   3881 
   3882 	/* Configure the LCD with the extended configuration region in NVM */
   3883 	wm_init_lcd_from_nvm(sc);
   3884 
   3885 	/* Configure the LCD with the OEM bits in NVM */
   3886 	wm_oem_bits_config_ich8lan(sc, true);
   3887 
   3888 	if (sc->sc_type == WM_T_PCH2) {
   3889 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3890 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3891 			delay(10 * 1000);
   3892 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3893 		}
   3894 		/* XXX Set EEE LPI Update Timer to 200usec */
   3895 	}
   3896 }
   3897 
   3898 /* Only for PCH and newer */
   3899 static int
   3900 wm_write_smbus_addr(struct wm_softc *sc)
   3901 {
   3902 	uint32_t strap, freq;
   3903 	uint16_t phy_data;
   3904 	int rv;
   3905 
   3906 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3907 		device_xname(sc->sc_dev), __func__));
   3908 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   3909 
   3910 	strap = CSR_READ(sc, WMREG_STRAP);
   3911 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3912 
   3913 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   3914 	if (rv != 0)
   3915 		return -1;
   3916 
   3917 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3918 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3919 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3920 
   3921 	if (sc->sc_phytype == WMPHY_I217) {
   3922 		/* Restore SMBus frequency */
    3923 		if (freq--) {
   3924 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3925 			    | HV_SMB_ADDR_FREQ_HIGH);
   3926 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3927 			    HV_SMB_ADDR_FREQ_LOW);
   3928 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3929 			    HV_SMB_ADDR_FREQ_HIGH);
   3930 		} else {
   3931 			DPRINTF(WM_DEBUG_INIT,
   3932 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3933 				device_xname(sc->sc_dev), __func__));
   3934 		}
   3935 	}
   3936 
   3937 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   3938 	    phy_data);
   3939 }
   3940 
   3941 void
   3942 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3943 {
   3944 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3945 	uint16_t phy_page = 0;
   3946 
   3947 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3948 		device_xname(sc->sc_dev), __func__));
   3949 
   3950 	switch (sc->sc_type) {
   3951 	case WM_T_ICH8:
   3952 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3953 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3954 			return;
   3955 
   3956 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3957 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3958 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3959 			break;
   3960 		}
   3961 		/* FALLTHROUGH */
   3962 	case WM_T_PCH:
   3963 	case WM_T_PCH2:
   3964 	case WM_T_PCH_LPT:
   3965 	case WM_T_PCH_SPT:
   3966 	case WM_T_PCH_CNP:
   3967 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3968 		break;
   3969 	default:
   3970 		return;
   3971 	}
   3972 
   3973 	sc->phy.acquire(sc);
   3974 
   3975 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3976 	if ((reg & sw_cfg_mask) == 0)
   3977 		goto release;
   3978 
   3979 	/*
   3980 	 * Make sure HW does not configure LCD from PHY extended configuration
   3981 	 * before SW configuration
   3982 	 */
   3983 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3984 	if ((sc->sc_type < WM_T_PCH2)
   3985 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3986 		goto release;
   3987 
   3988 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3989 		device_xname(sc->sc_dev), __func__));
   3990 	/* word_addr is in DWORD */
   3991 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3992 
   3993 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3994 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3995 	if (cnf_size == 0)
   3996 		goto release;
   3997 
   3998 	if (((sc->sc_type == WM_T_PCH)
   3999 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4000 	    || (sc->sc_type > WM_T_PCH)) {
   4001 		/*
   4002 		 * HW configures the SMBus address and LEDs when the OEM and
   4003 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4004 		 * are cleared, SW will configure them instead.
   4005 		 */
   4006 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4007 			device_xname(sc->sc_dev), __func__));
   4008 		wm_write_smbus_addr(sc);
   4009 
   4010 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4011 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4012 	}
   4013 
   4014 	/* Configure LCD from extended configuration region. */
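         	/*
         	 * Each entry in the region is a pair of NVM words: the first
         	 * is the data and the second is the PHY register address
         	 * (with the page select register handled specially below).
         	 */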
   4015 	for (i = 0; i < cnf_size; i++) {
   4016 		uint16_t reg_data, reg_addr;
   4017 
   4018 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4019 			goto release;
   4020 
   4021 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4022 			goto release;
   4023 
   4024 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4025 			phy_page = reg_data;
   4026 
   4027 		reg_addr &= IGPHY_MAXREGADDR;
   4028 		reg_addr |= phy_page;
   4029 
   4030 		KASSERT(sc->phy.writereg_locked != NULL);
   4031 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4032 	}
   4033 
   4034 release:
   4035 	sc->phy.release(sc);
   4036 	return;
   4037 }
   4038 
   4039 /*
   4040  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4041  *  @sc:       pointer to the HW structure
   4042  *  @d0_state: boolean if entering d0 or d3 device state
   4043  *
   4044  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4045  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
    4046  *  in NVM determine whether HW should configure LPLU and Gbe Disable.
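          *
          *  For example, wm_phy_post_reset() calls this function with
          *  d0_state == true after an LCD reset to (re)apply the D0 settings.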
   4047  */
   4048 int
   4049 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4050 {
   4051 	uint32_t mac_reg;
   4052 	uint16_t oem_reg;
   4053 	int rv;
   4054 
   4055 	if (sc->sc_type < WM_T_PCH)
   4056 		return 0;
   4057 
   4058 	rv = sc->phy.acquire(sc);
   4059 	if (rv != 0)
   4060 		return rv;
   4061 
   4062 	if (sc->sc_type == WM_T_PCH) {
   4063 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4064 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4065 			goto release;
   4066 	}
   4067 
   4068 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4069 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4070 		goto release;
   4071 
   4072 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4073 
   4074 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4075 	if (rv != 0)
   4076 		goto release;
   4077 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4078 
   4079 	if (d0_state) {
   4080 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4081 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4082 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4083 			oem_reg |= HV_OEM_BITS_LPLU;
   4084 	} else {
   4085 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4086 		    != 0)
   4087 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4088 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4089 		    != 0)
   4090 			oem_reg |= HV_OEM_BITS_LPLU;
   4091 	}
   4092 
   4093 	/* Set Restart auto-neg to activate the bits */
   4094 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4095 	    && (wm_phy_resetisblocked(sc) == false))
   4096 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4097 
   4098 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4099 
   4100 release:
   4101 	sc->phy.release(sc);
   4102 
   4103 	return rv;
   4104 }
   4105 
   4106 /* Init hardware bits */
   4107 void
   4108 wm_initialize_hardware_bits(struct wm_softc *sc)
   4109 {
   4110 	uint32_t tarc0, tarc1, reg;
   4111 
   4112 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4113 		device_xname(sc->sc_dev), __func__));
   4114 
   4115 	/* For 82571 variant, 80003 and ICHs */
   4116 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4117 	    || (sc->sc_type >= WM_T_80003)) {
   4118 
   4119 		/* Transmit Descriptor Control 0 */
   4120 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4121 		reg |= TXDCTL_COUNT_DESC;
   4122 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4123 
   4124 		/* Transmit Descriptor Control 1 */
   4125 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4126 		reg |= TXDCTL_COUNT_DESC;
   4127 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4128 
   4129 		/* TARC0 */
   4130 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4131 		switch (sc->sc_type) {
   4132 		case WM_T_82571:
   4133 		case WM_T_82572:
   4134 		case WM_T_82573:
   4135 		case WM_T_82574:
   4136 		case WM_T_82583:
   4137 		case WM_T_80003:
   4138 			/* Clear bits 30..27 */
   4139 			tarc0 &= ~__BITS(30, 27);
   4140 			break;
   4141 		default:
   4142 			break;
   4143 		}
   4144 
   4145 		switch (sc->sc_type) {
   4146 		case WM_T_82571:
   4147 		case WM_T_82572:
   4148 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4149 
   4150 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4151 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4152 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4153 			/* 8257[12] Errata No.7 */
    4154 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4155 
   4156 			/* TARC1 bit 28 */
   4157 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4158 				tarc1 &= ~__BIT(28);
   4159 			else
   4160 				tarc1 |= __BIT(28);
   4161 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4162 
   4163 			/*
   4164 			 * 8257[12] Errata No.13
    4165 			 * Disable Dynamic Clock Gating.
   4166 			 */
   4167 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4168 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4169 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4170 			break;
   4171 		case WM_T_82573:
   4172 		case WM_T_82574:
   4173 		case WM_T_82583:
   4174 			if ((sc->sc_type == WM_T_82574)
   4175 			    || (sc->sc_type == WM_T_82583))
   4176 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4177 
   4178 			/* Extended Device Control */
   4179 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4180 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4181 			reg |= __BIT(22);	/* Set bit 22 */
   4182 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4183 
   4184 			/* Device Control */
   4185 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4186 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4187 
   4188 			/* PCIe Control Register */
   4189 			/*
   4190 			 * 82573 Errata (unknown).
   4191 			 *
   4192 			 * 82574 Errata 25 and 82583 Errata 12
   4193 			 * "Dropped Rx Packets":
    4194 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4195 			 */
   4196 			reg = CSR_READ(sc, WMREG_GCR);
   4197 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4198 			CSR_WRITE(sc, WMREG_GCR, reg);
   4199 
   4200 			if ((sc->sc_type == WM_T_82574)
   4201 			    || (sc->sc_type == WM_T_82583)) {
   4202 				/*
   4203 				 * Document says this bit must be set for
   4204 				 * proper operation.
   4205 				 */
   4206 				reg = CSR_READ(sc, WMREG_GCR);
   4207 				reg |= __BIT(22);
   4208 				CSR_WRITE(sc, WMREG_GCR, reg);
   4209 
   4210 				/*
    4211 				 * Apply a workaround for the hardware
    4212 				 * erratum documented in the errata docs.
    4213 				 * It fixes an issue where some error-prone
    4214 				 * or unreliable PCIe completions occur,
    4215 				 * particularly with ASPM enabled. Without
    4216 				 * the fix, the issue can cause Tx timeouts.
   4217 				 */
   4218 				reg = CSR_READ(sc, WMREG_GCR2);
   4219 				reg |= __BIT(0);
   4220 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4221 			}
   4222 			break;
   4223 		case WM_T_80003:
   4224 			/* TARC0 */
   4225 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4226 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4227 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4228 
   4229 			/* TARC1 bit 28 */
   4230 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4231 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4232 				tarc1 &= ~__BIT(28);
   4233 			else
   4234 				tarc1 |= __BIT(28);
   4235 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4236 			break;
   4237 		case WM_T_ICH8:
   4238 		case WM_T_ICH9:
   4239 		case WM_T_ICH10:
   4240 		case WM_T_PCH:
   4241 		case WM_T_PCH2:
   4242 		case WM_T_PCH_LPT:
   4243 		case WM_T_PCH_SPT:
   4244 		case WM_T_PCH_CNP:
   4245 			/* TARC0 */
   4246 			if (sc->sc_type == WM_T_ICH8) {
   4247 				/* Set TARC0 bits 29 and 28 */
   4248 				tarc0 |= __BITS(29, 28);
   4249 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4250 				tarc0 |= __BIT(29);
   4251 				/*
   4252 				 *  Drop bit 28. From Linux.
   4253 				 * See I218/I219 spec update
   4254 				 * "5. Buffer Overrun While the I219 is
   4255 				 * Processing DMA Transactions"
   4256 				 */
   4257 				tarc0 &= ~__BIT(28);
   4258 			}
   4259 			/* Set TARC0 bits 23,24,26,27 */
   4260 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4261 
   4262 			/* CTRL_EXT */
   4263 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4264 			reg |= __BIT(22);	/* Set bit 22 */
   4265 			/*
   4266 			 * Enable PHY low-power state when MAC is at D3
   4267 			 * w/o WoL
   4268 			 */
   4269 			if (sc->sc_type >= WM_T_PCH)
   4270 				reg |= CTRL_EXT_PHYPDEN;
   4271 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4272 
   4273 			/* TARC1 */
   4274 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4275 			/* bit 28 */
   4276 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4277 				tarc1 &= ~__BIT(28);
   4278 			else
   4279 				tarc1 |= __BIT(28);
   4280 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4281 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4282 
   4283 			/* Device Status */
   4284 			if (sc->sc_type == WM_T_ICH8) {
   4285 				reg = CSR_READ(sc, WMREG_STATUS);
   4286 				reg &= ~__BIT(31);
   4287 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4288 
   4289 			}
   4290 
   4291 			/* IOSFPC */
   4292 			if (sc->sc_type == WM_T_PCH_SPT) {
   4293 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4294 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4295 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4296 			}
   4297 			/*
    4298 			 * To work around a descriptor data corruption
    4299 			 * issue during NFS v2 UDP traffic, just disable
    4300 			 * the NFS filtering capability.
   4301 			 */
   4302 			reg = CSR_READ(sc, WMREG_RFCTL);
   4303 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4304 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4305 			break;
   4306 		default:
   4307 			break;
   4308 		}
   4309 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4310 
   4311 		switch (sc->sc_type) {
   4312 		/*
   4313 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4314 		 * Avoid RSS Hash Value bug.
   4315 		 */
   4316 		case WM_T_82571:
   4317 		case WM_T_82572:
   4318 		case WM_T_82573:
   4319 		case WM_T_80003:
   4320 		case WM_T_ICH8:
   4321 			reg = CSR_READ(sc, WMREG_RFCTL);
   4322 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   4323 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4324 			break;
   4325 		case WM_T_82574:
   4326 			/* use extened Rx descriptor. */
   4327 			reg = CSR_READ(sc, WMREG_RFCTL);
   4328 			reg |= WMREG_RFCTL_EXSTEN;
   4329 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4330 			break;
   4331 		default:
   4332 			break;
   4333 		}
   4334 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4335 		/*
   4336 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4337 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4338 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4339 		 * Correctly by the Device"
   4340 		 *
   4341 		 * I354(C2000) Errata AVR53:
   4342 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4343 		 * Hang"
   4344 		 */
   4345 		reg = CSR_READ(sc, WMREG_RFCTL);
   4346 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4347 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4348 	}
   4349 }
   4350 
   4351 static uint32_t
   4352 wm_rxpbs_adjust_82580(uint32_t val)
   4353 {
   4354 	uint32_t rv = 0;
   4355 
   4356 	if (val < __arraycount(wm_82580_rxpbs_table))
   4357 		rv = wm_82580_rxpbs_table[val];
   4358 
   4359 	return rv;
   4360 }
   4361 
   4362 /*
   4363  * wm_reset_phy:
   4364  *
   4365  *	generic PHY reset function.
   4366  *	Same as e1000_phy_hw_reset_generic()
   4367  */
   4368 static void
   4369 wm_reset_phy(struct wm_softc *sc)
   4370 {
   4371 	uint32_t reg;
   4372 
   4373 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4374 		device_xname(sc->sc_dev), __func__));
   4375 	if (wm_phy_resetisblocked(sc))
   4376 		return;
   4377 
   4378 	sc->phy.acquire(sc);
   4379 
   4380 	reg = CSR_READ(sc, WMREG_CTRL);
   4381 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4382 	CSR_WRITE_FLUSH(sc);
   4383 
   4384 	delay(sc->phy.reset_delay_us);
   4385 
   4386 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4387 	CSR_WRITE_FLUSH(sc);
   4388 
   4389 	delay(150);
   4390 
   4391 	sc->phy.release(sc);
   4392 
   4393 	wm_get_cfg_done(sc);
   4394 	wm_phy_post_reset(sc);
   4395 }
   4396 
   4397 /*
   4398  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4399  * so it is enough to check sc->sc_queue[0] only.
   4400  */
   4401 static void
   4402 wm_flush_desc_rings(struct wm_softc *sc)
   4403 {
   4404 	pcireg_t preg;
   4405 	uint32_t reg;
   4406 	struct wm_txqueue *txq;
   4407 	wiseman_txdesc_t *txd;
   4408 	int nexttx;
   4409 	uint32_t rctl;
   4410 
   4411 	/* First, disable MULR fix in FEXTNVM11 */
   4412 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4413 	reg |= FEXTNVM11_DIS_MULRFIX;
   4414 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4415 
   4416 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4417 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4418 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4419 		return;
   4420 
   4421 	/* TX */
   4422 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4423 	    device_xname(sc->sc_dev), preg, reg);
   4424 	reg = CSR_READ(sc, WMREG_TCTL);
   4425 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4426 
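         	/*
         	 * Queue a single dummy descriptor (512 bytes, with CRC
         	 * appended via WTX_CMD_IFCS) so the hardware can drain
         	 * the Tx ring.
         	 */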
   4427 	txq = &sc->sc_queue[0].wmq_txq;
   4428 	nexttx = txq->txq_next;
   4429 	txd = &txq->txq_descs[nexttx];
   4430 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4431 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4432 	txd->wtx_fields.wtxu_status = 0;
   4433 	txd->wtx_fields.wtxu_options = 0;
   4434 	txd->wtx_fields.wtxu_vlan = 0;
   4435 
   4436 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4437 	    BUS_SPACE_BARRIER_WRITE);
   4438 
   4439 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4440 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4441 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4442 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4443 	delay(250);
   4444 
   4445 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4446 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4447 		return;
   4448 
   4449 	/* RX */
   4450 	printf("%s: Need RX flush (reg = %08x)\n",
   4451 	    device_xname(sc->sc_dev), preg);
   4452 	rctl = CSR_READ(sc, WMREG_RCTL);
   4453 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4454 	CSR_WRITE_FLUSH(sc);
   4455 	delay(150);
   4456 
   4457 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4458 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4459 	reg &= 0xffffc000;
   4460 	/*
   4461 	 * update thresholds: prefetch threshold to 31, host threshold
   4462 	 * to 1 and make sure the granularity is "descriptors" and not
   4463 	 * "cache lines"
   4464 	 */
   4465 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4466 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4467 
   4468 	/*
   4469 	 * momentarily enable the RX ring for the changes to take
   4470 	 * effect
   4471 	 */
   4472 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4473 	CSR_WRITE_FLUSH(sc);
   4474 	delay(150);
   4475 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4476 }
   4477 
   4478 /*
   4479  * wm_reset:
   4480  *
   4481  *	Reset the i82542 chip.
   4482  */
   4483 static void
   4484 wm_reset(struct wm_softc *sc)
   4485 {
   4486 	int phy_reset = 0;
   4487 	int i, error = 0;
   4488 	uint32_t reg;
   4489 	uint16_t kmreg;
   4490 	int rv;
   4491 
   4492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4493 		device_xname(sc->sc_dev), __func__));
   4494 	KASSERT(sc->sc_type != 0);
   4495 
   4496 	/*
   4497 	 * Allocate on-chip memory according to the MTU size.
   4498 	 * The Packet Buffer Allocation register must be written
   4499 	 * before the chip is reset.
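         	 *
         	 * Illustrative example (assuming the PBA_* constants are in
         	 * kilobytes): on an 82547 at the default MTU, sc_pba is
         	 * PBA_30K for Rx and the remaining 10KB of the 40KB packet
         	 * buffer becomes the Tx FIFO (PBA_40K - sc_pba below).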
   4500 	 */
   4501 	switch (sc->sc_type) {
   4502 	case WM_T_82547:
   4503 	case WM_T_82547_2:
   4504 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4505 		    PBA_22K : PBA_30K;
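         		/*
         		 * On the 82547, the part of the 40KB packet buffer above
         		 * the Rx allocation serves as the Tx FIFO; record its
         		 * offset and size for the Tx FIFO stall workaround
         		 * (wm_82547_txfifo_stall()).
         		 */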
   4506 		for (i = 0; i < sc->sc_nqueues; i++) {
   4507 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4508 			txq->txq_fifo_head = 0;
   4509 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4510 			txq->txq_fifo_size =
   4511 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4512 			txq->txq_fifo_stall = 0;
   4513 		}
   4514 		break;
   4515 	case WM_T_82571:
   4516 	case WM_T_82572:
    4517 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4518 	case WM_T_80003:
   4519 		sc->sc_pba = PBA_32K;
   4520 		break;
   4521 	case WM_T_82573:
   4522 		sc->sc_pba = PBA_12K;
   4523 		break;
   4524 	case WM_T_82574:
   4525 	case WM_T_82583:
   4526 		sc->sc_pba = PBA_20K;
   4527 		break;
   4528 	case WM_T_82576:
   4529 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4530 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4531 		break;
   4532 	case WM_T_82580:
   4533 	case WM_T_I350:
   4534 	case WM_T_I354:
   4535 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4536 		break;
   4537 	case WM_T_I210:
   4538 	case WM_T_I211:
   4539 		sc->sc_pba = PBA_34K;
   4540 		break;
   4541 	case WM_T_ICH8:
   4542 		/* Workaround for a bit corruption issue in FIFO memory */
   4543 		sc->sc_pba = PBA_8K;
   4544 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4545 		break;
   4546 	case WM_T_ICH9:
   4547 	case WM_T_ICH10:
   4548 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4549 		    PBA_14K : PBA_10K;
   4550 		break;
   4551 	case WM_T_PCH:
   4552 	case WM_T_PCH2:	/* XXX 14K? */
   4553 	case WM_T_PCH_LPT:
   4554 	case WM_T_PCH_SPT:
   4555 	case WM_T_PCH_CNP:
   4556 		sc->sc_pba = PBA_26K;
   4557 		break;
   4558 	default:
   4559 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4560 		    PBA_40K : PBA_48K;
   4561 		break;
   4562 	}
   4563 	/*
   4564 	 * Only old or non-multiqueue devices have the PBA register
   4565 	 * XXX Need special handling for 82575.
   4566 	 */
   4567 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4568 	    || (sc->sc_type == WM_T_82575))
   4569 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4570 
    4571 	/* Disable PCIe master access so the bus quiesces before reset */
   4572 	if (sc->sc_flags & WM_F_PCIE) {
   4573 		int timeout = 800;
   4574 
   4575 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4576 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4577 
   4578 		while (timeout--) {
   4579 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4580 			    == 0)
   4581 				break;
   4582 			delay(100);
   4583 		}
    4584 		if (timeout < 0)
   4585 			device_printf(sc->sc_dev,
   4586 			    "failed to disable busmastering\n");
   4587 	}
   4588 
    4589 	/* Set the PCIe completion timeout for the interface */
   4590 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4591 	    || (sc->sc_type == WM_T_82580)
   4592 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4593 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4594 		wm_set_pcie_completion_timeout(sc);
   4595 
   4596 	/* Clear interrupt */
   4597 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4598 	if (wm_is_using_msix(sc)) {
   4599 		if (sc->sc_type != WM_T_82574) {
   4600 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4601 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4602 		} else
   4603 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4604 	}
   4605 
   4606 	/* Stop the transmit and receive processes. */
   4607 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4608 	sc->sc_rctl &= ~RCTL_EN;
   4609 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4610 	CSR_WRITE_FLUSH(sc);
   4611 
   4612 	/* XXX set_tbi_sbp_82543() */
   4613 
   4614 	delay(10*1000);
   4615 
   4616 	/* Must acquire the MDIO ownership before MAC reset */
   4617 	switch (sc->sc_type) {
   4618 	case WM_T_82573:
   4619 	case WM_T_82574:
   4620 	case WM_T_82583:
   4621 		error = wm_get_hw_semaphore_82573(sc);
   4622 		break;
   4623 	default:
   4624 		break;
   4625 	}
   4626 
   4627 	/*
   4628 	 * 82541 Errata 29? & 82547 Errata 28?
   4629 	 * See also the description about PHY_RST bit in CTRL register
   4630 	 * in 8254x_GBe_SDM.pdf.
   4631 	 */
   4632 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4633 		CSR_WRITE(sc, WMREG_CTRL,
   4634 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4635 		CSR_WRITE_FLUSH(sc);
   4636 		delay(5000);
   4637 	}
   4638 
   4639 	switch (sc->sc_type) {
   4640 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4641 	case WM_T_82541:
   4642 	case WM_T_82541_2:
   4643 	case WM_T_82547:
   4644 	case WM_T_82547_2:
   4645 		/*
   4646 		 * On some chipsets, a reset through a memory-mapped write
   4647 		 * cycle can cause the chip to reset before completing the
    4648 		 * write cycle. This causes a major headache that can be avoided
   4649 		 * by issuing the reset via indirect register writes through
   4650 		 * I/O space.
   4651 		 *
   4652 		 * So, if we successfully mapped the I/O BAR at attach time,
   4653 		 * use that. Otherwise, try our luck with a memory-mapped
   4654 		 * reset.
   4655 		 */
   4656 		if (sc->sc_flags & WM_F_IOH_VALID)
   4657 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4658 		else
   4659 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4660 		break;
   4661 	case WM_T_82545_3:
   4662 	case WM_T_82546_3:
   4663 		/* Use the shadow control register on these chips. */
   4664 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4665 		break;
   4666 	case WM_T_80003:
   4667 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4668 		sc->phy.acquire(sc);
   4669 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4670 		sc->phy.release(sc);
   4671 		break;
   4672 	case WM_T_ICH8:
   4673 	case WM_T_ICH9:
   4674 	case WM_T_ICH10:
   4675 	case WM_T_PCH:
   4676 	case WM_T_PCH2:
   4677 	case WM_T_PCH_LPT:
   4678 	case WM_T_PCH_SPT:
   4679 	case WM_T_PCH_CNP:
   4680 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4681 		if (wm_phy_resetisblocked(sc) == false) {
   4682 			/*
   4683 			 * Gate automatic PHY configuration by hardware on
   4684 			 * non-managed 82579
   4685 			 */
   4686 			if ((sc->sc_type == WM_T_PCH2)
   4687 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4688 				== 0))
   4689 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4690 
   4691 			reg |= CTRL_PHY_RESET;
   4692 			phy_reset = 1;
   4693 		} else
   4694 			printf("XXX reset is blocked!!!\n");
   4695 		sc->phy.acquire(sc);
   4696 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4697 		/* Don't insert a completion barrier when resetting */
   4698 		delay(20*1000);
   4699 		mutex_exit(sc->sc_ich_phymtx);
   4700 		break;
   4701 	case WM_T_82580:
   4702 	case WM_T_I350:
   4703 	case WM_T_I354:
   4704 	case WM_T_I210:
   4705 	case WM_T_I211:
   4706 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4707 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4708 			CSR_WRITE_FLUSH(sc);
   4709 		delay(5000);
   4710 		break;
   4711 	case WM_T_82542_2_0:
   4712 	case WM_T_82542_2_1:
   4713 	case WM_T_82543:
   4714 	case WM_T_82540:
   4715 	case WM_T_82545:
   4716 	case WM_T_82546:
   4717 	case WM_T_82571:
   4718 	case WM_T_82572:
   4719 	case WM_T_82573:
   4720 	case WM_T_82574:
   4721 	case WM_T_82575:
   4722 	case WM_T_82576:
   4723 	case WM_T_82583:
   4724 	default:
   4725 		/* Everything else can safely use the documented method. */
   4726 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4727 		break;
   4728 	}
   4729 
   4730 	/* Must release the MDIO ownership after MAC reset */
   4731 	switch (sc->sc_type) {
   4732 	case WM_T_82573:
   4733 	case WM_T_82574:
   4734 	case WM_T_82583:
   4735 		if (error == 0)
   4736 			wm_put_hw_semaphore_82573(sc);
   4737 		break;
   4738 	default:
   4739 		break;
   4740 	}
   4741 
   4742 	/* Set Phy Config Counter to 50msec */
   4743 	if (sc->sc_type == WM_T_PCH2) {
   4744 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4745 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4746 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4747 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4748 	}
   4749 
   4750 	if (phy_reset != 0)
   4751 		wm_get_cfg_done(sc);
   4752 
   4753 	/* reload EEPROM */
   4754 	switch (sc->sc_type) {
   4755 	case WM_T_82542_2_0:
   4756 	case WM_T_82542_2_1:
   4757 	case WM_T_82543:
   4758 	case WM_T_82544:
   4759 		delay(10);
   4760 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4761 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4762 		CSR_WRITE_FLUSH(sc);
   4763 		delay(2000);
   4764 		break;
   4765 	case WM_T_82540:
   4766 	case WM_T_82545:
   4767 	case WM_T_82545_3:
   4768 	case WM_T_82546:
   4769 	case WM_T_82546_3:
   4770 		delay(5*1000);
   4771 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4772 		break;
   4773 	case WM_T_82541:
   4774 	case WM_T_82541_2:
   4775 	case WM_T_82547:
   4776 	case WM_T_82547_2:
   4777 		delay(20000);
   4778 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4779 		break;
   4780 	case WM_T_82571:
   4781 	case WM_T_82572:
   4782 	case WM_T_82573:
   4783 	case WM_T_82574:
   4784 	case WM_T_82583:
   4785 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4786 			delay(10);
   4787 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4788 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4789 			CSR_WRITE_FLUSH(sc);
   4790 		}
   4791 		/* check EECD_EE_AUTORD */
   4792 		wm_get_auto_rd_done(sc);
   4793 		/*
   4794 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4795 		 * is set.
   4796 		 */
   4797 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4798 		    || (sc->sc_type == WM_T_82583))
   4799 			delay(25*1000);
   4800 		break;
   4801 	case WM_T_82575:
   4802 	case WM_T_82576:
   4803 	case WM_T_82580:
   4804 	case WM_T_I350:
   4805 	case WM_T_I354:
   4806 	case WM_T_I210:
   4807 	case WM_T_I211:
   4808 	case WM_T_80003:
   4809 		/* check EECD_EE_AUTORD */
   4810 		wm_get_auto_rd_done(sc);
   4811 		break;
   4812 	case WM_T_ICH8:
   4813 	case WM_T_ICH9:
   4814 	case WM_T_ICH10:
   4815 	case WM_T_PCH:
   4816 	case WM_T_PCH2:
   4817 	case WM_T_PCH_LPT:
   4818 	case WM_T_PCH_SPT:
   4819 	case WM_T_PCH_CNP:
   4820 		break;
   4821 	default:
   4822 		panic("%s: unknown type\n", __func__);
   4823 	}
   4824 
   4825 	/* Check whether EEPROM is present or not */
   4826 	switch (sc->sc_type) {
   4827 	case WM_T_82575:
   4828 	case WM_T_82576:
   4829 	case WM_T_82580:
   4830 	case WM_T_I350:
   4831 	case WM_T_I354:
   4832 	case WM_T_ICH8:
   4833 	case WM_T_ICH9:
   4834 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4835 			/* Not found */
   4836 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4837 			if (sc->sc_type == WM_T_82575)
   4838 				wm_reset_init_script_82575(sc);
   4839 		}
   4840 		break;
   4841 	default:
   4842 		break;
   4843 	}
   4844 
   4845 	if (phy_reset != 0)
   4846 		wm_phy_post_reset(sc);
   4847 
   4848 	if ((sc->sc_type == WM_T_82580)
   4849 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4850 		/* clear global device reset status bit */
   4851 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4852 	}
   4853 
   4854 	/* Clear any pending interrupt events. */
   4855 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4856 	reg = CSR_READ(sc, WMREG_ICR);
   4857 	if (wm_is_using_msix(sc)) {
   4858 		if (sc->sc_type != WM_T_82574) {
   4859 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4860 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4861 		} else
   4862 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4863 	}
   4864 
   4865 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4866 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4867 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4868 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4869 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4870 		reg |= KABGTXD_BGSQLBIAS;
   4871 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4872 	}
   4873 
   4874 	/* reload sc_ctrl */
   4875 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4876 
   4877 	if (sc->sc_type == WM_T_I354) {
   4878 #if 0
   4879 		/* I354 uses an external PHY */
   4880 		wm_set_eee_i354(sc);
   4881 #endif
   4882 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4883 		wm_set_eee_i350(sc);
   4884 
   4885 	/*
   4886 	 * For PCH, this write will make sure that any noise will be detected
   4887 	 * as a CRC error and be dropped rather than show up as a bad packet
   4888 	 * to the DMA engine
   4889 	 */
   4890 	if (sc->sc_type == WM_T_PCH)
   4891 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4892 
   4893 	if (sc->sc_type >= WM_T_82544)
   4894 		CSR_WRITE(sc, WMREG_WUC, 0);
   4895 
   4896 	wm_reset_mdicnfg_82580(sc);
   4897 
   4898 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4899 		wm_pll_workaround_i210(sc);
   4900 
   4901 	if (sc->sc_type == WM_T_80003) {
   4902 		/* default to TRUE to enable the MDIC W/A */
   4903 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4904 
   4905 		rv = wm_kmrn_readreg(sc,
   4906 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4907 		if (rv == 0) {
   4908 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4909 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4910 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4911 			else
   4912 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4913 		}
   4914 	}
   4915 }
   4916 
   4917 /*
   4918  * wm_add_rxbuf:
   4919  *
    4920  *	Add a receive buffer to the indicated descriptor.
   4921  */
   4922 static int
   4923 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4924 {
   4925 	struct wm_softc *sc = rxq->rxq_sc;
   4926 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4927 	struct mbuf *m;
   4928 	int error;
   4929 
   4930 	KASSERT(mutex_owned(rxq->rxq_lock));
   4931 
   4932 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4933 	if (m == NULL)
   4934 		return ENOBUFS;
   4935 
   4936 	MCLGET(m, M_DONTWAIT);
   4937 	if ((m->m_flags & M_EXT) == 0) {
   4938 		m_freem(m);
   4939 		return ENOBUFS;
   4940 	}
   4941 
   4942 	if (rxs->rxs_mbuf != NULL)
   4943 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4944 
   4945 	rxs->rxs_mbuf = m;
   4946 
   4947 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4948 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4949 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4950 	if (error) {
   4951 		/* XXX XXX XXX */
   4952 		aprint_error_dev(sc->sc_dev,
   4953 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4954 		panic("wm_add_rxbuf");
   4955 	}
   4956 
   4957 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4958 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4959 
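         	/*
         	 * On NEWQUEUE (82575 and later) hardware, write the Rx
         	 * descriptor only while the receiver is enabled; otherwise
         	 * the init path initializes it after RCTL.EN is set.
         	 */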
   4960 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4961 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4962 			wm_init_rxdesc(rxq, idx);
   4963 	} else
   4964 		wm_init_rxdesc(rxq, idx);
   4965 
   4966 	return 0;
   4967 }
   4968 
   4969 /*
   4970  * wm_rxdrain:
   4971  *
   4972  *	Drain the receive queue.
   4973  */
   4974 static void
   4975 wm_rxdrain(struct wm_rxqueue *rxq)
   4976 {
   4977 	struct wm_softc *sc = rxq->rxq_sc;
   4978 	struct wm_rxsoft *rxs;
   4979 	int i;
   4980 
   4981 	KASSERT(mutex_owned(rxq->rxq_lock));
   4982 
   4983 	for (i = 0; i < WM_NRXDESC; i++) {
   4984 		rxs = &rxq->rxq_soft[i];
   4985 		if (rxs->rxs_mbuf != NULL) {
   4986 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4987 			m_freem(rxs->rxs_mbuf);
   4988 			rxs->rxs_mbuf = NULL;
   4989 		}
   4990 	}
   4991 }
   4992 
   4993 /*
   4994  * Setup registers for RSS.
   4995  *
    4996  * XXX VMDq is not yet supported.
   4997  */
   4998 static void
   4999 wm_init_rss(struct wm_softc *sc)
   5000 {
   5001 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5002 	int i;
   5003 
   5004 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5005 
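         	/*
         	 * Spread the redirection table entries across the queues
         	 * round-robin: entry i maps to queue i % sc_nqueues.
         	 */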
   5006 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5007 		int qid, reta_ent;
   5008 
   5009 		qid  = i % sc->sc_nqueues;
   5010 		switch (sc->sc_type) {
   5011 		case WM_T_82574:
   5012 			reta_ent = __SHIFTIN(qid,
   5013 			    RETA_ENT_QINDEX_MASK_82574);
   5014 			break;
   5015 		case WM_T_82575:
   5016 			reta_ent = __SHIFTIN(qid,
   5017 			    RETA_ENT_QINDEX1_MASK_82575);
   5018 			break;
   5019 		default:
   5020 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5021 			break;
   5022 		}
   5023 
   5024 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5025 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5026 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5027 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5028 	}
   5029 
   5030 	rss_getkey((uint8_t *)rss_key);
   5031 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5032 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5033 
   5034 	if (sc->sc_type == WM_T_82574)
   5035 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5036 	else
   5037 		mrqc = MRQC_ENABLE_RSS_MQ;
   5038 
   5039 	/*
    5040 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an erratum.
   5041 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5042 	 */
   5043 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5044 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5045 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5046 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5047 
   5048 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5049 }
   5050 
   5051 /*
    5052  * Adjust the TX and RX queue numbers which the system actually uses.
    5053  *
    5054  * The numbers are affected by the parameters below:
    5055  *     - The number of hardware queues
   5056  *     - The number of MSI-X vectors (= "nvectors" argument)
   5057  *     - ncpu
   5058  */
   5059 static void
   5060 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5061 {
   5062 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5063 
   5064 	if (nvectors < 2) {
   5065 		sc->sc_nqueues = 1;
   5066 		return;
   5067 	}
   5068 
   5069 	switch (sc->sc_type) {
   5070 	case WM_T_82572:
   5071 		hw_ntxqueues = 2;
   5072 		hw_nrxqueues = 2;
   5073 		break;
   5074 	case WM_T_82574:
   5075 		hw_ntxqueues = 2;
   5076 		hw_nrxqueues = 2;
   5077 		break;
   5078 	case WM_T_82575:
   5079 		hw_ntxqueues = 4;
   5080 		hw_nrxqueues = 4;
   5081 		break;
   5082 	case WM_T_82576:
   5083 		hw_ntxqueues = 16;
   5084 		hw_nrxqueues = 16;
   5085 		break;
   5086 	case WM_T_82580:
   5087 	case WM_T_I350:
   5088 	case WM_T_I354:
   5089 		hw_ntxqueues = 8;
   5090 		hw_nrxqueues = 8;
   5091 		break;
   5092 	case WM_T_I210:
   5093 		hw_ntxqueues = 4;
   5094 		hw_nrxqueues = 4;
   5095 		break;
   5096 	case WM_T_I211:
   5097 		hw_ntxqueues = 2;
   5098 		hw_nrxqueues = 2;
   5099 		break;
   5100 		/*
    5101 		 * As the Ethernet controllers below do not support MSI-X,
    5102 		 * this driver does not use multiqueue on them:
   5103 		 *     - WM_T_80003
   5104 		 *     - WM_T_ICH8
   5105 		 *     - WM_T_ICH9
   5106 		 *     - WM_T_ICH10
   5107 		 *     - WM_T_PCH
   5108 		 *     - WM_T_PCH2
   5109 		 *     - WM_T_PCH_LPT
   5110 		 */
   5111 	default:
   5112 		hw_ntxqueues = 1;
   5113 		hw_nrxqueues = 1;
   5114 		break;
   5115 	}
   5116 
   5117 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5118 
   5119 	/*
    5120 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5121 	 * the number of queues actually used.
   5122 	 */
   5123 	if (nvectors < hw_nqueues + 1)
   5124 		sc->sc_nqueues = nvectors - 1;
   5125 	else
   5126 		sc->sc_nqueues = hw_nqueues;
   5127 
   5128 	/*
    5129 	 * As more queues than CPUs cannot improve scaling, we limit
    5130 	 * the number of queues actually used.
   5131 	 */
   5132 	if (ncpu < sc->sc_nqueues)
   5133 		sc->sc_nqueues = ncpu;
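         	/*
         	 * Example: an 82576 (16 hardware queues) given 5 MSI-X vectors
         	 * on a 4-CPU system is first limited to nvectors - 1 = 4
         	 * queues, which the ncpu cap above then leaves unchanged.
         	 */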
   5134 }
   5135 
   5136 static inline bool
   5137 wm_is_using_msix(struct wm_softc *sc)
   5138 {
   5139 
   5140 	return (sc->sc_nintrs > 1);
   5141 }
   5142 
   5143 static inline bool
   5144 wm_is_using_multiqueue(struct wm_softc *sc)
   5145 {
   5146 
   5147 	return (sc->sc_nqueues > 1);
   5148 }
   5149 
   5150 static int
   5151 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5152 {
   5153 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5154 	wmq->wmq_id = qidx;
   5155 	wmq->wmq_intr_idx = intr_idx;
   5156 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5157 #ifdef WM_MPSAFE
   5158 	    | SOFTINT_MPSAFE
   5159 #endif
   5160 	    , wm_handle_queue, wmq);
   5161 	if (wmq->wmq_si != NULL)
   5162 		return 0;
   5163 
   5164 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5165 	    wmq->wmq_id);
   5166 
   5167 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5168 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5169 	return ENOMEM;
   5170 }
   5171 
   5172 /*
    5173  * Both single-interrupt MSI and INTx can use this function.
   5174  */
   5175 static int
   5176 wm_setup_legacy(struct wm_softc *sc)
   5177 {
   5178 	pci_chipset_tag_t pc = sc->sc_pc;
   5179 	const char *intrstr = NULL;
   5180 	char intrbuf[PCI_INTRSTR_LEN];
   5181 	int error;
   5182 
   5183 	error = wm_alloc_txrx_queues(sc);
   5184 	if (error) {
   5185 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5186 		    error);
   5187 		return ENOMEM;
   5188 	}
   5189 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5190 	    sizeof(intrbuf));
   5191 #ifdef WM_MPSAFE
   5192 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5193 #endif
   5194 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5195 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5196 	if (sc->sc_ihs[0] == NULL) {
   5197 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5198 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5199 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5200 		return ENOMEM;
   5201 	}
   5202 
   5203 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5204 	sc->sc_nintrs = 1;
   5205 
   5206 	return wm_softint_establish(sc, 0, 0);
   5207 }
   5208 
   5209 static int
   5210 wm_setup_msix(struct wm_softc *sc)
   5211 {
   5212 	void *vih;
   5213 	kcpuset_t *affinity;
   5214 	int qidx, error, intr_idx, txrx_established;
   5215 	pci_chipset_tag_t pc = sc->sc_pc;
   5216 	const char *intrstr = NULL;
   5217 	char intrbuf[PCI_INTRSTR_LEN];
   5218 	char intr_xname[INTRDEVNAMEBUF];
   5219 
   5220 	if (sc->sc_nqueues < ncpu) {
   5221 		/*
    5222 		 * To avoid colliding with other devices' interrupts, the
    5223 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   5224 		 */
   5225 		sc->sc_affinity_offset = 1;
   5226 	} else {
   5227 		/*
    5228 		 * In this case, this device uses all CPUs, so we unify the
    5229 		 * affinitized cpu_index with the MSI-X vector number for readability.
   5230 		 */
   5231 		sc->sc_affinity_offset = 0;
   5232 	}
   5233 
   5234 	error = wm_alloc_txrx_queues(sc);
   5235 	if (error) {
   5236 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5237 		    error);
   5238 		return ENOMEM;
   5239 	}
   5240 
   5241 	kcpuset_create(&affinity, false);
   5242 	intr_idx = 0;
   5243 
   5244 	/*
   5245 	 * TX and RX
   5246 	 */
   5247 	txrx_established = 0;
   5248 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5249 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5250 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5251 
   5252 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5253 		    sizeof(intrbuf));
   5254 #ifdef WM_MPSAFE
   5255 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5256 		    PCI_INTR_MPSAFE, true);
   5257 #endif
   5258 		memset(intr_xname, 0, sizeof(intr_xname));
   5259 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5260 		    device_xname(sc->sc_dev), qidx);
   5261 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5262 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5263 		if (vih == NULL) {
   5264 			aprint_error_dev(sc->sc_dev,
   5265 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5266 			    intrstr ? " at " : "",
   5267 			    intrstr ? intrstr : "");
   5268 
   5269 			goto fail;
   5270 		}
   5271 		kcpuset_zero(affinity);
   5272 		/* Round-robin affinity */
   5273 		kcpuset_set(affinity, affinity_to);
   5274 		error = interrupt_distribute(vih, affinity, NULL);
   5275 		if (error == 0) {
   5276 			aprint_normal_dev(sc->sc_dev,
   5277 			    "for TX and RX interrupting at %s affinity to %u\n",
   5278 			    intrstr, affinity_to);
   5279 		} else {
   5280 			aprint_normal_dev(sc->sc_dev,
   5281 			    "for TX and RX interrupting at %s\n", intrstr);
   5282 		}
   5283 		sc->sc_ihs[intr_idx] = vih;
   5284 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5285 			goto fail;
   5286 		txrx_established++;
   5287 		intr_idx++;
   5288 	}
   5289 
   5290 	/*
   5291 	 * LINK
   5292 	 */
   5293 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5294 	    sizeof(intrbuf));
   5295 #ifdef WM_MPSAFE
   5296 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5297 #endif
   5298 	memset(intr_xname, 0, sizeof(intr_xname));
   5299 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5300 	    device_xname(sc->sc_dev));
   5301 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5302 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5303 	if (vih == NULL) {
   5304 		aprint_error_dev(sc->sc_dev,
   5305 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5306 		    intrstr ? " at " : "",
   5307 		    intrstr ? intrstr : "");
   5308 
   5309 		goto fail;
   5310 	}
    5311 	/* Keep the default affinity for the LINK interrupt */
   5312 	aprint_normal_dev(sc->sc_dev,
   5313 	    "for LINK interrupting at %s\n", intrstr);
   5314 	sc->sc_ihs[intr_idx] = vih;
   5315 	sc->sc_link_intr_idx = intr_idx;
   5316 
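         	/* One MSI-X vector per Tx/Rx queue pair, plus one for link status */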
   5317 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5318 	kcpuset_destroy(affinity);
   5319 	return 0;
   5320 
   5321  fail:
   5322 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5323 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5324 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5325 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5326 	}
   5327 
   5328 	kcpuset_destroy(affinity);
   5329 	return ENOMEM;
   5330 }
   5331 
   5332 static void
   5333 wm_unset_stopping_flags(struct wm_softc *sc)
   5334 {
   5335 	int i;
   5336 
   5337 	KASSERT(WM_CORE_LOCKED(sc));
   5338 
   5339 	/*
    5340 	 * Must unset the stopping flags in ascending order.
   5341 	 */
   5342 	for (i = 0; i < sc->sc_nqueues; i++) {
   5343 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5344 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5345 
   5346 		mutex_enter(txq->txq_lock);
   5347 		txq->txq_stopping = false;
   5348 		mutex_exit(txq->txq_lock);
   5349 
   5350 		mutex_enter(rxq->rxq_lock);
   5351 		rxq->rxq_stopping = false;
   5352 		mutex_exit(rxq->rxq_lock);
   5353 	}
   5354 
   5355 	sc->sc_core_stopping = false;
   5356 }
   5357 
   5358 static void
   5359 wm_set_stopping_flags(struct wm_softc *sc)
   5360 {
   5361 	int i;
   5362 
   5363 	KASSERT(WM_CORE_LOCKED(sc));
   5364 
   5365 	sc->sc_core_stopping = true;
   5366 
   5367 	/*
    5368 	 * Must set the stopping flags in ascending order.
   5369 	 */
   5370 	for (i = 0; i < sc->sc_nqueues; i++) {
   5371 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5372 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5373 
   5374 		mutex_enter(rxq->rxq_lock);
   5375 		rxq->rxq_stopping = true;
   5376 		mutex_exit(rxq->rxq_lock);
   5377 
   5378 		mutex_enter(txq->txq_lock);
   5379 		txq->txq_stopping = true;
   5380 		mutex_exit(txq->txq_lock);
   5381 	}
   5382 }
   5383 
   5384 /*
    5385  * Write the interrupt interval value to the ITR or EITR register.
   5386  */
   5387 static void
   5388 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5389 {
   5390 
   5391 	if (!wmq->wmq_set_itr)
   5392 		return;
   5393 
   5394 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5395 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5396 
   5397 		/*
    5398 		 * The 82575 doesn't have the CNT_INGR field,
    5399 		 * so overwrite the counter field in software.
   5400 		 */
   5401 		if (sc->sc_type == WM_T_82575)
   5402 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5403 		else
   5404 			eitr |= EITR_CNT_INGR;
   5405 
   5406 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5407 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5408 		/*
    5409 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5410 		 * the multiqueue function with MSI-X.
   5411 		 */
   5412 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5413 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5414 	} else {
   5415 		KASSERT(wmq->wmq_id == 0);
   5416 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5417 	}
   5418 
   5419 	wmq->wmq_set_itr = false;
   5420 }
   5421 
   5422 /*
   5423  * TODO
    5424  * The dynamic ITR calculation below is almost the same as Linux igb's;
    5425  * however, it does not fit wm(4), so AIM stays disabled until we find
    5426  * an appropriate ITR calculation.
   5427  */
   5428 /*
    5429  * Calculate the interrupt interval value that wm_itrs_writereg() will
    5430  * write to the register. This function itself does not write ITR/EITR.
   5431  */
   5432 static void
   5433 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5434 {
   5435 #ifdef NOTYET
   5436 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5437 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5438 	uint32_t avg_size = 0;
   5439 	uint32_t new_itr;
   5440 
   5441 	if (rxq->rxq_packets)
   5442 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5443 	if (txq->txq_packets)
   5444 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5445 
   5446 	if (avg_size == 0) {
   5447 		new_itr = 450; /* restore default value */
   5448 		goto out;
   5449 	}
   5450 
   5451 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5452 	avg_size += 24;
   5453 
   5454 	/* Don't starve jumbo frames */
   5455 	avg_size = uimin(avg_size, 3000);
   5456 
   5457 	/* Give a little boost to mid-size frames */
   5458 	if ((avg_size > 300) && (avg_size < 1200))
   5459 		new_itr = avg_size / 3;
   5460 	else
   5461 		new_itr = avg_size / 2;
   5462 
   5463 out:
   5464 	/*
    5465 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   5466 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5467 	 */
   5468 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5469 		new_itr *= 4;
   5470 
   5471 	if (new_itr != wmq->wmq_itr) {
   5472 		wmq->wmq_itr = new_itr;
   5473 		wmq->wmq_set_itr = true;
   5474 	} else
   5475 		wmq->wmq_set_itr = false;
   5476 
   5477 	rxq->rxq_packets = 0;
   5478 	rxq->rxq_bytes = 0;
   5479 	txq->txq_packets = 0;
   5480 	txq->txq_bytes = 0;
   5481 #endif
   5482 }
   5483 
   5484 /*
   5485  * wm_init:		[ifnet interface function]
   5486  *
   5487  *	Initialize the interface.
   5488  */
   5489 static int
   5490 wm_init(struct ifnet *ifp)
   5491 {
   5492 	struct wm_softc *sc = ifp->if_softc;
   5493 	int ret;
   5494 
   5495 	WM_CORE_LOCK(sc);
   5496 	ret = wm_init_locked(ifp);
   5497 	WM_CORE_UNLOCK(sc);
   5498 
   5499 	return ret;
   5500 }
   5501 
   5502 static int
   5503 wm_init_locked(struct ifnet *ifp)
   5504 {
   5505 	struct wm_softc *sc = ifp->if_softc;
   5506 	int i, j, trynum, error = 0;
   5507 	uint32_t reg;
   5508 
   5509 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5510 		device_xname(sc->sc_dev), __func__));
   5511 	KASSERT(WM_CORE_LOCKED(sc));
   5512 
   5513 	/*
    5514 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5515 	 * There is a small but measurable benefit to avoiding the adjustment
   5516 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5517 	 * on such platforms.  One possibility is that the DMA itself is
   5518 	 * slightly more efficient if the front of the entire packet (instead
   5519 	 * of the front of the headers) is aligned.
   5520 	 *
   5521 	 * Note we must always set align_tweak to 0 if we are using
   5522 	 * jumbo frames.
   5523 	 */
   5524 #ifdef __NO_STRICT_ALIGNMENT
   5525 	sc->sc_align_tweak = 0;
   5526 #else
   5527 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5528 		sc->sc_align_tweak = 0;
   5529 	else
   5530 		sc->sc_align_tweak = 2;
   5531 #endif /* __NO_STRICT_ALIGNMENT */
   5532 
   5533 	/* Cancel any pending I/O. */
   5534 	wm_stop_locked(ifp, 0);
   5535 
   5536 	/* update statistics before reset */
   5537 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5538 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5539 
   5540 	/* PCH_SPT hardware workaround */
   5541 	if (sc->sc_type == WM_T_PCH_SPT)
   5542 		wm_flush_desc_rings(sc);
   5543 
   5544 	/* Reset the chip to a known state. */
   5545 	wm_reset(sc);
   5546 
   5547 	/*
    5548 	 * AMT-based hardware can now take control from the firmware.
   5549 	 * Do this after reset.
   5550 	 */
   5551 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5552 		wm_get_hw_control(sc);
   5553 
   5554 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5555 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5556 		wm_legacy_irq_quirk_spt(sc);
   5557 
   5558 	/* Init hardware bits */
   5559 	wm_initialize_hardware_bits(sc);
   5560 
   5561 	/* Reset the PHY. */
   5562 	if (sc->sc_flags & WM_F_HAS_MII)
   5563 		wm_gmii_reset(sc);
   5564 
   5565 	if (sc->sc_type >= WM_T_ICH8) {
   5566 		reg = CSR_READ(sc, WMREG_GCR);
   5567 		/*
   5568 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5569 		 * default after reset.
   5570 		 */
   5571 		if (sc->sc_type == WM_T_ICH8)
   5572 			reg |= GCR_NO_SNOOP_ALL;
   5573 		else
   5574 			reg &= ~GCR_NO_SNOOP_ALL;
   5575 		CSR_WRITE(sc, WMREG_GCR, reg);
   5576 	}
   5577 	if ((sc->sc_type >= WM_T_ICH8)
   5578 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5579 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5580 
   5581 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5582 		reg |= CTRL_EXT_RO_DIS;
   5583 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5584 	}
   5585 
   5586 	/* Calculate (E)ITR value */
   5587 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5588 		/*
   5589 		 * For NEWQUEUE's EITR (except for 82575).
    5590 		 * The 82575's EITR should be set to the same throttling value
    5591 		 * as other old controllers' ITR because the interrupts/sec
    5592 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5593 		 *
    5594 		 * The 82574's EITR should be set to the same value as its ITR.
    5595 		 *
    5596 		 * For N interrupts/sec, set this value to
    5597 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5598 		 */
   5599 		sc->sc_itr_init = 450;
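         		/* By the 1,000,000 / N rule above, 450 corresponds to roughly 2222 interrupts/sec. */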
   5600 	} else if (sc->sc_type >= WM_T_82543) {
   5601 		/*
   5602 		 * Set up the interrupt throttling register (units of 256ns)
   5603 		 * Note that a footnote in Intel's documentation says this
   5604 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5605 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5606 		 * that that is also true for the 1024ns units of the other
   5607 		 * interrupt-related timer registers -- so, really, we ought
   5608 		 * to divide this value by 4 when the link speed is low.
   5609 		 *
   5610 		 * XXX implement this division at link speed change!
   5611 		 */
   5612 
   5613 		/*
   5614 		 * For N interrupts/sec, set this value to:
   5615 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5616 		 * absolute and packet timer values to this value
   5617 		 * divided by 4 to get "simple timer" behavior.
   5618 		 */
   5619 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5620 	}
   5621 
   5622 	error = wm_init_txrx_queues(sc);
   5623 	if (error)
   5624 		goto out;
   5625 
   5626 	/*
   5627 	 * Clear out the VLAN table -- we don't use it (yet).
   5628 	 */
   5629 	CSR_WRITE(sc, WMREG_VET, 0);
   5630 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5631 		trynum = 10; /* Due to hw errata */
   5632 	else
   5633 		trynum = 1;
   5634 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5635 		for (j = 0; j < trynum; j++)
   5636 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5637 
   5638 	/*
   5639 	 * Set up flow-control parameters.
   5640 	 *
   5641 	 * XXX Values could probably stand some tuning.
   5642 	 */
   5643 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5644 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5645 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5646 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5647 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5648 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5649 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5650 	}
   5651 
   5652 	sc->sc_fcrtl = FCRTL_DFLT;
   5653 	if (sc->sc_type < WM_T_82543) {
   5654 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5655 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5656 	} else {
   5657 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5658 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5659 	}
   5660 
   5661 	if (sc->sc_type == WM_T_80003)
   5662 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5663 	else
   5664 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5665 
   5666 	/* Writes the control register. */
   5667 	wm_set_vlan(sc);
   5668 
   5669 	if (sc->sc_flags & WM_F_HAS_MII) {
   5670 		uint16_t kmreg;
   5671 
   5672 		switch (sc->sc_type) {
   5673 		case WM_T_80003:
   5674 		case WM_T_ICH8:
   5675 		case WM_T_ICH9:
   5676 		case WM_T_ICH10:
   5677 		case WM_T_PCH:
   5678 		case WM_T_PCH2:
   5679 		case WM_T_PCH_LPT:
   5680 		case WM_T_PCH_SPT:
   5681 		case WM_T_PCH_CNP:
   5682 			/*
    5683 			 * Set the MAC to wait the maximum time between each
   5684 			 * iteration and increase the max iterations when
   5685 			 * polling the phy; this fixes erroneous timeouts at
   5686 			 * 10Mbps.
   5687 			 */
   5688 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5689 			    0xFFFF);
   5690 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5691 			    &kmreg);
   5692 			kmreg |= 0x3F;
   5693 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5694 			    kmreg);
   5695 			break;
   5696 		default:
   5697 			break;
   5698 		}
   5699 
   5700 		if (sc->sc_type == WM_T_80003) {
   5701 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5702 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5703 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5704 
    5705 			/* Bypass the RX and TX FIFOs */
   5706 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5707 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5708 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5709 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5710 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5711 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5712 		}
   5713 	}
   5714 #if 0
   5715 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5716 #endif
   5717 
   5718 	/* Set up checksum offload parameters. */
   5719 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5720 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5721 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5722 		reg |= RXCSUM_IPOFL;
   5723 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5724 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5725 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5726 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5727 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5728 
    5729 	/* Set up the MSI-X related registers */
   5730 	if (wm_is_using_msix(sc)) {
   5731 		uint32_t ivar;
   5732 		struct wm_queue *wmq;
   5733 		int qid, qintr_idx;
   5734 
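         		/*
         		 * Route each queue's Tx/Rx interrupt causes to its MSI-X
         		 * vector. The routing registers differ by generation:
         		 * MSIXBM on the 82575, a single packed IVAR on the 82574,
         		 * and per-queue IVAR registers on the 82576 and later.
         		 */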
   5735 		if (sc->sc_type == WM_T_82575) {
   5736 			/* Interrupt control */
   5737 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5738 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5739 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5740 
   5741 			/* TX and RX */
   5742 			for (i = 0; i < sc->sc_nqueues; i++) {
   5743 				wmq = &sc->sc_queue[i];
   5744 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5745 				    EITR_TX_QUEUE(wmq->wmq_id)
   5746 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5747 			}
   5748 			/* Link status */
   5749 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5750 			    EITR_OTHER);
   5751 		} else if (sc->sc_type == WM_T_82574) {
   5752 			/* Interrupt control */
   5753 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5754 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5755 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5756 
   5757 			/*
    5758 			 * Work around an issue with spurious interrupts
    5759 			 * in MSI-X mode.
    5760 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5761 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5762 			 */
   5763 			reg = CSR_READ(sc, WMREG_RFCTL);
   5764 			reg |= WMREG_RFCTL_ACKDIS;
   5765 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5766 
   5767 			ivar = 0;
   5768 			/* TX and RX */
   5769 			for (i = 0; i < sc->sc_nqueues; i++) {
   5770 				wmq = &sc->sc_queue[i];
   5771 				qid = wmq->wmq_id;
   5772 				qintr_idx = wmq->wmq_intr_idx;
   5773 
   5774 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5775 				    IVAR_TX_MASK_Q_82574(qid));
   5776 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5777 				    IVAR_RX_MASK_Q_82574(qid));
   5778 			}
   5779 			/* Link status */
   5780 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5781 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5782 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5783 		} else {
   5784 			/* Interrupt control */
   5785 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5786 			    | GPIE_EIAME | GPIE_PBA);
   5787 
   5788 			switch (sc->sc_type) {
   5789 			case WM_T_82580:
   5790 			case WM_T_I350:
   5791 			case WM_T_I354:
   5792 			case WM_T_I210:
   5793 			case WM_T_I211:
   5794 				/* TX and RX */
   5795 				for (i = 0; i < sc->sc_nqueues; i++) {
   5796 					wmq = &sc->sc_queue[i];
   5797 					qid = wmq->wmq_id;
   5798 					qintr_idx = wmq->wmq_intr_idx;
   5799 
   5800 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5801 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5802 					ivar |= __SHIFTIN((qintr_idx
   5803 						| IVAR_VALID),
   5804 					    IVAR_TX_MASK_Q(qid));
   5805 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5806 					ivar |= __SHIFTIN((qintr_idx
   5807 						| IVAR_VALID),
   5808 					    IVAR_RX_MASK_Q(qid));
   5809 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5810 				}
   5811 				break;
   5812 			case WM_T_82576:
   5813 				/* TX and RX */
   5814 				for (i = 0; i < sc->sc_nqueues; i++) {
   5815 					wmq = &sc->sc_queue[i];
   5816 					qid = wmq->wmq_id;
   5817 					qintr_idx = wmq->wmq_intr_idx;
   5818 
   5819 					ivar = CSR_READ(sc,
   5820 					    WMREG_IVAR_Q_82576(qid));
   5821 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5822 					ivar |= __SHIFTIN((qintr_idx
   5823 						| IVAR_VALID),
   5824 					    IVAR_TX_MASK_Q_82576(qid));
   5825 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5826 					ivar |= __SHIFTIN((qintr_idx
   5827 						| IVAR_VALID),
   5828 					    IVAR_RX_MASK_Q_82576(qid));
   5829 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5830 					    ivar);
   5831 				}
   5832 				break;
   5833 			default:
   5834 				break;
   5835 			}
   5836 
   5837 			/* Link status */
   5838 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5839 			    IVAR_MISC_OTHER);
   5840 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5841 		}
   5842 
   5843 		if (wm_is_using_multiqueue(sc)) {
   5844 			wm_init_rss(sc);
   5845 
   5846 			/*
    5847 			 * NOTE: Receive full-packet checksum offload
    5848 			 * is mutually exclusive with multiqueue. However,
    5849 			 * this is not the same as the TCP/IP checksums,
    5850 			 * which still work.
    5851 			 */
   5852 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5853 			reg |= RXCSUM_PCSD;
   5854 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5855 		}
   5856 	}
   5857 
   5858 	/* Set up the interrupt registers. */
   5859 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5860 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5861 	    ICR_RXO | ICR_RXT0;
   5862 	if (wm_is_using_msix(sc)) {
   5863 		uint32_t mask;
   5864 		struct wm_queue *wmq;
   5865 
   5866 		switch (sc->sc_type) {
   5867 		case WM_T_82574:
   5868 			mask = 0;
   5869 			for (i = 0; i < sc->sc_nqueues; i++) {
   5870 				wmq = &sc->sc_queue[i];
   5871 				mask |= ICR_TXQ(wmq->wmq_id);
   5872 				mask |= ICR_RXQ(wmq->wmq_id);
   5873 			}
   5874 			mask |= ICR_OTHER;
   5875 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5876 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5877 			break;
   5878 		default:
   5879 			if (sc->sc_type == WM_T_82575) {
   5880 				mask = 0;
   5881 				for (i = 0; i < sc->sc_nqueues; i++) {
   5882 					wmq = &sc->sc_queue[i];
   5883 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5884 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5885 				}
   5886 				mask |= EITR_OTHER;
   5887 			} else {
   5888 				mask = 0;
   5889 				for (i = 0; i < sc->sc_nqueues; i++) {
   5890 					wmq = &sc->sc_queue[i];
   5891 					mask |= 1 << wmq->wmq_intr_idx;
   5892 				}
   5893 				mask |= 1 << sc->sc_link_intr_idx;
   5894 			}
   5895 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5896 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5897 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5898 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5899 			break;
   5900 		}
   5901 	} else
   5902 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5903 
   5904 	/* Set up the inter-packet gap. */
   5905 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5906 
   5907 	if (sc->sc_type >= WM_T_82543) {
   5908 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5909 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5910 			wm_itrs_writereg(sc, wmq);
   5911 		}
   5912 		/*
    5913 		 * Link interrupts occur much less often than TX
    5914 		 * and RX interrupts, so we don't tune the
    5915 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5916 		 * FreeBSD's if_igb does.
   5917 		 */
   5918 	}
   5919 
    5920 	/* Set the VLAN ethertype. */
   5921 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5922 
   5923 	/*
   5924 	 * Set up the transmit control register; we start out with
    5925 	 * a collision distance suitable for FDX, but update it when
   5926 	 * we resolve the media type.
   5927 	 */
   5928 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5929 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5930 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5931 	if (sc->sc_type >= WM_T_82571)
   5932 		sc->sc_tctl |= TCTL_MULR;
   5933 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5934 
   5935 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5936 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5937 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5938 	}
   5939 
   5940 	if (sc->sc_type == WM_T_80003) {
   5941 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5942 		reg &= ~TCTL_EXT_GCEX_MASK;
   5943 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5944 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5945 	}
   5946 
   5947 	/* Set the media. */
   5948 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5949 		goto out;
   5950 
   5951 	/* Configure for OS presence */
   5952 	wm_init_manageability(sc);
   5953 
   5954 	/*
   5955 	 * Set up the receive control register; we actually program the
   5956 	 * register when we set the receive filter. Use multicast address
   5957 	 * offset type 0.
   5958 	 *
   5959 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5960 	 * don't enable that feature.
   5961 	 */
   5962 	sc->sc_mchash_type = 0;
   5963 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5964 	    | RCTL_MO(sc->sc_mchash_type);
   5965 
   5966 	/*
    5967 	 * The 82574 uses the one-buffer extended Rx descriptor format.
   5968 	 */
   5969 	if (sc->sc_type == WM_T_82574)
   5970 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5971 
   5972 	/*
   5973 	 * The I350 has a bug where it always strips the CRC whether
    5974 	 * asked to or not, so ask for stripped CRC here and cope in rxeof.
   5975 	 */
   5976 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5977 	    || (sc->sc_type == WM_T_I210))
   5978 		sc->sc_rctl |= RCTL_SECRC;
   5979 
   5980 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5981 	    && (ifp->if_mtu > ETHERMTU)) {
   5982 		sc->sc_rctl |= RCTL_LPE;
   5983 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5984 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5985 	}
   5986 
   5987 	if (MCLBYTES == 2048)
   5988 		sc->sc_rctl |= RCTL_2k;
   5989 	else {
   5990 		if (sc->sc_type >= WM_T_82543) {
   5991 			switch (MCLBYTES) {
   5992 			case 4096:
   5993 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5994 				break;
   5995 			case 8192:
   5996 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5997 				break;
   5998 			case 16384:
   5999 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6000 				break;
   6001 			default:
   6002 				panic("wm_init: MCLBYTES %d unsupported",
   6003 				    MCLBYTES);
   6004 				break;
   6005 			}
   6006 		} else
   6007 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6008 	}
   6009 
   6010 	/* Enable ECC */
   6011 	switch (sc->sc_type) {
   6012 	case WM_T_82571:
   6013 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6014 		reg |= PBA_ECC_CORR_EN;
   6015 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6016 		break;
   6017 	case WM_T_PCH_LPT:
   6018 	case WM_T_PCH_SPT:
   6019 	case WM_T_PCH_CNP:
   6020 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6021 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6022 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6023 
   6024 		sc->sc_ctrl |= CTRL_MEHE;
   6025 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6026 		break;
   6027 	default:
   6028 		break;
   6029 	}
   6030 
   6031 	/*
   6032 	 * Set the receive filter.
   6033 	 *
   6034 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6035 	 * the setting of RCTL.EN in wm_set_filter()
   6036 	 */
   6037 	wm_set_filter(sc);
   6038 
    6039 	/* On the 82575 and later, set RDT only if RX is enabled */
   6040 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6041 		int qidx;
   6042 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6043 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6044 			for (i = 0; i < WM_NRXDESC; i++) {
   6045 				mutex_enter(rxq->rxq_lock);
   6046 				wm_init_rxdesc(rxq, i);
   6047 				mutex_exit(rxq->rxq_lock);
   6048 
   6049 			}
   6050 		}
   6051 	}
   6052 
   6053 	wm_unset_stopping_flags(sc);
   6054 
   6055 	/* Start the one second link check clock. */
   6056 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6057 
   6058 	/* ...all done! */
   6059 	ifp->if_flags |= IFF_RUNNING;
   6060 	ifp->if_flags &= ~IFF_OACTIVE;
   6061 
   6062  out:
   6063 	sc->sc_if_flags = ifp->if_flags;
   6064 	if (error)
   6065 		log(LOG_ERR, "%s: interface not running\n",
   6066 		    device_xname(sc->sc_dev));
   6067 	return error;
   6068 }
   6069 
   6070 /*
   6071  * wm_stop:		[ifnet interface function]
   6072  *
   6073  *	Stop transmission on the interface.
   6074  */
   6075 static void
   6076 wm_stop(struct ifnet *ifp, int disable)
   6077 {
   6078 	struct wm_softc *sc = ifp->if_softc;
   6079 
   6080 	WM_CORE_LOCK(sc);
   6081 	wm_stop_locked(ifp, disable);
   6082 	WM_CORE_UNLOCK(sc);
   6083 }
   6084 
   6085 static void
   6086 wm_stop_locked(struct ifnet *ifp, int disable)
   6087 {
   6088 	struct wm_softc *sc = ifp->if_softc;
   6089 	struct wm_txsoft *txs;
   6090 	int i, qidx;
   6091 
   6092 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6093 		device_xname(sc->sc_dev), __func__));
   6094 	KASSERT(WM_CORE_LOCKED(sc));
   6095 
   6096 	wm_set_stopping_flags(sc);
   6097 
   6098 	/* Stop the one second clock. */
   6099 	callout_stop(&sc->sc_tick_ch);
   6100 
   6101 	/* Stop the 82547 Tx FIFO stall check timer. */
   6102 	if (sc->sc_type == WM_T_82547)
   6103 		callout_stop(&sc->sc_txfifo_ch);
   6104 
   6105 	if (sc->sc_flags & WM_F_HAS_MII) {
   6106 		/* Down the MII. */
   6107 		mii_down(&sc->sc_mii);
   6108 	} else {
   6109 #if 0
   6110 		/* Should we clear PHY's status properly? */
   6111 		wm_reset(sc);
   6112 #endif
   6113 	}
   6114 
   6115 	/* Stop the transmit and receive processes. */
   6116 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6117 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6118 	sc->sc_rctl &= ~RCTL_EN;
   6119 
   6120 	/*
   6121 	 * Clear the interrupt mask to ensure the device cannot assert its
   6122 	 * interrupt line.
   6123 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6124 	 * service any currently pending or shared interrupt.
   6125 	 */
   6126 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6127 	sc->sc_icr = 0;
   6128 	if (wm_is_using_msix(sc)) {
   6129 		if (sc->sc_type != WM_T_82574) {
   6130 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6131 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6132 		} else
   6133 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6134 	}
   6135 
   6136 	/* Release any queued transmit buffers. */
   6137 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6138 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6139 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6140 		mutex_enter(txq->txq_lock);
   6141 		txq->txq_sending = false; /* ensure watchdog disabled */
   6142 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6143 			txs = &txq->txq_soft[i];
   6144 			if (txs->txs_mbuf != NULL) {
   6145 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6146 				m_freem(txs->txs_mbuf);
   6147 				txs->txs_mbuf = NULL;
   6148 			}
   6149 		}
   6150 		mutex_exit(txq->txq_lock);
   6151 	}
   6152 
   6153 	/* Mark the interface as down and cancel the watchdog timer. */
   6154 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6155 
   6156 	if (disable) {
   6157 		for (i = 0; i < sc->sc_nqueues; i++) {
   6158 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6159 			mutex_enter(rxq->rxq_lock);
   6160 			wm_rxdrain(rxq);
   6161 			mutex_exit(rxq->rxq_lock);
   6162 		}
   6163 	}
   6164 
   6165 #if 0 /* notyet */
   6166 	if (sc->sc_type >= WM_T_82544)
   6167 		CSR_WRITE(sc, WMREG_WUC, 0);
   6168 #endif
   6169 }
   6170 
   6171 static void
   6172 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6173 {
   6174 	struct mbuf *m;
   6175 	int i;
   6176 
   6177 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6178 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6179 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6180 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6181 		    m->m_data, m->m_len, m->m_flags);
   6182 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6183 	    i, i == 1 ? "" : "s");
   6184 }
   6185 
   6186 /*
   6187  * wm_82547_txfifo_stall:
   6188  *
   6189  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6190  *	reset the FIFO pointers, and restart packet transmission.
   6191  */
   6192 static void
   6193 wm_82547_txfifo_stall(void *arg)
   6194 {
   6195 	struct wm_softc *sc = arg;
   6196 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6197 
   6198 	mutex_enter(txq->txq_lock);
   6199 
   6200 	if (txq->txq_stopping)
   6201 		goto out;
   6202 
   6203 	if (txq->txq_fifo_stall) {
   6204 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6205 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6206 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6207 			/*
   6208 			 * Packets have drained.  Stop transmitter, reset
   6209 			 * FIFO pointers, restart transmitter, and kick
   6210 			 * the packet queue.
   6211 			 */
   6212 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6213 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6214 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6215 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6216 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6217 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6218 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6219 			CSR_WRITE_FLUSH(sc);
   6220 
   6221 			txq->txq_fifo_head = 0;
   6222 			txq->txq_fifo_stall = 0;
   6223 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6224 		} else {
   6225 			/*
   6226 			 * Still waiting for packets to drain; try again in
   6227 			 * another tick.
   6228 			 */
   6229 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6230 		}
   6231 	}
   6232 
   6233 out:
   6234 	mutex_exit(txq->txq_lock);
   6235 }
   6236 
   6237 /*
   6238  * wm_82547_txfifo_bugchk:
   6239  *
    6240  *	Check for the bug condition in the 82547 Tx FIFO.  We need to
    6241  *	prevent enqueueing a packet that would wrap around the end
    6242  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6243  *
   6244  *	We do this by checking the amount of space before the end
   6245  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6246  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6247  *	the internal FIFO pointers to the beginning, and restart
   6248  *	transmission on the interface.
   6249  */
   6250 #define	WM_FIFO_HDR		0x10
   6251 #define	WM_82547_PAD_LEN	0x3e0
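         /*
          * Illustrative numbers (not from the datasheet): a 1514-byte
          * frame costs roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536
          * bytes of FIFO space.  If only 512 bytes remain before the
          * end of the FIFO, then 1536 >= WM_82547_PAD_LEN (0x3e0 = 992)
          * + 512 = 1504, so the packet would wrap and we stall instead.
          */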
   6252 static int
   6253 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6254 {
   6255 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6256 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6257 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6258 
   6259 	/* Just return if already stalled. */
   6260 	if (txq->txq_fifo_stall)
   6261 		return 1;
   6262 
   6263 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6264 		/* Stall only occurs in half-duplex mode. */
   6265 		goto send_packet;
   6266 	}
   6267 
   6268 	if (len >= WM_82547_PAD_LEN + space) {
   6269 		txq->txq_fifo_stall = 1;
   6270 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6271 		return 1;
   6272 	}
   6273 
   6274  send_packet:
   6275 	txq->txq_fifo_head += len;
   6276 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6277 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6278 
   6279 	return 0;
   6280 }
   6281 
   6282 static int
   6283 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6284 {
   6285 	int error;
   6286 
   6287 	/*
   6288 	 * Allocate the control data structures, and create and load the
   6289 	 * DMA map for it.
   6290 	 *
   6291 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6292 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6293 	 * both sets within the same 4G segment.
   6294 	 */
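         	/*
         	 * The 4GB constraint is enforced below by passing
         	 * 0x100000000ULL as the boundary argument to
         	 * bus_dmamem_alloc(), so an allocation never crosses
         	 * a 4GB line.
         	 */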
   6295 	if (sc->sc_type < WM_T_82544)
   6296 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6297 	else
   6298 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6299 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6300 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6301 	else
   6302 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6303 
   6304 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6305 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6306 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6307 		aprint_error_dev(sc->sc_dev,
   6308 		    "unable to allocate TX control data, error = %d\n",
   6309 		    error);
   6310 		goto fail_0;
   6311 	}
   6312 
   6313 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6314 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6315 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6316 		aprint_error_dev(sc->sc_dev,
   6317 		    "unable to map TX control data, error = %d\n", error);
   6318 		goto fail_1;
   6319 	}
   6320 
   6321 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6322 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6323 		aprint_error_dev(sc->sc_dev,
   6324 		    "unable to create TX control data DMA map, error = %d\n",
   6325 		    error);
   6326 		goto fail_2;
   6327 	}
   6328 
   6329 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6330 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6331 		aprint_error_dev(sc->sc_dev,
   6332 		    "unable to load TX control data DMA map, error = %d\n",
   6333 		    error);
   6334 		goto fail_3;
   6335 	}
   6336 
   6337 	return 0;
   6338 
   6339  fail_3:
   6340 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6341  fail_2:
   6342 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6343 	    WM_TXDESCS_SIZE(txq));
   6344  fail_1:
   6345 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6346  fail_0:
   6347 	return error;
   6348 }
   6349 
   6350 static void
   6351 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6352 {
   6353 
   6354 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6355 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6356 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6357 	    WM_TXDESCS_SIZE(txq));
   6358 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6359 }
   6360 
   6361 static int
   6362 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6363 {
   6364 	int error;
   6365 	size_t rxq_descs_size;
   6366 
   6367 	/*
   6368 	 * Allocate the control data structures, and create and load the
   6369 	 * DMA map for it.
   6370 	 *
   6371 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6372 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6373 	 * both sets within the same 4G segment.
   6374 	 */
   6375 	rxq->rxq_ndesc = WM_NRXDESC;
   6376 	if (sc->sc_type == WM_T_82574)
   6377 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6378 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6379 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6380 	else
   6381 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6382 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6383 
   6384 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6385 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6386 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6387 		aprint_error_dev(sc->sc_dev,
   6388 		    "unable to allocate RX control data, error = %d\n",
   6389 		    error);
   6390 		goto fail_0;
   6391 	}
   6392 
   6393 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6394 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6395 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6396 		aprint_error_dev(sc->sc_dev,
   6397 		    "unable to map RX control data, error = %d\n", error);
   6398 		goto fail_1;
   6399 	}
   6400 
   6401 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6402 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6403 		aprint_error_dev(sc->sc_dev,
   6404 		    "unable to create RX control data DMA map, error = %d\n",
   6405 		    error);
   6406 		goto fail_2;
   6407 	}
   6408 
   6409 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6410 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6411 		aprint_error_dev(sc->sc_dev,
   6412 		    "unable to load RX control data DMA map, error = %d\n",
   6413 		    error);
   6414 		goto fail_3;
   6415 	}
   6416 
   6417 	return 0;
   6418 
   6419  fail_3:
   6420 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6421  fail_2:
   6422 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6423 	    rxq_descs_size);
   6424  fail_1:
   6425 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6426  fail_0:
   6427 	return error;
   6428 }
   6429 
   6430 static void
   6431 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6432 {
   6433 
   6434 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6435 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6436 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6437 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6438 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6439 }
   6440 
   6441 
   6442 static int
   6443 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6444 {
   6445 	int i, error;
   6446 
   6447 	/* Create the transmit buffer DMA maps. */
   6448 	WM_TXQUEUELEN(txq) =
   6449 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6450 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6451 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6452 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6453 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6454 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6455 			aprint_error_dev(sc->sc_dev,
   6456 			    "unable to create Tx DMA map %d, error = %d\n",
   6457 			    i, error);
   6458 			goto fail;
   6459 		}
   6460 	}
   6461 
   6462 	return 0;
   6463 
   6464  fail:
   6465 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6466 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6467 			bus_dmamap_destroy(sc->sc_dmat,
   6468 			    txq->txq_soft[i].txs_dmamap);
   6469 	}
   6470 	return error;
   6471 }
   6472 
   6473 static void
   6474 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6475 {
   6476 	int i;
   6477 
   6478 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6479 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6480 			bus_dmamap_destroy(sc->sc_dmat,
   6481 			    txq->txq_soft[i].txs_dmamap);
   6482 	}
   6483 }
   6484 
   6485 static int
   6486 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6487 {
   6488 	int i, error;
   6489 
   6490 	/* Create the receive buffer DMA maps. */
   6491 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6492 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6493 			    MCLBYTES, 0, 0,
   6494 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6495 			aprint_error_dev(sc->sc_dev,
   6496 			    "unable to create Rx DMA map %d error = %d\n",
   6497 			    i, error);
   6498 			goto fail;
   6499 		}
   6500 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6501 	}
   6502 
   6503 	return 0;
   6504 
   6505  fail:
   6506 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6507 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6508 			bus_dmamap_destroy(sc->sc_dmat,
   6509 			    rxq->rxq_soft[i].rxs_dmamap);
   6510 	}
   6511 	return error;
   6512 }
   6513 
   6514 static void
   6515 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6516 {
   6517 	int i;
   6518 
   6519 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6520 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6521 			bus_dmamap_destroy(sc->sc_dmat,
   6522 			    rxq->rxq_soft[i].rxs_dmamap);
   6523 	}
   6524 }
   6525 
   6526 /*
    6527  * wm_alloc_txrx_queues:
   6528  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6529  */
   6530 static int
   6531 wm_alloc_txrx_queues(struct wm_softc *sc)
   6532 {
   6533 	int i, error, tx_done, rx_done;
   6534 
   6535 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6536 	    KM_SLEEP);
   6537 	if (sc->sc_queue == NULL) {
    6538 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6539 		error = ENOMEM;
   6540 		goto fail_0;
   6541 	}
   6542 
   6543 	/*
   6544 	 * For transmission
   6545 	 */
   6546 	error = 0;
   6547 	tx_done = 0;
   6548 	for (i = 0; i < sc->sc_nqueues; i++) {
   6549 #ifdef WM_EVENT_COUNTERS
   6550 		int j;
   6551 		const char *xname;
   6552 #endif
   6553 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6554 		txq->txq_sc = sc;
   6555 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6556 
   6557 		error = wm_alloc_tx_descs(sc, txq);
   6558 		if (error)
   6559 			break;
   6560 		error = wm_alloc_tx_buffer(sc, txq);
   6561 		if (error) {
   6562 			wm_free_tx_descs(sc, txq);
   6563 			break;
   6564 		}
   6565 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6566 		if (txq->txq_interq == NULL) {
   6567 			wm_free_tx_descs(sc, txq);
   6568 			wm_free_tx_buffer(sc, txq);
   6569 			error = ENOMEM;
   6570 			break;
   6571 		}
   6572 
   6573 #ifdef WM_EVENT_COUNTERS
   6574 		xname = device_xname(sc->sc_dev);
   6575 
   6576 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6577 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6578 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6579 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6580 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6581 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6582 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6583 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6584 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6585 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6586 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6587 
   6588 		for (j = 0; j < WM_NTXSEGS; j++) {
   6589 			snprintf(txq->txq_txseg_evcnt_names[j],
   6590 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6591 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6592 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6593 		}
   6594 
   6595 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6596 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6597 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6598 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6599 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6600 #endif /* WM_EVENT_COUNTERS */
   6601 
   6602 		tx_done++;
   6603 	}
   6604 	if (error)
   6605 		goto fail_1;
   6606 
   6607 	/*
    6608 	 * For receive
   6609 	 */
   6610 	error = 0;
   6611 	rx_done = 0;
   6612 	for (i = 0; i < sc->sc_nqueues; i++) {
   6613 #ifdef WM_EVENT_COUNTERS
   6614 		const char *xname;
   6615 #endif
   6616 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6617 		rxq->rxq_sc = sc;
   6618 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6619 
   6620 		error = wm_alloc_rx_descs(sc, rxq);
   6621 		if (error)
   6622 			break;
   6623 
   6624 		error = wm_alloc_rx_buffer(sc, rxq);
   6625 		if (error) {
   6626 			wm_free_rx_descs(sc, rxq);
   6627 			break;
   6628 		}
   6629 
   6630 #ifdef WM_EVENT_COUNTERS
   6631 		xname = device_xname(sc->sc_dev);
   6632 
   6633 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6634 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6635 
   6636 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6637 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6638 #endif /* WM_EVENT_COUNTERS */
   6639 
   6640 		rx_done++;
   6641 	}
   6642 	if (error)
   6643 		goto fail_2;
   6644 
   6645 	return 0;
   6646 
   6647  fail_2:
   6648 	for (i = 0; i < rx_done; i++) {
   6649 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6650 		wm_free_rx_buffer(sc, rxq);
   6651 		wm_free_rx_descs(sc, rxq);
   6652 		if (rxq->rxq_lock)
   6653 			mutex_obj_free(rxq->rxq_lock);
   6654 	}
   6655  fail_1:
   6656 	for (i = 0; i < tx_done; i++) {
   6657 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6658 		pcq_destroy(txq->txq_interq);
   6659 		wm_free_tx_buffer(sc, txq);
   6660 		wm_free_tx_descs(sc, txq);
   6661 		if (txq->txq_lock)
   6662 			mutex_obj_free(txq->txq_lock);
   6663 	}
   6664 
   6665 	kmem_free(sc->sc_queue,
   6666 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6667  fail_0:
   6668 	return error;
   6669 }
   6670 
   6671 /*
    6672  * wm_free_txrx_queues:
   6673  *	Free {tx,rx}descs and {tx,rx} buffers
   6674  */
   6675 static void
   6676 wm_free_txrx_queues(struct wm_softc *sc)
   6677 {
   6678 	int i;
   6679 
   6680 	for (i = 0; i < sc->sc_nqueues; i++) {
   6681 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6682 
   6683 #ifdef WM_EVENT_COUNTERS
   6684 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6685 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6686 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6687 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6688 #endif /* WM_EVENT_COUNTERS */
   6689 
   6690 		wm_free_rx_buffer(sc, rxq);
   6691 		wm_free_rx_descs(sc, rxq);
   6692 		if (rxq->rxq_lock)
   6693 			mutex_obj_free(rxq->rxq_lock);
   6694 	}
   6695 
   6696 	for (i = 0; i < sc->sc_nqueues; i++) {
   6697 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6698 		struct mbuf *m;
   6699 #ifdef WM_EVENT_COUNTERS
   6700 		int j;
   6701 
   6702 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6703 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6704 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6705 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6706 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6707 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6708 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6709 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6710 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6711 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6712 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6713 
   6714 		for (j = 0; j < WM_NTXSEGS; j++)
   6715 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6716 
   6717 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6718 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6719 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6720 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6721 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6722 #endif /* WM_EVENT_COUNTERS */
   6723 
   6724 		/* drain txq_interq */
   6725 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6726 			m_freem(m);
   6727 		pcq_destroy(txq->txq_interq);
   6728 
   6729 		wm_free_tx_buffer(sc, txq);
   6730 		wm_free_tx_descs(sc, txq);
   6731 		if (txq->txq_lock)
   6732 			mutex_obj_free(txq->txq_lock);
   6733 	}
   6734 
   6735 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6736 }
   6737 
   6738 static void
   6739 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6740 {
   6741 
   6742 	KASSERT(mutex_owned(txq->txq_lock));
   6743 
   6744 	/* Initialize the transmit descriptor ring. */
   6745 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6746 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6747 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6748 	txq->txq_free = WM_NTXDESC(txq);
   6749 	txq->txq_next = 0;
   6750 }
   6751 
   6752 static void
   6753 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6754     struct wm_txqueue *txq)
   6755 {
   6756 
   6757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6758 		device_xname(sc->sc_dev), __func__));
   6759 	KASSERT(mutex_owned(txq->txq_lock));
   6760 
   6761 	if (sc->sc_type < WM_T_82543) {
   6762 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6763 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6764 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6765 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6766 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6767 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6768 	} else {
   6769 		int qid = wmq->wmq_id;
   6770 
   6771 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6772 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6773 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6774 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6775 
   6776 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6777 			/*
   6778 			 * Don't write TDT before TCTL.EN is set.
    6779 			 * See the datasheet.
   6780 			 */
   6781 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6782 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6783 			    | TXDCTL_WTHRESH(0));
   6784 		else {
   6785 			/* XXX should update with AIM? */
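         			/*
         			 * Note (assumed unit conversion): wmq_itr is
         			 * kept in ITR ticks (256 ns each), while
         			 * TIDV/TADV count in 1.024 us units, hence
         			 * the division by 4 below.
         			 */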
   6786 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6787 			if (sc->sc_type >= WM_T_82540) {
    6788 				/* Should be the same as TIDV */
   6789 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6790 			}
   6791 
   6792 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6793 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6794 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6795 		}
   6796 	}
   6797 }
   6798 
   6799 static void
   6800 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6801 {
   6802 	int i;
   6803 
   6804 	KASSERT(mutex_owned(txq->txq_lock));
   6805 
   6806 	/* Initialize the transmit job descriptors. */
   6807 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6808 		txq->txq_soft[i].txs_mbuf = NULL;
   6809 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6810 	txq->txq_snext = 0;
   6811 	txq->txq_sdirty = 0;
   6812 }
   6813 
   6814 static void
   6815 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6816     struct wm_txqueue *txq)
   6817 {
   6818 
   6819 	KASSERT(mutex_owned(txq->txq_lock));
   6820 
   6821 	/*
   6822 	 * Set up some register offsets that are different between
   6823 	 * the i82542 and the i82543 and later chips.
   6824 	 */
   6825 	if (sc->sc_type < WM_T_82543)
   6826 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6827 	else
   6828 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6829 
   6830 	wm_init_tx_descs(sc, txq);
   6831 	wm_init_tx_regs(sc, wmq, txq);
   6832 	wm_init_tx_buffer(sc, txq);
   6833 
   6834 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6835 	txq->txq_sending = false;
   6836 }
   6837 
   6838 static void
   6839 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6840     struct wm_rxqueue *rxq)
   6841 {
   6842 
   6843 	KASSERT(mutex_owned(rxq->rxq_lock));
   6844 
   6845 	/*
   6846 	 * Initialize the receive descriptor and receive job
   6847 	 * descriptor rings.
   6848 	 */
   6849 	if (sc->sc_type < WM_T_82543) {
   6850 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6851 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6852 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6853 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6854 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6855 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6856 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6857 
   6858 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6859 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6860 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6861 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6862 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6863 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6864 	} else {
   6865 		int qid = wmq->wmq_id;
   6866 
   6867 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6868 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6869 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6870 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6871 
   6872 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6873 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6874 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6875 
    6876 			/* Only SRRCTL_DESCTYPE_ADV_ONEBUF is supported for now. */
   6877 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6878 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
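         			/*
         			 * E.g. with the common MCLBYTES of 2048 and a
         			 * 1KB BSIZEPKT granularity (assumed), the value
         			 * written above is 2048 >> 10 = 2, i.e. 2KB Rx
         			 * buffers.
         			 */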
   6879 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6880 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6881 			    | RXDCTL_WTHRESH(1));
   6882 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6883 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6884 		} else {
   6885 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6886 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6887 			/* XXX should update with AIM? */
   6888 			CSR_WRITE(sc, WMREG_RDTR,
   6889 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6890 			/* MUST be the same as RDTR */
   6891 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6892 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6893 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6894 		}
   6895 	}
   6896 }
   6897 
   6898 static int
   6899 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6900 {
   6901 	struct wm_rxsoft *rxs;
   6902 	int error, i;
   6903 
   6904 	KASSERT(mutex_owned(rxq->rxq_lock));
   6905 
   6906 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6907 		rxs = &rxq->rxq_soft[i];
   6908 		if (rxs->rxs_mbuf == NULL) {
   6909 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6910 				log(LOG_ERR, "%s: unable to allocate or map "
   6911 				    "rx buffer %d, error = %d\n",
   6912 				    device_xname(sc->sc_dev), i, error);
   6913 				/*
   6914 				 * XXX Should attempt to run with fewer receive
   6915 				 * XXX buffers instead of just failing.
   6916 				 */
   6917 				wm_rxdrain(rxq);
   6918 				return ENOMEM;
   6919 			}
   6920 		} else {
   6921 			/*
   6922 			 * For 82575 and 82576, the RX descriptors must be
   6923 			 * initialized after the setting of RCTL.EN in
   6924 			 * wm_set_filter()
    6925 			 * wm_set_filter().
   6926 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6927 				wm_init_rxdesc(rxq, i);
   6928 		}
   6929 	}
   6930 	rxq->rxq_ptr = 0;
   6931 	rxq->rxq_discard = 0;
   6932 	WM_RXCHAIN_RESET(rxq);
   6933 
   6934 	return 0;
   6935 }
   6936 
   6937 static int
   6938 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6939     struct wm_rxqueue *rxq)
   6940 {
   6941 
   6942 	KASSERT(mutex_owned(rxq->rxq_lock));
   6943 
   6944 	/*
   6945 	 * Set up some register offsets that are different between
   6946 	 * the i82542 and the i82543 and later chips.
   6947 	 */
   6948 	if (sc->sc_type < WM_T_82543)
   6949 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6950 	else
   6951 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6952 
   6953 	wm_init_rx_regs(sc, wmq, rxq);
   6954 	return wm_init_rx_buffer(sc, rxq);
   6955 }
   6956 
   6957 /*
    6958  * wm_init_txrx_queues:
   6959  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6960  */
   6961 static int
   6962 wm_init_txrx_queues(struct wm_softc *sc)
   6963 {
   6964 	int i, error = 0;
   6965 
   6966 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6967 		device_xname(sc->sc_dev), __func__));
   6968 
   6969 	for (i = 0; i < sc->sc_nqueues; i++) {
   6970 		struct wm_queue *wmq = &sc->sc_queue[i];
   6971 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6972 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6973 
   6974 		/*
   6975 		 * TODO
    6976 		 * Currently, a constant value is used instead of AIM.
    6977 		 * Furthermore, the interrupt interval for multiqueue (which
    6978 		 * uses polling mode) is lower than the default value.
   6979 		 * More tuning and AIM are required.
   6980 		 */
   6981 		if (wm_is_using_multiqueue(sc))
   6982 			wmq->wmq_itr = 50;
   6983 		else
   6984 			wmq->wmq_itr = sc->sc_itr_init;
   6985 		wmq->wmq_set_itr = true;
   6986 
   6987 		mutex_enter(txq->txq_lock);
   6988 		wm_init_tx_queue(sc, wmq, txq);
   6989 		mutex_exit(txq->txq_lock);
   6990 
   6991 		mutex_enter(rxq->rxq_lock);
   6992 		error = wm_init_rx_queue(sc, wmq, rxq);
   6993 		mutex_exit(rxq->rxq_lock);
   6994 		if (error)
   6995 			break;
   6996 	}
   6997 
   6998 	return error;
   6999 }
   7000 
   7001 /*
   7002  * wm_tx_offload:
   7003  *
   7004  *	Set up TCP/IP checksumming parameters for the
   7005  *	specified packet.
   7006  */
   7007 static int
   7008 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7009     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7010 {
   7011 	struct mbuf *m0 = txs->txs_mbuf;
   7012 	struct livengood_tcpip_ctxdesc *t;
   7013 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7014 	uint32_t ipcse;
   7015 	struct ether_header *eh;
   7016 	int offset, iphl;
   7017 	uint8_t fields;
   7018 
   7019 	/*
   7020 	 * XXX It would be nice if the mbuf pkthdr had offset
   7021 	 * fields for the protocol headers.
   7022 	 */
   7023 
   7024 	eh = mtod(m0, struct ether_header *);
   7025 	switch (htons(eh->ether_type)) {
   7026 	case ETHERTYPE_IP:
   7027 	case ETHERTYPE_IPV6:
   7028 		offset = ETHER_HDR_LEN;
   7029 		break;
   7030 
   7031 	case ETHERTYPE_VLAN:
   7032 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7033 		break;
   7034 
   7035 	default:
   7036 		/*
   7037 		 * Don't support this protocol or encapsulation.
   7038 		 */
   7039 		*fieldsp = 0;
   7040 		*cmdp = 0;
   7041 		return 0;
   7042 	}
   7043 
   7044 	if ((m0->m_pkthdr.csum_flags &
   7045 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7046 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7047 	} else
   7048 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7049 
   7050 	ipcse = offset + iphl - 1;
   7051 
   7052 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7053 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7054 	seg = 0;
   7055 	fields = 0;
   7056 
   7057 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7058 		int hlen = offset + iphl;
   7059 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7060 
   7061 		if (__predict_false(m0->m_len <
   7062 				    (hlen + sizeof(struct tcphdr)))) {
   7063 			/*
   7064 			 * TCP/IP headers are not in the first mbuf; we need
   7065 			 * to do this the slow and painful way. Let's just
   7066 			 * hope this doesn't happen very often.
   7067 			 */
   7068 			struct tcphdr th;
   7069 
   7070 			WM_Q_EVCNT_INCR(txq, tsopain);
   7071 
   7072 			m_copydata(m0, hlen, sizeof(th), &th);
   7073 			if (v4) {
   7074 				struct ip ip;
   7075 
   7076 				m_copydata(m0, offset, sizeof(ip), &ip);
   7077 				ip.ip_len = 0;
   7078 				m_copyback(m0,
   7079 				    offset + offsetof(struct ip, ip_len),
   7080 				    sizeof(ip.ip_len), &ip.ip_len);
   7081 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7082 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7083 			} else {
   7084 				struct ip6_hdr ip6;
   7085 
   7086 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7087 				ip6.ip6_plen = 0;
   7088 				m_copyback(m0,
   7089 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7090 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7091 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7092 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7093 			}
   7094 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7095 			    sizeof(th.th_sum), &th.th_sum);
   7096 
   7097 			hlen += th.th_off << 2;
   7098 		} else {
   7099 			/*
   7100 			 * TCP/IP headers are in the first mbuf; we can do
   7101 			 * this the easy way.
   7102 			 */
   7103 			struct tcphdr *th;
   7104 
   7105 			if (v4) {
   7106 				struct ip *ip =
   7107 				    (void *)(mtod(m0, char *) + offset);
   7108 				th = (void *)(mtod(m0, char *) + hlen);
   7109 
   7110 				ip->ip_len = 0;
   7111 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7112 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7113 			} else {
   7114 				struct ip6_hdr *ip6 =
   7115 				    (void *)(mtod(m0, char *) + offset);
   7116 				th = (void *)(mtod(m0, char *) + hlen);
   7117 
   7118 				ip6->ip6_plen = 0;
   7119 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7120 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7121 			}
   7122 			hlen += th->th_off << 2;
   7123 		}
   7124 
   7125 		if (v4) {
   7126 			WM_Q_EVCNT_INCR(txq, tso);
   7127 			cmdlen |= WTX_TCPIP_CMD_IP;
   7128 		} else {
   7129 			WM_Q_EVCNT_INCR(txq, tso6);
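         			/*
         			 * IPv6 has no header checksum, so clear the
         			 * IP checksum ending offset (IPCSE is expected
         			 * to be 0 for IPv6 TSO).
         			 */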
   7130 			ipcse = 0;
   7131 		}
   7132 		cmd |= WTX_TCPIP_CMD_TSE;
   7133 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7134 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7135 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7136 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7137 	}
   7138 
   7139 	/*
   7140 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7141 	 * offload feature, if we load the context descriptor, we
   7142 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7143 	 */
   7144 
   7145 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7146 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7147 	    WTX_TCPIP_IPCSE(ipcse);
   7148 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7149 		WM_Q_EVCNT_INCR(txq, ipsum);
   7150 		fields |= WTX_IXSM;
   7151 	}
   7152 
   7153 	offset += iphl;
   7154 
   7155 	if (m0->m_pkthdr.csum_flags &
   7156 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7157 		WM_Q_EVCNT_INCR(txq, tusum);
   7158 		fields |= WTX_TXSM;
   7159 		tucs = WTX_TCPIP_TUCSS(offset) |
   7160 		    WTX_TCPIP_TUCSO(offset +
   7161 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7162 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7163 	} else if ((m0->m_pkthdr.csum_flags &
   7164 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7165 		WM_Q_EVCNT_INCR(txq, tusum6);
   7166 		fields |= WTX_TXSM;
   7167 		tucs = WTX_TCPIP_TUCSS(offset) |
   7168 		    WTX_TCPIP_TUCSO(offset +
   7169 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7170 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7171 	} else {
   7172 		/* Just initialize it to a valid TCP context. */
   7173 		tucs = WTX_TCPIP_TUCSS(offset) |
   7174 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7175 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7176 	}
   7177 
   7178 	/*
    7179 	 * We don't have to write a context descriptor for every packet,
    7180 	 * except on the 82574: there, a context descriptor must be written
    7181 	 * for every packet when two descriptor queues are in use.
    7182 	 * Writing a context descriptor for every packet adds overhead,
    7183 	 * but it does not cause problems.
   7184 	 */
   7185 	/* Fill in the context descriptor. */
   7186 	t = (struct livengood_tcpip_ctxdesc *)
   7187 	    &txq->txq_descs[txq->txq_next];
   7188 	t->tcpip_ipcs = htole32(ipcs);
   7189 	t->tcpip_tucs = htole32(tucs);
   7190 	t->tcpip_cmdlen = htole32(cmdlen);
   7191 	t->tcpip_seg = htole32(seg);
   7192 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7193 
   7194 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7195 	txs->txs_ndesc++;
   7196 
   7197 	*cmdp = cmd;
   7198 	*fieldsp = fields;
   7199 
   7200 	return 0;
   7201 }
   7202 
   7203 static inline int
   7204 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7205 {
   7206 	struct wm_softc *sc = ifp->if_softc;
   7207 	u_int cpuid = cpu_index(curcpu());
   7208 
   7209 	/*
    7210 	 * Currently, a simple distribution strategy.
    7211 	 * TODO:
    7212 	 * Distribute by flowid (the RSS hash value).
   7213 	 */
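         	/*
         	 * Example with illustrative values: ncpu = 4,
         	 * sc_affinity_offset = 1, sc_nqueues = 2; CPU 0 maps to
         	 * queue (0 + 4 - 1) % 2 = 1 and CPU 1 to queue 0.  The
         	 * "+ ncpu" term just keeps the dividend non-negative.
         	 */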
   7214 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7215 }
   7216 
   7217 /*
   7218  * wm_start:		[ifnet interface function]
   7219  *
   7220  *	Start packet transmission on the interface.
   7221  */
   7222 static void
   7223 wm_start(struct ifnet *ifp)
   7224 {
   7225 	struct wm_softc *sc = ifp->if_softc;
   7226 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7227 
   7228 #ifdef WM_MPSAFE
   7229 	KASSERT(if_is_mpsafe(ifp));
   7230 #endif
   7231 	/*
   7232 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7233 	 */
   7234 
   7235 	mutex_enter(txq->txq_lock);
   7236 	if (!txq->txq_stopping)
   7237 		wm_start_locked(ifp);
   7238 	mutex_exit(txq->txq_lock);
   7239 }
   7240 
   7241 static void
   7242 wm_start_locked(struct ifnet *ifp)
   7243 {
   7244 	struct wm_softc *sc = ifp->if_softc;
   7245 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7246 
   7247 	wm_send_common_locked(ifp, txq, false);
   7248 }
   7249 
   7250 static int
   7251 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7252 {
   7253 	int qid;
   7254 	struct wm_softc *sc = ifp->if_softc;
   7255 	struct wm_txqueue *txq;
   7256 
   7257 	qid = wm_select_txqueue(ifp, m);
   7258 	txq = &sc->sc_queue[qid].wmq_txq;
   7259 
   7260 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7261 		m_freem(m);
   7262 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7263 		return ENOBUFS;
   7264 	}
   7265 
   7266 	/*
   7267 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7268 	 */
   7269 	ifp->if_obytes += m->m_pkthdr.len;
   7270 	if (m->m_flags & M_MCAST)
   7271 		ifp->if_omcasts++;
   7272 
   7273 	if (mutex_tryenter(txq->txq_lock)) {
   7274 		if (!txq->txq_stopping)
   7275 			wm_transmit_locked(ifp, txq);
   7276 		mutex_exit(txq->txq_lock);
   7277 	}
   7278 
   7279 	return 0;
   7280 }
   7281 
   7282 static void
   7283 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7284 {
   7285 
   7286 	wm_send_common_locked(ifp, txq, true);
   7287 }
   7288 
   7289 static void
   7290 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7291     bool is_transmit)
   7292 {
   7293 	struct wm_softc *sc = ifp->if_softc;
   7294 	struct mbuf *m0;
   7295 	struct wm_txsoft *txs;
   7296 	bus_dmamap_t dmamap;
   7297 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7298 	bus_addr_t curaddr;
   7299 	bus_size_t seglen, curlen;
   7300 	uint32_t cksumcmd;
   7301 	uint8_t cksumfields;
   7302 	bool remap = true;
   7303 
   7304 	KASSERT(mutex_owned(txq->txq_lock));
   7305 
   7306 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7307 		return;
   7308 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7309 		return;
   7310 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7311 		return;
   7312 
   7313 	/* Remember the previous number of free descriptors. */
   7314 	ofree = txq->txq_free;
   7315 
   7316 	/*
   7317 	 * Loop through the send queue, setting up transmit descriptors
   7318 	 * until we drain the queue, or use up all available transmit
   7319 	 * descriptors.
   7320 	 */
   7321 	for (;;) {
   7322 		m0 = NULL;
   7323 
   7324 		/* Get a work queue entry. */
   7325 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7326 			wm_txeof(txq, UINT_MAX);
   7327 			if (txq->txq_sfree == 0) {
   7328 				DPRINTF(WM_DEBUG_TX,
   7329 				    ("%s: TX: no free job descriptors\n",
   7330 					device_xname(sc->sc_dev)));
   7331 				WM_Q_EVCNT_INCR(txq, txsstall);
   7332 				break;
   7333 			}
   7334 		}
   7335 
   7336 		/* Grab a packet off the queue. */
   7337 		if (is_transmit)
   7338 			m0 = pcq_get(txq->txq_interq);
   7339 		else
   7340 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7341 		if (m0 == NULL)
   7342 			break;
   7343 
   7344 		DPRINTF(WM_DEBUG_TX,
   7345 		    ("%s: TX: have packet to transmit: %p\n",
   7346 			device_xname(sc->sc_dev), m0));
   7347 
   7348 		txs = &txq->txq_soft[txq->txq_snext];
   7349 		dmamap = txs->txs_dmamap;
   7350 
   7351 		use_tso = (m0->m_pkthdr.csum_flags &
   7352 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7353 
   7354 		/*
   7355 		 * So says the Linux driver:
   7356 		 * The controller does a simple calculation to make sure
   7357 		 * there is enough room in the FIFO before initiating the
   7358 		 * DMA for each buffer. The calc is:
   7359 		 *	4 = ceil(buffer len / MSS)
   7360 		 * To make sure we don't overrun the FIFO, adjust the max
   7361 		 * buffer len if the MSS drops.
   7362 		 */
   7363 		dmamap->dm_maxsegsz =
   7364 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7365 		    ? m0->m_pkthdr.segsz << 2
   7366 		    : WTX_MAX_LEN;
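         		/*
         		 * E.g. (illustrative): an MSS of 1460 bytes clamps
         		 * dm_maxsegsz to 1460 << 2 = 5840 bytes, i.e. at
         		 * most four MSS-sized payloads per buffer, matching
         		 * the FIFO calculation quoted above.
         		 */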
   7367 
   7368 		/*
   7369 		 * Load the DMA map.  If this fails, the packet either
   7370 		 * didn't fit in the allotted number of segments, or we
   7371 		 * were short on resources.  For the too-many-segments
   7372 		 * case, we simply report an error and drop the packet,
   7373 		 * since we can't sanely copy a jumbo packet to a single
   7374 		 * buffer.
   7375 		 */
   7376 retry:
   7377 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7378 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7379 		if (__predict_false(error)) {
   7380 			if (error == EFBIG) {
   7381 				if (remap == true) {
   7382 					struct mbuf *m;
   7383 
   7384 					remap = false;
   7385 					m = m_defrag(m0, M_NOWAIT);
   7386 					if (m != NULL) {
   7387 						WM_Q_EVCNT_INCR(txq, defrag);
   7388 						m0 = m;
   7389 						goto retry;
   7390 					}
   7391 				}
   7392 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7393 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7394 				    "DMA segments, dropping...\n",
   7395 				    device_xname(sc->sc_dev));
   7396 				wm_dump_mbuf_chain(sc, m0);
   7397 				m_freem(m0);
   7398 				continue;
   7399 			}
   7400 			/*  Short on resources, just stop for now. */
   7401 			DPRINTF(WM_DEBUG_TX,
   7402 			    ("%s: TX: dmamap load failed: %d\n",
   7403 				device_xname(sc->sc_dev), error));
   7404 			break;
   7405 		}
   7406 
   7407 		segs_needed = dmamap->dm_nsegs;
   7408 		if (use_tso) {
   7409 			/* For sentinel descriptor; see below. */
   7410 			segs_needed++;
   7411 		}
   7412 
   7413 		/*
   7414 		 * Ensure we have enough descriptors free to describe
   7415 		 * the packet. Note, we always reserve one descriptor
   7416 		 * at the end of the ring due to the semantics of the
   7417 		 * TDT register, plus one more in the event we need
   7418 		 * to load offload context.
   7419 		 */
   7420 		if (segs_needed > txq->txq_free - 2) {
   7421 			/*
   7422 			 * Not enough free descriptors to transmit this
   7423 			 * packet.  We haven't committed anything yet,
   7424 			 * so just unload the DMA map, put the packet
    7425 			 * so just unload the DMA map, drop the packet
    7426 			 * (it is freed after the loop), and punt. Notify the upper
   7427 			 */
   7428 			DPRINTF(WM_DEBUG_TX,
   7429 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7430 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7431 				segs_needed, txq->txq_free - 1));
   7432 			if (!is_transmit)
   7433 				ifp->if_flags |= IFF_OACTIVE;
   7434 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7435 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7436 			WM_Q_EVCNT_INCR(txq, txdstall);
   7437 			break;
   7438 		}
   7439 
   7440 		/*
   7441 		 * Check for 82547 Tx FIFO bug. We need to do this
   7442 		 * once we know we can transmit the packet, since we
   7443 		 * do some internal FIFO space accounting here.
   7444 		 */
   7445 		if (sc->sc_type == WM_T_82547 &&
   7446 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7447 			DPRINTF(WM_DEBUG_TX,
   7448 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7449 				device_xname(sc->sc_dev)));
   7450 			if (!is_transmit)
   7451 				ifp->if_flags |= IFF_OACTIVE;
   7452 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7453 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7454 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7455 			break;
   7456 		}
   7457 
   7458 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7459 
   7460 		DPRINTF(WM_DEBUG_TX,
   7461 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7462 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7463 
   7464 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7465 
   7466 		/*
   7467 		 * Store a pointer to the packet so that we can free it
   7468 		 * later.
   7469 		 *
    7470 		 * Initially, we take the number of descriptors the
    7471 		 * packet uses to be the number of DMA segments.  This may be
   7472 		 * incremented by 1 if we do checksum offload (a descriptor
   7473 		 * is used to set the checksum context).
   7474 		 */
   7475 		txs->txs_mbuf = m0;
   7476 		txs->txs_firstdesc = txq->txq_next;
   7477 		txs->txs_ndesc = segs_needed;
   7478 
   7479 		/* Set up offload parameters for this packet. */
   7480 		if (m0->m_pkthdr.csum_flags &
   7481 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7482 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7483 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7484 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7485 					  &cksumfields) != 0) {
   7486 				/* Error message already displayed. */
   7487 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7488 				continue;
   7489 			}
   7490 		} else {
   7491 			cksumcmd = 0;
   7492 			cksumfields = 0;
   7493 		}
   7494 
   7495 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7496 
   7497 		/* Sync the DMA map. */
   7498 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7499 		    BUS_DMASYNC_PREWRITE);
   7500 
   7501 		/* Initialize the transmit descriptor. */
   7502 		for (nexttx = txq->txq_next, seg = 0;
   7503 		     seg < dmamap->dm_nsegs; seg++) {
   7504 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7505 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7506 			     seglen != 0;
   7507 			     curaddr += curlen, seglen -= curlen,
   7508 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7509 				curlen = seglen;
   7510 
   7511 				/*
   7512 				 * So says the Linux driver:
   7513 				 * Work around for premature descriptor
   7514 				 * write-backs in TSO mode.  Append a
   7515 				 * 4-byte sentinel descriptor.
   7516 				 */
   7517 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7518 				    curlen > 8)
   7519 					curlen -= 4;
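         				/*
         				 * Shortening the final segment by 4
         				 * bytes leaves a 4-byte remainder that
         				 * the enclosing loop emits as one more
         				 * descriptor: the sentinel.  segs_needed
         				 * was bumped above to account for it.
         				 */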
   7520 
   7521 				wm_set_dma_addr(
   7522 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7523 				txq->txq_descs[nexttx].wtx_cmdlen
   7524 				    = htole32(cksumcmd | curlen);
   7525 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7526 				    = 0;
   7527 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7528 				    = cksumfields;
    7529 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7530 				lasttx = nexttx;
   7531 
   7532 				DPRINTF(WM_DEBUG_TX,
   7533 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7534 					"len %#04zx\n",
   7535 					device_xname(sc->sc_dev), nexttx,
   7536 					(uint64_t)curaddr, curlen));
   7537 			}
   7538 		}
   7539 
   7540 		KASSERT(lasttx != -1);
   7541 
   7542 		/*
   7543 		 * Set up the command byte on the last descriptor of
   7544 		 * the packet. If we're in the interrupt delay window,
   7545 		 * delay the interrupt.
   7546 		 */
   7547 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7548 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7549 
   7550 		/*
   7551 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7552 		 * up the descriptor to encapsulate the packet for us.
   7553 		 *
   7554 		 * This is only valid on the last descriptor of the packet.
   7555 		 */
   7556 		if (vlan_has_tag(m0)) {
   7557 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7558 			    htole32(WTX_CMD_VLE);
   7559 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7560 			    = htole16(vlan_get_tag(m0));
   7561 		}
   7562 
   7563 		txs->txs_lastdesc = lasttx;
   7564 
   7565 		DPRINTF(WM_DEBUG_TX,
   7566 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7567 			device_xname(sc->sc_dev),
   7568 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7569 
   7570 		/* Sync the descriptors we're using. */
   7571 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7572 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7573 
   7574 		/* Give the packet to the chip. */
   7575 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7576 
   7577 		DPRINTF(WM_DEBUG_TX,
   7578 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7579 
   7580 		DPRINTF(WM_DEBUG_TX,
   7581 		    ("%s: TX: finished transmitting packet, job %d\n",
   7582 			device_xname(sc->sc_dev), txq->txq_snext));
   7583 
   7584 		/* Advance the tx pointer. */
   7585 		txq->txq_free -= txs->txs_ndesc;
   7586 		txq->txq_next = nexttx;
   7587 
   7588 		txq->txq_sfree--;
   7589 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7590 
   7591 		/* Pass the packet to any BPF listeners. */
   7592 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7593 	}
   7594 
   7595 	if (m0 != NULL) {
   7596 		if (!is_transmit)
   7597 			ifp->if_flags |= IFF_OACTIVE;
   7598 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7599 		WM_Q_EVCNT_INCR(txq, descdrop);
   7600 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7601 			__func__));
   7602 		m_freem(m0);
   7603 	}
   7604 
   7605 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7606 		/* No more slots; notify upper layer. */
   7607 		if (!is_transmit)
   7608 			ifp->if_flags |= IFF_OACTIVE;
   7609 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7610 	}
   7611 
   7612 	if (txq->txq_free != ofree) {
   7613 		/* Set a watchdog timer in case the chip flakes out. */
   7614 		txq->txq_lastsent = time_uptime;
   7615 		txq->txq_sending = true;
   7616 	}
   7617 }
   7618 
   7619 /*
   7620  * wm_nq_tx_offload:
   7621  *
   7622  *	Set up TCP/IP checksumming parameters for the
   7623  *	specified packet, for NEWQUEUE devices
   7624  */
   7625 static int
   7626 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7627     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7628 {
   7629 	struct mbuf *m0 = txs->txs_mbuf;
   7630 	uint32_t vl_len, mssidx, cmdc;
   7631 	struct ether_header *eh;
   7632 	int offset, iphl;
   7633 
   7634 	/*
   7635 	 * XXX It would be nice if the mbuf pkthdr had offset
   7636 	 * fields for the protocol headers.
   7637 	 */
   7638 	*cmdlenp = 0;
   7639 	*fieldsp = 0;
   7640 
   7641 	eh = mtod(m0, struct ether_header *);
   7642 	switch (htons(eh->ether_type)) {
   7643 	case ETHERTYPE_IP:
   7644 	case ETHERTYPE_IPV6:
   7645 		offset = ETHER_HDR_LEN;
   7646 		break;
   7647 
   7648 	case ETHERTYPE_VLAN:
   7649 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7650 		break;
   7651 
   7652 	default:
   7653 		/* Don't support this protocol or encapsulation. */
   7654 		*do_csum = false;
   7655 		return 0;
   7656 	}
   7657 	*do_csum = true;
   7658 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7659 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7660 
   7661 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7662 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7663 
   7664 	if ((m0->m_pkthdr.csum_flags &
   7665 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7666 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7667 	} else {
   7668 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7669 	}
   7670 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7671 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
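         	/*
         	 * E.g. for an untagged IPv4/TCP frame (illustrative):
         	 * MACLEN = 14 (ETHER_HDR_LEN) and IPLEN = 20 (a minimal
         	 * IPv4 header), packed into vl_len at their shifts.
         	 */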
   7672 
   7673 	if (vlan_has_tag(m0)) {
   7674 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7675 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7676 		*cmdlenp |= NQTX_CMD_VLE;
   7677 	}
   7678 
   7679 	mssidx = 0;
   7680 
   7681 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7682 		int hlen = offset + iphl;
   7683 		int tcp_hlen;
   7684 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7685 
   7686 		if (__predict_false(m0->m_len <
   7687 				    (hlen + sizeof(struct tcphdr)))) {
   7688 			/*
   7689 			 * TCP/IP headers are not in the first mbuf; we need
   7690 			 * to do this the slow and painful way. Let's just
   7691 			 * hope this doesn't happen very often.
   7692 			 */
   7693 			struct tcphdr th;
   7694 
   7695 			WM_Q_EVCNT_INCR(txq, tsopain);
   7696 
   7697 			m_copydata(m0, hlen, sizeof(th), &th);
   7698 			if (v4) {
   7699 				struct ip ip;
   7700 
   7701 				m_copydata(m0, offset, sizeof(ip), &ip);
   7702 				ip.ip_len = 0;
   7703 				m_copyback(m0,
   7704 				    offset + offsetof(struct ip, ip_len),
   7705 				    sizeof(ip.ip_len), &ip.ip_len);
   7706 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7707 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7708 			} else {
   7709 				struct ip6_hdr ip6;
   7710 
   7711 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7712 				ip6.ip6_plen = 0;
   7713 				m_copyback(m0,
   7714 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7715 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7716 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7717 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7718 			}
   7719 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7720 			    sizeof(th.th_sum), &th.th_sum);
   7721 
   7722 			tcp_hlen = th.th_off << 2;
   7723 		} else {
   7724 			/*
   7725 			 * TCP/IP headers are in the first mbuf; we can do
   7726 			 * this the easy way.
   7727 			 */
   7728 			struct tcphdr *th;
   7729 
   7730 			if (v4) {
   7731 				struct ip *ip =
   7732 				    (void *)(mtod(m0, char *) + offset);
   7733 				th = (void *)(mtod(m0, char *) + hlen);
   7734 
   7735 				ip->ip_len = 0;
   7736 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7737 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7738 			} else {
   7739 				struct ip6_hdr *ip6 =
   7740 				    (void *)(mtod(m0, char *) + offset);
   7741 				th = (void *)(mtod(m0, char *) + hlen);
   7742 
   7743 				ip6->ip6_plen = 0;
   7744 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7745 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7746 			}
   7747 			tcp_hlen = th->th_off << 2;
   7748 		}
   7749 		hlen += tcp_hlen;
   7750 		*cmdlenp |= NQTX_CMD_TSE;
   7751 
   7752 		if (v4) {
   7753 			WM_Q_EVCNT_INCR(txq, tso);
   7754 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7755 		} else {
   7756 			WM_Q_EVCNT_INCR(txq, tso6);
   7757 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7758 		}
   7759 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7760 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7761 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7762 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7763 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7764 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7765 	} else {
   7766 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7767 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7768 	}
   7769 
   7770 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7771 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7772 		cmdc |= NQTXC_CMD_IP4;
   7773 	}
   7774 
   7775 	if (m0->m_pkthdr.csum_flags &
   7776 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7777 		WM_Q_EVCNT_INCR(txq, tusum);
   7778 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7779 			cmdc |= NQTXC_CMD_TCP;
   7780 		else
   7781 			cmdc |= NQTXC_CMD_UDP;
   7782 
   7783 		cmdc |= NQTXC_CMD_IP4;
   7784 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7785 	}
   7786 	if (m0->m_pkthdr.csum_flags &
   7787 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7788 		WM_Q_EVCNT_INCR(txq, tusum6);
   7789 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7790 			cmdc |= NQTXC_CMD_TCP;
   7791 		else
   7792 			cmdc |= NQTXC_CMD_UDP;
   7793 
   7794 		cmdc |= NQTXC_CMD_IP6;
   7795 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7796 	}
   7797 
   7798 	/*
    7799 	 * We don't have to write a context descriptor for every packet on
    7800 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7801 	 * I210 and I211. Writing one per Tx queue is enough for these
    7802 	 * controllers.
    7803 	 * Writing a context descriptor for every packet adds overhead,
    7804 	 * but it does not cause problems.
   7805 	 */
   7806 	/* Fill in the context descriptor. */
   7807 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7808 	    htole32(vl_len);
   7809 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7810 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7811 	    htole32(cmdc);
   7812 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7813 	    htole32(mssidx);
   7814 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7815 	DPRINTF(WM_DEBUG_TX,
   7816 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7817 		txq->txq_next, 0, vl_len));
   7818 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7819 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7820 	txs->txs_ndesc++;
   7821 	return 0;
   7822 }
   7823 
   7824 /*
   7825  * wm_nq_start:		[ifnet interface function]
   7826  *
   7827  *	Start packet transmission on the interface for NEWQUEUE devices
   7828  */
   7829 static void
   7830 wm_nq_start(struct ifnet *ifp)
   7831 {
   7832 	struct wm_softc *sc = ifp->if_softc;
   7833 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7834 
   7835 #ifdef WM_MPSAFE
   7836 	KASSERT(if_is_mpsafe(ifp));
   7837 #endif
   7838 	/*
   7839 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7840 	 */
   7841 
   7842 	mutex_enter(txq->txq_lock);
   7843 	if (!txq->txq_stopping)
   7844 		wm_nq_start_locked(ifp);
   7845 	mutex_exit(txq->txq_lock);
   7846 }
   7847 
   7848 static void
   7849 wm_nq_start_locked(struct ifnet *ifp)
   7850 {
   7851 	struct wm_softc *sc = ifp->if_softc;
   7852 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7853 
   7854 	wm_nq_send_common_locked(ifp, txq, false);
   7855 }
   7856 
   7857 static int
   7858 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7859 {
   7860 	int qid;
   7861 	struct wm_softc *sc = ifp->if_softc;
   7862 	struct wm_txqueue *txq;
   7863 
   7864 	qid = wm_select_txqueue(ifp, m);
   7865 	txq = &sc->sc_queue[qid].wmq_txq;
   7866 
   7867 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7868 		m_freem(m);
   7869 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7870 		return ENOBUFS;
   7871 	}
   7872 
   7873 	/*
   7874 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7875 	 */
   7876 	ifp->if_obytes += m->m_pkthdr.len;
   7877 	if (m->m_flags & M_MCAST)
   7878 		ifp->if_omcasts++;
   7879 
    7880 	/*
    7881 	 * There are two situations in which this mutex_tryenter() can fail
    7882 	 * at run time:
    7883 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7884 	 *     (2) contention with the deferred if_start softint (wm_handle_queue())
    7885 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7886 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7887 	 * In case (2), the last packet enqueued to txq->txq_interq is also
    7888 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck,
    7889 	 * either.
    7890 	 */
   7891 	if (mutex_tryenter(txq->txq_lock)) {
   7892 		if (!txq->txq_stopping)
   7893 			wm_nq_transmit_locked(ifp, txq);
   7894 		mutex_exit(txq->txq_lock);
   7895 	}
   7896 
   7897 	return 0;
   7898 }
   7899 
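/*
 * A minimal sketch (an assumption, not necessarily the driver's actual
 * policy) of a wm_select_txqueue()-style mapping from the sending CPU
 * to a Tx queue; the function name is only illustrative.
 */
#if 0
static int
example_select_txqueue(struct wm_softc *sc)
{

	return cpu_index(curcpu()) % sc->sc_nqueues;
}
#endif
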
   7900 static void
   7901 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7902 {
   7903 
   7904 	wm_nq_send_common_locked(ifp, txq, true);
   7905 }
   7906 
   7907 static void
   7908 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7909     bool is_transmit)
   7910 {
   7911 	struct wm_softc *sc = ifp->if_softc;
   7912 	struct mbuf *m0;
   7913 	struct wm_txsoft *txs;
   7914 	bus_dmamap_t dmamap;
   7915 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7916 	bool do_csum, sent;
   7917 	bool remap = true;
   7918 
   7919 	KASSERT(mutex_owned(txq->txq_lock));
   7920 
   7921 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7922 		return;
   7923 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7924 		return;
   7925 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7926 		return;
   7927 
   7928 	sent = false;
   7929 
   7930 	/*
   7931 	 * Loop through the send queue, setting up transmit descriptors
   7932 	 * until we drain the queue, or use up all available transmit
   7933 	 * descriptors.
   7934 	 */
   7935 	for (;;) {
   7936 		m0 = NULL;
   7937 
   7938 		/* Get a work queue entry. */
   7939 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7940 			wm_txeof(txq, UINT_MAX);
   7941 			if (txq->txq_sfree == 0) {
   7942 				DPRINTF(WM_DEBUG_TX,
   7943 				    ("%s: TX: no free job descriptors\n",
   7944 					device_xname(sc->sc_dev)));
   7945 				WM_Q_EVCNT_INCR(txq, txsstall);
   7946 				break;
   7947 			}
   7948 		}
   7949 
   7950 		/* Grab a packet off the queue. */
   7951 		if (is_transmit)
   7952 			m0 = pcq_get(txq->txq_interq);
   7953 		else
   7954 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7955 		if (m0 == NULL)
   7956 			break;
   7957 
   7958 		DPRINTF(WM_DEBUG_TX,
   7959 		    ("%s: TX: have packet to transmit: %p\n",
   7960 		    device_xname(sc->sc_dev), m0));
   7961 
   7962 		txs = &txq->txq_soft[txq->txq_snext];
   7963 		dmamap = txs->txs_dmamap;
   7964 
   7965 		/*
   7966 		 * Load the DMA map.  If this fails, the packet either
   7967 		 * didn't fit in the allotted number of segments, or we
   7968 		 * were short on resources.  For the too-many-segments
   7969 		 * case, we simply report an error and drop the packet,
   7970 		 * since we can't sanely copy a jumbo packet to a single
   7971 		 * buffer.
   7972 		 */
   7973 retry:
   7974 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7975 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7976 		if (__predict_false(error)) {
   7977 			if (error == EFBIG) {
   7978 				if (remap == true) {
   7979 					struct mbuf *m;
   7980 
   7981 					remap = false;
   7982 					m = m_defrag(m0, M_NOWAIT);
   7983 					if (m != NULL) {
   7984 						WM_Q_EVCNT_INCR(txq, defrag);
   7985 						m0 = m;
   7986 						goto retry;
   7987 					}
   7988 				}
   7989 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7990 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7991 				    "DMA segments, dropping...\n",
   7992 				    device_xname(sc->sc_dev));
   7993 				wm_dump_mbuf_chain(sc, m0);
   7994 				m_freem(m0);
   7995 				continue;
   7996 			}
   7997 			/* Short on resources, just stop for now. */
   7998 			DPRINTF(WM_DEBUG_TX,
   7999 			    ("%s: TX: dmamap load failed: %d\n",
   8000 				device_xname(sc->sc_dev), error));
   8001 			break;
   8002 		}
   8003 
   8004 		segs_needed = dmamap->dm_nsegs;
   8005 
   8006 		/*
   8007 		 * Ensure we have enough descriptors free to describe
   8008 		 * the packet. Note, we always reserve one descriptor
   8009 		 * at the end of the ring due to the semantics of the
   8010 		 * TDT register, plus one more in the event we need
   8011 		 * to load offload context.
   8012 		 */
   8013 		if (segs_needed > txq->txq_free - 2) {
   8014 			/*
   8015 			 * Not enough free descriptors to transmit this
   8016 			 * packet.  We haven't committed anything yet,
    8017 			 * so just unload the DMA map, put the packet
    8018 			 * back on the queue, and punt. Notify the upper
   8019 			 * layer that there are no more slots left.
   8020 			 */
   8021 			DPRINTF(WM_DEBUG_TX,
   8022 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8023 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8024 				segs_needed, txq->txq_free - 1));
   8025 			if (!is_transmit)
   8026 				ifp->if_flags |= IFF_OACTIVE;
   8027 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8028 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8029 			WM_Q_EVCNT_INCR(txq, txdstall);
   8030 			break;
   8031 		}
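		/*
		 * Worked example with illustrative numbers: if txq_free is
		 * 10, a packet mapped to 9 DMA segments is deferred here,
		 * because 9 > 10 - 2: one descriptor is reserved for the
		 * TDT semantics and one for a possible context descriptor.
		 */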
   8032 
   8033 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8034 
   8035 		DPRINTF(WM_DEBUG_TX,
   8036 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8037 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8038 
   8039 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8040 
   8041 		/*
   8042 		 * Store a pointer to the packet so that we can free it
   8043 		 * later.
   8044 		 *
    8045 		 * Initially, we take the number of descriptors the
    8046 		 * packet uses to be the number of DMA segments.  This may be
   8047 		 * incremented by 1 if we do checksum offload (a descriptor
   8048 		 * is used to set the checksum context).
   8049 		 */
   8050 		txs->txs_mbuf = m0;
   8051 		txs->txs_firstdesc = txq->txq_next;
   8052 		txs->txs_ndesc = segs_needed;
   8053 
   8054 		/* Set up offload parameters for this packet. */
   8055 		uint32_t cmdlen, fields, dcmdlen;
   8056 		if (m0->m_pkthdr.csum_flags &
   8057 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8058 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8059 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8060 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8061 			    &do_csum) != 0) {
   8062 				/* Error message already displayed. */
   8063 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8064 				continue;
   8065 			}
   8066 		} else {
   8067 			do_csum = false;
   8068 			cmdlen = 0;
   8069 			fields = 0;
   8070 		}
   8071 
   8072 		/* Sync the DMA map. */
   8073 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8074 		    BUS_DMASYNC_PREWRITE);
   8075 
   8076 		/* Initialize the first transmit descriptor. */
   8077 		nexttx = txq->txq_next;
   8078 		if (!do_csum) {
   8079 			/* setup a legacy descriptor */
   8080 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8081 			    dmamap->dm_segs[0].ds_addr);
   8082 			txq->txq_descs[nexttx].wtx_cmdlen =
   8083 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8084 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8085 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8086 			if (vlan_has_tag(m0)) {
   8087 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8088 				    htole32(WTX_CMD_VLE);
   8089 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8090 				    htole16(vlan_get_tag(m0));
   8091 			} else
    8092 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8093 
   8094 			dcmdlen = 0;
   8095 		} else {
   8096 			/* setup an advanced data descriptor */
   8097 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8098 			    htole64(dmamap->dm_segs[0].ds_addr);
   8099 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8100 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8101 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8102 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8103 			    htole32(fields);
   8104 			DPRINTF(WM_DEBUG_TX,
   8105 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8106 				device_xname(sc->sc_dev), nexttx,
   8107 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8108 			DPRINTF(WM_DEBUG_TX,
   8109 			    ("\t 0x%08x%08x\n", fields,
   8110 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8111 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8112 		}
   8113 
   8114 		lasttx = nexttx;
   8115 		nexttx = WM_NEXTTX(txq, nexttx);
   8116 		/*
    8117 		 * Fill in the next descriptors. The legacy and advanced
    8118 		 * formats are the same here.
   8119 		 */
   8120 		for (seg = 1; seg < dmamap->dm_nsegs;
   8121 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8122 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8123 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8124 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8125 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8126 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8127 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8128 			lasttx = nexttx;
   8129 
   8130 			DPRINTF(WM_DEBUG_TX,
   8131 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8132 				device_xname(sc->sc_dev), nexttx,
   8133 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8134 				dmamap->dm_segs[seg].ds_len));
   8135 		}
   8136 
   8137 		KASSERT(lasttx != -1);
   8138 
   8139 		/*
   8140 		 * Set up the command byte on the last descriptor of
   8141 		 * the packet. If we're in the interrupt delay window,
   8142 		 * delay the interrupt.
   8143 		 */
   8144 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8145 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8146 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8147 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8148 
   8149 		txs->txs_lastdesc = lasttx;
   8150 
   8151 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8152 		    device_xname(sc->sc_dev),
   8153 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8154 
   8155 		/* Sync the descriptors we're using. */
   8156 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8157 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8158 
   8159 		/* Give the packet to the chip. */
   8160 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8161 		sent = true;
   8162 
   8163 		DPRINTF(WM_DEBUG_TX,
   8164 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8165 
   8166 		DPRINTF(WM_DEBUG_TX,
   8167 		    ("%s: TX: finished transmitting packet, job %d\n",
   8168 			device_xname(sc->sc_dev), txq->txq_snext));
   8169 
   8170 		/* Advance the tx pointer. */
   8171 		txq->txq_free -= txs->txs_ndesc;
   8172 		txq->txq_next = nexttx;
   8173 
   8174 		txq->txq_sfree--;
   8175 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8176 
   8177 		/* Pass the packet to any BPF listeners. */
   8178 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8179 	}
   8180 
   8181 	if (m0 != NULL) {
   8182 		if (!is_transmit)
   8183 			ifp->if_flags |= IFF_OACTIVE;
   8184 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8185 		WM_Q_EVCNT_INCR(txq, descdrop);
   8186 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8187 			__func__));
   8188 		m_freem(m0);
   8189 	}
   8190 
   8191 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8192 		/* No more slots; notify upper layer. */
   8193 		if (!is_transmit)
   8194 			ifp->if_flags |= IFF_OACTIVE;
   8195 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8196 	}
   8197 
   8198 	if (sent) {
   8199 		/* Set a watchdog timer in case the chip flakes out. */
   8200 		txq->txq_lastsent = time_uptime;
   8201 		txq->txq_sending = true;
   8202 	}
   8203 }
   8204 
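/*
 * A minimal sketch (an assumption; the actual check lives in the
 * driver's watchdog/tick path outside this section) of how the
 * txq_lastsent and txq_sending values set above are consumed; the
 * "tx_timeout" name is only illustrative:
 *
 *	if (txq->txq_sending &&
 *	    (time_uptime - txq->txq_lastsent > tx_timeout))
 *		... consider the queue hung and reset the chip ...
 */
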
   8205 static void
   8206 wm_deferred_start_locked(struct wm_txqueue *txq)
   8207 {
   8208 	struct wm_softc *sc = txq->txq_sc;
   8209 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8210 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8211 	int qid = wmq->wmq_id;
   8212 
   8213 	KASSERT(mutex_owned(txq->txq_lock));
   8214 
   8215 	if (txq->txq_stopping) {
   8216 		mutex_exit(txq->txq_lock);
   8217 		return;
   8218 	}
   8219 
   8220 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8221 		/* XXX needed for ALTQ or single-CPU systems */
   8222 		if (qid == 0)
   8223 			wm_nq_start_locked(ifp);
   8224 		wm_nq_transmit_locked(ifp, txq);
   8225 	} else {
    8226 		/* XXX needed for ALTQ or single-CPU systems */
   8227 		if (qid == 0)
   8228 			wm_start_locked(ifp);
   8229 		wm_transmit_locked(ifp, txq);
   8230 	}
   8231 }
   8232 
   8233 /* Interrupt */
   8234 
   8235 /*
   8236  * wm_txeof:
   8237  *
   8238  *	Helper; handle transmit interrupts.
   8239  */
   8240 static bool
   8241 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8242 {
   8243 	struct wm_softc *sc = txq->txq_sc;
   8244 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8245 	struct wm_txsoft *txs;
   8246 	int count = 0;
   8247 	int i;
   8248 	uint8_t status;
   8249 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8250 	bool more = false;
   8251 
   8252 	KASSERT(mutex_owned(txq->txq_lock));
   8253 
   8254 	if (txq->txq_stopping)
   8255 		return false;
   8256 
   8257 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8258 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   8259 	if (wmq->wmq_id == 0)
   8260 		ifp->if_flags &= ~IFF_OACTIVE;
   8261 
   8262 	/*
   8263 	 * Go through the Tx list and free mbufs for those
   8264 	 * frames which have been transmitted.
   8265 	 */
   8266 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8267 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8268 		if (limit-- == 0) {
   8269 			more = true;
   8270 			DPRINTF(WM_DEBUG_TX,
   8271 			    ("%s: TX: loop limited, job %d is not processed\n",
   8272 				device_xname(sc->sc_dev), i));
   8273 			break;
   8274 		}
   8275 
   8276 		txs = &txq->txq_soft[i];
   8277 
   8278 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8279 			device_xname(sc->sc_dev), i));
   8280 
   8281 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8282 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8283 
   8284 		status =
   8285 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8286 		if ((status & WTX_ST_DD) == 0) {
   8287 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8288 			    BUS_DMASYNC_PREREAD);
   8289 			break;
   8290 		}
   8291 
   8292 		count++;
   8293 		DPRINTF(WM_DEBUG_TX,
   8294 		    ("%s: TX: job %d done: descs %d..%d\n",
   8295 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8296 		    txs->txs_lastdesc));
   8297 
   8298 		/*
   8299 		 * XXX We should probably be using the statistics
   8300 		 * XXX registers, but I don't know if they exist
   8301 		 * XXX on chips before the i82544.
   8302 		 */
   8303 
   8304 #ifdef WM_EVENT_COUNTERS
   8305 		if (status & WTX_ST_TU)
   8306 			WM_Q_EVCNT_INCR(txq, underrun);
   8307 #endif /* WM_EVENT_COUNTERS */
   8308 
   8309 		/*
    8310 		 * The documentation for the 82574 and newer says the status
    8311 		 * field has neither the EC (Excessive Collision) bit nor the
    8312 		 * LC (Late Collision) bit (they are reserved). Refer to the
    8313 		 * "PCIe GbE Controller Open Source Software Developer's
    8314 		 * Manual", the 82574 datasheet and newer ones.
    8315 		 *
    8316 		 * XXX I saw the LC bit set on I218 even though the media was
    8317 		 * full duplex, so the bit might have some other, undocumented meaning.
   8318 		 */
   8319 
   8320 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8321 		    && ((sc->sc_type < WM_T_82574)
   8322 			|| (sc->sc_type == WM_T_80003))) {
   8323 			ifp->if_oerrors++;
   8324 			if (status & WTX_ST_LC)
   8325 				log(LOG_WARNING, "%s: late collision\n",
   8326 				    device_xname(sc->sc_dev));
   8327 			else if (status & WTX_ST_EC) {
   8328 				ifp->if_collisions +=
   8329 				    TX_COLLISION_THRESHOLD + 1;
   8330 				log(LOG_WARNING, "%s: excessive collisions\n",
   8331 				    device_xname(sc->sc_dev));
   8332 			}
   8333 		} else
   8334 			ifp->if_opackets++;
   8335 
   8336 		txq->txq_packets++;
   8337 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8338 
   8339 		txq->txq_free += txs->txs_ndesc;
   8340 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8341 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8342 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8343 		m_freem(txs->txs_mbuf);
   8344 		txs->txs_mbuf = NULL;
   8345 	}
   8346 
   8347 	/* Update the dirty transmit buffer pointer. */
   8348 	txq->txq_sdirty = i;
   8349 	DPRINTF(WM_DEBUG_TX,
   8350 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8351 
   8352 	if (count != 0)
   8353 		rnd_add_uint32(&sc->rnd_source, count);
   8354 
   8355 	/*
   8356 	 * If there are no more pending transmissions, cancel the watchdog
   8357 	 * timer.
   8358 	 */
   8359 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8360 		txq->txq_sending = false;
   8361 
   8362 	return more;
   8363 }
   8364 
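/*
 * Summary of the Tx soft-ring bookkeeping used above (derived from the
 * code, for clarity): txq_snext is the next free job slot (the
 * producer index), txq_sdirty is the oldest in-flight job (the
 * consumer index), and txq_sfree counts free slots.  wm_txeof()
 * advances txq_sdirty and increments txq_sfree for every job whose
 * last descriptor has WTX_ST_DD set.
 */
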
   8365 static inline uint32_t
   8366 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8367 {
   8368 	struct wm_softc *sc = rxq->rxq_sc;
   8369 
   8370 	if (sc->sc_type == WM_T_82574)
   8371 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8372 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8373 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8374 	else
   8375 		return rxq->rxq_descs[idx].wrx_status;
   8376 }
   8377 
   8378 static inline uint32_t
   8379 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8380 {
   8381 	struct wm_softc *sc = rxq->rxq_sc;
   8382 
   8383 	if (sc->sc_type == WM_T_82574)
   8384 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8385 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8386 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8387 	else
   8388 		return rxq->rxq_descs[idx].wrx_errors;
   8389 }
   8390 
   8391 static inline uint16_t
   8392 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8393 {
   8394 	struct wm_softc *sc = rxq->rxq_sc;
   8395 
   8396 	if (sc->sc_type == WM_T_82574)
   8397 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8398 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8399 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8400 	else
   8401 		return rxq->rxq_descs[idx].wrx_special;
   8402 }
   8403 
   8404 static inline int
   8405 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8406 {
   8407 	struct wm_softc *sc = rxq->rxq_sc;
   8408 
   8409 	if (sc->sc_type == WM_T_82574)
   8410 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8411 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8412 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8413 	else
   8414 		return rxq->rxq_descs[idx].wrx_len;
   8415 }
   8416 
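/*
 * Example use of the accessors above (this mirrors wm_rxeof() below);
 * they hide which of the three Rx descriptor layouts (legacy, 82574
 * extended or NEWQUEUE) the controller uses:
 *
 *	status = wm_rxdesc_get_status(rxq, i);
 *	errors = wm_rxdesc_get_errors(rxq, i);
 *	len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
 */
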
   8417 #ifdef WM_DEBUG
   8418 static inline uint32_t
   8419 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8420 {
   8421 	struct wm_softc *sc = rxq->rxq_sc;
   8422 
   8423 	if (sc->sc_type == WM_T_82574)
   8424 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8425 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8426 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8427 	else
   8428 		return 0;
   8429 }
   8430 
   8431 static inline uint8_t
   8432 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8433 {
   8434 	struct wm_softc *sc = rxq->rxq_sc;
   8435 
   8436 	if (sc->sc_type == WM_T_82574)
   8437 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8438 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8439 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8440 	else
   8441 		return 0;
   8442 }
   8443 #endif /* WM_DEBUG */
   8444 
   8445 static inline bool
   8446 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8447     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8448 {
   8449 
   8450 	if (sc->sc_type == WM_T_82574)
   8451 		return (status & ext_bit) != 0;
   8452 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8453 		return (status & nq_bit) != 0;
   8454 	else
   8455 		return (status & legacy_bit) != 0;
   8456 }
   8457 
   8458 static inline bool
   8459 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8460     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8461 {
   8462 
   8463 	if (sc->sc_type == WM_T_82574)
   8464 		return (error & ext_bit) != 0;
   8465 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8466 		return (error & nq_bit) != 0;
   8467 	else
   8468 		return (error & legacy_bit) != 0;
   8469 }
   8470 
   8471 static inline bool
   8472 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8473 {
   8474 
   8475 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8476 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8477 		return true;
   8478 	else
   8479 		return false;
   8480 }
   8481 
   8482 static inline bool
   8483 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8484 {
   8485 	struct wm_softc *sc = rxq->rxq_sc;
   8486 
   8487 	/* XXXX missing error bit for newqueue? */
   8488 	if (wm_rxdesc_is_set_error(sc, errors,
   8489 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8490 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8491 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8492 		NQRXC_ERROR_RXE)) {
   8493 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8494 		    EXTRXC_ERROR_SE, 0))
   8495 			log(LOG_WARNING, "%s: symbol error\n",
   8496 			    device_xname(sc->sc_dev));
   8497 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8498 		    EXTRXC_ERROR_SEQ, 0))
   8499 			log(LOG_WARNING, "%s: receive sequence error\n",
   8500 			    device_xname(sc->sc_dev));
   8501 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8502 		    EXTRXC_ERROR_CE, 0))
   8503 			log(LOG_WARNING, "%s: CRC error\n",
   8504 			    device_xname(sc->sc_dev));
   8505 		return true;
   8506 	}
   8507 
   8508 	return false;
   8509 }
   8510 
   8511 static inline bool
   8512 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8513 {
   8514 	struct wm_softc *sc = rxq->rxq_sc;
   8515 
   8516 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8517 		NQRXC_STATUS_DD)) {
   8518 		/* We have processed all of the receive descriptors. */
   8519 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8520 		return false;
   8521 	}
   8522 
   8523 	return true;
   8524 }
   8525 
   8526 static inline bool
   8527 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8528     uint16_t vlantag, struct mbuf *m)
   8529 {
   8530 
   8531 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8532 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8533 		vlan_set_tag(m, le16toh(vlantag));
   8534 	}
   8535 
   8536 	return true;
   8537 }
   8538 
   8539 static inline void
   8540 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8541     uint32_t errors, struct mbuf *m)
   8542 {
   8543 	struct wm_softc *sc = rxq->rxq_sc;
   8544 
   8545 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8546 		if (wm_rxdesc_is_set_status(sc, status,
   8547 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8548 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8549 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8550 			if (wm_rxdesc_is_set_error(sc, errors,
   8551 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8552 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8553 		}
   8554 		if (wm_rxdesc_is_set_status(sc, status,
   8555 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8556 			/*
   8557 			 * Note: we don't know if this was TCP or UDP,
   8558 			 * so we just set both bits, and expect the
   8559 			 * upper layers to deal.
   8560 			 */
   8561 			WM_Q_EVCNT_INCR(rxq, tusum);
   8562 			m->m_pkthdr.csum_flags |=
   8563 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8564 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8565 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8566 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8567 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8568 		}
   8569 	}
   8570 }
   8571 
   8572 /*
   8573  * wm_rxeof:
   8574  *
   8575  *	Helper; handle receive interrupts.
   8576  */
   8577 static bool
   8578 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8579 {
   8580 	struct wm_softc *sc = rxq->rxq_sc;
   8581 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8582 	struct wm_rxsoft *rxs;
   8583 	struct mbuf *m;
   8584 	int i, len;
   8585 	int count = 0;
   8586 	uint32_t status, errors;
   8587 	uint16_t vlantag;
   8588 	bool more = false;
   8589 
   8590 	KASSERT(mutex_owned(rxq->rxq_lock));
   8591 
   8592 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8593 		if (limit-- == 0) {
   8594 			rxq->rxq_ptr = i;
   8595 			more = true;
   8596 			DPRINTF(WM_DEBUG_RX,
   8597 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8598 				device_xname(sc->sc_dev), i));
   8599 			break;
   8600 		}
   8601 
   8602 		rxs = &rxq->rxq_soft[i];
   8603 
   8604 		DPRINTF(WM_DEBUG_RX,
   8605 		    ("%s: RX: checking descriptor %d\n",
   8606 			device_xname(sc->sc_dev), i));
   8607 		wm_cdrxsync(rxq, i,
   8608 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8609 
   8610 		status = wm_rxdesc_get_status(rxq, i);
   8611 		errors = wm_rxdesc_get_errors(rxq, i);
   8612 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8613 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8614 #ifdef WM_DEBUG
   8615 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8616 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8617 #endif
   8618 
   8619 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8620 			/*
    8621 			 * Update the receive pointer while holding rxq_lock,
    8622 			 * keeping it consistent with the counters.
   8623 			 */
   8624 			rxq->rxq_ptr = i;
   8625 			break;
   8626 		}
   8627 
   8628 		count++;
   8629 		if (__predict_false(rxq->rxq_discard)) {
   8630 			DPRINTF(WM_DEBUG_RX,
   8631 			    ("%s: RX: discarding contents of descriptor %d\n",
   8632 				device_xname(sc->sc_dev), i));
   8633 			wm_init_rxdesc(rxq, i);
   8634 			if (wm_rxdesc_is_eop(rxq, status)) {
   8635 				/* Reset our state. */
   8636 				DPRINTF(WM_DEBUG_RX,
   8637 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8638 					device_xname(sc->sc_dev)));
   8639 				rxq->rxq_discard = 0;
   8640 			}
   8641 			continue;
   8642 		}
   8643 
   8644 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8645 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8646 
   8647 		m = rxs->rxs_mbuf;
   8648 
   8649 		/*
   8650 		 * Add a new receive buffer to the ring, unless of
   8651 		 * course the length is zero. Treat the latter as a
   8652 		 * failed mapping.
   8653 		 */
   8654 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8655 			/*
   8656 			 * Failed, throw away what we've done so
   8657 			 * far, and discard the rest of the packet.
   8658 			 */
   8659 			ifp->if_ierrors++;
   8660 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8661 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8662 			wm_init_rxdesc(rxq, i);
   8663 			if (!wm_rxdesc_is_eop(rxq, status))
   8664 				rxq->rxq_discard = 1;
   8665 			if (rxq->rxq_head != NULL)
   8666 				m_freem(rxq->rxq_head);
   8667 			WM_RXCHAIN_RESET(rxq);
   8668 			DPRINTF(WM_DEBUG_RX,
   8669 			    ("%s: RX: Rx buffer allocation failed, "
   8670 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8671 				rxq->rxq_discard ? " (discard)" : ""));
   8672 			continue;
   8673 		}
   8674 
   8675 		m->m_len = len;
   8676 		rxq->rxq_len += len;
   8677 		DPRINTF(WM_DEBUG_RX,
   8678 		    ("%s: RX: buffer at %p len %d\n",
   8679 			device_xname(sc->sc_dev), m->m_data, len));
   8680 
   8681 		/* If this is not the end of the packet, keep looking. */
   8682 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8683 			WM_RXCHAIN_LINK(rxq, m);
   8684 			DPRINTF(WM_DEBUG_RX,
   8685 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8686 				device_xname(sc->sc_dev), rxq->rxq_len));
   8687 			continue;
   8688 		}
   8689 
    8690 		/*
    8691 		 * Okay, we have the entire packet now. The chip is
    8692 		 * configured to include the FCS except on I350, I354 and
    8693 		 * I21[01] (not all chips can be configured to strip it),
    8694 		 * so we need to trim it. We may also need to adjust the
    8695 		 * length of the previous mbuf in the chain if the current
    8696 		 * mbuf is too short.
    8697 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8698 		 * register is always set on I350, so we don't trim there.
    8699 		 */
   8700 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8701 		    && (sc->sc_type != WM_T_I210)
   8702 		    && (sc->sc_type != WM_T_I211)) {
   8703 			if (m->m_len < ETHER_CRC_LEN) {
   8704 				rxq->rxq_tail->m_len
   8705 				    -= (ETHER_CRC_LEN - m->m_len);
   8706 				m->m_len = 0;
   8707 			} else
   8708 				m->m_len -= ETHER_CRC_LEN;
   8709 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8710 		} else
   8711 			len = rxq->rxq_len;
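
		/*
		 * Worked example (illustrative numbers): if a 64 byte frame
		 * arrives split into a 62 byte mbuf followed by a 2 byte
		 * mbuf, the 4 byte FCS spans both; the 2 byte tail mbuf is
		 * shortened to 0 and the previous mbuf loses the remaining
		 * 2 bytes (62 -> 60), giving len = 64 - ETHER_CRC_LEN = 60.
		 */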
   8712 
   8713 		WM_RXCHAIN_LINK(rxq, m);
   8714 
   8715 		*rxq->rxq_tailp = NULL;
   8716 		m = rxq->rxq_head;
   8717 
   8718 		WM_RXCHAIN_RESET(rxq);
   8719 
   8720 		DPRINTF(WM_DEBUG_RX,
   8721 		    ("%s: RX: have entire packet, len -> %d\n",
   8722 			device_xname(sc->sc_dev), len));
   8723 
   8724 		/* If an error occurred, update stats and drop the packet. */
   8725 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8726 			m_freem(m);
   8727 			continue;
   8728 		}
   8729 
   8730 		/* No errors.  Receive the packet. */
   8731 		m_set_rcvif(m, ifp);
   8732 		m->m_pkthdr.len = len;
   8733 		/*
   8734 		 * TODO
    8735 		 * The rsshash and rsstype should be saved in this mbuf.
   8736 		 */
   8737 		DPRINTF(WM_DEBUG_RX,
   8738 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8739 			device_xname(sc->sc_dev), rsstype, rsshash));
   8740 
   8741 		/*
   8742 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8743 		 * for us.  Associate the tag with the packet.
   8744 		 */
   8745 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8746 			continue;
   8747 
   8748 		/* Set up checksum info for this packet. */
   8749 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8750 		/*
    8751 		 * Update the receive pointer while holding rxq_lock, keeping
    8752 		 * it consistent with the counter increments.
   8753 		 */
   8754 		rxq->rxq_ptr = i;
   8755 		rxq->rxq_packets++;
   8756 		rxq->rxq_bytes += len;
   8757 		mutex_exit(rxq->rxq_lock);
   8758 
   8759 		/* Pass it on. */
   8760 		if_percpuq_enqueue(sc->sc_ipq, m);
   8761 
   8762 		mutex_enter(rxq->rxq_lock);
   8763 
   8764 		if (rxq->rxq_stopping)
   8765 			break;
   8766 	}
   8767 
   8768 	if (count != 0)
   8769 		rnd_add_uint32(&sc->rnd_source, count);
   8770 
   8771 	DPRINTF(WM_DEBUG_RX,
   8772 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8773 
   8774 	return more;
   8775 }
   8776 
   8777 /*
   8778  * wm_linkintr_gmii:
   8779  *
   8780  *	Helper; handle link interrupts for GMII.
   8781  */
   8782 static void
   8783 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8784 {
   8785 
   8786 	KASSERT(WM_CORE_LOCKED(sc));
   8787 
   8788 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8789 		__func__));
   8790 
   8791 	if (icr & ICR_LSC) {
   8792 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8793 		uint32_t reg;
   8794 		bool link;
   8795 
   8796 		link = status & STATUS_LU;
   8797 		if (link) {
   8798 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8799 				device_xname(sc->sc_dev),
   8800 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8801 		} else {
   8802 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8803 				device_xname(sc->sc_dev)));
   8804 		}
   8805 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8806 			wm_gig_downshift_workaround_ich8lan(sc);
   8807 
   8808 		if ((sc->sc_type == WM_T_ICH8)
   8809 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8810 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8811 		}
   8812 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8813 			device_xname(sc->sc_dev)));
   8814 		mii_pollstat(&sc->sc_mii);
   8815 		if (sc->sc_type == WM_T_82543) {
   8816 			int miistatus, active;
   8817 
   8818 			/*
   8819 			 * With 82543, we need to force speed and
   8820 			 * duplex on the MAC equal to what the PHY
   8821 			 * speed and duplex configuration is.
   8822 			 */
   8823 			miistatus = sc->sc_mii.mii_media_status;
   8824 
   8825 			if (miistatus & IFM_ACTIVE) {
   8826 				active = sc->sc_mii.mii_media_active;
   8827 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8828 				switch (IFM_SUBTYPE(active)) {
   8829 				case IFM_10_T:
   8830 					sc->sc_ctrl |= CTRL_SPEED_10;
   8831 					break;
   8832 				case IFM_100_TX:
   8833 					sc->sc_ctrl |= CTRL_SPEED_100;
   8834 					break;
   8835 				case IFM_1000_T:
   8836 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8837 					break;
   8838 				default:
   8839 					/*
    8840 					 * Fiber?
    8841 					 * Should not enter here.
   8842 					 */
   8843 					printf("unknown media (%x)\n", active);
   8844 					break;
   8845 				}
   8846 				if (active & IFM_FDX)
   8847 					sc->sc_ctrl |= CTRL_FD;
   8848 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8849 			}
   8850 		} else if (sc->sc_type == WM_T_PCH) {
   8851 			wm_k1_gig_workaround_hv(sc,
   8852 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8853 		}
   8854 
   8855 		/*
   8856 		 * I217 Packet Loss issue:
   8857 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8858 		 * on power up.
   8859 		 * Set the Beacon Duration for I217 to 8 usec
   8860 		 */
   8861 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8862 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8863 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8864 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8865 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8866 		}
   8867 
   8868 		/* Work-around I218 hang issue */
   8869 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8870 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8871 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8872 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8873 			wm_k1_workaround_lpt_lp(sc, link);
   8874 
   8875 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8876 			/*
   8877 			 * Set platform power management values for Latency
   8878 			 * Tolerance Reporting (LTR)
   8879 			 */
   8880 			wm_platform_pm_pch_lpt(sc,
   8881 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8882 		}
   8883 
   8884 		/* FEXTNVM6 K1-off workaround */
   8885 		if (sc->sc_type == WM_T_PCH_SPT) {
   8886 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8887 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8888 			    & FEXTNVM6_K1_OFF_ENABLE)
   8889 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8890 			else
   8891 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8892 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8893 		}
   8894 
   8895 		if (!link)
   8896 			return;
   8897 
   8898 		switch (sc->sc_type) {
   8899 		case WM_T_PCH2:
   8900 			wm_k1_workaround_lv(sc);
   8901 			/* FALLTHROUGH */
   8902 		case WM_T_PCH:
   8903 			if (sc->sc_phytype == WMPHY_82578)
   8904 				wm_link_stall_workaround_hv(sc);
   8905 			break;
   8906 		default:
   8907 			break;
   8908 		}
   8909 	} else if (icr & ICR_RXSEQ) {
   8910 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8911 			device_xname(sc->sc_dev)));
   8912 	}
   8913 }
   8914 
   8915 /*
   8916  * wm_linkintr_tbi:
   8917  *
   8918  *	Helper; handle link interrupts for TBI mode.
   8919  */
   8920 static void
   8921 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8922 {
   8923 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8924 	uint32_t status;
   8925 
   8926 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8927 		__func__));
   8928 
   8929 	status = CSR_READ(sc, WMREG_STATUS);
   8930 	if (icr & ICR_LSC) {
   8931 		wm_check_for_link(sc);
   8932 		if (status & STATUS_LU) {
   8933 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8934 				device_xname(sc->sc_dev),
   8935 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8936 			/*
   8937 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8938 			 * so we should update sc->sc_ctrl
   8939 			 */
   8940 
   8941 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8942 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8943 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8944 			if (status & STATUS_FD)
   8945 				sc->sc_tctl |=
   8946 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8947 			else
   8948 				sc->sc_tctl |=
   8949 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8950 			if (sc->sc_ctrl & CTRL_TFCE)
   8951 				sc->sc_fcrtl |= FCRTL_XONE;
   8952 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8953 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8954 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8955 			sc->sc_tbi_linkup = 1;
   8956 			if_link_state_change(ifp, LINK_STATE_UP);
   8957 		} else {
   8958 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8959 				device_xname(sc->sc_dev)));
   8960 			sc->sc_tbi_linkup = 0;
   8961 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8962 		}
   8963 		/* Update LED */
   8964 		wm_tbi_serdes_set_linkled(sc);
   8965 	} else if (icr & ICR_RXSEQ) {
   8966 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8967 			device_xname(sc->sc_dev)));
   8968 	}
   8969 }
   8970 
   8971 /*
   8972  * wm_linkintr_serdes:
   8973  *
    8974  *	Helper; handle link interrupts for SERDES mode.
   8975  */
   8976 static void
   8977 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8978 {
   8979 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8980 	struct mii_data *mii = &sc->sc_mii;
   8981 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8982 	uint32_t pcs_adv, pcs_lpab, reg;
   8983 
   8984 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8985 		__func__));
   8986 
   8987 	if (icr & ICR_LSC) {
   8988 		/* Check PCS */
   8989 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8990 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8991 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8992 				device_xname(sc->sc_dev)));
   8993 			mii->mii_media_status |= IFM_ACTIVE;
   8994 			sc->sc_tbi_linkup = 1;
   8995 			if_link_state_change(ifp, LINK_STATE_UP);
   8996 		} else {
   8997 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8998 				device_xname(sc->sc_dev)));
   8999 			mii->mii_media_status |= IFM_NONE;
   9000 			sc->sc_tbi_linkup = 0;
   9001 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9002 			wm_tbi_serdes_set_linkled(sc);
   9003 			return;
   9004 		}
   9005 		mii->mii_media_active |= IFM_1000_SX;
   9006 		if ((reg & PCS_LSTS_FDX) != 0)
   9007 			mii->mii_media_active |= IFM_FDX;
   9008 		else
   9009 			mii->mii_media_active |= IFM_HDX;
   9010 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9011 			/* Check flow */
   9012 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9013 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9014 				DPRINTF(WM_DEBUG_LINK,
   9015 				    ("XXX LINKOK but not ACOMP\n"));
   9016 				return;
   9017 			}
   9018 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9019 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9020 			DPRINTF(WM_DEBUG_LINK,
   9021 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   9022 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9023 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9024 				mii->mii_media_active |= IFM_FLOW
   9025 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9026 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9027 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9028 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9029 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9030 				mii->mii_media_active |= IFM_FLOW
   9031 				    | IFM_ETH_TXPAUSE;
   9032 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9033 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9034 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9035 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9036 				mii->mii_media_active |= IFM_FLOW
   9037 				    | IFM_ETH_RXPAUSE;
   9038 		}
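		/*
		 * The pause resolution above follows IEEE 802.3 Annex 28B:
		 *
		 *	local sym  local asym  partner sym  partner asym  result
		 *	    1          x           1            x         TX+RX pause
		 *	    0          1           1            1         TX pause only
		 *	    1          1           0            1         RX pause only
		 */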
   9039 		/* Update LED */
   9040 		wm_tbi_serdes_set_linkled(sc);
   9041 	} else {
   9042 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9043 		    device_xname(sc->sc_dev)));
   9044 	}
   9045 }
   9046 
   9047 /*
   9048  * wm_linkintr:
   9049  *
   9050  *	Helper; handle link interrupts.
   9051  */
   9052 static void
   9053 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9054 {
   9055 
   9056 	KASSERT(WM_CORE_LOCKED(sc));
   9057 
   9058 	if (sc->sc_flags & WM_F_HAS_MII)
   9059 		wm_linkintr_gmii(sc, icr);
   9060 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9061 	    && (sc->sc_type >= WM_T_82575))
   9062 		wm_linkintr_serdes(sc, icr);
   9063 	else
   9064 		wm_linkintr_tbi(sc, icr);
   9065 }
   9066 
   9067 /*
   9068  * wm_intr_legacy:
   9069  *
   9070  *	Interrupt service routine for INTx and MSI.
   9071  */
   9072 static int
   9073 wm_intr_legacy(void *arg)
   9074 {
   9075 	struct wm_softc *sc = arg;
   9076 	struct wm_queue *wmq = &sc->sc_queue[0];
   9077 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9078 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9079 	uint32_t icr, rndval = 0;
   9080 	int handled = 0;
   9081 
   9082 	while (1 /* CONSTCOND */) {
   9083 		icr = CSR_READ(sc, WMREG_ICR);
   9084 		if ((icr & sc->sc_icr) == 0)
   9085 			break;
   9086 		if (handled == 0) {
   9087 			DPRINTF(WM_DEBUG_TX,
    9088 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9089 		}
   9090 		if (rndval == 0)
   9091 			rndval = icr;
   9092 
   9093 		mutex_enter(rxq->rxq_lock);
   9094 
   9095 		if (rxq->rxq_stopping) {
   9096 			mutex_exit(rxq->rxq_lock);
   9097 			break;
   9098 		}
   9099 
   9100 		handled = 1;
   9101 
   9102 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9103 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9104 			DPRINTF(WM_DEBUG_RX,
   9105 			    ("%s: RX: got Rx intr 0x%08x\n",
   9106 				device_xname(sc->sc_dev),
   9107 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9108 			WM_Q_EVCNT_INCR(rxq, intr);
   9109 		}
   9110 #endif
   9111 		/*
   9112 		 * wm_rxeof() does *not* call upper layer functions directly,
    9113 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9114 		 * So, we can call wm_rxeof() in interrupt context.
   9115 		 */
   9116 		wm_rxeof(rxq, UINT_MAX);
   9117 
   9118 		mutex_exit(rxq->rxq_lock);
   9119 		mutex_enter(txq->txq_lock);
   9120 
   9121 		if (txq->txq_stopping) {
   9122 			mutex_exit(txq->txq_lock);
   9123 			break;
   9124 		}
   9125 
   9126 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9127 		if (icr & ICR_TXDW) {
   9128 			DPRINTF(WM_DEBUG_TX,
   9129 			    ("%s: TX: got TXDW interrupt\n",
   9130 				device_xname(sc->sc_dev)));
   9131 			WM_Q_EVCNT_INCR(txq, txdw);
   9132 		}
   9133 #endif
   9134 		wm_txeof(txq, UINT_MAX);
   9135 
   9136 		mutex_exit(txq->txq_lock);
   9137 		WM_CORE_LOCK(sc);
   9138 
   9139 		if (sc->sc_core_stopping) {
   9140 			WM_CORE_UNLOCK(sc);
   9141 			break;
   9142 		}
   9143 
   9144 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9145 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9146 			wm_linkintr(sc, icr);
   9147 		}
   9148 
   9149 		WM_CORE_UNLOCK(sc);
   9150 
   9151 		if (icr & ICR_RXO) {
   9152 #if defined(WM_DEBUG)
   9153 			log(LOG_WARNING, "%s: Receive overrun\n",
   9154 			    device_xname(sc->sc_dev));
   9155 #endif /* defined(WM_DEBUG) */
   9156 		}
   9157 	}
   9158 
   9159 	rnd_add_uint32(&sc->rnd_source, rndval);
   9160 
   9161 	if (handled) {
   9162 		/* Try to get more packets going. */
   9163 		softint_schedule(wmq->wmq_si);
   9164 	}
   9165 
   9166 	return handled;
   9167 }
   9168 
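/*
 * Note on the wm_intr_legacy() loop above: on these controllers the
 * ICR register is clear-on-read, so each CSR_READ(sc, WMREG_ICR) both
 * fetches and acknowledges the pending interrupt causes; the loop
 * exits once a read shows no cause that this driver enabled
 * (sc->sc_icr).
 */
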
   9169 static inline void
   9170 wm_txrxintr_disable(struct wm_queue *wmq)
   9171 {
   9172 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9173 
   9174 	if (sc->sc_type == WM_T_82574)
   9175 		CSR_WRITE(sc, WMREG_IMC,
   9176 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9177 	else if (sc->sc_type == WM_T_82575)
   9178 		CSR_WRITE(sc, WMREG_EIMC,
   9179 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9180 	else
   9181 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9182 }
   9183 
   9184 static inline void
   9185 wm_txrxintr_enable(struct wm_queue *wmq)
   9186 {
   9187 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9188 
   9189 	wm_itrs_calculate(sc, wmq);
   9190 
   9191 	/*
    9192 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is enabled here.
    9193 	 * There is no need to care about which of RXQ(0) and RXQ(1) enables
    9194 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled while
    9195 	 * each wm_handle_queue(wmq) is running.
   9196 	 */
   9197 	if (sc->sc_type == WM_T_82574)
   9198 		CSR_WRITE(sc, WMREG_IMS,
   9199 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9200 	else if (sc->sc_type == WM_T_82575)
   9201 		CSR_WRITE(sc, WMREG_EIMS,
   9202 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9203 	else
   9204 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9205 }
   9206 
   9207 static int
   9208 wm_txrxintr_msix(void *arg)
   9209 {
   9210 	struct wm_queue *wmq = arg;
   9211 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9212 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9213 	struct wm_softc *sc = txq->txq_sc;
   9214 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9215 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9216 	bool txmore;
   9217 	bool rxmore;
   9218 
   9219 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9220 
   9221 	DPRINTF(WM_DEBUG_TX,
   9222 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9223 
   9224 	wm_txrxintr_disable(wmq);
   9225 
   9226 	mutex_enter(txq->txq_lock);
   9227 
   9228 	if (txq->txq_stopping) {
   9229 		mutex_exit(txq->txq_lock);
   9230 		return 0;
   9231 	}
   9232 
   9233 	WM_Q_EVCNT_INCR(txq, txdw);
   9234 	txmore = wm_txeof(txq, txlimit);
    9235 	/* wm_deferred_start() is done in wm_handle_queue(). */
   9236 	mutex_exit(txq->txq_lock);
   9237 
   9238 	DPRINTF(WM_DEBUG_RX,
   9239 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9240 	mutex_enter(rxq->rxq_lock);
   9241 
   9242 	if (rxq->rxq_stopping) {
   9243 		mutex_exit(rxq->rxq_lock);
   9244 		return 0;
   9245 	}
   9246 
   9247 	WM_Q_EVCNT_INCR(rxq, intr);
   9248 	rxmore = wm_rxeof(rxq, rxlimit);
   9249 	mutex_exit(rxq->rxq_lock);
   9250 
   9251 	wm_itrs_writereg(sc, wmq);
   9252 
   9253 	if (txmore || rxmore)
   9254 		softint_schedule(wmq->wmq_si);
   9255 	else
   9256 		wm_txrxintr_enable(wmq);
   9257 
   9258 	return 1;
   9259 }
   9260 
   9261 static void
   9262 wm_handle_queue(void *arg)
   9263 {
   9264 	struct wm_queue *wmq = arg;
   9265 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9266 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9267 	struct wm_softc *sc = txq->txq_sc;
   9268 	u_int txlimit = sc->sc_tx_process_limit;
   9269 	u_int rxlimit = sc->sc_rx_process_limit;
   9270 	bool txmore;
   9271 	bool rxmore;
   9272 
   9273 	mutex_enter(txq->txq_lock);
   9274 	if (txq->txq_stopping) {
   9275 		mutex_exit(txq->txq_lock);
   9276 		return;
   9277 	}
   9278 	txmore = wm_txeof(txq, txlimit);
   9279 	wm_deferred_start_locked(txq);
   9280 	mutex_exit(txq->txq_lock);
   9281 
   9282 	mutex_enter(rxq->rxq_lock);
   9283 	if (rxq->rxq_stopping) {
   9284 		mutex_exit(rxq->rxq_lock);
   9285 		return;
   9286 	}
   9287 	WM_Q_EVCNT_INCR(rxq, defer);
   9288 	rxmore = wm_rxeof(rxq, rxlimit);
   9289 	mutex_exit(rxq->rxq_lock);
   9290 
   9291 	if (txmore || rxmore)
   9292 		softint_schedule(wmq->wmq_si);
   9293 	else
   9294 		wm_txrxintr_enable(wmq);
   9295 }
   9296 
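/*
 * Interrupt/softint ping-pong (a summary of the two functions above):
 * wm_txrxintr_msix() processes at most sc_{tx,rx}_intr_process_limit
 * entries with the queue interrupt disabled; if more work remains it
 * defers to wm_handle_queue(), which processes up to the separate
 * sc_{tx,rx}_process_limit and reschedules its softint until both
 * rings are drained, at which point the queue interrupt is re-enabled
 * via wm_txrxintr_enable().
 */
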
   9297 /*
   9298  * wm_linkintr_msix:
   9299  *
   9300  *	Interrupt service routine for link status change for MSI-X.
   9301  */
   9302 static int
   9303 wm_linkintr_msix(void *arg)
   9304 {
   9305 	struct wm_softc *sc = arg;
   9306 	uint32_t reg;
   9307 	bool has_rxo;
   9308 
   9309 	DPRINTF(WM_DEBUG_LINK,
   9310 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9311 
   9312 	reg = CSR_READ(sc, WMREG_ICR);
   9313 	WM_CORE_LOCK(sc);
   9314 	if (sc->sc_core_stopping)
   9315 		goto out;
   9316 
   9317 	if ((reg & ICR_LSC) != 0) {
   9318 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9319 		wm_linkintr(sc, ICR_LSC);
   9320 	}
   9321 
   9322 	/*
   9323 	 * XXX 82574 MSI-X mode workaround
   9324 	 *
    9325 	 * In 82574 MSI-X mode, a receive overrun (RXO) raises an interrupt
    9326 	 * on the ICR_OTHER MSI-X vector and raises neither ICR_RXQ(0) nor
    9327 	 * ICR_RXQ(1). So we generate ICR_RXQ(0) and ICR_RXQ(1) interrupts
    9328 	 * by writing WMREG_ICS, in order to process received packets.
   9329 	 */
   9330 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9331 #if defined(WM_DEBUG)
   9332 		log(LOG_WARNING, "%s: Receive overrun\n",
   9333 		    device_xname(sc->sc_dev));
   9334 #endif /* defined(WM_DEBUG) */
   9335 
   9336 		has_rxo = true;
   9337 		/*
    9338 		 * The RXO interrupt arrives at a very high rate when receive
    9339 		 * traffic is heavy, so we use polling mode for ICR_OTHER just
    9340 		 * as for the Tx/Rx interrupts. ICR_OTHER will be re-enabled
    9341 		 * at the end of wm_txrxintr_msix(), which is kicked by both
    9342 		 * the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9343 		 */
   9344 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9345 
   9346 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9347 	}
    9348 
   9351 out:
   9352 	WM_CORE_UNLOCK(sc);
   9353 
   9354 	if (sc->sc_type == WM_T_82574) {
   9355 		if (!has_rxo)
   9356 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9357 		else
   9358 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9359 	} else if (sc->sc_type == WM_T_82575)
   9360 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9361 	else
   9362 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9363 
   9364 	return 1;
   9365 }
   9366 
   9367 /*
   9368  * Media related.
   9369  * GMII, SGMII, TBI (and SERDES)
   9370  */
   9371 
   9372 /* Common */
   9373 
   9374 /*
   9375  * wm_tbi_serdes_set_linkled:
   9376  *
   9377  *	Update the link LED on TBI and SERDES devices.
   9378  */
   9379 static void
   9380 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9381 {
   9382 
   9383 	if (sc->sc_tbi_linkup)
   9384 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9385 	else
   9386 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9387 
   9388 	/* 82540 or newer devices are active low */
   9389 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9390 
   9391 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9392 }
   9393 
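/*
 * Example of the active-low handling above: on an 82540 or newer
 * device with link up, CTRL_SWDPIN(0) is first set and then inverted
 * by the XOR, so the pin is driven low, which lights the (active-low)
 * link LED.
 */
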
   9394 /* GMII related */
   9395 
   9396 /*
   9397  * wm_gmii_reset:
   9398  *
   9399  *	Reset the PHY.
   9400  */
   9401 static void
   9402 wm_gmii_reset(struct wm_softc *sc)
   9403 {
   9404 	uint32_t reg;
   9405 	int rv;
   9406 
   9407 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9408 		device_xname(sc->sc_dev), __func__));
   9409 
   9410 	rv = sc->phy.acquire(sc);
   9411 	if (rv != 0) {
   9412 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9413 		    __func__);
   9414 		return;
   9415 	}
   9416 
   9417 	switch (sc->sc_type) {
   9418 	case WM_T_82542_2_0:
   9419 	case WM_T_82542_2_1:
   9420 		/* null */
   9421 		break;
   9422 	case WM_T_82543:
   9423 		/*
   9424 		 * With 82543, we need to force speed and duplex on the MAC
   9425 		 * equal to what the PHY speed and duplex configuration is.
   9426 		 * In addition, we need to perform a hardware reset on the PHY
   9427 		 * to take it out of reset.
   9428 		 */
   9429 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9430 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9431 
   9432 		/* The PHY reset pin is active-low. */
   9433 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9434 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9435 		    CTRL_EXT_SWDPIN(4));
   9436 		reg |= CTRL_EXT_SWDPIO(4);
   9437 
   9438 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9439 		CSR_WRITE_FLUSH(sc);
   9440 		delay(10*1000);
   9441 
   9442 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9443 		CSR_WRITE_FLUSH(sc);
   9444 		delay(150);
   9445 #if 0
   9446 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9447 #endif
   9448 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9449 		break;
   9450 	case WM_T_82544:	/* reset 10000us */
   9451 	case WM_T_82540:
   9452 	case WM_T_82545:
   9453 	case WM_T_82545_3:
   9454 	case WM_T_82546:
   9455 	case WM_T_82546_3:
   9456 	case WM_T_82541:
   9457 	case WM_T_82541_2:
   9458 	case WM_T_82547:
   9459 	case WM_T_82547_2:
   9460 	case WM_T_82571:	/* reset 100us */
   9461 	case WM_T_82572:
   9462 	case WM_T_82573:
   9463 	case WM_T_82574:
   9464 	case WM_T_82575:
   9465 	case WM_T_82576:
   9466 	case WM_T_82580:
   9467 	case WM_T_I350:
   9468 	case WM_T_I354:
   9469 	case WM_T_I210:
   9470 	case WM_T_I211:
   9471 	case WM_T_82583:
   9472 	case WM_T_80003:
   9473 		/* generic reset */
   9474 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9475 		CSR_WRITE_FLUSH(sc);
   9476 		delay(20000);
   9477 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9478 		CSR_WRITE_FLUSH(sc);
   9479 		delay(20000);
   9480 
   9481 		if ((sc->sc_type == WM_T_82541)
   9482 		    || (sc->sc_type == WM_T_82541_2)
   9483 		    || (sc->sc_type == WM_T_82547)
   9484 		    || (sc->sc_type == WM_T_82547_2)) {
    9485 			/* Workarounds for IGP are done in igp_reset() */
   9486 			/* XXX add code to set LED after phy reset */
   9487 		}
   9488 		break;
   9489 	case WM_T_ICH8:
   9490 	case WM_T_ICH9:
   9491 	case WM_T_ICH10:
   9492 	case WM_T_PCH:
   9493 	case WM_T_PCH2:
   9494 	case WM_T_PCH_LPT:
   9495 	case WM_T_PCH_SPT:
   9496 	case WM_T_PCH_CNP:
   9497 		/* generic reset */
   9498 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9499 		CSR_WRITE_FLUSH(sc);
   9500 		delay(100);
   9501 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9502 		CSR_WRITE_FLUSH(sc);
   9503 		delay(150);
   9504 		break;
   9505 	default:
   9506 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9507 		    __func__);
   9508 		break;
   9509 	}
   9510 
   9511 	sc->phy.release(sc);
   9512 
   9513 	/* get_cfg_done */
   9514 	wm_get_cfg_done(sc);
   9515 
   9516 	/* extra setup */
   9517 	switch (sc->sc_type) {
   9518 	case WM_T_82542_2_0:
   9519 	case WM_T_82542_2_1:
   9520 	case WM_T_82543:
   9521 	case WM_T_82544:
   9522 	case WM_T_82540:
   9523 	case WM_T_82545:
   9524 	case WM_T_82545_3:
   9525 	case WM_T_82546:
   9526 	case WM_T_82546_3:
   9527 	case WM_T_82541_2:
   9528 	case WM_T_82547_2:
   9529 	case WM_T_82571:
   9530 	case WM_T_82572:
   9531 	case WM_T_82573:
   9532 	case WM_T_82574:
   9533 	case WM_T_82583:
   9534 	case WM_T_82575:
   9535 	case WM_T_82576:
   9536 	case WM_T_82580:
   9537 	case WM_T_I350:
   9538 	case WM_T_I354:
   9539 	case WM_T_I210:
   9540 	case WM_T_I211:
   9541 	case WM_T_80003:
   9542 		/* null */
   9543 		break;
   9544 	case WM_T_82541:
   9545 	case WM_T_82547:
    9546 		/* XXX Configure the activity LED after PHY reset */
   9547 		break;
   9548 	case WM_T_ICH8:
   9549 	case WM_T_ICH9:
   9550 	case WM_T_ICH10:
   9551 	case WM_T_PCH:
   9552 	case WM_T_PCH2:
   9553 	case WM_T_PCH_LPT:
   9554 	case WM_T_PCH_SPT:
   9555 	case WM_T_PCH_CNP:
   9556 		wm_phy_post_reset(sc);
   9557 		break;
   9558 	default:
   9559 		panic("%s: unknown type\n", __func__);
   9560 		break;
   9561 	}
   9562 }
   9563 
   9564 /*
   9565  * Setup sc_phytype and mii_{read|write}reg.
   9566  *
    9567  *  To identify the PHY type, the correct read/write function must be
    9568  * selected. To select the correct read/write function, the PCI ID or MAC
    9569  * type is required, without accessing PHY registers.
    9570  *
    9571  *  On the first call of this function, the PHY ID is not known yet. Check
    9572  * the PCI ID or MAC type. The list of PCI IDs may not be perfect, so the
    9573  * result might be incorrect.
    9574  *
    9575  *  On the second call, the PHY OUI and model are used to identify the PHY
    9576  * type. It might not be perfect because of missing comparison entries, but
    9577  * it is more reliable than the first call.
    9578  *
    9579  *  If the newly detected result differs from the previous assumption, a
    9580  * diagnostic message is printed.
   9581  */
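/*
 * A rough sketch of the expected call sequence; the first call site is
 * outside this function, so this is inferred from the mii->mii_readreg ==
 * NULL check below and is an illustration rather than a reference:
 *
 *	wm_gmii_setup_phytype(sc, 0, 0);	(1st call: guess by IDs)
 *	mii_attach(dev, &sc->sc_mii, ...);	(probe PHYs on the bus)
 *	child = LIST_FIRST(&mii->mii_phys);
 *	wm_gmii_setup_phytype(sc, child->mii_mpd_oui, child->mii_mpd_model);
 */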
   9582 static void
   9583 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9584     uint16_t phy_model)
   9585 {
   9586 	device_t dev = sc->sc_dev;
   9587 	struct mii_data *mii = &sc->sc_mii;
   9588 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9589 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9590 	mii_readreg_t new_readreg;
   9591 	mii_writereg_t new_writereg;
   9592 
   9593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9594 		device_xname(sc->sc_dev), __func__));
   9595 
   9596 	if (mii->mii_readreg == NULL) {
   9597 		/*
   9598 		 *  This is the first call of this function. For ICH and PCH
   9599 		 * variants, it's difficult to determine the PHY access method
   9600 		 * by sc_type, so use the PCI product ID for some devices.
   9601 		 */
   9602 
   9603 		switch (sc->sc_pcidevid) {
   9604 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9605 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9606 			/* 82577 */
   9607 			new_phytype = WMPHY_82577;
   9608 			break;
   9609 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9610 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9611 			/* 82578 */
   9612 			new_phytype = WMPHY_82578;
   9613 			break;
   9614 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9615 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9616 			/* 82579 */
   9617 			new_phytype = WMPHY_82579;
   9618 			break;
   9619 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9620 		case PCI_PRODUCT_INTEL_82801I_BM:
   9621 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9622 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9623 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9624 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9625 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9626 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9627 			/* ICH8, 9, 10 with 82567 */
   9628 			new_phytype = WMPHY_BM;
   9629 			break;
   9630 		default:
   9631 			break;
   9632 		}
   9633 	} else {
   9634 		/* It's not the first call. Use PHY OUI and model */
   9635 		switch (phy_oui) {
   9636 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9637 			switch (phy_model) {
   9638 			case 0x0004: /* XXX */
   9639 				new_phytype = WMPHY_82578;
   9640 				break;
   9641 			default:
   9642 				break;
   9643 			}
   9644 			break;
   9645 		case MII_OUI_xxMARVELL:
   9646 			switch (phy_model) {
   9647 			case MII_MODEL_xxMARVELL_I210:
   9648 				new_phytype = WMPHY_I210;
   9649 				break;
   9650 			case MII_MODEL_xxMARVELL_E1011:
   9651 			case MII_MODEL_xxMARVELL_E1000_3:
   9652 			case MII_MODEL_xxMARVELL_E1000_5:
   9653 			case MII_MODEL_xxMARVELL_E1112:
   9654 				new_phytype = WMPHY_M88;
   9655 				break;
   9656 			case MII_MODEL_xxMARVELL_E1149:
   9657 				new_phytype = WMPHY_BM;
   9658 				break;
   9659 			case MII_MODEL_xxMARVELL_E1111:
   9660 			case MII_MODEL_xxMARVELL_I347:
   9661 			case MII_MODEL_xxMARVELL_E1512:
   9662 			case MII_MODEL_xxMARVELL_E1340M:
   9663 			case MII_MODEL_xxMARVELL_E1543:
   9664 				new_phytype = WMPHY_M88;
   9665 				break;
   9666 			case MII_MODEL_xxMARVELL_I82563:
   9667 				new_phytype = WMPHY_GG82563;
   9668 				break;
   9669 			default:
   9670 				break;
   9671 			}
   9672 			break;
   9673 		case MII_OUI_INTEL:
   9674 			switch (phy_model) {
   9675 			case MII_MODEL_INTEL_I82577:
   9676 				new_phytype = WMPHY_82577;
   9677 				break;
   9678 			case MII_MODEL_INTEL_I82579:
   9679 				new_phytype = WMPHY_82579;
   9680 				break;
   9681 			case MII_MODEL_INTEL_I217:
   9682 				new_phytype = WMPHY_I217;
   9683 				break;
   9684 			case MII_MODEL_INTEL_I82580:
   9685 			case MII_MODEL_INTEL_I350:
   9686 				new_phytype = WMPHY_82580;
   9687 				break;
   9688 			default:
   9689 				break;
   9690 			}
   9691 			break;
   9692 		case MII_OUI_yyINTEL:
   9693 			switch (phy_model) {
   9694 			case MII_MODEL_yyINTEL_I82562G:
   9695 			case MII_MODEL_yyINTEL_I82562EM:
   9696 			case MII_MODEL_yyINTEL_I82562ET:
   9697 				new_phytype = WMPHY_IFE;
   9698 				break;
   9699 			case MII_MODEL_yyINTEL_IGP01E1000:
   9700 				new_phytype = WMPHY_IGP;
   9701 				break;
   9702 			case MII_MODEL_yyINTEL_I82566:
   9703 				new_phytype = WMPHY_IGP_3;
   9704 				break;
   9705 			default:
   9706 				break;
   9707 			}
   9708 			break;
   9709 		default:
   9710 			break;
   9711 		}
   9712 		if (new_phytype == WMPHY_UNKNOWN)
   9713 			aprint_verbose_dev(dev,
   9714 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9715 			    __func__, phy_oui, phy_model);
   9716 
   9717 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9718 		    && (sc->sc_phytype != new_phytype)) {
    9719 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
   9720 			    "was incorrect. PHY type from PHY ID = %u\n",
   9721 			    sc->sc_phytype, new_phytype);
   9722 		}
   9723 	}
   9724 
   9725 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9726 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9727 		/* SGMII */
   9728 		new_readreg = wm_sgmii_readreg;
   9729 		new_writereg = wm_sgmii_writereg;
    9730 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9731 		/* BM2 (phyaddr == 1) */
   9732 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9733 		    && (new_phytype != WMPHY_BM)
   9734 		    && (new_phytype != WMPHY_UNKNOWN))
   9735 			doubt_phytype = new_phytype;
   9736 		new_phytype = WMPHY_BM;
   9737 		new_readreg = wm_gmii_bm_readreg;
   9738 		new_writereg = wm_gmii_bm_writereg;
   9739 	} else if (sc->sc_type >= WM_T_PCH) {
   9740 		/* All PCH* use _hv_ */
   9741 		new_readreg = wm_gmii_hv_readreg;
   9742 		new_writereg = wm_gmii_hv_writereg;
   9743 	} else if (sc->sc_type >= WM_T_ICH8) {
   9744 		/* non-82567 ICH8, 9 and 10 */
   9745 		new_readreg = wm_gmii_i82544_readreg;
   9746 		new_writereg = wm_gmii_i82544_writereg;
   9747 	} else if (sc->sc_type >= WM_T_80003) {
   9748 		/* 80003 */
   9749 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9750 		    && (new_phytype != WMPHY_GG82563)
   9751 		    && (new_phytype != WMPHY_UNKNOWN))
   9752 			doubt_phytype = new_phytype;
   9753 		new_phytype = WMPHY_GG82563;
   9754 		new_readreg = wm_gmii_i80003_readreg;
   9755 		new_writereg = wm_gmii_i80003_writereg;
   9756 	} else if (sc->sc_type >= WM_T_I210) {
   9757 		/* I210 and I211 */
   9758 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9759 		    && (new_phytype != WMPHY_I210)
   9760 		    && (new_phytype != WMPHY_UNKNOWN))
   9761 			doubt_phytype = new_phytype;
   9762 		new_phytype = WMPHY_I210;
   9763 		new_readreg = wm_gmii_gs40g_readreg;
   9764 		new_writereg = wm_gmii_gs40g_writereg;
   9765 	} else if (sc->sc_type >= WM_T_82580) {
   9766 		/* 82580, I350 and I354 */
   9767 		new_readreg = wm_gmii_82580_readreg;
   9768 		new_writereg = wm_gmii_82580_writereg;
   9769 	} else if (sc->sc_type >= WM_T_82544) {
    9770 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9771 		new_readreg = wm_gmii_i82544_readreg;
   9772 		new_writereg = wm_gmii_i82544_writereg;
   9773 	} else {
   9774 		new_readreg = wm_gmii_i82543_readreg;
   9775 		new_writereg = wm_gmii_i82543_writereg;
   9776 	}
   9777 
   9778 	if (new_phytype == WMPHY_BM) {
   9779 		/* All BM use _bm_ */
   9780 		new_readreg = wm_gmii_bm_readreg;
   9781 		new_writereg = wm_gmii_bm_writereg;
   9782 	}
   9783 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9784 		/* All PCH* use _hv_ */
   9785 		new_readreg = wm_gmii_hv_readreg;
   9786 		new_writereg = wm_gmii_hv_writereg;
   9787 	}
   9788 
   9789 	/* Diag output */
   9790 	if (doubt_phytype != WMPHY_UNKNOWN)
   9791 		aprint_error_dev(dev, "Assumed new PHY type was "
   9792 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9793 		    new_phytype);
   9794 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9795 	    && (sc->sc_phytype != new_phytype))
    9796 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
   9797 		    "was incorrect. New PHY type = %u\n",
   9798 		    sc->sc_phytype, new_phytype);
   9799 
   9800 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9801 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9802 
   9803 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9804 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9805 		    "function was incorrect.\n");
   9806 
   9807 	/* Update now */
   9808 	sc->sc_phytype = new_phytype;
   9809 	mii->mii_readreg = new_readreg;
   9810 	mii->mii_writereg = new_writereg;
   9811 	if (new_readreg == wm_gmii_hv_readreg) {
   9812 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9813 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9814 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9815 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9816 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9817 	}
   9818 }
   9819 
   9820 /*
   9821  * wm_get_phy_id_82575:
   9822  *
   9823  * Return PHY ID. Return -1 if it failed.
   9824  */
   9825 static int
   9826 wm_get_phy_id_82575(struct wm_softc *sc)
   9827 {
   9828 	uint32_t reg;
   9829 	int phyid = -1;
   9830 
   9831 	/* XXX */
   9832 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9833 		return -1;
   9834 
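	/*
	 * The PHY address field is read from MDIC on the 82575/82576 and
	 * from MDICNFG on the 82580 and later; it is the address the MAC
	 * uses on the external MDIO interface.
	 */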
   9835 	if (wm_sgmii_uses_mdio(sc)) {
   9836 		switch (sc->sc_type) {
   9837 		case WM_T_82575:
   9838 		case WM_T_82576:
   9839 			reg = CSR_READ(sc, WMREG_MDIC);
   9840 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9841 			break;
   9842 		case WM_T_82580:
   9843 		case WM_T_I350:
   9844 		case WM_T_I354:
   9845 		case WM_T_I210:
   9846 		case WM_T_I211:
   9847 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9848 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9849 			break;
   9850 		default:
   9851 			return -1;
   9852 		}
   9853 	}
   9854 
   9855 	return phyid;
   9856 }
   9857 
   9858 
   9859 /*
   9860  * wm_gmii_mediainit:
   9861  *
   9862  *	Initialize media for use on 1000BASE-T devices.
   9863  */
   9864 static void
   9865 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9866 {
   9867 	device_t dev = sc->sc_dev;
   9868 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9869 	struct mii_data *mii = &sc->sc_mii;
   9870 	uint32_t reg;
   9871 
   9872 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9873 		device_xname(sc->sc_dev), __func__));
   9874 
   9875 	/* We have GMII. */
   9876 	sc->sc_flags |= WM_F_HAS_MII;
   9877 
   9878 	if (sc->sc_type == WM_T_80003)
    9879 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9880 	else
   9881 		sc->sc_tipg = TIPG_1000T_DFLT;
   9882 
   9883 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9884 	if ((sc->sc_type == WM_T_82580)
   9885 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9886 	    || (sc->sc_type == WM_T_I211)) {
   9887 		reg = CSR_READ(sc, WMREG_PHPM);
   9888 		reg &= ~PHPM_GO_LINK_D;
   9889 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9890 	}
   9891 
   9892 	/*
   9893 	 * Let the chip set speed/duplex on its own based on
   9894 	 * signals from the PHY.
   9895 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9896 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9897 	 */
   9898 	sc->sc_ctrl |= CTRL_SLU;
   9899 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9900 
   9901 	/* Initialize our media structures and probe the GMII. */
   9902 	mii->mii_ifp = ifp;
   9903 
   9904 	mii->mii_statchg = wm_gmii_statchg;
   9905 
    9906 	/* Get PHY control from SMBus to PCIe */
   9907 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9908 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9909 	    || (sc->sc_type == WM_T_PCH_CNP))
   9910 		wm_smbustopci(sc);
   9911 
   9912 	wm_gmii_reset(sc);
   9913 
   9914 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9915 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9916 	    wm_gmii_mediastatus);
   9917 
   9918 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9919 	    || (sc->sc_type == WM_T_82580)
   9920 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9921 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9922 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9923 			/* Attach only one port */
   9924 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9925 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9926 		} else {
   9927 			int i, id;
   9928 			uint32_t ctrl_ext;
   9929 
   9930 			id = wm_get_phy_id_82575(sc);
   9931 			if (id != -1) {
   9932 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9933 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9934 			}
   9935 			if ((id == -1)
   9936 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9937 				/* Power on the SGMII PHY if it is disabled */
   9938 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9939 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9940 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9941 				CSR_WRITE_FLUSH(sc);
   9942 				delay(300*1000); /* XXX too long */
   9943 
    9944 				/* Try PHY addresses from 1 to 7 */
   9945 				for (i = 1; i < 8; i++)
   9946 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9947 					    0xffffffff, i, MII_OFFSET_ANY,
   9948 					    MIIF_DOPAUSE);
   9949 
    9950 				/* Restore the previous SFP cage power state */
   9951 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9952 			}
   9953 		}
   9954 	} else
   9955 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9956 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9957 
   9958 	/*
   9959 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9960 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9961 	 */
   9962 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9963 		|| (sc->sc_type == WM_T_PCH_SPT)
   9964 		|| (sc->sc_type == WM_T_PCH_CNP))
   9965 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9966 		wm_set_mdio_slow_mode_hv(sc);
   9967 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9968 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9969 	}
   9970 
   9971 	/*
   9972 	 * (For ICH8 variants)
   9973 	 * If PHY detection failed, use BM's r/w function and retry.
   9974 	 */
   9975 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9976 		/* if failed, retry with *_bm_* */
   9977 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9978 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9979 		    sc->sc_phytype);
   9980 		sc->sc_phytype = WMPHY_BM;
   9981 		mii->mii_readreg = wm_gmii_bm_readreg;
   9982 		mii->mii_writereg = wm_gmii_bm_writereg;
   9983 
   9984 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9985 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9986 	}
   9987 
   9988 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9989 		/* No PHY was found */
   9990 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9991 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9992 		sc->sc_phytype = WMPHY_NONE;
   9993 	} else {
   9994 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9995 
   9996 		/*
    9997 		 * A PHY was found. Check the PHY type again via the second
    9998 		 * call of wm_gmii_setup_phytype().
   9999 		 */
   10000 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10001 		    child->mii_mpd_model);
   10002 
   10003 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10004 	}
   10005 }
   10006 
   10007 /*
   10008  * wm_gmii_mediachange:	[ifmedia interface function]
   10009  *
   10010  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10011  */
   10012 static int
   10013 wm_gmii_mediachange(struct ifnet *ifp)
   10014 {
   10015 	struct wm_softc *sc = ifp->if_softc;
   10016 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10017 	int rc;
   10018 
   10019 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10020 		device_xname(sc->sc_dev), __func__));
   10021 	if ((ifp->if_flags & IFF_UP) == 0)
   10022 		return 0;
   10023 
   10024 	/* Disable D0 LPLU. */
   10025 	wm_lplu_d0_disable(sc);
   10026 
   10027 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10028 	sc->sc_ctrl |= CTRL_SLU;
   10029 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10030 	    || (sc->sc_type > WM_T_82543)) {
   10031 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10032 	} else {
   10033 		sc->sc_ctrl &= ~CTRL_ASDE;
   10034 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10035 		if (ife->ifm_media & IFM_FDX)
   10036 			sc->sc_ctrl |= CTRL_FD;
   10037 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10038 		case IFM_10_T:
   10039 			sc->sc_ctrl |= CTRL_SPEED_10;
   10040 			break;
   10041 		case IFM_100_TX:
   10042 			sc->sc_ctrl |= CTRL_SPEED_100;
   10043 			break;
   10044 		case IFM_1000_T:
   10045 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10046 			break;
   10047 		default:
   10048 			panic("wm_gmii_mediachange: bad media 0x%x",
   10049 			    ife->ifm_media);
   10050 		}
   10051 	}
   10052 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10053 	CSR_WRITE_FLUSH(sc);
   10054 	if (sc->sc_type <= WM_T_82543)
   10055 		wm_gmii_reset(sc);
   10056 
   10057 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10058 		return 0;
   10059 	return rc;
   10060 }
   10061 
   10062 /*
   10063  * wm_gmii_mediastatus:	[ifmedia interface function]
   10064  *
   10065  *	Get the current interface media status on a 1000BASE-T device.
   10066  */
   10067 static void
   10068 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10069 {
   10070 	struct wm_softc *sc = ifp->if_softc;
   10071 
   10072 	ether_mediastatus(ifp, ifmr);
   10073 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10074 	    | sc->sc_flowflags;
   10075 }
   10076 
   10077 #define	MDI_IO		CTRL_SWDPIN(2)
   10078 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10079 #define	MDI_CLK		CTRL_SWDPIN(3)
   10080 
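/*
 * The 82543 lacks the MDIC-based PHY interface used by later chips, so
 * MII management frames are bit-banged through software-controllable
 * pins, with MDI_IO as the data line and MDI_CLK as the clock.  Each bit
 * is clocked with 10us hold times, so a bit takes roughly 30us on the
 * wire.
 */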
   10081 static void
   10082 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10083 {
   10084 	uint32_t i, v;
   10085 
   10086 	v = CSR_READ(sc, WMREG_CTRL);
   10087 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10088 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10089 
   10090 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10091 		if (data & i)
   10092 			v |= MDI_IO;
   10093 		else
   10094 			v &= ~MDI_IO;
   10095 		CSR_WRITE(sc, WMREG_CTRL, v);
   10096 		CSR_WRITE_FLUSH(sc);
   10097 		delay(10);
   10098 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10099 		CSR_WRITE_FLUSH(sc);
   10100 		delay(10);
   10101 		CSR_WRITE(sc, WMREG_CTRL, v);
   10102 		CSR_WRITE_FLUSH(sc);
   10103 		delay(10);
   10104 	}
   10105 }
   10106 
   10107 static uint32_t
   10108 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10109 {
   10110 	uint32_t v, i, data = 0;
   10111 
   10112 	v = CSR_READ(sc, WMREG_CTRL);
   10113 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10114 	v |= CTRL_SWDPIO(3);
   10115 
   10116 	CSR_WRITE(sc, WMREG_CTRL, v);
   10117 	CSR_WRITE_FLUSH(sc);
   10118 	delay(10);
   10119 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10120 	CSR_WRITE_FLUSH(sc);
   10121 	delay(10);
   10122 	CSR_WRITE(sc, WMREG_CTRL, v);
   10123 	CSR_WRITE_FLUSH(sc);
   10124 	delay(10);
   10125 
   10126 	for (i = 0; i < 16; i++) {
   10127 		data <<= 1;
   10128 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10129 		CSR_WRITE_FLUSH(sc);
   10130 		delay(10);
   10131 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10132 			data |= 1;
   10133 		CSR_WRITE(sc, WMREG_CTRL, v);
   10134 		CSR_WRITE_FLUSH(sc);
   10135 		delay(10);
   10136 	}
   10137 
   10138 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10139 	CSR_WRITE_FLUSH(sc);
   10140 	delay(10);
   10141 	CSR_WRITE(sc, WMREG_CTRL, v);
   10142 	CSR_WRITE_FLUSH(sc);
   10143 	delay(10);
   10144 
   10145 	return data;
   10146 }
   10147 
   10148 #undef MDI_IO
   10149 #undef MDI_DIR
   10150 #undef MDI_CLK
   10151 
   10152 /*
   10153  * wm_gmii_i82543_readreg:	[mii interface function]
   10154  *
   10155  *	Read a PHY register on the GMII (i82543 version).
   10156  */
   10157 static int
   10158 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10159 {
   10160 	struct wm_softc *sc = device_private(dev);
   10161 	int rv;
   10162 
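	/*
	 * An IEEE 802.3 clause 22 read frame: a 32-bit preamble of all
	 * ones, then ST(01), OP(10 = read), a 5-bit PHY address and a
	 * 5-bit register address (14 bits in all), followed by a
	 * turnaround and a 16-bit data phase driven by the PHY.
	 */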
   10163 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10164 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10165 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10166 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10167 
   10168 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10169 		device_xname(dev), phy, reg, rv));
   10170 
   10171 	return rv;
   10172 }
   10173 
   10174 /*
   10175  * wm_gmii_i82543_writereg:	[mii interface function]
   10176  *
   10177  *	Write a PHY register on the GMII (i82543 version).
   10178  */
   10179 static void
   10180 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10181 {
   10182 	struct wm_softc *sc = device_private(dev);
   10183 
   10184 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10185 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10186 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10187 	    (MII_COMMAND_START << 30), 32);
   10188 }
   10189 
   10190 /*
   10191  * wm_gmii_mdic_readreg:	[mii interface function]
   10192  *
   10193  *	Read a PHY register on the GMII.
   10194  */
   10195 static int
   10196 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10197 {
   10198 	struct wm_softc *sc = device_private(dev);
   10199 	uint32_t mdic = 0;
   10200 	int i, rv;
   10201 
   10202 	if (reg > MII_ADDRMASK) {
   10203 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10204 		    __func__, sc->sc_phytype, reg);
   10205 		reg &= MII_ADDRMASK;
   10206 	}
   10207 
   10208 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10209 	    MDIC_REGADD(reg));
   10210 
   10211 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10212 		delay(50);
   10213 		mdic = CSR_READ(sc, WMREG_MDIC);
   10214 		if (mdic & MDIC_READY)
   10215 			break;
   10216 	}
   10217 
   10218 	if ((mdic & MDIC_READY) == 0) {
   10219 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10220 		    device_xname(dev), phy, reg);
   10221 		return 0;
   10222 	} else if (mdic & MDIC_E) {
   10223 #if 0 /* This is normal if no PHY is present. */
   10224 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10225 		    device_xname(dev), phy, reg);
   10226 #endif
   10227 		return 0;
   10228 	} else {
   10229 		rv = MDIC_DATA(mdic);
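		/* An all-ones value usually means no PHY at this address. */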
   10230 		if (rv == 0xffff)
   10231 			rv = 0;
   10232 	}
   10233 
   10234 	/*
   10235 	 * Allow some time after each MDIC transaction to avoid
   10236 	 * reading duplicate data in the next MDIC transaction.
   10237 	 */
   10238 	if (sc->sc_type == WM_T_PCH2)
   10239 		delay(100);
   10240 
   10241 	return rv;
   10242 }
   10243 
   10244 /*
   10245  * wm_gmii_mdic_writereg:	[mii interface function]
   10246  *
   10247  *	Write a PHY register on the GMII.
   10248  */
   10249 static void
   10250 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10251 {
   10252 	struct wm_softc *sc = device_private(dev);
   10253 	uint32_t mdic = 0;
   10254 	int i;
   10255 
   10256 	if (reg > MII_ADDRMASK) {
   10257 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10258 		    __func__, sc->sc_phytype, reg);
   10259 		reg &= MII_ADDRMASK;
   10260 	}
   10261 
   10262 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10263 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10264 
   10265 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10266 		delay(50);
   10267 		mdic = CSR_READ(sc, WMREG_MDIC);
   10268 		if (mdic & MDIC_READY)
   10269 			break;
   10270 	}
   10271 
   10272 	if ((mdic & MDIC_READY) == 0) {
   10273 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10274 		    device_xname(dev), phy, reg);
   10275 		return;
   10276 	} else if (mdic & MDIC_E) {
   10277 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10278 		    device_xname(dev), phy, reg);
   10279 		return;
   10280 	}
   10281 
   10282 	/*
   10283 	 * Allow some time after each MDIC transaction to avoid
   10284 	 * reading duplicate data in the next MDIC transaction.
   10285 	 */
   10286 	if (sc->sc_type == WM_T_PCH2)
   10287 		delay(100);
   10288 }
   10289 
   10290 /*
   10291  * wm_gmii_i82544_readreg:	[mii interface function]
   10292  *
   10293  *	Read a PHY register on the GMII.
   10294  */
   10295 static int
   10296 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10297 {
   10298 	struct wm_softc *sc = device_private(dev);
   10299 	uint16_t val;
   10300 
   10301 	if (sc->phy.acquire(sc)) {
   10302 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10303 		return 0;
   10304 	}
   10305 
   10306 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10307 
   10308 	sc->phy.release(sc);
   10309 
   10310 	return val;
   10311 }
   10312 
   10313 static int
   10314 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10315 {
   10316 	struct wm_softc *sc = device_private(dev);
   10317 
   10318 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10319 		switch (sc->sc_phytype) {
   10320 		case WMPHY_IGP:
   10321 		case WMPHY_IGP_2:
   10322 		case WMPHY_IGP_3:
   10323 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10324 			    reg);
   10325 			break;
   10326 		default:
   10327 #ifdef WM_DEBUG
   10328 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10329 			    __func__, sc->sc_phytype, reg);
   10330 #endif
   10331 			break;
   10332 		}
   10333 	}
   10334 
   10335 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10336 
   10337 	return 0;
   10338 }
   10339 
   10340 /*
   10341  * wm_gmii_i82544_writereg:	[mii interface function]
   10342  *
   10343  *	Write a PHY register on the GMII.
   10344  */
   10345 static void
   10346 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10347 {
   10348 	struct wm_softc *sc = device_private(dev);
   10349 
   10350 	if (sc->phy.acquire(sc)) {
   10351 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10352 		return;
   10353 	}
   10354 
    10355 	wm_gmii_i82544_writereg_locked(dev, phy, reg, val);
   10356 	sc->phy.release(sc);
   10357 }
   10358 
   10359 static int
   10360 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10361 {
   10362 	struct wm_softc *sc = device_private(dev);
   10363 
   10364 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10365 		switch (sc->sc_phytype) {
   10366 		case WMPHY_IGP:
   10367 		case WMPHY_IGP_2:
   10368 		case WMPHY_IGP_3:
   10369 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10370 			    reg);
   10371 			break;
   10372 		default:
   10373 #ifdef WM_DEBUG
    10374 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10375 			    __func__, sc->sc_phytype, reg);
   10376 #endif
   10377 			break;
   10378 		}
   10379 	}
   10380 
   10381 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10382 
   10383 	return 0;
   10384 }
   10385 
   10386 /*
   10387  * wm_gmii_i80003_readreg:	[mii interface function]
   10388  *
    10389  *	Read a PHY register on the Kumeran bus.
    10390  * This could be handled by the PHY layer if we didn't have to lock the
    10391  * resource ...
   10392  */
   10393 static int
   10394 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10395 {
   10396 	struct wm_softc *sc = device_private(dev);
   10397 	int page_select, temp;
   10398 	int rv;
   10399 
    10400 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10401 		return 0;
   10402 
   10403 	if (sc->phy.acquire(sc)) {
   10404 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10405 		return 0;
   10406 	}
   10407 
   10408 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10409 		page_select = GG82563_PHY_PAGE_SELECT;
   10410 	else {
   10411 		/*
   10412 		 * Use Alternative Page Select register to access registers
   10413 		 * 30 and 31.
   10414 		 */
   10415 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10416 	}
   10417 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10418 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10419 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10420 		/*
    10421 		 * Wait an additional 200us to work around a bug with the
    10422 		 * ready bit in the MDIC register.
   10423 		 */
   10424 		delay(200);
   10425 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10426 			device_printf(dev, "%s failed\n", __func__);
   10427 			rv = 0; /* XXX */
   10428 			goto out;
   10429 		}
   10430 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10431 		delay(200);
   10432 	} else
   10433 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10434 
   10435 out:
   10436 	sc->phy.release(sc);
   10437 	return rv;
   10438 }
   10439 
   10440 /*
   10441  * wm_gmii_i80003_writereg:	[mii interface function]
   10442  *
    10443  *	Write a PHY register on the Kumeran bus.
    10444  * This could be handled by the PHY layer if we didn't have to lock the
    10445  * resource ...
   10446  */
   10447 static void
   10448 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10449 {
   10450 	struct wm_softc *sc = device_private(dev);
   10451 	int page_select, temp;
   10452 
    10453 	if (phy != 1) /* Only one PHY on the Kumeran bus */
   10454 		return;
   10455 
   10456 	if (sc->phy.acquire(sc)) {
   10457 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10458 		return;
   10459 	}
   10460 
   10461 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10462 		page_select = GG82563_PHY_PAGE_SELECT;
   10463 	else {
   10464 		/*
   10465 		 * Use Alternative Page Select register to access registers
   10466 		 * 30 and 31.
   10467 		 */
   10468 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10469 	}
   10470 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10471 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10472 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10473 		/*
    10474 		 * Wait an additional 200us to work around a bug with the
    10475 		 * ready bit in the MDIC register.
   10476 		 */
   10477 		delay(200);
   10478 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10479 			device_printf(dev, "%s failed\n", __func__);
   10480 			goto out;
   10481 		}
   10482 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10483 		delay(200);
   10484 	} else
   10485 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10486 
   10487 out:
   10488 	sc->phy.release(sc);
   10489 }
   10490 
   10491 /*
   10492  * wm_gmii_bm_readreg:	[mii interface function]
   10493  *
    10494  *	Read a PHY register on a BM PHY.
    10495  * This could be handled by the PHY layer if we didn't have to lock the
    10496  * resource ...
   10497  */
   10498 static int
   10499 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10500 {
   10501 	struct wm_softc *sc = device_private(dev);
   10502 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10503 	uint16_t val;
   10504 	int rv;
   10505 
   10506 	if (sc->phy.acquire(sc)) {
   10507 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10508 		return 0;
   10509 	}
   10510 
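	/*
	 * Except on the 82574/82583, registers on pages >= 768, register
	 * 25 on page 0 and register 31 are only reachable at PHY address
	 * 1, so redirect the access there.
	 */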
   10511 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10512 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10513 		    || (reg == 31)) ? 1 : phy;
   10514 	/* Page 800 works differently than the rest so it has its own func */
   10515 	if (page == BM_WUC_PAGE) {
   10516 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10517 		rv = val;
   10518 		goto release;
   10519 	}
   10520 
   10521 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10522 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10523 		    && (sc->sc_type != WM_T_82583))
   10524 			wm_gmii_mdic_writereg(dev, phy,
   10525 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10526 		else
   10527 			wm_gmii_mdic_writereg(dev, phy,
   10528 			    BME1000_PHY_PAGE_SELECT, page);
   10529 	}
   10530 
   10531 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10532 
   10533 release:
   10534 	sc->phy.release(sc);
   10535 	return rv;
   10536 }
   10537 
   10538 /*
   10539  * wm_gmii_bm_writereg:	[mii interface function]
   10540  *
    10541  *	Write a PHY register on a BM PHY.
    10542  * This could be handled by the PHY layer if we didn't have to lock the
    10543  * resource ...
   10544  */
   10545 static void
   10546 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10547 {
   10548 	struct wm_softc *sc = device_private(dev);
   10549 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10550 
   10551 	if (sc->phy.acquire(sc)) {
   10552 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10553 		return;
   10554 	}
   10555 
   10556 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10557 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10558 		    || (reg == 31)) ? 1 : phy;
   10559 	/* Page 800 works differently than the rest so it has its own func */
   10560 	if (page == BM_WUC_PAGE) {
   10561 		uint16_t tmp;
   10562 
   10563 		tmp = val;
   10564 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10565 		goto release;
   10566 	}
   10567 
   10568 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10569 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10570 		    && (sc->sc_type != WM_T_82583))
   10571 			wm_gmii_mdic_writereg(dev, phy,
   10572 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10573 		else
   10574 			wm_gmii_mdic_writereg(dev, phy,
   10575 			    BME1000_PHY_PAGE_SELECT, page);
   10576 	}
   10577 
   10578 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10579 
   10580 release:
   10581 	sc->phy.release(sc);
   10582 }
   10583 
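/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
 *	page 800.  The access only works while the wakeup register enable
 *	bit on page 769 is set, so enable it first and restore the old
 *	value afterwards; see the e1000 functions cited in the step
 *	comments below.
 */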
   10584 static void
    10585 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10586 {
   10587 	struct wm_softc *sc = device_private(dev);
   10588 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10589 	uint16_t wuce, reg;
   10590 
   10591 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10592 		device_xname(dev), __func__));
   10593 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10594 	if (sc->sc_type == WM_T_PCH) {
    10595 		/* XXX The e1000 driver does nothing here... why? */
   10596 	}
   10597 
   10598 	/*
   10599 	 * 1) Enable PHY wakeup register first.
   10600 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10601 	 */
   10602 
   10603 	/* Set page 769 */
   10604 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10605 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10606 
   10607 	/* Read WUCE and save it */
   10608 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10609 
   10610 	reg = wuce | BM_WUC_ENABLE_BIT;
   10611 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10612 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10613 
   10614 	/* Select page 800 */
   10615 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10616 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10617 
   10618 	/*
   10619 	 * 2) Access PHY wakeup register.
   10620 	 * See e1000_access_phy_wakeup_reg_bm.
   10621 	 */
   10622 
   10623 	/* Write page 800 */
   10624 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10625 
   10626 	if (rd)
   10627 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10628 	else
   10629 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10630 
   10631 	/*
   10632 	 * 3) Disable PHY wakeup register.
   10633 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10634 	 */
   10635 	/* Set page 769 */
   10636 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10637 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10638 
   10639 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10640 }
   10641 
   10642 /*
   10643  * wm_gmii_hv_readreg:	[mii interface function]
   10644  *
    10645  *	Read a PHY register on the HV (PCH family) PHY.
    10646  * This could be handled by the PHY layer if we didn't have to lock the
    10647  * resource ...
   10648  */
   10649 static int
   10650 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10651 {
   10652 	struct wm_softc *sc = device_private(dev);
   10653 	uint16_t val;
   10654 
   10655 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10656 		device_xname(dev), __func__));
   10657 	if (sc->phy.acquire(sc)) {
   10658 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10659 		return 0;
   10660 	}
   10661 
   10662 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10663 	sc->phy.release(sc);
   10664 	return val;
   10665 }
   10666 
   10667 static int
   10668 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10669 {
   10670 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10671 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10672 
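	/* Registers on pages >= 768 are only reachable at PHY address 1. */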
   10673 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10674 
   10675 	/* Page 800 works differently than the rest so it has its own func */
   10676 	if (page == BM_WUC_PAGE) {
   10677 		wm_access_phy_wakeup_reg_bm(dev, reg, val, 1);
   10678 		return 0;
   10679 	}
   10680 
   10681 	/*
    10682 	 * Pages other than 0 that are lower than 768 work differently
    10683 	 * from the rest; accesses to them are not implemented here.
   10684 	 */
   10685 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10686 		printf("gmii_hv_readreg!!!\n");
   10687 		return 0;
   10688 	}
   10689 
   10690 	/*
   10691 	 * XXX I21[789] documents say that the SMBus Address register is at
   10692 	 * PHY address 01, Page 0 (not 768), Register 26.
   10693 	 */
   10694 	if (page == HV_INTC_FC_PAGE_START)
   10695 		page = 0;
   10696 
   10697 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10698 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10699 		    page << BME1000_PAGE_SHIFT);
   10700 	}
   10701 
   10702 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10703 	return 0;
   10704 }
   10705 
   10706 /*
   10707  * wm_gmii_hv_writereg:	[mii interface function]
   10708  *
    10709  *	Write a PHY register on the HV (PCH family) PHY.
    10710  * This could be handled by the PHY layer if we didn't have to lock the
    10711  * resource ...
   10712  */
   10713 static void
   10714 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10715 {
   10716 	struct wm_softc *sc = device_private(dev);
   10717 
   10718 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10719 		device_xname(dev), __func__));
   10720 
   10721 	if (sc->phy.acquire(sc)) {
   10722 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10723 		return;
   10724 	}
   10725 
   10726 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10727 	sc->phy.release(sc);
   10728 }
   10729 
   10730 static int
   10731 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10732 {
   10733 	struct wm_softc *sc = device_private(dev);
   10734 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10735 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10736 
   10737 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10738 
   10739 	/* Page 800 works differently than the rest so it has its own func */
   10740 	if (page == BM_WUC_PAGE) {
   10741 		uint16_t tmp;
   10742 
   10743 		tmp = val;
   10744 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10745 		return 0;
   10746 	}
   10747 
   10748 	/*
    10749 	 * Pages other than 0 that are lower than 768 work differently
    10750 	 * from the rest; accesses to them are not implemented here.
   10751 	 */
   10752 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10753 		printf("gmii_hv_writereg!!!\n");
   10754 		return -1;
   10755 	}
   10756 
   10757 	{
   10758 		/*
   10759 		 * XXX I21[789] documents say that the SMBus Address register
   10760 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10761 		 */
   10762 		if (page == HV_INTC_FC_PAGE_START)
   10763 			page = 0;
   10764 
   10765 		/*
   10766 		 * XXX Workaround MDIO accesses being disabled after entering
   10767 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10768 		 * register is set)
   10769 		 */
   10770 		if (sc->sc_phytype == WMPHY_82578) {
   10771 			struct mii_softc *child;
   10772 
   10773 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10774 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10775 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10776 			    && ((val & (1 << 11)) != 0)) {
   10777 				printf("XXX need workaround\n");
   10778 			}
   10779 		}
   10780 
   10781 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10782 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10783 			    page << BME1000_PAGE_SHIFT);
   10784 		}
   10785 	}
   10786 
   10787 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10788 
   10789 	return 0;
   10790 }
   10791 
   10792 /*
   10793  * wm_gmii_82580_readreg:	[mii interface function]
   10794  *
   10795  *	Read a PHY register on the 82580 and I350.
   10796  * This could be handled by the PHY layer if we didn't have to lock the
    10797  * resource ...
   10798  */
   10799 static int
   10800 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10801 {
   10802 	struct wm_softc *sc = device_private(dev);
   10803 	int rv;
   10804 
   10805 	if (sc->phy.acquire(sc) != 0) {
   10806 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10807 		return 0;
   10808 	}
   10809 
   10810 #ifdef DIAGNOSTIC
   10811 	if (reg > MII_ADDRMASK) {
   10812 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10813 		    __func__, sc->sc_phytype, reg);
   10814 		reg &= MII_ADDRMASK;
   10815 	}
   10816 #endif
   10817 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10818 
   10819 	sc->phy.release(sc);
   10820 	return rv;
   10821 }
   10822 
   10823 /*
   10824  * wm_gmii_82580_writereg:	[mii interface function]
   10825  *
   10826  *	Write a PHY register on the 82580 and I350.
   10827  * This could be handled by the PHY layer if we didn't have to lock the
    10828  * resource ...
   10829  */
   10830 static void
   10831 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10832 {
   10833 	struct wm_softc *sc = device_private(dev);
   10834 
   10835 	if (sc->phy.acquire(sc) != 0) {
   10836 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10837 		return;
   10838 	}
   10839 
   10840 #ifdef DIAGNOSTIC
   10841 	if (reg > MII_ADDRMASK) {
   10842 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10843 		    __func__, sc->sc_phytype, reg);
   10844 		reg &= MII_ADDRMASK;
   10845 	}
   10846 #endif
   10847 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10848 
   10849 	sc->phy.release(sc);
   10850 }
   10851 
   10852 /*
   10853  * wm_gmii_gs40g_readreg:	[mii interface function]
   10854  *
    10855  *	Read a PHY register on the I210 and I211.
    10856  * This could be handled by the PHY layer if we didn't have to lock the
    10857  * resource ...
   10858  */
   10859 static int
   10860 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10861 {
   10862 	struct wm_softc *sc = device_private(dev);
   10863 	int page, offset;
   10864 	int rv;
   10865 
   10866 	/* Acquire semaphore */
   10867 	if (sc->phy.acquire(sc)) {
   10868 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10869 		return 0;
   10870 	}
   10871 
   10872 	/* Page select */
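	/*
	 * GS40G registers are encoded as (page << GS40G_PAGE_SHIFT) |
	 * offset: select the page first, then access the offset within it.
	 */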
   10873 	page = reg >> GS40G_PAGE_SHIFT;
   10874 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10875 
   10876 	/* Read reg */
   10877 	offset = reg & GS40G_OFFSET_MASK;
   10878 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10879 
   10880 	sc->phy.release(sc);
   10881 	return rv;
   10882 }
   10883 
   10884 /*
   10885  * wm_gmii_gs40g_writereg:	[mii interface function]
   10886  *
   10887  *	Write a PHY register on the I210 and I211.
   10888  * This could be handled by the PHY layer if we didn't have to lock the
    10889  * resource ...
   10890  */
   10891 static void
   10892 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10893 {
   10894 	struct wm_softc *sc = device_private(dev);
   10895 	int page, offset;
   10896 
   10897 	/* Acquire semaphore */
   10898 	if (sc->phy.acquire(sc)) {
   10899 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10900 		return;
   10901 	}
   10902 
   10903 	/* Page select */
   10904 	page = reg >> GS40G_PAGE_SHIFT;
   10905 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10906 
   10907 	/* Write reg */
   10908 	offset = reg & GS40G_OFFSET_MASK;
   10909 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10910 
   10911 	/* Release semaphore */
   10912 	sc->phy.release(sc);
   10913 }
   10914 
   10915 /*
   10916  * wm_gmii_statchg:	[mii interface function]
   10917  *
   10918  *	Callback from MII layer when media changes.
   10919  */
   10920 static void
   10921 wm_gmii_statchg(struct ifnet *ifp)
   10922 {
   10923 	struct wm_softc *sc = ifp->if_softc;
   10924 	struct mii_data *mii = &sc->sc_mii;
   10925 
   10926 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10927 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10928 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10929 
   10930 	/*
   10931 	 * Get flow control negotiation result.
   10932 	 */
   10933 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10934 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10935 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10936 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10937 	}
   10938 
   10939 	if (sc->sc_flowflags & IFM_FLOW) {
   10940 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10941 			sc->sc_ctrl |= CTRL_TFCE;
   10942 			sc->sc_fcrtl |= FCRTL_XONE;
   10943 		}
   10944 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10945 			sc->sc_ctrl |= CTRL_RFCE;
   10946 	}
   10947 
   10948 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10949 		DPRINTF(WM_DEBUG_LINK,
   10950 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10951 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10952 	} else {
   10953 		DPRINTF(WM_DEBUG_LINK,
   10954 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10955 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10956 	}
   10957 
   10958 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10959 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10960 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10961 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10962 	if (sc->sc_type == WM_T_80003) {
   10963 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10964 		case IFM_1000_T:
   10965 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10966 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10967 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10968 			break;
   10969 		default:
   10970 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10971 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10972 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10973 			break;
   10974 		}
   10975 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10976 	}
   10977 }
   10978 
    10979 /* Kumeran related (80003, ICH* and PCH*) */
   10980 
   10981 /*
   10982  * wm_kmrn_readreg:
   10983  *
    10984  *	Read a Kumeran register.
   10985  */
   10986 static int
   10987 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10988 {
   10989 	int rv;
   10990 
   10991 	if (sc->sc_type == WM_T_80003)
   10992 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10993 	else
   10994 		rv = sc->phy.acquire(sc);
   10995 	if (rv != 0) {
   10996 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10997 		    __func__);
   10998 		return rv;
   10999 	}
   11000 
   11001 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11002 
   11003 	if (sc->sc_type == WM_T_80003)
   11004 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11005 	else
   11006 		sc->phy.release(sc);
   11007 
   11008 	return rv;
   11009 }
   11010 
   11011 static int
   11012 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11013 {
   11014 
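	/*
	 * Write the register offset with the read-enable bit (REN) set;
	 * the hardware then latches the register's value into the low 16
	 * bits of KUMCTRLSTA, which is read back after a short delay.
	 */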
   11015 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11016 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11017 	    KUMCTRLSTA_REN);
   11018 	CSR_WRITE_FLUSH(sc);
   11019 	delay(2);
   11020 
   11021 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11022 
   11023 	return 0;
   11024 }
   11025 
   11026 /*
   11027  * wm_kmrn_writereg:
   11028  *
    11029  *	Write a Kumeran register.
   11030  */
   11031 static int
   11032 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11033 {
   11034 	int rv;
   11035 
   11036 	if (sc->sc_type == WM_T_80003)
   11037 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11038 	else
   11039 		rv = sc->phy.acquire(sc);
   11040 	if (rv != 0) {
   11041 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11042 		    __func__);
   11043 		return rv;
   11044 	}
   11045 
   11046 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11047 
   11048 	if (sc->sc_type == WM_T_80003)
   11049 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11050 	else
   11051 		sc->phy.release(sc);
   11052 
   11053 	return rv;
   11054 }
   11055 
   11056 static int
   11057 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11058 {
   11059 
   11060 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11061 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11062 
   11063 	return 0;
   11064 }
   11065 
   11066 /* SGMII related */
   11067 
   11068 /*
   11069  * wm_sgmii_uses_mdio
   11070  *
   11071  * Check whether the transaction is to the internal PHY or the external
   11072  * MDIO interface. Return true if it's MDIO.
   11073  */
   11074 static bool
   11075 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11076 {
   11077 	uint32_t reg;
   11078 	bool ismdio = false;
   11079 
   11080 	switch (sc->sc_type) {
   11081 	case WM_T_82575:
   11082 	case WM_T_82576:
   11083 		reg = CSR_READ(sc, WMREG_MDIC);
   11084 		ismdio = ((reg & MDIC_DEST) != 0);
   11085 		break;
   11086 	case WM_T_82580:
   11087 	case WM_T_I350:
   11088 	case WM_T_I354:
   11089 	case WM_T_I210:
   11090 	case WM_T_I211:
   11091 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11092 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11093 		break;
   11094 	default:
   11095 		break;
   11096 	}
   11097 
   11098 	return ismdio;
   11099 }
   11100 
   11101 /*
   11102  * wm_sgmii_readreg:	[mii interface function]
   11103  *
    11104  *	Read a PHY register on the SGMII.
    11105  * This could be handled by the PHY layer if we didn't have to lock the
    11106  * resource ...
   11107  */
   11108 static int
   11109 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11110 {
   11111 	struct wm_softc *sc = device_private(dev);
   11112 	uint32_t i2ccmd;
   11113 	int i, rv;
   11114 
   11115 	if (sc->phy.acquire(sc)) {
   11116 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11117 		return 0;
   11118 	}
   11119 
   11120 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11121 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11122 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11123 
   11124 	/* Poll the ready bit */
   11125 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11126 		delay(50);
   11127 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11128 		if (i2ccmd & I2CCMD_READY)
   11129 			break;
   11130 	}
   11131 	if ((i2ccmd & I2CCMD_READY) == 0)
   11132 		device_printf(dev, "I2CCMD Read did not complete\n");
   11133 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11134 		device_printf(dev, "I2CCMD Error bit set\n");
   11135 
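	/* The I2C interface returns the data byte-swapped; swap it back. */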
   11136 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11137 
   11138 	sc->phy.release(sc);
   11139 	return rv;
   11140 }
   11141 
   11142 /*
   11143  * wm_sgmii_writereg:	[mii interface function]
   11144  *
   11145  *	Write a PHY register on the SGMII.
   11146  * This could be handled by the PHY layer if we didn't have to lock the
    11147  * resource ...
   11148  */
   11149 static void
   11150 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11151 {
   11152 	struct wm_softc *sc = device_private(dev);
   11153 	uint32_t i2ccmd;
   11154 	int i;
   11155 	int swapdata;
   11156 
   11157 	if (sc->phy.acquire(sc) != 0) {
   11158 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11159 		return;
   11160 	}
   11161 	/* Swap the data bytes for the I2C interface */
   11162 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11163 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11164 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11165 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11166 
   11167 	/* Poll the ready bit */
   11168 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11169 		delay(50);
   11170 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11171 		if (i2ccmd & I2CCMD_READY)
   11172 			break;
   11173 	}
   11174 	if ((i2ccmd & I2CCMD_READY) == 0)
   11175 		device_printf(dev, "I2CCMD Write did not complete\n");
   11176 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11177 		device_printf(dev, "I2CCMD Error bit set\n");
   11178 
   11179 	sc->phy.release(sc);
   11180 }
   11181 
   11182 /* TBI related */
   11183 
   11184 static bool
   11185 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11186 {
   11187 	bool sig;
   11188 
   11189 	sig = ctrl & CTRL_SWDPIN(1);
   11190 
   11191 	/*
   11192 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11193 	 * detect a signal, 1 if they don't.
   11194 	 */
   11195 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11196 		sig = !sig;
   11197 
   11198 	return sig;
   11199 }
   11200 
   11201 /*
   11202  * wm_tbi_mediainit:
   11203  *
   11204  *	Initialize media for use on 1000BASE-X devices.
   11205  */
   11206 static void
   11207 wm_tbi_mediainit(struct wm_softc *sc)
   11208 {
   11209 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11210 	const char *sep = "";
   11211 
   11212 	if (sc->sc_type < WM_T_82543)
   11213 		sc->sc_tipg = TIPG_WM_DFLT;
   11214 	else
   11215 		sc->sc_tipg = TIPG_LG_DFLT;
   11216 
   11217 	sc->sc_tbi_serdes_anegticks = 5;
   11218 
   11219 	/* Initialize our media structures */
   11220 	sc->sc_mii.mii_ifp = ifp;
   11221 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11222 
   11223 	if ((sc->sc_type >= WM_T_82575)
   11224 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11225 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11226 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11227 	else
   11228 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11229 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11230 
   11231 	/*
   11232 	 * SWD Pins:
   11233 	 *
   11234 	 *	0 = Link LED (output)
   11235 	 *	1 = Loss Of Signal (input)
   11236 	 */
   11237 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11238 
   11239 	/* XXX Perhaps this is only for TBI */
   11240 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11241 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11242 
   11243 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11244 		sc->sc_ctrl &= ~CTRL_LRST;
   11245 
   11246 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11247 
   11248 #define	ADD(ss, mm, dd)							\
   11249 do {									\
   11250 	aprint_normal("%s%s", sep, ss);					\
   11251 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11252 	sep = ", ";							\
   11253 } while (/*CONSTCOND*/0)
   11254 
   11255 	aprint_normal_dev(sc->sc_dev, "");
   11256 
   11257 	if (sc->sc_type == WM_T_I354) {
   11258 		uint32_t status;
   11259 
   11260 		status = CSR_READ(sc, WMREG_STATUS);
   11261 		if (((status & STATUS_2P5_SKU) != 0)
   11262 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11263 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11264 		} else
    11265 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11266 	} else if (sc->sc_type == WM_T_82545) {
   11267 		/* Only 82545 is LX (XXX except SFP) */
   11268 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11269 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11270 	} else {
   11271 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11272 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11273 	}
   11274 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11275 	aprint_normal("\n");
   11276 
   11277 #undef ADD
   11278 
   11279 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11280 }
   11281 
   11282 /*
   11283  * wm_tbi_mediachange:	[ifmedia interface function]
   11284  *
   11285  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11286  */
   11287 static int
   11288 wm_tbi_mediachange(struct ifnet *ifp)
   11289 {
   11290 	struct wm_softc *sc = ifp->if_softc;
   11291 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11292 	uint32_t status, ctrl;
   11293 	bool signal;
   11294 	int i;
   11295 
   11296 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11297 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11298 		/* XXX need some work for >= 82571 and < 82575 */
   11299 		if (sc->sc_type < WM_T_82575)
   11300 			return 0;
   11301 	}
   11302 
   11303 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11304 	    || (sc->sc_type >= WM_T_82575))
   11305 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11306 
   11307 	sc->sc_ctrl &= ~CTRL_LRST;
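          	/*
          	 * Build the transmit configuration word advertised during
          	 * 802.3z autonegotiation: enable autoneg (ANE) plus our
          	 * duplex abilities and, if configured, the pause bits.
          	 */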
   11308 	sc->sc_txcw = TXCW_ANE;
   11309 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11310 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11311 	else if (ife->ifm_media & IFM_FDX)
   11312 		sc->sc_txcw |= TXCW_FD;
   11313 	else
   11314 		sc->sc_txcw |= TXCW_HD;
   11315 
   11316 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11317 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11318 
    11319 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11320 		device_xname(sc->sc_dev), sc->sc_txcw));
   11321 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11322 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11323 	CSR_WRITE_FLUSH(sc);
   11324 	delay(1000);
   11325 
    11326 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11327 	signal = wm_tbi_havesignal(sc, ctrl);
   11328 
   11329 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11330 		signal));
   11331 
   11332 	if (signal) {
   11333 		/* Have signal; wait for the link to come up. */
   11334 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11335 			delay(10000);
   11336 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11337 				break;
   11338 		}
   11339 
    11340 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11341 			device_xname(sc->sc_dev), i));
   11342 
   11343 		status = CSR_READ(sc, WMREG_STATUS);
   11344 		DPRINTF(WM_DEBUG_LINK,
   11345 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11346 			device_xname(sc->sc_dev), status, STATUS_LU));
   11347 		if (status & STATUS_LU) {
   11348 			/* Link is up. */
   11349 			DPRINTF(WM_DEBUG_LINK,
   11350 			    ("%s: LINK: set media -> link up %s\n",
   11351 				device_xname(sc->sc_dev),
   11352 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11353 
   11354 			/*
    11355 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11356 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   11357 			 */
   11358 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11359 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11360 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11361 			if (status & STATUS_FD)
   11362 				sc->sc_tctl |=
   11363 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11364 			else
   11365 				sc->sc_tctl |=
   11366 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11367 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11368 				sc->sc_fcrtl |= FCRTL_XONE;
   11369 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11370 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11371 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11372 			sc->sc_tbi_linkup = 1;
   11373 		} else {
   11374 			if (i == WM_LINKUP_TIMEOUT)
   11375 				wm_check_for_link(sc);
   11376 			/* Link is down. */
   11377 			DPRINTF(WM_DEBUG_LINK,
   11378 			    ("%s: LINK: set media -> link down\n",
   11379 				device_xname(sc->sc_dev)));
   11380 			sc->sc_tbi_linkup = 0;
   11381 		}
   11382 	} else {
   11383 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11384 			device_xname(sc->sc_dev)));
   11385 		sc->sc_tbi_linkup = 0;
   11386 	}
   11387 
   11388 	wm_tbi_serdes_set_linkled(sc);
   11389 
   11390 	return 0;
   11391 }
   11392 
   11393 /*
   11394  * wm_tbi_mediastatus:	[ifmedia interface function]
   11395  *
   11396  *	Get the current interface media status on a 1000BASE-X device.
   11397  */
   11398 static void
   11399 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11400 {
   11401 	struct wm_softc *sc = ifp->if_softc;
   11402 	uint32_t ctrl, status;
   11403 
   11404 	ifmr->ifm_status = IFM_AVALID;
   11405 	ifmr->ifm_active = IFM_ETHER;
   11406 
   11407 	status = CSR_READ(sc, WMREG_STATUS);
   11408 	if ((status & STATUS_LU) == 0) {
   11409 		ifmr->ifm_active |= IFM_NONE;
   11410 		return;
   11411 	}
   11412 
   11413 	ifmr->ifm_status |= IFM_ACTIVE;
   11414 	/* Only 82545 is LX */
   11415 	if (sc->sc_type == WM_T_82545)
   11416 		ifmr->ifm_active |= IFM_1000_LX;
   11417 	else
   11418 		ifmr->ifm_active |= IFM_1000_SX;
   11419 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11420 		ifmr->ifm_active |= IFM_FDX;
   11421 	else
   11422 		ifmr->ifm_active |= IFM_HDX;
   11423 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11424 	if (ctrl & CTRL_RFCE)
   11425 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11426 	if (ctrl & CTRL_TFCE)
   11427 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11428 }
   11429 
   11430 /* XXX TBI only */
   11431 static int
   11432 wm_check_for_link(struct wm_softc *sc)
   11433 {
   11434 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11435 	uint32_t rxcw;
   11436 	uint32_t ctrl;
   11437 	uint32_t status;
   11438 	bool signal;
   11439 
   11440 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11441 		device_xname(sc->sc_dev), __func__));
   11442 
   11443 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11444 		/* XXX need some work for >= 82571 */
   11445 		if (sc->sc_type >= WM_T_82571) {
   11446 			sc->sc_tbi_linkup = 1;
   11447 			return 0;
   11448 		}
   11449 	}
   11450 
   11451 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11452 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11453 	status = CSR_READ(sc, WMREG_STATUS);
   11454 	signal = wm_tbi_havesignal(sc, ctrl);
   11455 
   11456 	DPRINTF(WM_DEBUG_LINK,
   11457 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11458 		device_xname(sc->sc_dev), __func__, signal,
   11459 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11460 
   11461 	/*
   11462 	 * SWDPIN   LU RXCW
   11463 	 *	0    0	  0
   11464 	 *	0    0	  1	(should not happen)
   11465 	 *	0    1	  0	(should not happen)
   11466 	 *	0    1	  1	(should not happen)
   11467 	 *	1    0	  0	Disable autonego and force linkup
   11468 	 *	1    0	  1	got /C/ but not linkup yet
   11469 	 *	1    1	  0	(linkup)
   11470 	 *	1    1	  1	If IFM_AUTO, back to autonego
   11471 	 *
   11472 	 */
   11473 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11474 		DPRINTF(WM_DEBUG_LINK,
   11475 		    ("%s: %s: force linkup and fullduplex\n",
   11476 			device_xname(sc->sc_dev), __func__));
   11477 		sc->sc_tbi_linkup = 0;
   11478 		/* Disable auto-negotiation in the TXCW register */
   11479 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11480 
   11481 		/*
   11482 		 * Force link-up and also force full-duplex.
   11483 		 *
    11484 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    11485 		 * automatically, so re-read CTRL into sc->sc_ctrl.
   11486 		 */
   11487 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11488 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11489 	} else if (((status & STATUS_LU) != 0)
   11490 	    && ((rxcw & RXCW_C) != 0)
   11491 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11492 		sc->sc_tbi_linkup = 1;
   11493 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: go back to autonego\n",
   11494 			device_xname(sc->sc_dev),
   11495 			__func__));
   11496 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11497 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11498 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11499 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11500 			device_xname(sc->sc_dev), __func__));
   11501 	} else {
   11502 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11503 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11504 			status));
   11505 	}
   11506 
   11507 	return 0;
   11508 }
   11509 
   11510 /*
   11511  * wm_tbi_tick:
   11512  *
   11513  *	Check the link on TBI devices.
   11514  *	This function acts as mii_tick().
   11515  */
   11516 static void
   11517 wm_tbi_tick(struct wm_softc *sc)
   11518 {
   11519 	struct mii_data *mii = &sc->sc_mii;
   11520 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11521 	uint32_t status;
   11522 
   11523 	KASSERT(WM_CORE_LOCKED(sc));
   11524 
   11525 	status = CSR_READ(sc, WMREG_STATUS);
   11526 
   11527 	/* XXX is this needed? */
   11528 	(void)CSR_READ(sc, WMREG_RXCW);
   11529 	(void)CSR_READ(sc, WMREG_CTRL);
   11530 
    11531 	/* Set link status */
   11532 	if ((status & STATUS_LU) == 0) {
   11533 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11534 			device_xname(sc->sc_dev)));
   11535 		sc->sc_tbi_linkup = 0;
   11536 	} else if (sc->sc_tbi_linkup == 0) {
   11537 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11538 			device_xname(sc->sc_dev),
   11539 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11540 		sc->sc_tbi_linkup = 1;
   11541 		sc->sc_tbi_serdes_ticks = 0;
   11542 	}
   11543 
   11544 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11545 		goto setled;
   11546 
   11547 	if ((status & STATUS_LU) == 0) {
   11548 		sc->sc_tbi_linkup = 0;
   11549 		/* If the timer expired, retry autonegotiation */
   11550 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11551 		    && (++sc->sc_tbi_serdes_ticks
   11552 			>= sc->sc_tbi_serdes_anegticks)) {
   11553 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11554 			sc->sc_tbi_serdes_ticks = 0;
   11555 			/*
   11556 			 * Reset the link, and let autonegotiation do
   11557 			 * its thing
   11558 			 */
   11559 			sc->sc_ctrl |= CTRL_LRST;
   11560 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11561 			CSR_WRITE_FLUSH(sc);
   11562 			delay(1000);
   11563 			sc->sc_ctrl &= ~CTRL_LRST;
   11564 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11565 			CSR_WRITE_FLUSH(sc);
   11566 			delay(1000);
   11567 			CSR_WRITE(sc, WMREG_TXCW,
   11568 			    sc->sc_txcw & ~TXCW_ANE);
   11569 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11570 		}
   11571 	}
   11572 
   11573 setled:
   11574 	wm_tbi_serdes_set_linkled(sc);
   11575 }
   11576 
   11577 /* SERDES related */
   11578 static void
   11579 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11580 {
   11581 	uint32_t reg;
   11582 
   11583 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11584 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11585 		return;
   11586 
   11587 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11588 	reg |= PCS_CFG_PCS_EN;
   11589 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11590 
   11591 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11592 	reg &= ~CTRL_EXT_SWDPIN(3);
   11593 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11594 	CSR_WRITE_FLUSH(sc);
   11595 }
   11596 
   11597 static int
   11598 wm_serdes_mediachange(struct ifnet *ifp)
   11599 {
   11600 	struct wm_softc *sc = ifp->if_softc;
   11601 	bool pcs_autoneg = true; /* XXX */
   11602 	uint32_t ctrl_ext, pcs_lctl, reg;
   11603 
   11604 	/* XXX Currently, this function is not called on 8257[12] */
   11605 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11606 	    || (sc->sc_type >= WM_T_82575))
   11607 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11608 
   11609 	wm_serdes_power_up_link_82575(sc);
   11610 
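          	/* Force the MAC to report link up (SLU = Set Link Up). */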
   11611 	sc->sc_ctrl |= CTRL_SLU;
   11612 
   11613 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11614 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11615 
   11616 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11617 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11618 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11619 	case CTRL_EXT_LINK_MODE_SGMII:
   11620 		pcs_autoneg = true;
   11621 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11622 		break;
   11623 	case CTRL_EXT_LINK_MODE_1000KX:
   11624 		pcs_autoneg = false;
   11625 		/* FALLTHROUGH */
   11626 	default:
   11627 		if ((sc->sc_type == WM_T_82575)
   11628 		    || (sc->sc_type == WM_T_82576)) {
   11629 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11630 				pcs_autoneg = false;
   11631 		}
   11632 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11633 		    | CTRL_FRCFDX;
   11634 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11635 	}
   11636 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11637 
   11638 	if (pcs_autoneg) {
   11639 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11640 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11641 
   11642 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11643 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11644 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11645 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11646 	} else
   11647 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11648 
   11649 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   11650 
   11651 
   11652 	return 0;
   11653 }
   11654 
   11655 static void
   11656 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11657 {
   11658 	struct wm_softc *sc = ifp->if_softc;
   11659 	struct mii_data *mii = &sc->sc_mii;
   11660 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11661 	uint32_t pcs_adv, pcs_lpab, reg;
   11662 
   11663 	ifmr->ifm_status = IFM_AVALID;
   11664 	ifmr->ifm_active = IFM_ETHER;
   11665 
   11666 	/* Check PCS */
   11667 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11668 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11669 		ifmr->ifm_active |= IFM_NONE;
   11670 		sc->sc_tbi_linkup = 0;
   11671 		goto setled;
   11672 	}
   11673 
   11674 	sc->sc_tbi_linkup = 1;
   11675 	ifmr->ifm_status |= IFM_ACTIVE;
   11676 	if (sc->sc_type == WM_T_I354) {
   11677 		uint32_t status;
   11678 
   11679 		status = CSR_READ(sc, WMREG_STATUS);
   11680 		if (((status & STATUS_2P5_SKU) != 0)
   11681 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11682 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11683 		} else
   11684 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11685 	} else {
   11686 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11687 		case PCS_LSTS_SPEED_10:
   11688 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11689 			break;
   11690 		case PCS_LSTS_SPEED_100:
   11691 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11692 			break;
   11693 		case PCS_LSTS_SPEED_1000:
   11694 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11695 			break;
   11696 		default:
   11697 			device_printf(sc->sc_dev, "Unknown speed\n");
   11698 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11699 			break;
   11700 		}
   11701 	}
   11702 	if ((reg & PCS_LSTS_FDX) != 0)
   11703 		ifmr->ifm_active |= IFM_FDX;
   11704 	else
   11705 		ifmr->ifm_active |= IFM_HDX;
   11706 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11707 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11708 		/* Check flow */
   11709 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11710 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11711 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11712 			goto setled;
   11713 		}
   11714 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11715 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11716 		DPRINTF(WM_DEBUG_LINK,
   11717 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11718 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11719 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11720 			mii->mii_media_active |= IFM_FLOW
   11721 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11722 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11723 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11724 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11725 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11726 			mii->mii_media_active |= IFM_FLOW
   11727 			    | IFM_ETH_TXPAUSE;
   11728 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11729 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11730 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11731 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11732 			mii->mii_media_active |= IFM_FLOW
   11733 			    | IFM_ETH_RXPAUSE;
   11734 		}
   11735 	}
   11736 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11737 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11738 setled:
   11739 	wm_tbi_serdes_set_linkled(sc);
   11740 }
   11741 
   11742 /*
   11743  * wm_serdes_tick:
   11744  *
   11745  *	Check the link on serdes devices.
   11746  */
   11747 static void
   11748 wm_serdes_tick(struct wm_softc *sc)
   11749 {
   11750 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11751 	struct mii_data *mii = &sc->sc_mii;
   11752 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11753 	uint32_t reg;
   11754 
   11755 	KASSERT(WM_CORE_LOCKED(sc));
   11756 
   11757 	mii->mii_media_status = IFM_AVALID;
   11758 	mii->mii_media_active = IFM_ETHER;
   11759 
   11760 	/* Check PCS */
   11761 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11762 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11763 		mii->mii_media_status |= IFM_ACTIVE;
   11764 		sc->sc_tbi_linkup = 1;
   11765 		sc->sc_tbi_serdes_ticks = 0;
   11766 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11767 		if ((reg & PCS_LSTS_FDX) != 0)
   11768 			mii->mii_media_active |= IFM_FDX;
   11769 		else
   11770 			mii->mii_media_active |= IFM_HDX;
   11771 	} else {
   11772 		mii->mii_media_status |= IFM_NONE;
   11773 		sc->sc_tbi_linkup = 0;
   11774 		/* If the timer expired, retry autonegotiation */
   11775 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11776 		    && (++sc->sc_tbi_serdes_ticks
   11777 			>= sc->sc_tbi_serdes_anegticks)) {
   11778 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11779 			sc->sc_tbi_serdes_ticks = 0;
   11780 			/* XXX */
   11781 			wm_serdes_mediachange(ifp);
   11782 		}
   11783 	}
   11784 
   11785 	wm_tbi_serdes_set_linkled(sc);
   11786 }
   11787 
   11788 /* SFP related */
   11789 
   11790 static int
   11791 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11792 {
   11793 	uint32_t i2ccmd;
   11794 	int i;
   11795 
   11796 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11797 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11798 
   11799 	/* Poll the ready bit */
   11800 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11801 		delay(50);
   11802 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11803 		if (i2ccmd & I2CCMD_READY)
   11804 			break;
   11805 	}
   11806 	if ((i2ccmd & I2CCMD_READY) == 0)
   11807 		return -1;
   11808 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11809 		return -1;
   11810 
   11811 	*data = i2ccmd & 0x00ff;
   11812 
   11813 	return 0;
   11814 }
   11815 
   11816 static uint32_t
   11817 wm_sfp_get_media_type(struct wm_softc *sc)
   11818 {
   11819 	uint32_t ctrl_ext;
   11820 	uint8_t val = 0;
   11821 	int timeout = 3;
   11822 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11823 	int rv = -1;
   11824 
   11825 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11826 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
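          	/* Enable the I2C interface to reach the SFP module's EEPROM. */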
   11827 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11828 	CSR_WRITE_FLUSH(sc);
   11829 
   11830 	/* Read SFP module data */
   11831 	while (timeout) {
   11832 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11833 		if (rv == 0)
   11834 			break;
   11835 		delay(100*1000); /* XXX too big */
   11836 		timeout--;
   11837 	}
   11838 	if (rv != 0)
   11839 		goto out;
   11840 	switch (val) {
   11841 	case SFF_SFP_ID_SFF:
   11842 		aprint_normal_dev(sc->sc_dev,
   11843 		    "Module/Connector soldered to board\n");
   11844 		break;
   11845 	case SFF_SFP_ID_SFP:
   11846 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11847 		break;
   11848 	case SFF_SFP_ID_UNKNOWN:
   11849 		goto out;
   11850 	default:
   11851 		break;
   11852 	}
   11853 
   11854 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11855 	if (rv != 0) {
   11856 		goto out;
   11857 	}
   11858 
   11859 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11860 		mediatype = WM_MEDIATYPE_SERDES;
   11861 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11862 		sc->sc_flags |= WM_F_SGMII;
   11863 		mediatype = WM_MEDIATYPE_COPPER;
   11864 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11865 		sc->sc_flags |= WM_F_SGMII;
   11866 		mediatype = WM_MEDIATYPE_SERDES;
   11867 	}
   11868 
   11869 out:
   11870 	/* Restore I2C interface setting */
   11871 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11872 
   11873 	return mediatype;
   11874 }
   11875 
   11876 /*
   11877  * NVM related.
   11878  * Microwire, SPI (w/wo EERD) and Flash.
   11879  */
   11880 
   11881 /* Both spi and uwire */
   11882 
   11883 /*
   11884  * wm_eeprom_sendbits:
   11885  *
   11886  *	Send a series of bits to the EEPROM.
   11887  */
   11888 static void
   11889 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11890 {
   11891 	uint32_t reg;
   11892 	int x;
   11893 
   11894 	reg = CSR_READ(sc, WMREG_EECD);
   11895 
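          	/*
          	 * For each bit, MSB first: set up DI, then pulse SK high and
          	 * low again while DI is held stable, delaying at every edge.
          	 */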
   11896 	for (x = nbits; x > 0; x--) {
   11897 		if (bits & (1U << (x - 1)))
   11898 			reg |= EECD_DI;
   11899 		else
   11900 			reg &= ~EECD_DI;
   11901 		CSR_WRITE(sc, WMREG_EECD, reg);
   11902 		CSR_WRITE_FLUSH(sc);
   11903 		delay(2);
   11904 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11905 		CSR_WRITE_FLUSH(sc);
   11906 		delay(2);
   11907 		CSR_WRITE(sc, WMREG_EECD, reg);
   11908 		CSR_WRITE_FLUSH(sc);
   11909 		delay(2);
   11910 	}
   11911 }
   11912 
   11913 /*
   11914  * wm_eeprom_recvbits:
   11915  *
   11916  *	Receive a series of bits from the EEPROM.
   11917  */
   11918 static void
   11919 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11920 {
   11921 	uint32_t reg, val;
   11922 	int x;
   11923 
   11924 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11925 
   11926 	val = 0;
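          	/* Raise SK, sample DO (MSB first), then drop SK again. */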
   11927 	for (x = nbits; x > 0; x--) {
   11928 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11929 		CSR_WRITE_FLUSH(sc);
   11930 		delay(2);
   11931 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11932 			val |= (1U << (x - 1));
   11933 		CSR_WRITE(sc, WMREG_EECD, reg);
   11934 		CSR_WRITE_FLUSH(sc);
   11935 		delay(2);
   11936 	}
   11937 	*valp = val;
   11938 }
   11939 
   11940 /* Microwire */
   11941 
   11942 /*
   11943  * wm_nvm_read_uwire:
   11944  *
   11945  *	Read a word from the EEPROM using the MicroWire protocol.
   11946  */
   11947 static int
   11948 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11949 {
   11950 	uint32_t reg, val;
   11951 	int i;
   11952 
   11953 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11954 		device_xname(sc->sc_dev), __func__));
   11955 
   11956 	if (sc->nvm.acquire(sc) != 0)
   11957 		return -1;
   11958 
   11959 	for (i = 0; i < wordcnt; i++) {
   11960 		/* Clear SK and DI. */
   11961 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11962 		CSR_WRITE(sc, WMREG_EECD, reg);
   11963 
   11964 		/*
   11965 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11966 		 * and Xen.
   11967 		 *
   11968 		 * We use this workaround only for 82540 because qemu's
   11969 		 * e1000 act as 82540.
   11970 		 */
   11971 		if (sc->sc_type == WM_T_82540) {
   11972 			reg |= EECD_SK;
   11973 			CSR_WRITE(sc, WMREG_EECD, reg);
   11974 			reg &= ~EECD_SK;
   11975 			CSR_WRITE(sc, WMREG_EECD, reg);
   11976 			CSR_WRITE_FLUSH(sc);
   11977 			delay(2);
   11978 		}
   11979 		/* XXX: end of workaround */
   11980 
   11981 		/* Set CHIP SELECT. */
   11982 		reg |= EECD_CS;
   11983 		CSR_WRITE(sc, WMREG_EECD, reg);
   11984 		CSR_WRITE_FLUSH(sc);
   11985 		delay(2);
   11986 
   11987 		/* Shift in the READ command. */
   11988 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11989 
   11990 		/* Shift in address. */
   11991 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11992 
   11993 		/* Shift out the data. */
   11994 		wm_eeprom_recvbits(sc, &val, 16);
   11995 		data[i] = val & 0xffff;
   11996 
   11997 		/* Clear CHIP SELECT. */
   11998 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11999 		CSR_WRITE(sc, WMREG_EECD, reg);
   12000 		CSR_WRITE_FLUSH(sc);
   12001 		delay(2);
   12002 	}
   12003 
   12004 	sc->nvm.release(sc);
   12005 	return 0;
   12006 }
   12007 
   12008 /* SPI */
   12009 
   12010 /*
   12011  * Set SPI and FLASH related information from the EECD register.
   12012  * For 82541 and 82547, the word size is taken from EEPROM.
   12013  */
   12014 static int
   12015 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12016 {
   12017 	int size;
   12018 	uint32_t reg;
   12019 	uint16_t data;
   12020 
   12021 	reg = CSR_READ(sc, WMREG_EECD);
   12022 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12023 
   12024 	/* Read the size of NVM from EECD by default */
   12025 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12026 	switch (sc->sc_type) {
   12027 	case WM_T_82541:
   12028 	case WM_T_82541_2:
   12029 	case WM_T_82547:
   12030 	case WM_T_82547_2:
   12031 		/* Set dummy value to access EEPROM */
   12032 		sc->sc_nvm_wordsize = 64;
   12033 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12034 			aprint_error_dev(sc->sc_dev,
   12035 			    "%s: failed to read EEPROM size\n", __func__);
   12036 		}
   12037 		reg = data;
   12038 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12039 		if (size == 0)
    12040 			size = 6; /* 1 << 6 = 64 words */
   12041 		else
   12042 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12043 		break;
   12044 	case WM_T_80003:
   12045 	case WM_T_82571:
   12046 	case WM_T_82572:
   12047 	case WM_T_82573: /* SPI case */
   12048 	case WM_T_82574: /* SPI case */
   12049 	case WM_T_82583: /* SPI case */
   12050 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12051 		if (size > 14)
   12052 			size = 14;
   12053 		break;
   12054 	case WM_T_82575:
   12055 	case WM_T_82576:
   12056 	case WM_T_82580:
   12057 	case WM_T_I350:
   12058 	case WM_T_I354:
   12059 	case WM_T_I210:
   12060 	case WM_T_I211:
   12061 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12062 		if (size > 15)
   12063 			size = 15;
   12064 		break;
   12065 	default:
   12066 		aprint_error_dev(sc->sc_dev,
   12067 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   12068 		return -1;
   12069 		break;
   12070 	}
   12071 
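          	/* "size" is log2 of the NVM word count. */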
   12072 	sc->sc_nvm_wordsize = 1 << size;
   12073 
   12074 	return 0;
   12075 }
   12076 
   12077 /*
   12078  * wm_nvm_ready_spi:
   12079  *
   12080  *	Wait for a SPI EEPROM to be ready for commands.
   12081  */
   12082 static int
   12083 wm_nvm_ready_spi(struct wm_softc *sc)
   12084 {
   12085 	uint32_t val;
   12086 	int usec;
   12087 
   12088 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12089 		device_xname(sc->sc_dev), __func__));
   12090 
   12091 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12092 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12093 		wm_eeprom_recvbits(sc, &val, 8);
   12094 		if ((val & SPI_SR_RDY) == 0)
   12095 			break;
   12096 	}
   12097 	if (usec >= SPI_MAX_RETRIES) {
    12098 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   12099 		return -1;
   12100 	}
   12101 	return 0;
   12102 }
   12103 
   12104 /*
   12105  * wm_nvm_read_spi:
   12106  *
    12107  *	Read a word from the EEPROM using the SPI protocol.
   12108  */
   12109 static int
   12110 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12111 {
   12112 	uint32_t reg, val;
   12113 	int i;
   12114 	uint8_t opc;
   12115 	int rv = 0;
   12116 
   12117 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12118 		device_xname(sc->sc_dev), __func__));
   12119 
   12120 	if (sc->nvm.acquire(sc) != 0)
   12121 		return -1;
   12122 
   12123 	/* Clear SK and CS. */
   12124 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12125 	CSR_WRITE(sc, WMREG_EECD, reg);
   12126 	CSR_WRITE_FLUSH(sc);
   12127 	delay(2);
   12128 
   12129 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12130 		goto out;
   12131 
   12132 	/* Toggle CS to flush commands. */
   12133 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12134 	CSR_WRITE_FLUSH(sc);
   12135 	delay(2);
   12136 	CSR_WRITE(sc, WMREG_EECD, reg);
   12137 	CSR_WRITE_FLUSH(sc);
   12138 	delay(2);
   12139 
   12140 	opc = SPI_OPC_READ;
   12141 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12142 		opc |= SPI_OPC_A8;
   12143 
   12144 	wm_eeprom_sendbits(sc, opc, 8);
   12145 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12146 
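          	/*
          	 * The address sent above is a byte address (hence word << 1).
          	 * Each data word then arrives MSB first, so it is swapped to
          	 * host order below.
          	 */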
   12147 	for (i = 0; i < wordcnt; i++) {
   12148 		wm_eeprom_recvbits(sc, &val, 16);
   12149 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12150 	}
   12151 
   12152 	/* Raise CS and clear SK. */
   12153 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12154 	CSR_WRITE(sc, WMREG_EECD, reg);
   12155 	CSR_WRITE_FLUSH(sc);
   12156 	delay(2);
   12157 
   12158 out:
   12159 	sc->nvm.release(sc);
   12160 	return rv;
   12161 }
   12162 
   12163 /* Using with EERD */
   12164 
   12165 static int
   12166 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12167 {
   12168 	uint32_t attempts = 100000;
   12169 	uint32_t i, reg = 0;
   12170 	int32_t done = -1;
   12171 
   12172 	for (i = 0; i < attempts; i++) {
   12173 		reg = CSR_READ(sc, rw);
   12174 
   12175 		if (reg & EERD_DONE) {
   12176 			done = 0;
   12177 			break;
   12178 		}
   12179 		delay(5);
   12180 	}
   12181 
   12182 	return done;
   12183 }
   12184 
   12185 static int
   12186 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12187 {
   12188 	int i, eerd = 0;
   12189 	int rv = 0;
   12190 
   12191 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12192 		device_xname(sc->sc_dev), __func__));
   12193 
   12194 	if (sc->nvm.acquire(sc) != 0)
   12195 		return -1;
   12196 
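          	/*
          	 * One EERD transaction per word: write the address with the
          	 * START bit, poll for DONE, then pull the data out of EERD.
          	 */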
   12197 	for (i = 0; i < wordcnt; i++) {
   12198 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12199 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12200 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12201 		if (rv != 0) {
   12202 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
   12203 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
   12204 			break;
   12205 		}
   12206 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12207 	}
   12208 
   12209 	sc->nvm.release(sc);
   12210 	return rv;
   12211 }
   12212 
   12213 /* Flash */
   12214 
   12215 static int
   12216 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12217 {
   12218 	uint32_t eecd;
   12219 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12220 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12221 	uint32_t nvm_dword = 0;
   12222 	uint8_t sig_byte = 0;
   12223 	int rv;
   12224 
   12225 	switch (sc->sc_type) {
   12226 	case WM_T_PCH_SPT:
   12227 	case WM_T_PCH_CNP:
   12228 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12229 		act_offset = ICH_NVM_SIG_WORD * 2;
   12230 
   12231 		/* set bank to 0 in case flash read fails. */
   12232 		*bank = 0;
   12233 
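          		/*
          		 * The valid-bank signature lives in the high byte of
          		 * the word at ICH_NVM_SIG_WORD, hence the mask and
          		 * shift applied to nvm_dword below.
          		 */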
   12234 		/* Check bank 0 */
   12235 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12236 		if (rv != 0)
   12237 			return rv;
   12238 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12239 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12240 			*bank = 0;
   12241 			return 0;
   12242 		}
   12243 
   12244 		/* Check bank 1 */
   12245 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
   12246 		    &nvm_dword);
   12247 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12248 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12249 			*bank = 1;
   12250 			return 0;
   12251 		}
   12252 		aprint_error_dev(sc->sc_dev,
   12253 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12254 		return -1;
   12255 	case WM_T_ICH8:
   12256 	case WM_T_ICH9:
   12257 		eecd = CSR_READ(sc, WMREG_EECD);
   12258 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12259 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12260 			return 0;
   12261 		}
   12262 		/* FALLTHROUGH */
   12263 	default:
   12264 		/* Default to 0 */
   12265 		*bank = 0;
   12266 
   12267 		/* Check bank 0 */
   12268 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12269 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12270 			*bank = 0;
   12271 			return 0;
   12272 		}
   12273 
   12274 		/* Check bank 1 */
   12275 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12276 		    &sig_byte);
   12277 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12278 			*bank = 1;
   12279 			return 0;
   12280 		}
   12281 	}
   12282 
   12283 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12284 		device_xname(sc->sc_dev)));
   12285 	return -1;
   12286 }
   12287 
   12288 /******************************************************************************
   12289  * This function does initial flash setup so that a new read/write/erase cycle
   12290  * can be started.
   12291  *
    12292  * sc - The pointer to the wm_softc structure
   12293  ****************************************************************************/
   12294 static int32_t
   12295 wm_ich8_cycle_init(struct wm_softc *sc)
   12296 {
   12297 	uint16_t hsfsts;
   12298 	int32_t error = 1;
   12299 	int32_t i     = 0;
   12300 
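          	/*
          	 * On PCH_SPT and newer, HSFSTS and HSFCTL are halves of a
          	 * single 32-bit register: status in the low 16 bits and
          	 * control in the high 16 bits, which is why the SPT paths
          	 * below use 32-bit accesses with masks and shifts.
          	 */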
   12301 	if (sc->sc_type >= WM_T_PCH_SPT)
   12302 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12303 	else
   12304 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12305 
    12306 	/* Maybe check the Flash Descriptor Valid bit in HW status */
   12307 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12308 		return error;
   12309 
    12310 	/* Clear FCERR in HW status by writing a 1 */
    12311 	/* Clear DAEL in HW status by writing a 1 */
   12312 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12313 
   12314 	if (sc->sc_type >= WM_T_PCH_SPT)
   12315 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12316 	else
   12317 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12318 
    12319 	/*
    12320 	 * Either we should have a hardware SPI cycle-in-progress bit to check
    12321 	 * against in order to start a new cycle, or the FDONE bit should be
    12322 	 * changed in the hardware so that it is 1 after hardware reset, which
    12323 	 * can then be used as an indication whether a cycle is in progress or
    12324 	 * has been completed.  We should also have some software semaphore
    12325 	 * mechanism to guard FDONE or the cycle-in-progress bit so that two
    12326 	 * threads' accesses to those bits can be serialized, or a way so that
    12327 	 * two threads don't start the cycle at the same time.
    12328 	 */
   12329 
   12330 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12331 		/*
   12332 		 * There is no cycle running at present, so we can start a
   12333 		 * cycle
   12334 		 */
   12335 
   12336 		/* Begin by setting Flash Cycle Done. */
   12337 		hsfsts |= HSFSTS_DONE;
   12338 		if (sc->sc_type >= WM_T_PCH_SPT)
   12339 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12340 			    hsfsts & 0xffffUL);
   12341 		else
   12342 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12343 		error = 0;
   12344 	} else {
   12345 		/*
    12346 		 * Otherwise, poll for some time so the current cycle has a
   12347 		 * chance to end before giving up.
   12348 		 */
   12349 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12350 			if (sc->sc_type >= WM_T_PCH_SPT)
   12351 				hsfsts = ICH8_FLASH_READ32(sc,
   12352 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12353 			else
   12354 				hsfsts = ICH8_FLASH_READ16(sc,
   12355 				    ICH_FLASH_HSFSTS);
   12356 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12357 				error = 0;
   12358 				break;
   12359 			}
   12360 			delay(1);
   12361 		}
   12362 		if (error == 0) {
   12363 			/*
    12364 			 * The previous cycle ended within the timeout;
    12365 			 * now set the Flash Cycle Done bit.
   12366 			 */
   12367 			hsfsts |= HSFSTS_DONE;
   12368 			if (sc->sc_type >= WM_T_PCH_SPT)
   12369 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12370 				    hsfsts & 0xffffUL);
   12371 			else
   12372 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12373 				    hsfsts);
   12374 		}
   12375 	}
   12376 	return error;
   12377 }
   12378 
   12379 /******************************************************************************
   12380  * This function starts a flash cycle and waits for its completion
   12381  *
    12382  * sc - The pointer to the wm_softc structure
   12383  ****************************************************************************/
   12384 static int32_t
   12385 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12386 {
   12387 	uint16_t hsflctl;
   12388 	uint16_t hsfsts;
   12389 	int32_t error = 1;
   12390 	uint32_t i = 0;
   12391 
   12392 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12393 	if (sc->sc_type >= WM_T_PCH_SPT)
   12394 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12395 	else
   12396 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12397 	hsflctl |= HSFCTL_GO;
   12398 	if (sc->sc_type >= WM_T_PCH_SPT)
   12399 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12400 		    (uint32_t)hsflctl << 16);
   12401 	else
   12402 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12403 
   12404 	/* Wait till FDONE bit is set to 1 */
   12405 	do {
   12406 		if (sc->sc_type >= WM_T_PCH_SPT)
   12407 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12408 			    & 0xffffUL;
   12409 		else
   12410 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12411 		if (hsfsts & HSFSTS_DONE)
   12412 			break;
   12413 		delay(1);
   12414 		i++;
   12415 	} while (i < timeout);
    12416 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12417 		error = 0;
   12418 
   12419 	return error;
   12420 }
   12421 
   12422 /******************************************************************************
   12423  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12424  *
    12425  * sc - The pointer to the wm_softc structure
   12426  * index - The index of the byte or word to read.
   12427  * size - Size of data to read, 1=byte 2=word, 4=dword
   12428  * data - Pointer to the word to store the value read.
   12429  *****************************************************************************/
   12430 static int32_t
   12431 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12432     uint32_t size, uint32_t *data)
   12433 {
   12434 	uint16_t hsfsts;
   12435 	uint16_t hsflctl;
   12436 	uint32_t flash_linear_address;
   12437 	uint32_t flash_data = 0;
   12438 	int32_t error = 1;
   12439 	int32_t count = 0;
   12440 
   12441 	if (size < 1  || size > 4 || data == 0x0 ||
   12442 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12443 		return error;
   12444 
   12445 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12446 	    sc->sc_ich8_flash_base;
   12447 
   12448 	do {
   12449 		delay(1);
   12450 		/* Steps */
   12451 		error = wm_ich8_cycle_init(sc);
   12452 		if (error)
   12453 			break;
   12454 
   12455 		if (sc->sc_type >= WM_T_PCH_SPT)
   12456 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12457 			    >> 16;
   12458 		else
   12459 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12460 		/* The byte count field holds size - 1: 0b = 1 byte, 1b = 2 bytes. */
    12461 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12462 		    & HSFCTL_BCOUNT_MASK;
   12463 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12464 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12465 			/*
    12466 			 * On SPT, this register is in LAN memory space, not
    12467 			 * flash.  Therefore, only 32-bit access is supported.
   12468 			 */
   12469 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12470 			    (uint32_t)hsflctl << 16);
   12471 		} else
   12472 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12473 
   12474 		/*
   12475 		 * Write the last 24 bits of index into Flash Linear address
   12476 		 * field in Flash Address
   12477 		 */
    12478 		/* TODO: maybe check the index against the size of the flash */
   12479 
   12480 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12481 
   12482 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12483 
   12484 		/*
    12485 		 * If FCERR is set to 1, clear it and try the whole
    12486 		 * sequence a few more times; otherwise read in (shift in)
    12487 		 * the Flash Data0 register, which returns the bytes least
    12488 		 * significant byte first.
   12489 		 */
   12490 		if (error == 0) {
   12491 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12492 			if (size == 1)
   12493 				*data = (uint8_t)(flash_data & 0x000000FF);
   12494 			else if (size == 2)
   12495 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12496 			else if (size == 4)
   12497 				*data = (uint32_t)flash_data;
   12498 			break;
   12499 		} else {
   12500 			/*
   12501 			 * If we've gotten here, then things are probably
   12502 			 * completely hosed, but if the error condition is
   12503 			 * detected, it won't hurt to give it another try...
   12504 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12505 			 */
   12506 			if (sc->sc_type >= WM_T_PCH_SPT)
   12507 				hsfsts = ICH8_FLASH_READ32(sc,
   12508 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12509 			else
   12510 				hsfsts = ICH8_FLASH_READ16(sc,
   12511 				    ICH_FLASH_HSFSTS);
   12512 
   12513 			if (hsfsts & HSFSTS_ERR) {
   12514 				/* Repeat for some time before giving up. */
   12515 				continue;
   12516 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12517 				break;
   12518 		}
   12519 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12520 
   12521 	return error;
   12522 }
   12523 
   12524 /******************************************************************************
   12525  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12526  *
    12527  * sc - pointer to the wm_softc structure
   12528  * index - The index of the byte to read.
   12529  * data - Pointer to a byte to store the value read.
   12530  *****************************************************************************/
   12531 static int32_t
   12532 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12533 {
   12534 	int32_t status;
   12535 	uint32_t word = 0;
   12536 
   12537 	status = wm_read_ich8_data(sc, index, 1, &word);
   12538 	if (status == 0)
   12539 		*data = (uint8_t)word;
   12540 	else
   12541 		*data = 0;
   12542 
   12543 	return status;
   12544 }
   12545 
   12546 /******************************************************************************
   12547  * Reads a word from the NVM using the ICH8 flash access registers.
   12548  *
    12549  * sc - pointer to the wm_softc structure
   12550  * index - The starting byte index of the word to read.
   12551  * data - Pointer to a word to store the value read.
   12552  *****************************************************************************/
   12553 static int32_t
   12554 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12555 {
   12556 	int32_t status;
   12557 	uint32_t word = 0;
   12558 
   12559 	status = wm_read_ich8_data(sc, index, 2, &word);
   12560 	if (status == 0)
   12561 		*data = (uint16_t)word;
   12562 	else
   12563 		*data = 0;
   12564 
   12565 	return status;
   12566 }
   12567 
   12568 /******************************************************************************
   12569  * Reads a dword from the NVM using the ICH8 flash access registers.
   12570  *
    12571  * sc - pointer to the wm_softc structure
    12572  * index - The starting byte index of the dword to read.
    12573  * data - Pointer to a dword to store the value read.
   12574  *****************************************************************************/
   12575 static int32_t
   12576 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12577 {
   12578 	int32_t status;
   12579 
   12580 	status = wm_read_ich8_data(sc, index, 4, data);
   12581 	return status;
   12582 }
   12583 
   12584 /******************************************************************************
   12585  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12586  * register.
   12587  *
   12588  * sc - Struct containing variables accessed by shared code
   12589  * offset - offset of word in the EEPROM to read
   12590  * data - word read from the EEPROM
   12591  * words - number of words to read
   12592  *****************************************************************************/
   12593 static int
   12594 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12595 {
   12596 	int32_t	 rv = 0;
   12597 	uint32_t flash_bank = 0;
   12598 	uint32_t act_offset = 0;
   12599 	uint32_t bank_offset = 0;
   12600 	uint16_t word = 0;
   12601 	uint16_t i = 0;
   12602 
   12603 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12604 		device_xname(sc->sc_dev), __func__));
   12605 
   12606 	if (sc->nvm.acquire(sc) != 0)
   12607 		return -1;
   12608 
   12609 	/*
   12610 	 * We need to know which is the valid flash bank.  In the event
   12611 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12612 	 * managing flash_bank. So it cannot be trusted and needs
   12613 	 * to be updated with each read.
   12614 	 */
   12615 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12616 	if (rv) {
   12617 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12618 			device_xname(sc->sc_dev)));
   12619 		flash_bank = 0;
   12620 	}
   12621 
   12622 	/*
   12623 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12624 	 * size
   12625 	 */
   12626 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12627 
   12628 	for (i = 0; i < words; i++) {
   12629 		/* The NVM part needs a byte offset, hence * 2 */
   12630 		act_offset = bank_offset + ((offset + i) * 2);
   12631 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12632 		if (rv) {
   12633 			aprint_error_dev(sc->sc_dev,
   12634 			    "%s: failed to read NVM\n", __func__);
   12635 			break;
   12636 		}
   12637 		data[i] = word;
   12638 	}
   12639 
   12640 	sc->nvm.release(sc);
   12641 	return rv;
   12642 }
   12643 
   12644 /******************************************************************************
   12645  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12646  * register.
   12647  *
   12648  * sc - Struct containing variables accessed by shared code
   12649  * offset - offset of word in the EEPROM to read
   12650  * data - word read from the EEPROM
   12651  * words - number of words to read
   12652  *****************************************************************************/
   12653 static int
   12654 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12655 {
   12656 	int32_t	 rv = 0;
   12657 	uint32_t flash_bank = 0;
   12658 	uint32_t act_offset = 0;
   12659 	uint32_t bank_offset = 0;
   12660 	uint32_t dword = 0;
   12661 	uint16_t i = 0;
   12662 
   12663 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12664 		device_xname(sc->sc_dev), __func__));
   12665 
   12666 	if (sc->nvm.acquire(sc) != 0)
   12667 		return -1;
   12668 
   12669 	/*
   12670 	 * We need to know which is the valid flash bank.  In the event
   12671 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12672 	 * managing flash_bank. So it cannot be trusted and needs
   12673 	 * to be updated with each read.
   12674 	 */
   12675 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12676 	if (rv) {
   12677 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12678 			device_xname(sc->sc_dev)));
   12679 		flash_bank = 0;
   12680 	}
   12681 
   12682 	/*
   12683 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12684 	 * size
   12685 	 */
   12686 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12687 
   12688 	for (i = 0; i < words; i++) {
   12689 		/* The NVM part needs a byte offset, hence * 2 */
   12690 		act_offset = bank_offset + ((offset + i) * 2);
   12691 		/* but we must read dword aligned, so mask ... */
   12692 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12693 		if (rv) {
   12694 			aprint_error_dev(sc->sc_dev,
   12695 			    "%s: failed to read NVM\n", __func__);
   12696 			break;
   12697 		}
   12698 		/* ... and pick out low or high word */
   12699 		if ((act_offset & 0x2) == 0)
   12700 			data[i] = (uint16_t)(dword & 0xFFFF);
   12701 		else
   12702 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12703 	}
   12704 
   12705 	sc->nvm.release(sc);
   12706 	return rv;
   12707 }
   12708 
   12709 /* iNVM */
   12710 
   12711 static int
   12712 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12713 {
   12714 	int32_t	 rv = 0;
   12715 	uint32_t invm_dword;
   12716 	uint16_t i;
   12717 	uint8_t record_type, word_address;
   12718 
   12719 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12720 		device_xname(sc->sc_dev), __func__));
   12721 
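          	/*
          	 * Walk the iNVM records: skip CSR-autoload and RSA-key blobs,
          	 * stop at an uninitialized record, and return the data from
          	 * the first word-autoload record whose address matches.
          	 */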
   12722 	for (i = 0; i < INVM_SIZE; i++) {
   12723 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12724 		/* Get record type */
   12725 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12726 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12727 			break;
   12728 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12729 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12730 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12731 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12732 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12733 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12734 			if (word_address == address) {
   12735 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12736 				rv = 0;
   12737 				break;
   12738 			}
   12739 		}
   12740 	}
   12741 
   12742 	return rv;
   12743 }
   12744 
   12745 static int
   12746 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12747 {
   12748 	int rv = 0;
   12749 	int i;
   12750 
   12751 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12752 		device_xname(sc->sc_dev), __func__));
   12753 
   12754 	if (sc->nvm.acquire(sc) != 0)
   12755 		return -1;
   12756 
   12757 	for (i = 0; i < words; i++) {
   12758 		switch (offset + i) {
   12759 		case NVM_OFF_MACADDR:
   12760 		case NVM_OFF_MACADDR1:
   12761 		case NVM_OFF_MACADDR2:
   12762 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12763 			if (rv != 0) {
   12764 				data[i] = 0xffff;
   12765 				rv = -1;
   12766 			}
   12767 			break;
   12768 		case NVM_OFF_CFG2:
   12769 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12770 			if (rv != 0) {
   12771 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12772 				rv = 0;
   12773 			}
   12774 			break;
   12775 		case NVM_OFF_CFG4:
   12776 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12777 			if (rv != 0) {
   12778 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12779 				rv = 0;
   12780 			}
   12781 			break;
   12782 		case NVM_OFF_LED_1_CFG:
   12783 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12784 			if (rv != 0) {
   12785 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12786 				rv = 0;
   12787 			}
   12788 			break;
   12789 		case NVM_OFF_LED_0_2_CFG:
   12790 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12791 			if (rv != 0) {
   12792 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12793 				rv = 0;
   12794 			}
   12795 			break;
   12796 		case NVM_OFF_ID_LED_SETTINGS:
   12797 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12798 			if (rv != 0) {
   12799 				*data = ID_LED_RESERVED_FFFF;
   12800 				rv = 0;
   12801 			}
   12802 			break;
   12803 		default:
   12804 			DPRINTF(WM_DEBUG_NVM,
   12805 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12806 			*data = NVM_RESERVED_WORD;
   12807 			break;
   12808 		}
   12809 	}
   12810 
   12811 	sc->nvm.release(sc);
   12812 	return rv;
   12813 }
   12814 
   12815 /* Lock, detecting NVM type, validate checksum, version and read */
   12816 
   12817 static int
   12818 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12819 {
   12820 	uint32_t eecd = 0;
   12821 
   12822 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12823 	    || sc->sc_type == WM_T_82583) {
   12824 		eecd = CSR_READ(sc, WMREG_EECD);
   12825 
   12826 		/* Isolate bits 15 & 16 */
   12827 		eecd = ((eecd >> 15) & 0x03);
   12828 
   12829 		/* If both bits are set, device is Flash type */
   12830 		if (eecd == 0x03)
   12831 			return 0;
   12832 	}
   12833 	return 1;
   12834 }
   12835 
   12836 static int
   12837 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12838 {
   12839 	uint32_t eec;
   12840 
   12841 	eec = CSR_READ(sc, WMREG_EEC);
   12842 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12843 		return 1;
   12844 
   12845 	return 0;
   12846 }
   12847 
   12848 /*
   12849  * wm_nvm_validate_checksum
   12850  *
   12851  * The checksum is defined as the sum of the first 64 (16 bit) words.
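           * The sum of those words must equal NVM_CHECKSUM.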
   12852  */
   12853 static int
   12854 wm_nvm_validate_checksum(struct wm_softc *sc)
   12855 {
   12856 	uint16_t checksum;
   12857 	uint16_t eeprom_data;
   12858 #ifdef WM_DEBUG
   12859 	uint16_t csum_wordaddr, valid_checksum;
   12860 #endif
   12861 	int i;
   12862 
   12863 	checksum = 0;
   12864 
   12865 	/* Don't check for I211 */
   12866 	if (sc->sc_type == WM_T_I211)
   12867 		return 0;
   12868 
   12869 #ifdef WM_DEBUG
   12870 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12871 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12872 		csum_wordaddr = NVM_OFF_COMPAT;
   12873 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12874 	} else {
   12875 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12876 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12877 	}
   12878 
   12879 	/* Dump EEPROM image for debug */
   12880 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12881 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12882 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12883 		/* XXX PCH_SPT? */
   12884 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12885 		if ((eeprom_data & valid_checksum) == 0) {
   12886 			DPRINTF(WM_DEBUG_NVM,
   12887 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12888 				device_xname(sc->sc_dev), eeprom_data,
   12889 				    valid_checksum));
   12890 		}
   12891 	}
   12892 
   12893 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12894 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12895 		for (i = 0; i < NVM_SIZE; i++) {
   12896 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12897 				printf("XXXX ");
   12898 			else
   12899 				printf("%04hx ", eeprom_data);
   12900 			if (i % 8 == 7)
   12901 				printf("\n");
   12902 		}
   12903 	}
   12904 
   12905 #endif /* WM_DEBUG */
   12906 
   12907 	for (i = 0; i < NVM_SIZE; i++) {
   12908 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12909 			return 1;
   12910 		checksum += eeprom_data;
   12911 	}
   12912 
   12913 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12914 #ifdef WM_DEBUG
   12915 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12916 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12917 #endif
   12918 	}
   12919 
   12920 	return 0;
   12921 }
   12922 
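          /*
           * wm_nvm_version_invm:
           *
           *	Decode the iNVM image version of I210/I211 from iNVM data
           *	word 61.
           */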
   12923 static void
   12924 wm_nvm_version_invm(struct wm_softc *sc)
   12925 {
   12926 	uint32_t dword;
   12927 
   12928 	/*
    12929 	 * Linux's code for decoding the version is very strange, so we
    12930 	 * don't follow that algorithm and instead use word 61 as the
    12931 	 * document describes.  It may not be perfect, though...
   12932 	 *
   12933 	 * Example:
   12934 	 *
   12935 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12936 	 */
   12937 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12938 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12939 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12940 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12941 }
   12942 
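          /*
           * wm_nvm_version:
           *
           *	Read the NVM image version, plus the option ROM version and
           *	image unique ID when present, and print them verbosely.
           */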
   12943 static void
   12944 wm_nvm_version(struct wm_softc *sc)
   12945 {
   12946 	uint16_t major, minor, build, patch;
   12947 	uint16_t uid0, uid1;
   12948 	uint16_t nvm_data;
   12949 	uint16_t off;
   12950 	bool check_version = false;
   12951 	bool check_optionrom = false;
   12952 	bool have_build = false;
   12953 	bool have_uid = true;
   12954 
   12955 	/*
   12956 	 * Version format:
   12957 	 *
   12958 	 * XYYZ
   12959 	 * X0YZ
   12960 	 * X0YY
   12961 	 *
   12962 	 * Example:
   12963 	 *
   12964 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12965 	 *	82571	0x50a6	5.10.6?
   12966 	 *	82572	0x506a	5.6.10?
   12967 	 *	82572EI	0x5069	5.6.9?
   12968 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12969 	 *		0x2013	2.1.3?
    12970 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12971 	 */
   12972 
   12973 	/*
   12974 	 * XXX
    12975 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12976 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12977 	 */
   12978 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12979 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12980 		have_uid = false;
   12981 
   12982 	switch (sc->sc_type) {
   12983 	case WM_T_82571:
   12984 	case WM_T_82572:
   12985 	case WM_T_82574:
   12986 	case WM_T_82583:
   12987 		check_version = true;
   12988 		check_optionrom = true;
   12989 		have_build = true;
   12990 		break;
   12991 	case WM_T_82575:
   12992 	case WM_T_82576:
   12993 	case WM_T_82580:
   12994 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12995 			check_version = true;
   12996 		break;
   12997 	case WM_T_I211:
   12998 		wm_nvm_version_invm(sc);
   12999 		have_uid = false;
   13000 		goto printver;
   13001 	case WM_T_I210:
   13002 		if (!wm_nvm_flash_presence_i210(sc)) {
   13003 			wm_nvm_version_invm(sc);
   13004 			have_uid = false;
   13005 			goto printver;
   13006 		}
   13007 		/* FALLTHROUGH */
   13008 	case WM_T_I350:
   13009 	case WM_T_I354:
   13010 		check_version = true;
   13011 		check_optionrom = true;
   13012 		break;
   13013 	default:
   13014 		return;
   13015 	}
   13016 	if (check_version
   13017 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13018 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13019 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13020 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13021 			build = nvm_data & NVM_BUILD_MASK;
   13022 			have_build = true;
   13023 		} else
   13024 			minor = nvm_data & 0x00ff;
   13025 
    13026 		/*
          		 * The minor number appears to be BCD-encoded; convert
          		 * the two nibbles to decimal (e.g. raw 0x13 -> 13).
          		 */
   13027 		minor = (minor / 16) * 10 + (minor % 16);
   13028 		sc->sc_nvm_ver_major = major;
   13029 		sc->sc_nvm_ver_minor = minor;
   13030 
   13031 printver:
   13032 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13033 		    sc->sc_nvm_ver_minor);
   13034 		if (have_build) {
   13035 			sc->sc_nvm_ver_build = build;
   13036 			aprint_verbose(".%d", build);
   13037 		}
   13038 	}
   13039 
    13040 	/* Assume the option ROM area is above NVM_SIZE */
   13041 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13042 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13043 		/* Option ROM Version */
   13044 		if ((off != 0x0000) && (off != 0xffff)) {
   13045 			int rv;
   13046 
   13047 			off += NVM_COMBO_VER_OFF;
   13048 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13049 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13050 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13051 			    && (uid1 != 0) && (uid1 != 0xffff)) {
    13052 				/* major(8).build(16).patch(8) in two words */
   13053 				major = uid0 >> 8;
   13054 				build = (uid0 << 8) | (uid1 >> 8);
   13055 				patch = uid1 & 0x00ff;
   13056 				aprint_verbose(", option ROM Version %d.%d.%d",
   13057 				    major, build, patch);
   13058 			}
   13059 		}
   13060 	}
   13061 
   13062 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13063 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13064 }
   13065 
   13066 /*
   13067  * wm_nvm_read:
   13068  *
   13069  *	Read data from the serial EEPROM.
   13070  */
   13071 static int
   13072 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13073 {
   13074 	int rv;
   13075 
   13076 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13077 		device_xname(sc->sc_dev), __func__));
   13078 
   13079 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13080 		return -1;
   13081 
   13082 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13083 
   13084 	return rv;
   13085 }
   13086 
   13087 /*
   13088  * Hardware semaphores.
    13089  * Very complex...
   13090  */
   13091 
   13092 static int
   13093 wm_get_null(struct wm_softc *sc)
   13094 {
   13095 
   13096 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13097 		device_xname(sc->sc_dev), __func__));
   13098 	return 0;
   13099 }
   13100 
   13101 static void
   13102 wm_put_null(struct wm_softc *sc)
   13103 {
   13104 
   13105 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13106 		device_xname(sc->sc_dev), __func__));
   13107 	return;
   13108 }
   13109 
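          /*
           * wm_get_eecd:
           *
           *	Request direct EEPROM access by setting EECD_EE_REQ and
           *	polling (up to 1000 * 5us) for the EECD_EE_GNT grant bit.
           */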
   13110 static int
   13111 wm_get_eecd(struct wm_softc *sc)
   13112 {
   13113 	uint32_t reg;
   13114 	int x;
   13115 
   13116 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13117 		device_xname(sc->sc_dev), __func__));
   13118 
   13119 	reg = CSR_READ(sc, WMREG_EECD);
   13120 
   13121 	/* Request EEPROM access. */
   13122 	reg |= EECD_EE_REQ;
   13123 	CSR_WRITE(sc, WMREG_EECD, reg);
   13124 
    13125 	/* ... and wait for it to be granted. */
   13126 	for (x = 0; x < 1000; x++) {
   13127 		reg = CSR_READ(sc, WMREG_EECD);
   13128 		if (reg & EECD_EE_GNT)
   13129 			break;
   13130 		delay(5);
   13131 	}
   13132 	if ((reg & EECD_EE_GNT) == 0) {
   13133 		aprint_error_dev(sc->sc_dev,
   13134 		    "could not acquire EEPROM GNT\n");
   13135 		reg &= ~EECD_EE_REQ;
   13136 		CSR_WRITE(sc, WMREG_EECD, reg);
   13137 		return -1;
   13138 	}
   13139 
   13140 	return 0;
   13141 }
   13142 
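          /*
           * Bit-bang helpers to raise and lower the EEPROM serial clock (SK).
           * SPI parts need only a ~1us edge delay; Microwire parts use 50us.
           */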
   13143 static void
   13144 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13145 {
   13146 
   13147 	*eecd |= EECD_SK;
   13148 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13149 	CSR_WRITE_FLUSH(sc);
   13150 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13151 		delay(1);
   13152 	else
   13153 		delay(50);
   13154 }
   13155 
   13156 static void
   13157 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13158 {
   13159 
   13160 	*eecd &= ~EECD_SK;
   13161 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13162 	CSR_WRITE_FLUSH(sc);
   13163 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13164 		delay(1);
   13165 	else
   13166 		delay(50);
   13167 }
   13168 
   13169 static void
   13170 wm_put_eecd(struct wm_softc *sc)
   13171 {
   13172 	uint32_t reg;
   13173 
   13174 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13175 		device_xname(sc->sc_dev), __func__));
   13176 
   13177 	/* Stop nvm */
   13178 	reg = CSR_READ(sc, WMREG_EECD);
   13179 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13180 		/* Pull CS high */
   13181 		reg |= EECD_CS;
   13182 		wm_nvm_eec_clock_lower(sc, &reg);
   13183 	} else {
   13184 		/* CS on Microwire is active-high */
   13185 		reg &= ~(EECD_CS | EECD_DI);
   13186 		CSR_WRITE(sc, WMREG_EECD, reg);
   13187 		wm_nvm_eec_clock_raise(sc, &reg);
   13188 		wm_nvm_eec_clock_lower(sc, &reg);
   13189 	}
   13190 
   13191 	reg = CSR_READ(sc, WMREG_EECD);
   13192 	reg &= ~EECD_EE_REQ;
   13193 	CSR_WRITE(sc, WMREG_EECD, reg);
   13194 
   13195 	return;
   13196 }
   13197 
   13198 /*
   13199  * Get hardware semaphore.
   13200  * Same as e1000_get_hw_semaphore_generic()
   13201  */
   13202 static int
   13203 wm_get_swsm_semaphore(struct wm_softc *sc)
   13204 {
   13205 	int32_t timeout;
   13206 	uint32_t swsm;
   13207 
   13208 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13209 		device_xname(sc->sc_dev), __func__));
   13210 	KASSERT(sc->sc_nvm_wordsize > 0);
   13211 
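          	/*
          	 * Two-stage handshake: first wait for the hardware to release
          	 * SMBI, then claim SWESMBI by setting it and reading it back.
          	 */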
   13212 retry:
   13213 	/* Get the SW semaphore. */
   13214 	timeout = sc->sc_nvm_wordsize + 1;
   13215 	while (timeout) {
   13216 		swsm = CSR_READ(sc, WMREG_SWSM);
   13217 
   13218 		if ((swsm & SWSM_SMBI) == 0)
   13219 			break;
   13220 
   13221 		delay(50);
   13222 		timeout--;
   13223 	}
   13224 
   13225 	if (timeout == 0) {
   13226 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13227 			/*
   13228 			 * In rare circumstances, the SW semaphore may already
   13229 			 * be held unintentionally. Clear the semaphore once
   13230 			 * before giving up.
   13231 			 */
   13232 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13233 			wm_put_swsm_semaphore(sc);
   13234 			goto retry;
   13235 		}
   13236 		aprint_error_dev(sc->sc_dev,
   13237 		    "could not acquire SWSM SMBI\n");
   13238 		return 1;
   13239 	}
   13240 
   13241 	/* Get the FW semaphore. */
   13242 	timeout = sc->sc_nvm_wordsize + 1;
   13243 	while (timeout) {
   13244 		swsm = CSR_READ(sc, WMREG_SWSM);
   13245 		swsm |= SWSM_SWESMBI;
   13246 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13247 		/* If we managed to set the bit we got the semaphore. */
   13248 		swsm = CSR_READ(sc, WMREG_SWSM);
   13249 		if (swsm & SWSM_SWESMBI)
   13250 			break;
   13251 
   13252 		delay(50);
   13253 		timeout--;
   13254 	}
   13255 
   13256 	if (timeout == 0) {
   13257 		aprint_error_dev(sc->sc_dev,
   13258 		    "could not acquire SWSM SWESMBI\n");
   13259 		/* Release semaphores */
   13260 		wm_put_swsm_semaphore(sc);
   13261 		return 1;
   13262 	}
   13263 	return 0;
   13264 }
   13265 
   13266 /*
   13267  * Put hardware semaphore.
   13268  * Same as e1000_put_hw_semaphore_generic()
   13269  */
   13270 static void
   13271 wm_put_swsm_semaphore(struct wm_softc *sc)
   13272 {
   13273 	uint32_t swsm;
   13274 
   13275 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13276 		device_xname(sc->sc_dev), __func__));
   13277 
   13278 	swsm = CSR_READ(sc, WMREG_SWSM);
   13279 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13280 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13281 }
   13282 
   13283 /*
   13284  * Get SW/FW semaphore.
   13285  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13286  */
   13287 static int
   13288 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13289 {
   13290 	uint32_t swfw_sync;
   13291 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13292 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13293 	int timeout;
   13294 
   13295 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13296 		device_xname(sc->sc_dev), __func__));
   13297 
   13298 	if (sc->sc_type == WM_T_80003)
   13299 		timeout = 50;
   13300 	else
   13301 		timeout = 200;
   13302 
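          	/*
          	 * SW_FW_SYNC is itself protected by the SWSM semaphore, so
          	 * take that semaphore around each test-and-set attempt below.
          	 */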
   13303 	while (timeout) {
   13304 		if (wm_get_swsm_semaphore(sc)) {
   13305 			aprint_error_dev(sc->sc_dev,
   13306 			    "%s: failed to get semaphore\n",
   13307 			    __func__);
   13308 			return 1;
   13309 		}
   13310 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13311 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13312 			swfw_sync |= swmask;
   13313 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13314 			wm_put_swsm_semaphore(sc);
   13315 			return 0;
   13316 		}
   13317 		wm_put_swsm_semaphore(sc);
   13318 		delay(5000);
   13319 		timeout--;
   13320 	}
   13321 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13322 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13323 	return 1;
   13324 }
   13325 
   13326 static void
   13327 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13328 {
   13329 	uint32_t swfw_sync;
   13330 
   13331 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13332 		device_xname(sc->sc_dev), __func__));
   13333 
   13334 	while (wm_get_swsm_semaphore(sc) != 0)
   13335 		continue;
   13336 
   13337 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13338 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13339 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13340 
   13341 	wm_put_swsm_semaphore(sc);
   13342 }
   13343 
   13344 static int
   13345 wm_get_nvm_80003(struct wm_softc *sc)
   13346 {
   13347 	int rv;
   13348 
   13349 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13350 		device_xname(sc->sc_dev), __func__));
   13351 
   13352 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13353 		aprint_error_dev(sc->sc_dev,
   13354 		    "%s: failed to get semaphore(SWFW)\n",
   13355 		    __func__);
   13356 		return rv;
   13357 	}
   13358 
   13359 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13360 	    && (rv = wm_get_eecd(sc)) != 0) {
   13361 		aprint_error_dev(sc->sc_dev,
   13362 		    "%s: failed to get semaphore(EECD)\n",
   13363 		    __func__);
   13364 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13365 		return rv;
   13366 	}
   13367 
   13368 	return 0;
   13369 }
   13370 
   13371 static void
   13372 wm_put_nvm_80003(struct wm_softc *sc)
   13373 {
   13374 
   13375 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13376 		device_xname(sc->sc_dev), __func__));
   13377 
   13378 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13379 		wm_put_eecd(sc);
   13380 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13381 }
   13382 
   13383 static int
   13384 wm_get_nvm_82571(struct wm_softc *sc)
   13385 {
   13386 	int rv;
   13387 
   13388 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13389 		device_xname(sc->sc_dev), __func__));
   13390 
   13391 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13392 		return rv;
   13393 
   13394 	switch (sc->sc_type) {
   13395 	case WM_T_82573:
   13396 		break;
   13397 	default:
   13398 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13399 			rv = wm_get_eecd(sc);
   13400 		break;
   13401 	}
   13402 
   13403 	if (rv != 0) {
   13404 		aprint_error_dev(sc->sc_dev,
   13405 		    "%s: failed to get semaphore\n",
   13406 		    __func__);
   13407 		wm_put_swsm_semaphore(sc);
   13408 	}
   13409 
   13410 	return rv;
   13411 }
   13412 
   13413 static void
   13414 wm_put_nvm_82571(struct wm_softc *sc)
   13415 {
   13416 
   13417 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13418 		device_xname(sc->sc_dev), __func__));
   13419 
   13420 	switch (sc->sc_type) {
   13421 	case WM_T_82573:
   13422 		break;
   13423 	default:
   13424 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13425 			wm_put_eecd(sc);
   13426 		break;
   13427 	}
   13428 
   13429 	wm_put_swsm_semaphore(sc);
   13430 }
   13431 
   13432 static int
   13433 wm_get_phy_82575(struct wm_softc *sc)
   13434 {
   13435 
   13436 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13437 		device_xname(sc->sc_dev), __func__));
   13438 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13439 }
   13440 
   13441 static void
   13442 wm_put_phy_82575(struct wm_softc *sc)
   13443 {
   13444 
   13445 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13446 		device_xname(sc->sc_dev), __func__));
   13447 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13448 }
   13449 
   13450 static int
   13451 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13452 {
   13453 	uint32_t ext_ctrl;
   13454 	int timeout = 200;
   13455 
   13456 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13457 		device_xname(sc->sc_dev), __func__));
   13458 
   13459 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
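          	/* Set the SW ownership bit and read it back to confirm the grant. */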
   13460 	for (timeout = 0; timeout < 200; timeout++) {
   13461 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13462 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13463 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13464 
   13465 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13466 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13467 			return 0;
   13468 		delay(5000);
   13469 	}
   13470 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13471 	    device_xname(sc->sc_dev), ext_ctrl);
   13472 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13473 	return 1;
   13474 }
   13475 
   13476 static void
   13477 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13478 {
   13479 	uint32_t ext_ctrl;
   13480 
   13481 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13482 		device_xname(sc->sc_dev), __func__));
   13483 
   13484 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13485 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13486 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13487 
   13488 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13489 }
   13490 
   13491 static int
   13492 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13493 {
   13494 	uint32_t ext_ctrl;
   13495 	int timeout;
   13496 
   13497 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13498 		device_xname(sc->sc_dev), __func__));
   13499 	mutex_enter(sc->sc_ich_phymtx);
   13500 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13501 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13502 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13503 			break;
   13504 		delay(1000);
   13505 	}
   13506 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13507 		printf("%s: SW has already locked the resource\n",
   13508 		    device_xname(sc->sc_dev));
   13509 		goto out;
   13510 	}
   13511 
   13512 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13513 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13514 	for (timeout = 0; timeout < 1000; timeout++) {
   13515 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13516 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13517 			break;
   13518 		delay(1000);
   13519 	}
   13520 	if (timeout >= 1000) {
   13521 		printf("%s: failed to acquire semaphore\n",
   13522 		    device_xname(sc->sc_dev));
   13523 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13524 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13525 		goto out;
   13526 	}
   13527 	return 0;
   13528 
   13529 out:
   13530 	mutex_exit(sc->sc_ich_phymtx);
   13531 	return 1;
   13532 }
   13533 
   13534 static void
   13535 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13536 {
   13537 	uint32_t ext_ctrl;
   13538 
   13539 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13540 		device_xname(sc->sc_dev), __func__));
   13541 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13542 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13543 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13544 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13545 	} else {
   13546 		printf("%s: Semaphore unexpectedly released\n",
   13547 		    device_xname(sc->sc_dev));
   13548 	}
   13549 
   13550 	mutex_exit(sc->sc_ich_phymtx);
   13551 }
   13552 
   13553 static int
   13554 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13555 {
   13556 
   13557 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13558 		device_xname(sc->sc_dev), __func__));
   13559 	mutex_enter(sc->sc_ich_nvmmtx);
   13560 
   13561 	return 0;
   13562 }
   13563 
   13564 static void
   13565 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13566 {
   13567 
   13568 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13569 		device_xname(sc->sc_dev), __func__));
   13570 	mutex_exit(sc->sc_ich_nvmmtx);
   13571 }
   13572 
   13573 static int
   13574 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13575 {
   13576 	int i = 0;
   13577 	uint32_t reg;
   13578 
   13579 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13580 		device_xname(sc->sc_dev), __func__));
   13581 
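          	/* Set the MDIO ownership bit and poll until the hardware keeps it. */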
   13582 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13583 	do {
   13584 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13585 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13586 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13587 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13588 			break;
   13589 		delay(2*1000);
   13590 		i++;
   13591 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13592 
   13593 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13594 		wm_put_hw_semaphore_82573(sc);
   13595 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13596 		    device_xname(sc->sc_dev));
   13597 		return -1;
   13598 	}
   13599 
   13600 	return 0;
   13601 }
   13602 
   13603 static void
   13604 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13605 {
   13606 	uint32_t reg;
   13607 
   13608 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13609 		device_xname(sc->sc_dev), __func__));
   13610 
   13611 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13612 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13613 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13614 }
   13615 
   13616 /*
   13617  * Management mode and power management related subroutines.
   13618  * BMC, AMT, suspend/resume and EEE.
   13619  */
   13620 
   13621 #ifdef WM_WOL
   13622 static int
   13623 wm_check_mng_mode(struct wm_softc *sc)
   13624 {
   13625 	int rv;
   13626 
   13627 	switch (sc->sc_type) {
   13628 	case WM_T_ICH8:
   13629 	case WM_T_ICH9:
   13630 	case WM_T_ICH10:
   13631 	case WM_T_PCH:
   13632 	case WM_T_PCH2:
   13633 	case WM_T_PCH_LPT:
   13634 	case WM_T_PCH_SPT:
   13635 	case WM_T_PCH_CNP:
   13636 		rv = wm_check_mng_mode_ich8lan(sc);
   13637 		break;
   13638 	case WM_T_82574:
   13639 	case WM_T_82583:
   13640 		rv = wm_check_mng_mode_82574(sc);
   13641 		break;
   13642 	case WM_T_82571:
   13643 	case WM_T_82572:
   13644 	case WM_T_82573:
   13645 	case WM_T_80003:
   13646 		rv = wm_check_mng_mode_generic(sc);
   13647 		break;
   13648 	default:
    13649 		/* nothing to do */
   13650 		rv = 0;
   13651 		break;
   13652 	}
   13653 
   13654 	return rv;
   13655 }
   13656 
   13657 static int
   13658 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13659 {
   13660 	uint32_t fwsm;
   13661 
   13662 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13663 
   13664 	if (((fwsm & FWSM_FW_VALID) != 0)
   13665 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13666 		return 1;
   13667 
   13668 	return 0;
   13669 }
   13670 
   13671 static int
   13672 wm_check_mng_mode_82574(struct wm_softc *sc)
   13673 {
   13674 	uint16_t data;
   13675 
   13676 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13677 
   13678 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13679 		return 1;
   13680 
   13681 	return 0;
   13682 }
   13683 
   13684 static int
   13685 wm_check_mng_mode_generic(struct wm_softc *sc)
   13686 {
   13687 	uint32_t fwsm;
   13688 
   13689 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13690 
   13691 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13692 		return 1;
   13693 
   13694 	return 0;
   13695 }
   13696 #endif /* WM_WOL */
   13697 
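          /*
           * wm_enable_mng_pass_thru:
           *
           *	Return 1 if management packets should pass through to the
           *	host, i.e. manageability firmware is present, TCO packet
           *	reception is enabled and the mode requires pass-through.
           */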
   13698 static int
   13699 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13700 {
   13701 	uint32_t manc, fwsm, factps;
   13702 
   13703 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13704 		return 0;
   13705 
   13706 	manc = CSR_READ(sc, WMREG_MANC);
   13707 
   13708 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13709 		device_xname(sc->sc_dev), manc));
   13710 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13711 		return 0;
   13712 
   13713 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13714 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13715 		factps = CSR_READ(sc, WMREG_FACTPS);
   13716 		if (((factps & FACTPS_MNGCG) == 0)
   13717 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13718 			return 1;
    13719 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13720 		uint16_t data;
   13721 
   13722 		factps = CSR_READ(sc, WMREG_FACTPS);
   13723 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13724 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13725 			device_xname(sc->sc_dev), factps, data));
   13726 		if (((factps & FACTPS_MNGCG) == 0)
   13727 		    && ((data & NVM_CFG2_MNGM_MASK)
   13728 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13729 			return 1;
   13730 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13731 	    && ((manc & MANC_ASF_EN) == 0))
   13732 		return 1;
   13733 
   13734 	return 0;
   13735 }
   13736 
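          /*
           * wm_phy_resetisblocked:
           *
           *	Check whether firmware currently blocks a PHY reset: poll
           *	FWSM_RSPCIPHY on ICH/PCH parts, or test
           *	MANC_BLK_PHY_RST_ON_IDE on 8257x/80003 parts.
           */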
   13737 static bool
   13738 wm_phy_resetisblocked(struct wm_softc *sc)
   13739 {
   13740 	bool blocked = false;
   13741 	uint32_t reg;
   13742 	int i = 0;
   13743 
   13744 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13745 		device_xname(sc->sc_dev), __func__));
   13746 
   13747 	switch (sc->sc_type) {
   13748 	case WM_T_ICH8:
   13749 	case WM_T_ICH9:
   13750 	case WM_T_ICH10:
   13751 	case WM_T_PCH:
   13752 	case WM_T_PCH2:
   13753 	case WM_T_PCH_LPT:
   13754 	case WM_T_PCH_SPT:
   13755 	case WM_T_PCH_CNP:
   13756 		do {
   13757 			reg = CSR_READ(sc, WMREG_FWSM);
   13758 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13759 				blocked = true;
   13760 				delay(10*1000);
   13761 				continue;
   13762 			}
   13763 			blocked = false;
   13764 		} while (blocked && (i++ < 30));
   13765 		return blocked;
   13766 		break;
   13767 	case WM_T_82571:
   13768 	case WM_T_82572:
   13769 	case WM_T_82573:
   13770 	case WM_T_82574:
   13771 	case WM_T_82583:
   13772 	case WM_T_80003:
   13773 		reg = CSR_READ(sc, WMREG_MANC);
   13774 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13775 			return true;
   13776 		else
   13777 			return false;
   13778 		break;
   13779 	default:
   13780 		/* no problem */
   13781 		break;
   13782 	}
   13783 
   13784 	return false;
   13785 }
   13786 
   13787 static void
   13788 wm_get_hw_control(struct wm_softc *sc)
   13789 {
   13790 	uint32_t reg;
   13791 
   13792 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13793 		device_xname(sc->sc_dev), __func__));
   13794 
   13795 	if (sc->sc_type == WM_T_82573) {
   13796 		reg = CSR_READ(sc, WMREG_SWSM);
   13797 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13798 	} else if (sc->sc_type >= WM_T_82571) {
   13799 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13800 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13801 	}
   13802 }
   13803 
   13804 static void
   13805 wm_release_hw_control(struct wm_softc *sc)
   13806 {
   13807 	uint32_t reg;
   13808 
   13809 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13810 		device_xname(sc->sc_dev), __func__));
   13811 
   13812 	if (sc->sc_type == WM_T_82573) {
   13813 		reg = CSR_READ(sc, WMREG_SWSM);
   13814 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13815 	} else if (sc->sc_type >= WM_T_82571) {
   13816 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13817 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13818 	}
   13819 }
   13820 
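          /*
           * Gate or ungate automatic PHY configuration by hardware.  Only
           * meaningful on PCH2 (82579) and newer.
           */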
   13821 static void
   13822 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13823 {
   13824 	uint32_t reg;
   13825 
   13826 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13827 		device_xname(sc->sc_dev), __func__));
   13828 
   13829 	if (sc->sc_type < WM_T_PCH2)
   13830 		return;
   13831 
   13832 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13833 
   13834 	if (gate)
   13835 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13836 	else
   13837 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13838 
   13839 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13840 }
   13841 
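          /*
           * wm_smbustopci:
           *
           *	Switch the PHY interface from SMBus to PCIe-style MDIO
           *	access, toggling LANPHYPC when the PHY is not yet accessible.
           */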
   13842 static void
   13843 wm_smbustopci(struct wm_softc *sc)
   13844 {
   13845 	uint32_t fwsm, reg;
   13846 	int rv = 0;
   13847 
   13848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13849 		device_xname(sc->sc_dev), __func__));
   13850 
   13851 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13852 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13853 
   13854 	/* Disable ULP */
   13855 	wm_ulp_disable(sc);
   13856 
   13857 	/* Acquire PHY semaphore */
   13858 	sc->phy.acquire(sc);
   13859 
   13860 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13861 	switch (sc->sc_type) {
   13862 	case WM_T_PCH_LPT:
   13863 	case WM_T_PCH_SPT:
   13864 	case WM_T_PCH_CNP:
   13865 		if (wm_phy_is_accessible_pchlan(sc))
   13866 			break;
   13867 
   13868 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13869 		reg |= CTRL_EXT_FORCE_SMBUS;
   13870 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13871 #if 0
   13872 		/* XXX Isn't this required??? */
   13873 		CSR_WRITE_FLUSH(sc);
   13874 #endif
   13875 		delay(50 * 1000);
   13876 		/* FALLTHROUGH */
   13877 	case WM_T_PCH2:
   13878 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13879 			break;
   13880 		/* FALLTHROUGH */
   13881 	case WM_T_PCH:
   13882 		if (sc->sc_type == WM_T_PCH)
   13883 			if ((fwsm & FWSM_FW_VALID) != 0)
   13884 				break;
   13885 
   13886 		if (wm_phy_resetisblocked(sc) == true) {
   13887 			printf("XXX reset is blocked(3)\n");
   13888 			break;
   13889 		}
   13890 
   13891 		wm_toggle_lanphypc_pch_lpt(sc);
   13892 
   13893 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13894 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13895 				break;
   13896 
   13897 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13898 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13899 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13900 
   13901 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13902 				break;
   13903 			rv = -1;
   13904 		}
   13905 		break;
   13906 	default:
   13907 		break;
   13908 	}
   13909 
   13910 	/* Release semaphore */
   13911 	sc->phy.release(sc);
   13912 
   13913 	if (rv == 0) {
   13914 		if (wm_phy_resetisblocked(sc)) {
   13915 			printf("XXX reset is blocked(4)\n");
   13916 			goto out;
   13917 		}
   13918 		wm_reset_phy(sc);
   13919 		if (wm_phy_resetisblocked(sc))
   13920 			printf("XXX reset is blocked(4)\n");
   13921 	}
   13922 
   13923 out:
   13924 	/*
   13925 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13926 	 */
   13927 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13928 		delay(10*1000);
   13929 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13930 	}
   13931 }
   13932 
   13933 static void
   13934 wm_init_manageability(struct wm_softc *sc)
   13935 {
   13936 
   13937 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13938 		device_xname(sc->sc_dev), __func__));
   13939 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13940 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13941 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13942 
   13943 		/* Disable hardware interception of ARP */
   13944 		manc &= ~MANC_ARP_EN;
   13945 
   13946 		/* Enable receiving management packets to the host */
   13947 		if (sc->sc_type >= WM_T_82571) {
   13948 			manc |= MANC_EN_MNG2HOST;
   13949 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13950 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13951 		}
   13952 
   13953 		CSR_WRITE(sc, WMREG_MANC, manc);
   13954 	}
   13955 }
   13956 
   13957 static void
   13958 wm_release_manageability(struct wm_softc *sc)
   13959 {
   13960 
   13961 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13962 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13963 
   13964 		manc |= MANC_ARP_EN;
   13965 		if (sc->sc_type >= WM_T_82571)
   13966 			manc &= ~MANC_EN_MNG2HOST;
   13967 
   13968 		CSR_WRITE(sc, WMREG_MANC, manc);
   13969 	}
   13970 }
   13971 
   13972 static void
   13973 wm_get_wakeup(struct wm_softc *sc)
   13974 {
   13975 
   13976 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13977 	switch (sc->sc_type) {
   13978 	case WM_T_82573:
   13979 	case WM_T_82583:
   13980 		sc->sc_flags |= WM_F_HAS_AMT;
   13981 		/* FALLTHROUGH */
   13982 	case WM_T_80003:
   13983 	case WM_T_82575:
   13984 	case WM_T_82576:
   13985 	case WM_T_82580:
   13986 	case WM_T_I350:
   13987 	case WM_T_I354:
   13988 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13989 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13990 		/* FALLTHROUGH */
   13991 	case WM_T_82541:
   13992 	case WM_T_82541_2:
   13993 	case WM_T_82547:
   13994 	case WM_T_82547_2:
   13995 	case WM_T_82571:
   13996 	case WM_T_82572:
   13997 	case WM_T_82574:
   13998 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13999 		break;
   14000 	case WM_T_ICH8:
   14001 	case WM_T_ICH9:
   14002 	case WM_T_ICH10:
   14003 	case WM_T_PCH:
   14004 	case WM_T_PCH2:
   14005 	case WM_T_PCH_LPT:
   14006 	case WM_T_PCH_SPT:
   14007 	case WM_T_PCH_CNP:
   14008 		sc->sc_flags |= WM_F_HAS_AMT;
   14009 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14010 		break;
   14011 	default:
   14012 		break;
   14013 	}
   14014 
   14015 	/* 1: HAS_MANAGE */
   14016 	if (wm_enable_mng_pass_thru(sc) != 0)
   14017 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14018 
   14019 	/*
    14020 	 * Note that the WOL flags are set after the EEPROM stuff is
    14021 	 * reset.
   14022 	 */
   14023 }
   14024 
   14025 /*
   14026  * Unconfigure Ultra Low Power mode.
   14027  * Only for I217 and newer (see below).
   14028  */
   14029 static int
   14030 wm_ulp_disable(struct wm_softc *sc)
   14031 {
   14032 	uint32_t reg;
   14033 	uint16_t phyreg;
   14034 	int i = 0, rv = 0;
   14035 
   14036 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14037 		device_xname(sc->sc_dev), __func__));
   14038 	/* Exclude old devices */
   14039 	if ((sc->sc_type < WM_T_PCH_LPT)
   14040 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14041 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14042 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14043 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14044 		return 0;
   14045 
   14046 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14047 		/* Request ME un-configure ULP mode in the PHY */
   14048 		reg = CSR_READ(sc, WMREG_H2ME);
   14049 		reg &= ~H2ME_ULP;
   14050 		reg |= H2ME_ENFORCE_SETTINGS;
   14051 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14052 
   14053 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14054 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14055 			if (i++ == 30) {
   14056 				printf("%s timed out\n", __func__);
   14057 				return -1;
   14058 			}
   14059 			delay(10 * 1000);
   14060 		}
   14061 		reg = CSR_READ(sc, WMREG_H2ME);
   14062 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14063 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14064 
   14065 		return 0;
   14066 	}
   14067 
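          	/*
          	 * No manageability firmware: the host itself must take the
          	 * PHY out of ULP via the I218 ULP_CONFIG1 register.
          	 */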
   14068 	/* Acquire semaphore */
   14069 	sc->phy.acquire(sc);
   14070 
   14071 	/* Toggle LANPHYPC */
   14072 	wm_toggle_lanphypc_pch_lpt(sc);
   14073 
   14074 	/* Unforce SMBus mode in PHY */
   14075 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14076 	if (rv != 0) {
   14077 		uint32_t reg2;
   14078 
   14079 		printf("%s: Force SMBus first.\n", __func__);
   14080 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14081 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14082 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14083 		delay(50 * 1000);
   14084 
   14085 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14086 		    &phyreg);
   14087 		if (rv != 0)
   14088 			goto release;
   14089 	}
   14090 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14091 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14092 
   14093 	/* Unforce SMBus mode in MAC */
   14094 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14095 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14096 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14097 
   14098 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14099 	if (rv != 0)
   14100 		goto release;
   14101 	phyreg |= HV_PM_CTRL_K1_ENA;
   14102 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14103 
   14104 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14105 		&phyreg);
   14106 	if (rv != 0)
   14107 		goto release;
   14108 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14109 	    | I218_ULP_CONFIG1_STICKY_ULP
   14110 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14111 	    | I218_ULP_CONFIG1_WOL_HOST
   14112 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14113 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14114 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14115 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14116 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14117 	phyreg |= I218_ULP_CONFIG1_START;
   14118 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14119 
   14120 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14121 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14122 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14123 
   14124 release:
   14125 	/* Release semaphore */
   14126 	sc->phy.release(sc);
   14127 	wm_gmii_reset(sc);
   14128 	delay(50 * 1000);
   14129 
   14130 	return rv;
   14131 }
   14132 
   14133 /* WOL in the newer chipset interfaces (pchlan) */
   14134 static void
   14135 wm_enable_phy_wakeup(struct wm_softc *sc)
   14136 {
   14137 #if 0
   14138 	uint16_t preg;
   14139 
   14140 	/* Copy MAC RARs to PHY RARs */
   14141 
   14142 	/* Copy MAC MTA to PHY MTA */
   14143 
   14144 	/* Configure PHY Rx Control register */
   14145 
   14146 	/* Enable PHY wakeup in MAC register */
   14147 
   14148 	/* Configure and enable PHY wakeup in PHY registers */
   14149 
   14150 	/* Activate PHY wakeup */
   14151 
   14152 	/* XXX */
   14153 #endif
   14154 }
   14155 
   14156 /* Power down workaround on D3 */
   14157 static void
   14158 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14159 {
   14160 	uint32_t reg;
   14161 	int i;
   14162 
   14163 	for (i = 0; i < 2; i++) {
   14164 		/* Disable link */
   14165 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14166 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14167 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14168 
   14169 		/*
   14170 		 * Call gig speed drop workaround on Gig disable before
   14171 		 * accessing any PHY registers
   14172 		 */
   14173 		if (sc->sc_type == WM_T_ICH8)
   14174 			wm_gig_downshift_workaround_ich8lan(sc);
   14175 
   14176 		/* Write VR power-down enable */
   14177 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14178 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14179 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14180 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14181 
   14182 		/* Read it back and test */
   14183 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14184 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14185 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14186 			break;
   14187 
   14188 		/* Issue PHY reset and repeat at most one more time */
   14189 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14190 	}
   14191 }
   14192 
   14193 /*
   14194  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14195  *  @sc: pointer to the HW structure
   14196  *
   14197  *  During S0 to Sx transition, it is possible the link remains at gig
   14198  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14199  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14200  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14201  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14202  *  needs to be written.
    14203  *  Parts that support (and are linked to a partner which supports) EEE in
   14204  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14205  *  than 10Mbps w/o EEE.
   14206  */
   14207 static void
   14208 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14209 {
   14210 	uint32_t phy_ctrl;
   14211 
   14212 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14213 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14214 
   14215 	if (sc->sc_phytype == WMPHY_I217) {
   14216 		uint16_t devid = sc->sc_pcidevid;
   14217 
   14218 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14219 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14220 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14221 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14222 		    (sc->sc_type >= WM_T_PCH_SPT))
   14223 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14224 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14225 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14226 
   14227 #if 0 /* notyet */
   14228 		if (sc->phy.acquire(sc) != 0)
   14229 			goto out;
   14230 
   14231 		/* XXX Do workaround for EEE */
   14232 
   14233 		/*
   14234 		 * For i217 Intel Rapid Start Technology support,
   14235 		 * when the system is going into Sx and no manageability engine
   14236 		 * is present, the driver must configure proxy to reset only on
   14237 		 * power good.	LPI (Low Power Idle) state must also reset only
   14238 		 * on power good, as well as the MTA (Multicast table array).
   14239 		 * The SMBus release must also be disabled on LCD reset.
   14240 		 */
   14241 
   14242 		/*
   14243 		 * Enable MTA to reset for Intel Rapid Start Technology
   14244 		 * Support
   14245 		 */
   14246 
   14247 		sc->phy.release(sc);
   14248 #endif
   14249 	}
   14250 #if 0
   14251 out:
   14252 #endif
   14253 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14254 
   14255 	if (sc->sc_type == WM_T_ICH8)
   14256 		wm_gig_downshift_workaround_ich8lan(sc);
   14257 
   14258 	if (sc->sc_type >= WM_T_PCH) {
   14259 		wm_oem_bits_config_ich8lan(sc, false);
   14260 
   14261 		/* Reset PHY to activate OEM bits on 82577/8 */
   14262 		if (sc->sc_type == WM_T_PCH)
   14263 			wm_reset_phy(sc);
   14264 
   14265 		if (sc->phy.acquire(sc) != 0)
   14266 			return;
   14267 		wm_write_smbus_addr(sc);
   14268 		sc->phy.release(sc);
   14269 	}
   14270 }
   14271 
   14272 static void
   14273 wm_enable_wakeup(struct wm_softc *sc)
   14274 {
   14275 	uint32_t reg, pmreg;
   14276 	pcireg_t pmode;
   14277 
   14278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14279 		device_xname(sc->sc_dev), __func__));
   14280 
   14281 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14282 		&pmreg, NULL) == 0)
   14283 		return;
   14284 
   14285 	/* Advertise the wakeup capability */
   14286 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14287 	    | CTRL_SWDPIN(3));
   14288 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14289 
   14290 	/* Keep the laser running on fiber adapters */
   14291 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14292 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14293 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14294 		reg |= CTRL_EXT_SWDPIN(3);
   14295 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14296 	}
   14297 
   14298 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14299 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
   14300 		wm_suspend_workarounds_ich8lan(sc);
   14301 
   14302 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14303 #if 0	/* for the multicast packet */
   14304 	reg |= WUFC_MC;
   14305 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14306 #endif
   14307 
   14308 	if (sc->sc_type >= WM_T_PCH)
   14309 		wm_enable_phy_wakeup(sc);
   14310 	else {
   14311 		/* Enable wakeup by the MAC */
   14312 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14313 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14314 	}
   14315 
   14316 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14317 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14318 		|| (sc->sc_type == WM_T_PCH2))
   14319 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14320 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14321 
   14322 	/* Request PME */
   14323 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14324 #if 0
   14325 	/* Disable WOL */
   14326 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14327 #else
   14328 	/* For WOL */
   14329 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14330 #endif
   14331 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14332 }
   14333 
   14334 /* Disable ASPM L0s and/or L1 for workaround */
   14335 static void
   14336 wm_disable_aspm(struct wm_softc *sc)
   14337 {
   14338 	pcireg_t reg, mask = 0;
    14339 	const char *str = "";
   14340 
   14341 	/*
    14342 	 * Only for PCIe devices which have the PCIe capability in the
    14343 	 * PCI config space.
   14344 	 */
   14345 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14346 		return;
   14347 
   14348 	switch (sc->sc_type) {
   14349 	case WM_T_82571:
   14350 	case WM_T_82572:
   14351 		/*
   14352 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14353 		 * State Power management L1 State (ASPM L1).
   14354 		 */
   14355 		mask = PCIE_LCSR_ASPM_L1;
   14356 		str = "L1 is";
   14357 		break;
   14358 	case WM_T_82573:
   14359 	case WM_T_82574:
   14360 	case WM_T_82583:
   14361 		/*
   14362 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14363 		 *
    14364 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14365 		 * some chipsets.  The 82574 and 82583 documents say that
    14366 		 * disabling L0s on those specific chipsets is sufficient,
    14367 		 * but we follow what the Intel em driver does.
   14368 		 *
   14369 		 * References:
   14370 		 * Errata 8 of the Specification Update of i82573.
   14371 		 * Errata 20 of the Specification Update of i82574.
   14372 		 * Errata 9 of the Specification Update of i82583.
   14373 		 */
   14374 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14375 		str = "L0s and L1 are";
   14376 		break;
   14377 	default:
   14378 		return;
   14379 	}
   14380 
   14381 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14382 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14383 	reg &= ~mask;
   14384 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14385 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14386 
   14387 	/* Print only in wm_attach() */
   14388 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14389 		aprint_verbose_dev(sc->sc_dev,
   14390 		    "ASPM %s disabled to workaround the errata.\n", str);
   14391 }
   14392 
   14393 /* LPLU */
   14394 
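          /*
           * wm_lplu_d0_disable:
           *
           *	Disable D0 Low Power Link Up; the register holding the LPLU
           *	bit differs per MAC/PHY generation.
           */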
   14395 static void
   14396 wm_lplu_d0_disable(struct wm_softc *sc)
   14397 {
   14398 	struct mii_data *mii = &sc->sc_mii;
   14399 	uint32_t reg;
   14400 
   14401 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14402 		device_xname(sc->sc_dev), __func__));
   14403 
   14404 	if (sc->sc_phytype == WMPHY_IFE)
   14405 		return;
   14406 
   14407 	switch (sc->sc_type) {
   14408 	case WM_T_82571:
   14409 	case WM_T_82572:
   14410 	case WM_T_82573:
   14411 	case WM_T_82575:
   14412 	case WM_T_82576:
   14413 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14414 		reg &= ~PMR_D0_LPLU;
   14415 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14416 		break;
   14417 	case WM_T_82580:
   14418 	case WM_T_I350:
   14419 	case WM_T_I210:
   14420 	case WM_T_I211:
   14421 		reg = CSR_READ(sc, WMREG_PHPM);
   14422 		reg &= ~PHPM_D0A_LPLU;
   14423 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14424 		break;
   14425 	case WM_T_82574:
   14426 	case WM_T_82583:
   14427 	case WM_T_ICH8:
   14428 	case WM_T_ICH9:
   14429 	case WM_T_ICH10:
   14430 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14431 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14432 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14433 		CSR_WRITE_FLUSH(sc);
   14434 		break;
   14435 	case WM_T_PCH:
   14436 	case WM_T_PCH2:
   14437 	case WM_T_PCH_LPT:
   14438 	case WM_T_PCH_SPT:
   14439 	case WM_T_PCH_CNP:
   14440 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14441 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14442 		if (wm_phy_resetisblocked(sc) == false)
   14443 			reg |= HV_OEM_BITS_ANEGNOW;
   14444 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14445 		break;
   14446 	default:
   14447 		break;
   14448 	}
   14449 }
   14450 
   14451 /* EEE */
   14452 
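          /*
           * wm_set_eee_i350:
           *
           *	Enable or disable Energy Efficient Ethernet (1G/100M EEE
           *	advertisement and LPI) on I350 according to WM_F_EEE.
           */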
   14453 static void
   14454 wm_set_eee_i350(struct wm_softc *sc)
   14455 {
   14456 	uint32_t ipcnfg, eeer;
   14457 
   14458 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14459 	eeer = CSR_READ(sc, WMREG_EEER);
   14460 
   14461 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14462 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14463 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14464 		    | EEER_LPI_FC);
   14465 	} else {
   14466 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14467 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14468 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14469 		    | EEER_LPI_FC);
   14470 	}
   14471 
   14472 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14473 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14474 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14475 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14476 }
   14477 
   14478 /*
   14479  * Workarounds (mainly PHY related).
   14480  * Basically, PHY's workarounds are in the PHY drivers.
   14481  */
   14482 
   14483 /* Work-around for 82566 Kumeran PCS lock loss */
   14484 static void
   14485 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14486 {
   14487 	struct mii_data *mii = &sc->sc_mii;
   14488 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14489 	int i;
   14490 	int reg;
   14491 
   14492 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14493 		device_xname(sc->sc_dev), __func__));
   14494 
   14495 	/* If the link is not up, do nothing */
   14496 	if ((status & STATUS_LU) == 0)
   14497 		return;
   14498 
   14499 	/* Nothing to do if the link is other than 1Gbps */
   14500 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14501 		return;
   14502 
   14503 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14504 	for (i = 0; i < 10; i++) {
   14505 		/* read twice */
   14506 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14507 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14508 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14509 			goto out;	/* GOOD! */
   14510 
   14511 		/* Reset the PHY */
   14512 		wm_reset_phy(sc);
   14513 		delay(5*1000);
   14514 	}
   14515 
   14516 	/* Disable GigE link negotiation */
   14517 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14518 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14519 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14520 
   14521 	/*
   14522 	 * Call gig speed drop workaround on Gig disable before accessing
   14523 	 * any PHY registers.
   14524 	 */
   14525 	wm_gig_downshift_workaround_ich8lan(sc);
   14526 
   14527 out:
   14528 	return;
   14529 }
   14530 
   14531 /*
   14532  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   14533  *  @sc: pointer to the HW structure
   14534  *
    14535  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   14536  *  LPLU, Gig disable, MDIC PHY reset):
   14537  *    1) Set Kumeran Near-end loopback
   14538  *    2) Clear Kumeran Near-end loopback
   14539  *  Should only be called for ICH8[m] devices with any 1G Phy.
   14540  */
   14541 static void
   14542 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14543 {
   14544 	uint16_t kmreg;
   14545 
   14546 	/* Only for igp3 */
   14547 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14548 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14549 			return;
   14550 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14551 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14552 			return;
   14553 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14554 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14555 	}
   14556 }
   14557 
   14558 /*
   14559  * Workaround for pch's PHYs
   14560  * XXX should be moved to new PHY driver?
   14561  */
   14562 static void
   14563 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14564 {
   14565 
   14566 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14567 		device_xname(sc->sc_dev), __func__));
   14568 	KASSERT(sc->sc_type == WM_T_PCH);
   14569 
   14570 	if (sc->sc_phytype == WMPHY_82577)
   14571 		wm_set_mdio_slow_mode_hv(sc);
   14572 
   14573 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14574 
   14575 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14576 
   14577 	/* 82578 */
   14578 	if (sc->sc_phytype == WMPHY_82578) {
   14579 		struct mii_softc *child;
   14580 
   14581 		/*
   14582 		 * Return registers to default by doing a soft reset then
   14583 		 * writing 0x3140 to the control register
   14584 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14585 		 */
   14586 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14587 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14588 			PHY_RESET(child);
   14589 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14590 			    0x3140);
   14591 		}
   14592 	}
   14593 
   14594 	/* Select page 0 */
   14595 	sc->phy.acquire(sc);
   14596 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14597 	sc->phy.release(sc);
   14598 
   14599 	/*
    14600 	 * Configure the K1 Si workaround during PHY reset, assuming there
    14601 	 * is a link, so that K1 is disabled when the link is at 1Gbps.
   14602 	 */
   14603 	wm_k1_gig_workaround_hv(sc, 1);
   14604 }
   14605 
   14606 /*
   14607  *  wm_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
   14608  *  done after every PHY reset.
   14609  */
   14610 static void
   14611 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14612 {
   14613 
   14614 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14615 		device_xname(sc->sc_dev), __func__));
   14616 	KASSERT(sc->sc_type == WM_T_PCH2);
   14617 
   14618 	/* Set MDIO slow mode before any other MDIO access */
   14619 	wm_set_mdio_slow_mode_hv(sc);
   14620 
   14621 	/* XXX set MSE higher to enable link to stay up when noise is high */
   14622 	/* XXX drop link after 5 times MSE threshold was reached */
   14623 }
   14624 
   14625 /**
   14626  *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
   14627  *  @link: link up bool flag
   14628  *
   14629  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14630  *  preventing further DMA write requests.  Workaround the issue by disabling
    14631  *  the de-assertion of the clock request when in 1Gbps mode.
   14632  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14633  *  speeds in order to avoid Tx hangs.
   14634  **/
   14635 static int
   14636 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14637 {
   14638 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14639 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14640 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14641 	uint16_t phyreg;
   14642 
   14643 	if (link && (speed == STATUS_SPEED_1000)) {
   14644 		sc->phy.acquire(sc);
   14645 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14646 		    &phyreg);
   14647 		if (rv != 0)
   14648 			goto release;
   14649 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14650 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14651 		if (rv != 0)
   14652 			goto release;
   14653 		delay(20);
   14654 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14655 
   14656 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14657 		    &phyreg);
   14658 release:
   14659 		sc->phy.release(sc);
   14660 		return rv;
   14661 	}
   14662 
   14663 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14664 
   14665 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14666 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14667 	    || !link
   14668 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14669 		goto update_fextnvm6;
   14670 
   14671 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14672 
   14673 	/* Clear link status transmit timeout */
   14674 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14675 	if (speed == STATUS_SPEED_100) {
   14676 		/* Set inband Tx timeout to 5x10us for 100Half */
   14677 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14678 
   14679 		/* Do not extend the K1 entry latency for 100Half */
   14680 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14681 	} else {
   14682 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14683 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14684 
   14685 		/* Extend the K1 entry latency for 10 Mbps */
   14686 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14687 	}
   14688 
   14689 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14690 
   14691 update_fextnvm6:
   14692 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14693 	return 0;
   14694 }
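
/*
 * Illustrative sketch, not part of the driver: the inband timeout field
 * programmed above counts units of 10us, so 5 encodes 50us (100Half) and
 * 50 encodes 500us (10Mbps).  ex_inband_timeout() is a hypothetical
 * helper showing the field update; mask and shift are assumed to be the
 * I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_* values used above.
 */
#if 0	/* example only */
static inline uint16_t
ex_inband_timeout(uint16_t phyreg, uint16_t units_of_10us,
    uint16_t mask, u_int shift)
{

	phyreg &= ~mask;			/* clear the old timeout */
	return phyreg | (units_of_10us << shift);
}
#endif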
   14695 
   14696 /*
   14697  *  wm_k1_gig_workaround_hv - K1 Si workaround
   14698  *  @sc:   pointer to the HW structure
   14699  *  @link: link up bool flag
   14700  *
   14701  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
    14702  *  from a lower speed.  This workaround disables K1 whenever link is
    14703  *  at 1Gig.  If link is down, the function will restore the default K1
    14704  *  setting located in the NVM.
   14705  */
   14706 static int
   14707 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14708 {
   14709 	int k1_enable = sc->sc_nvm_k1_enabled;
   14710 
   14711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14712 		device_xname(sc->sc_dev), __func__));
   14713 
   14714 	if (sc->phy.acquire(sc) != 0)
   14715 		return -1;
   14716 
   14717 	if (link) {
   14718 		k1_enable = 0;
   14719 
   14720 		/* Link stall fix for link up */
   14721 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14722 		    0x0100);
   14723 	} else {
   14724 		/* Link stall fix for link down */
   14725 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14726 		    0x4100);
   14727 	}
   14728 
   14729 	wm_configure_k1_ich8lan(sc, k1_enable);
   14730 	sc->phy.release(sc);
   14731 
   14732 	return 0;
   14733 }
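
/*
 * Illustrative usage sketch, not part of the driver: callers pass the
 * current link state, e.g. the PHY reset path above calls this with
 * link set so K1 is disabled if the link comes back at 1Gbps, and the
 * link-down path restores the NVM default:
 */
#if 0	/* example only */
	wm_k1_gig_workaround_hv(sc, 1);	/* assume link (PHY reset path) */
	wm_k1_gig_workaround_hv(sc, 0);	/* link down: restore NVM default */
#endif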
   14734 
   14735 /*
    14736  *  wm_k1_workaround_lv - K1 Si workaround
    14737  *  @sc:   pointer to the HW structure
    14738  *
    14739  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps.
    14740  *  Disable K1 for 1000 and 100 speeds.
   14741  */
   14742 static int
   14743 wm_k1_workaround_lv(struct wm_softc *sc)
   14744 {
   14745 	uint32_t reg;
   14746 	int phyreg;
   14747 
   14748 	if (sc->sc_type != WM_T_PCH2)
   14749 		return 0;
   14750 
   14751 	/* Set K1 beacon duration based on 10Mbps speed */
   14752 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS);
   14753 
   14754 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   14755 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   14756 		if (phyreg &
   14757 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
    14758 			/* LV 1G/100 packet drop issue workaround */
   14759 			phyreg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL);
   14760 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   14761 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, phyreg);
   14762 		} else {
   14763 			/* For 10Mbps */
   14764 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   14765 			reg &= ~FEXTNVM4_BEACON_DURATION;
   14766 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   14767 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   14768 		}
   14769 	}
   14770 
   14771 	return 0;
   14772 }
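
/*
 * Illustrative sketch, not part of the driver: the FEXTNVM4 update above
 * is the usual read-modify-write of a masked register field.  A generic
 * helper (hypothetical, named ex_csr_update_field) would look like this,
 * reusing the driver's CSR_READ/CSR_WRITE macros:
 */
#if 0	/* example only */
static inline void
ex_csr_update_field(struct wm_softc *sc, int reg_off, uint32_t mask,
    uint32_t val)
{
	uint32_t reg;

	reg = CSR_READ(sc, reg_off);
	reg = (reg & ~mask) | (val & mask);	/* clear field, set new value */
	CSR_WRITE(sc, reg_off, reg);
}
#endif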
   14773 
   14774 /*
   14775  *  wm_link_stall_workaround_hv - Si workaround
   14776  *  @sc: pointer to the HW structure
   14777  *
   14778  *  This function works around a Si bug where the link partner can get
   14779  *  a link up indication before the PHY does. If small packets are sent
    14780  *  by the link partner, they can be placed in the packet buffer without
    14781  *  being properly accounted for by the PHY and will stall, preventing
   14782  *  further packets from being received.  The workaround is to clear the
   14783  *  packet buffer after the PHY detects link up.
   14784  */
   14785 static int
   14786 wm_link_stall_workaround_hv(struct wm_softc *sc)
   14787 {
   14788 	int phyreg;
   14789 
   14790 	if (sc->sc_phytype != WMPHY_82578)
   14791 		return 0;
   14792 
    14793 	/* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
    14794 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR);
   14795 	if ((phyreg & BMCR_LOOP) != 0)
   14796 		return 0;
   14797 
    14798 	/* Check if link is up and at 1Gbps */
   14799 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS);
   14800 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   14801 	    | BM_CS_STATUS_SPEED_MASK;
   14802 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   14803 		| BM_CS_STATUS_SPEED_1000))
   14804 		return 0;
   14805 
   14806 	delay(200 * 1000);	/* XXX too big */
   14807 
    14808 	/* Flush the packets in the FIFO buffer */
   14809 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   14810 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   14811 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   14812 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   14813 
   14814 	return 0;
   14815 }
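
/*
 * Illustrative sketch, not part of the driver: the link check above
 * masks the status register down to the bits of interest and compares
 * against one exact pattern, rejecting every state other than
 * "link up, speed resolved, 1000Mbps":
 */
#if 0	/* example only */
	const int ex_mask = BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_MASK;
	const int ex_want = BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
	    | BM_CS_STATUS_SPEED_1000;
	if ((phyreg & ex_mask) != ex_want)
		return 0;	/* workaround not needed */
#endif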
   14816 
   14817 static void
   14818 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14819 {
   14820 	uint32_t reg;
   14821 
   14822 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14823 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14824 	    reg | HV_KMRN_MDIO_SLOW);
   14825 }
   14826 
   14827 /*
   14828  *  wm_configure_k1_ich8lan - Configure K1 power state
   14829  *  @sc: pointer to the HW structure
   14830  *  @enable: K1 state to configure
   14831  *
   14832  *  Configure the K1 power state based on the provided parameter.
   14833  *  Assumes semaphore already acquired.
   14834  */
   14835 static void
   14836 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14837 {
   14838 	uint32_t ctrl, ctrl_ext, tmp;
   14839 	uint16_t kmreg;
   14840 	int rv;
   14841 
   14842 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14843 
   14844 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14845 	if (rv != 0)
   14846 		return;
   14847 
   14848 	if (k1_enable)
   14849 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14850 	else
   14851 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14852 
   14853 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14854 	if (rv != 0)
   14855 		return;
   14856 
   14857 	delay(20);
   14858 
   14859 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14860 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14861 
   14862 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14863 	tmp |= CTRL_FRCSPD;
   14864 
   14865 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14866 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14867 	CSR_WRITE_FLUSH(sc);
   14868 	delay(20);
   14869 
   14870 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14871 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14872 	CSR_WRITE_FLUSH(sc);
   14873 	delay(20);
   14874 
   14875 	return;
   14876 }
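
/*
 * Illustrative sketch, not part of the driver: compressed, the CTRL
 * dance above saves the MAC speed configuration, briefly forces it
 * while K1 is being reprogrammed, and then restores the saved values:
 */
#if 0	/* example only */
	saved_ctrl = CSR_READ(sc, WMREG_CTRL);
	CSR_WRITE(sc, WMREG_CTRL,
	    (saved_ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100)) | CTRL_FRCSPD);
	delay(20);				/* let the forced state settle */
	CSR_WRITE(sc, WMREG_CTRL, saved_ctrl);	/* restore */
#endif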
   14877 
    14878 /* Special case - the 82575 needs manual init ... */
   14879 static void
   14880 wm_reset_init_script_82575(struct wm_softc *sc)
   14881 {
   14882 	/*
    14883 	 * Remark: this is untested code - we have no board without EEPROM.
    14884 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14885 	 */
   14886 
   14887 	/* SerDes configuration via SERDESCTRL */
   14888 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14889 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14890 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14891 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14892 
   14893 	/* CCM configuration via CCMCTL register */
   14894 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14895 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14896 
   14897 	/* PCIe lanes configuration */
   14898 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14899 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14900 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14901 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14902 
   14903 	/* PCIe PLL Configuration */
   14904 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14905 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14906 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14907 }
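
/*
 * Illustrative sketch, not part of the driver: the init script above is
 * equivalent to iterating a table of (register, offset, value) triplets;
 * shown only to clarify the structure of the writes (the SCTL entries
 * are used as the example).
 */
#if 0	/* example only */
	static const struct {
		uint32_t reg;
		uint8_t off, val;
	} ex_script[] = {
		{ WMREG_SCTL, 0x00, 0x0c }, { WMREG_SCTL, 0x01, 0x78 },
		{ WMREG_SCTL, 0x1b, 0x23 }, { WMREG_SCTL, 0x23, 0x15 },
	};
	for (size_t i = 0; i < __arraycount(ex_script); i++)
		wm_82575_write_8bit_ctlr_reg(sc, ex_script[i].reg,
		    ex_script[i].off, ex_script[i].val);
#endif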
   14908 
   14909 static void
   14910 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14911 {
   14912 	uint32_t reg;
   14913 	uint16_t nvmword;
   14914 	int rv;
   14915 
   14916 	if (sc->sc_type != WM_T_82580)
   14917 		return;
   14918 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14919 		return;
   14920 
   14921 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14922 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14923 	if (rv != 0) {
   14924 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14925 		    __func__);
   14926 		return;
   14927 	}
   14928 
   14929 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14930 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14931 		reg |= MDICNFG_DEST;
   14932 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14933 		reg |= MDICNFG_COM_MDIO;
   14934 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14935 }
   14936 
   14937 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
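
/*
 * Illustrative note, not part of the driver: all-zeroes and all-ones are
 * what a floating or unresponsive MDIO bus returns for the PHY ID
 * registers, so both patterns are treated as "no PHY present":
 */
#if 0	/* example only */
	KASSERT(MII_INVALIDID(0x0000));		/* bus stuck low */
	KASSERT(MII_INVALIDID(0xffff));		/* bus pulled up, no device */
	KASSERT(!MII_INVALIDID(0x0141));	/* e.g. a real ID word */
#endif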
   14938 
   14939 static bool
   14940 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14941 {
   14942 	uint32_t reg;
   14943 	uint16_t id1, id2;
   14944 	int i, rv;
   14945 
   14946 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14947 		device_xname(sc->sc_dev), __func__));
   14948 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14949 
   14950 	id1 = id2 = 0xffff;
   14951 	for (i = 0; i < 2; i++) {
   14952 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   14953 		    &id1);
   14954 		if ((rv != 0) || MII_INVALIDID(id1))
   14955 			continue;
   14956 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   14957 		    &id2);
   14958 		if ((rv != 0) || MII_INVALIDID(id2))
   14959 			continue;
   14960 		break;
   14961 	}
   14962 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   14963 		goto out;
   14964 
   14965 	/*
    14966 	 * In case the PHY needs to be in MDIO slow mode,
   14967 	 * set slow mode and try to get the PHY id again.
   14968 	 */
   14969 	if (sc->sc_type < WM_T_PCH_LPT) {
   14970 		sc->phy.release(sc);
   14971 		wm_set_mdio_slow_mode_hv(sc);
   14972 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14973 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14974 		sc->phy.acquire(sc);
   14975 	}
   14976 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
    14977 		printf("%s: XXX return with false\n", device_xname(sc->sc_dev));
   14978 		return false;
   14979 	}
   14980 out:
   14981 	if (sc->sc_type >= WM_T_PCH_LPT) {
   14982 		/* Only unforce SMBus if ME is not active */
   14983 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14984 			uint16_t phyreg;
   14985 
   14986 			/* Unforce SMBus mode in PHY */
   14987 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14988 			    CV_SMB_CTRL, &phyreg);
   14989 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14990 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14991 			    CV_SMB_CTRL, phyreg);
   14992 
   14993 			/* Unforce SMBus mode in MAC */
   14994 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14995 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14996 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14997 		}
   14998 	}
   14999 	return true;
   15000 }
   15001 
   15002 static void
   15003 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15004 {
   15005 	uint32_t reg;
   15006 	int i;
   15007 
   15008 	/* Set PHY Config Counter to 50msec */
   15009 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15010 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15011 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15012 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15013 
   15014 	/* Toggle LANPHYPC */
   15015 	reg = CSR_READ(sc, WMREG_CTRL);
   15016 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15017 	reg &= ~CTRL_LANPHYPC_VALUE;
   15018 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15019 	CSR_WRITE_FLUSH(sc);
   15020 	delay(1000);
   15021 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15022 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15023 	CSR_WRITE_FLUSH(sc);
   15024 
   15025 	if (sc->sc_type < WM_T_PCH_LPT)
   15026 		delay(50 * 1000);
   15027 	else {
   15028 		i = 20;
   15029 
   15030 		do {
   15031 			delay(5 * 1000);
   15032 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15033 		    && i--);
   15034 
   15035 		delay(30 * 1000);
   15036 	}
   15037 }
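
/*
 * Illustrative sketch, not part of the driver: the do/while above is a
 * bounded poll, roughly 20 x 5ms, waiting for CTRL_EXT_LPCD to latch.
 * A generic form of the same idiom (hypothetical ex_poll_bit helper):
 */
#if 0	/* example only */
static bool
ex_poll_bit(struct wm_softc *sc, int reg_off, uint32_t bit, int tries,
    int step_us)
{

	while (tries-- > 0) {
		delay(step_us);
		if (CSR_READ(sc, reg_off) & bit)
			return true;
	}
	return false;		/* timed out */
}
#endif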
   15038 
   15039 static int
   15040 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15041 {
   15042 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15043 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15044 	uint32_t rxa;
   15045 	uint16_t scale = 0, lat_enc = 0;
   15046 	int32_t obff_hwm = 0;
   15047 	int64_t lat_ns, value;
   15048 
   15049 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15050 		device_xname(sc->sc_dev), __func__));
   15051 
   15052 	if (link) {
   15053 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15054 		uint32_t status;
   15055 		uint16_t speed;
   15056 		pcireg_t preg;
   15057 
   15058 		status = CSR_READ(sc, WMREG_STATUS);
   15059 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15060 		case STATUS_SPEED_10:
   15061 			speed = 10;
   15062 			break;
   15063 		case STATUS_SPEED_100:
   15064 			speed = 100;
   15065 			break;
   15066 		case STATUS_SPEED_1000:
   15067 			speed = 1000;
   15068 			break;
   15069 		default:
   15070 			device_printf(sc->sc_dev, "Unknown speed "
   15071 			    "(status = %08x)\n", status);
   15072 			return -1;
   15073 		}
   15074 
   15075 		/* Rx Packet Buffer Allocation size (KB) */
   15076 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15077 
   15078 		/*
   15079 		 * Determine the maximum latency tolerated by the device.
   15080 		 *
   15081 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15082 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15083 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15084 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15085 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   15086 		 */
   15087 		lat_ns = ((int64_t)rxa * 1024 -
   15088 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15089 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15090 		if (lat_ns < 0)
   15091 			lat_ns = 0;
   15092 		else
   15093 			lat_ns /= speed;
   15094 		value = lat_ns;
   15095 
   15096 		while (value > LTRV_VALUE) {
    15097 			scale++;
   15098 			value = howmany(value, __BIT(5));
   15099 		}
   15100 		if (scale > LTRV_SCALE_MAX) {
    15101 			device_printf(sc->sc_dev,
    15102 			    "Invalid LTR latency scale %d\n", scale);
   15103 			return -1;
   15104 		}
   15105 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15106 
   15107 		/* Determine the maximum latency tolerated by the platform */
   15108 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15109 		    WM_PCI_LTR_CAP_LPT);
   15110 		max_snoop = preg & 0xffff;
   15111 		max_nosnoop = preg >> 16;
   15112 
   15113 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15114 
   15115 		if (lat_enc > max_ltr_enc) {
   15116 			lat_enc = max_ltr_enc;
   15117 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15118 			    * PCI_LTR_SCALETONS(
   15119 				    __SHIFTOUT(lat_enc,
   15120 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15121 		}
   15122 
   15123 		if (lat_ns) {
   15124 			lat_ns *= speed * 1000;
   15125 			lat_ns /= 8;
   15126 			lat_ns /= 1000000000;
   15127 			obff_hwm = (int32_t)(rxa - lat_ns);
   15128 		}
   15129 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
    15130 			device_printf(sc->sc_dev, "Invalid high water mark %d "
    15131 			    "(rxa = %d, lat_ns = %d)\n",
   15132 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15133 			return -1;
   15134 		}
   15135 	}
    15136 	/* Snoop and No-Snoop latencies are the same */
   15137 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15138 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15139 
   15140 	/* Set OBFF high water mark */
   15141 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15142 	reg |= obff_hwm;
   15143 	CSR_WRITE(sc, WMREG_SVT, reg);
   15144 
   15145 	/* Enable OBFF */
   15146 	reg = CSR_READ(sc, WMREG_SVCR);
   15147 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15148 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15149 
   15150 	return 0;
   15151 }
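
/*
 * Illustrative sketch, not part of the driver: worked example of the LTR
 * encoding computed above.  For lat_ns = 100000 the loop divides by 2^5,
 * rounding up, until the value fits in the 10-bit field:
 *	100000 -> 3125 (scale 1) -> 98 (scale 2)
 * so the encoding is value 98 at scale 2, i.e. 98 * 2^(5*2) ns =
 * 100352 ns, the smallest representable latency covering the request.
 * ex_ltr_encode() is hypothetical and assumes the LTRV field layout
 * implied by LTRV_VALUE/LTRV_SCALE above (value in bits 9:0, scale
 * starting at bit 10).
 */
#if 0	/* example only */
static uint16_t
ex_ltr_encode(int64_t lat_ns)
{
	uint16_t scale = 0;

	while (lat_ns > 0x3ff) {		/* 10-bit value field */
		scale++;
		lat_ns = howmany(lat_ns, 32);	/* divide by 2^5, round up */
	}
	return (uint16_t)((scale << 10) | lat_ns);
}
#endif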
   15152 
   15153 /*
   15154  * I210 Errata 25 and I211 Errata 10
   15155  * Slow System Clock.
   15156  */
   15157 static void
   15158 wm_pll_workaround_i210(struct wm_softc *sc)
   15159 {
   15160 	uint32_t mdicnfg, wuc;
   15161 	uint32_t reg;
   15162 	pcireg_t pcireg;
   15163 	uint32_t pmreg;
   15164 	uint16_t nvmword, tmp_nvmword;
   15165 	int phyval;
   15166 	bool wa_done = false;
   15167 	int i;
   15168 
   15169 	/* Save WUC and MDICNFG registers */
   15170 	wuc = CSR_READ(sc, WMREG_WUC);
   15171 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15172 
   15173 	reg = mdicnfg & ~MDICNFG_DEST;
   15174 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15175 
   15176 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15177 		nvmword = INVM_DEFAULT_AL;
   15178 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15179 
   15180 	/* Get Power Management cap offset */
   15181 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15182 		&pmreg, NULL) == 0)
   15183 		return;
   15184 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15185 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15186 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   15187 
   15188 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15189 			break; /* OK */
   15190 		}
   15191 
   15192 		wa_done = true;
   15193 		/* Directly reset the internal PHY */
   15194 		reg = CSR_READ(sc, WMREG_CTRL);
   15195 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15196 
   15197 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15198 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15199 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15200 
   15201 		CSR_WRITE(sc, WMREG_WUC, 0);
   15202 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15203 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15204 
   15205 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15206 		    pmreg + PCI_PMCSR);
   15207 		pcireg |= PCI_PMCSR_STATE_D3;
   15208 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15209 		    pmreg + PCI_PMCSR, pcireg);
   15210 		delay(1000);
   15211 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15212 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15213 		    pmreg + PCI_PMCSR, pcireg);
   15214 
   15215 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15216 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15217 
   15218 		/* Restore WUC register */
   15219 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15220 	}
   15221 
   15222 	/* Restore MDICNFG setting */
   15223 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15224 	if (wa_done)
   15225 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15226 }
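
/*
 * Illustrative sketch, not part of the driver: the core of the retry
 * loop above is a D0 -> D3hot -> D0 bounce through the PCI power
 * management control/status register, which forces the PHY PLL to
 * relock before the uninitialized check is retried:
 */
#if 0	/* example only */
	pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR,
	    pcireg | PCI_PMCSR_STATE_D3);	/* enter D3hot */
	delay(1000);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR,
	    pcireg & ~PCI_PMCSR_STATE_D3);	/* back to D0 */
#endif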
   15227 
   15228 static void
   15229 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15230 {
   15231 	uint32_t reg;
   15232 
   15233 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15234 		device_xname(sc->sc_dev), __func__));
   15235 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15236 	    || (sc->sc_type == WM_T_PCH_CNP));
   15237 
   15238 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15239 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15240 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15241 
   15242 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15243 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15244 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15245 }
   15246