/*	$NetBSD: if_wm.c,v 1.603 2018/12/12 08:49:33 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.603 2018/12/12 08:49:33 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
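
/*
 * Usage note (a sketch, not taken verbatim from this file): because the
 * macro pastes its second argument directly after printf, that argument
 * must be a fully parenthesized printf argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *	    device_xname(sc->sc_dev)));
 */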

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver uses: one per queue,
 * plus one for the link interrupt.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 64 DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.  If an mbuf chain has more than 64 DMA
 * segments, m_defrag() is called to reduce the segment count.
 */
#define	WM_NTXSEGS		64
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
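
/*
 * A worked example of the ring arithmetic above: WM_NTXDESC(txq) must
 * be a power of two, so the mask form wraps around without a modulo
 * operation.  With txq_ndesc == 4096,
 * WM_NEXTTX(txq, 4095) == ((4095 + 1) & 4095) == 0.
 */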

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
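
/*
 * The arithmetic behind the comment above, for reference (assuming a
 * 9018-byte jumbo frame): such a frame spans howmany(9018, MCLBYTES)
 * == 5 two-kilobyte buffers, so 256 descriptors give roughly
 * 256 / 5 = 50 full-sized jumbo packets.
 */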

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;
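
/*
 * The unions let a single DMA allocation back whichever descriptor
 * layout the chip uses: the same memory is viewed as legacy, extended
 * or NEWQUEUE descriptors depending on the MAC type, and the per-queue
 * txq_descsize/rxq_descsize fields record which element size is in
 * effect.
 */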

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
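
/*
 * A sketch of how the event-counter macros expand, for reference:
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is not macro-expanded, so every name buffer is
 * the same 18 bytes), and WM_Q_EVCNT_ATTACH() then formats a name
 * such as "txq00txdw" into that buffer before calling
 * evcnt_attach_dynamic().
 */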

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a Tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This pcq mediates between them without blocking.
	 */
	pcq_t *txq_interq;
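
	/*
	 * Usage sketch (assumed; see wm_transmit()/wm_nq_transmit()
	 * below): a sending CPU enqueues its mbuf with
	 * pcq_put(txq->txq_interq, m), and whichever context currently
	 * holds txq_lock drains the queue with pcq_get() when it fills
	 * the hardware ring.
	 */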

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* TX event counters */
	WM_Q_EVCNT_DEFINE(txq, txsstall)    /* Stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)    /* Stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, fifo_stall)  /* FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)	    /* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)	    /* Tx queue empty interrupts */
					    /* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, ipsum)	    /* IP checksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum)	    /* TCP/UDP cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tusum6)	    /* TCP/UDP v6 cksums comp. */
	WM_Q_EVCNT_DEFINE(txq, tso)	    /* TCP seg offload (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, tso6)	    /* TCP seg offload (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, tsopain)     /* Painful header manip. for TSO */
	WM_Q_EVCNT_DEFINE(txq, pcqdrop)	    /* Pkt dropped in pcq */
	WM_Q_EVCNT_DEFINE(txq, descdrop)    /* Pkt dropped in MAC desc ring */
					    /* other than toomanyseg */

	WM_Q_EVCNT_DEFINE(txq, toomanyseg)  /* Pkt dropped (too many DMA segs) */
	WM_Q_EVCNT_DEFINE(txq, defrag)	    /* m_defrag() */
	WM_Q_EVCNT_DEFINE(txq, underrun)    /* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an Rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	/* RX event counters */
	WM_Q_EVCNT_DEFINE(rxq, intr);	/* Interrupts */
	WM_Q_EVCNT_DEFINE(rxq, defer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, ipsum);	/* IP checksums checked */
	WM_Q_EVCNT_DEFINE(rxq, tusum);	/* TCP/UDP cksums checked */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*readreg_locked)(device_t, int, int, uint16_t *);
	int (*writereg_locked)(device_t, int, int, uint16_t);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
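
/*
 * Note that the macros above deliberately degrade to no-ops (and
 * WM_CORE_LOCKED() to true) when sc_core_lock is NULL, so shared code
 * paths may take and assert the core lock unconditionally whether or
 * not the lock was set up.
 */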

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
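
/*
 * Usage sketch (assumed, matching the Rx path in wm_rxeof()): each
 * buffer of a multi-descriptor packet is appended in O(1) via the
 * tail pointer with WM_RXCHAIN_LINK(rxq, m); once the descriptor
 * marking the end of the packet arrives, rxq_head holds the complete
 * mbuf chain and WM_RXCHAIN_RESET(rxq) prepares for the next packet.
 */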

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
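
/*
 * CSR_WRITE_FLUSH() forces posted PCI writes out to the chip: a read
 * of the STATUS register cannot complete until the writes queued
 * ahead of it have been delivered.  A typical use (a sketch of the
 * common pattern, not a specific call site) is:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);
 *
 * which guarantees the CTRL update has taken effect before continuing.
 */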

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
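
/*
 * The _LO/_HI pairs split the DMA address of ring entry x into the
 * two 32-bit halves that the chip's 64-bit base address registers
 * take; when bus_addr_t is only 32 bits wide, the high half is simply
 * 0.  The sizeof() test is a compile-time constant, so no runtime
 * branch remains.
 */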

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static int	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static int	wm_oem_bits_config_ich8lan(struct wm_softc *, bool);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static int	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static int	wm_gmii_i82544_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int, uint16_t *);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_hv_writereg_locked(device_t, int, int, uint16_t);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static bool	wm_tbi_havesignal(struct wm_softc *, uint32_t);
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Using with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static int	wm_init_phy_workarounds_pchlan(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static int	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_suspend_workarounds_ich8lan(struct wm_softc *);
static int	wm_resume_workarounds_pchlan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_workaround_lpt_lp(struct wm_softc *, bool);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static int	wm_k1_workaround_lv(struct wm_softc *);
static int	wm_link_stall_workaround_hv(struct wm_softc *);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1330 	  "PCH LAN (82578DC) Controller",
   1331 	  WM_T_PCH,		WMP_F_COPPER },
   1332 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1333 	  "PCH2 LAN (82579LM) Controller",
   1334 	  WM_T_PCH2,		WMP_F_COPPER },
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1336 	  "PCH2 LAN (82579V) Controller",
   1337 	  WM_T_PCH2,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1339 	  "82575EB dual-1000baseT Ethernet",
   1340 	  WM_T_82575,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1342 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1343 	  WM_T_82575,		WMP_F_SERDES },
   1344 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1345 	  "82575GB quad-1000baseT Ethernet",
   1346 	  WM_T_82575,		WMP_F_COPPER },
   1347 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1348 	  "82575GB quad-1000baseT Ethernet (PM)",
   1349 	  WM_T_82575,		WMP_F_COPPER },
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1351 	  "82576 1000BaseT Ethernet",
   1352 	  WM_T_82576,		WMP_F_COPPER },
   1353 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1354 	  "82576 1000BaseX Ethernet",
   1355 	  WM_T_82576,		WMP_F_FIBER },
   1356 
   1357 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1358 	  "82576 gigabit Ethernet (SERDES)",
   1359 	  WM_T_82576,		WMP_F_SERDES },
   1360 
   1361 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1362 	  "82576 quad-1000BaseT Ethernet",
   1363 	  WM_T_82576,		WMP_F_COPPER },
   1364 
   1365 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1366 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1367 	  WM_T_82576,		WMP_F_COPPER },
   1368 
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1370 	  "82576 gigabit Ethernet",
   1371 	  WM_T_82576,		WMP_F_COPPER },
   1372 
   1373 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1374 	  "82576 gigabit Ethernet (SERDES)",
   1375 	  WM_T_82576,		WMP_F_SERDES },
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1377 	  "82576 quad-gigabit Ethernet (SERDES)",
   1378 	  WM_T_82576,		WMP_F_SERDES },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1381 	  "82580 1000BaseT Ethernet",
   1382 	  WM_T_82580,		WMP_F_COPPER },
   1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1384 	  "82580 1000BaseX Ethernet",
   1385 	  WM_T_82580,		WMP_F_FIBER },
   1386 
   1387 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1388 	  "82580 1000BaseT Ethernet (SERDES)",
   1389 	  WM_T_82580,		WMP_F_SERDES },
   1390 
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1392 	  "82580 gigabit Ethernet (SGMII)",
   1393 	  WM_T_82580,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1395 	  "82580 dual-1000BaseT Ethernet",
   1396 	  WM_T_82580,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1399 	  "82580 quad-1000BaseX Ethernet",
   1400 	  WM_T_82580,		WMP_F_FIBER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1403 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1404 	  WM_T_82580,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1407 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1408 	  WM_T_82580,		WMP_F_SERDES },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1411 	  "DH89XXCC 1000BASE-KX Ethernet",
   1412 	  WM_T_82580,		WMP_F_SERDES },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1415 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1416 	  WM_T_82580,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1419 	  "I350 Gigabit Network Connection",
   1420 	  WM_T_I350,		WMP_F_COPPER },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1423 	  "I350 Gigabit Fiber Network Connection",
   1424 	  WM_T_I350,		WMP_F_FIBER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1427 	  "I350 Gigabit Backplane Connection",
   1428 	  WM_T_I350,		WMP_F_SERDES },
   1429 
   1430 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1431 	  "I350 Quad Port Gigabit Ethernet",
   1432 	  WM_T_I350,		WMP_F_SERDES },
   1433 
   1434 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1435 	  "I350 Gigabit Connection",
   1436 	  WM_T_I350,		WMP_F_COPPER },
   1437 
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1439 	  "I354 Gigabit Ethernet (KX)",
   1440 	  WM_T_I354,		WMP_F_SERDES },
   1441 
   1442 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1443 	  "I354 Gigabit Ethernet (SGMII)",
   1444 	  WM_T_I354,		WMP_F_COPPER },
   1445 
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1447 	  "I354 Gigabit Ethernet (2.5G)",
   1448 	  WM_T_I354,		WMP_F_COPPER },
   1449 
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1451 	  "I210-T1 Ethernet Server Adapter",
   1452 	  WM_T_I210,		WMP_F_COPPER },
   1453 
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1455 	  "I210 Ethernet (Copper OEM)",
   1456 	  WM_T_I210,		WMP_F_COPPER },
   1457 
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1459 	  "I210 Ethernet (Copper IT)",
   1460 	  WM_T_I210,		WMP_F_COPPER },
   1461 
   1462 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1463 	  "I210 Ethernet (FLASH less)",
   1464 	  WM_T_I210,		WMP_F_COPPER },
   1465 
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1467 	  "I210 Gigabit Ethernet (Fiber)",
   1468 	  WM_T_I210,		WMP_F_FIBER },
   1469 
   1470 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1471 	  "I210 Gigabit Ethernet (SERDES)",
   1472 	  WM_T_I210,		WMP_F_SERDES },
   1473 
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1475 	  "I210 Gigabit Ethernet (FLASH less)",
   1476 	  WM_T_I210,		WMP_F_SERDES },
   1477 
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1479 	  "I210 Gigabit Ethernet (SGMII)",
   1480 	  WM_T_I210,		WMP_F_COPPER },
   1481 
   1482 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1483 	  "I211 Ethernet (COPPER)",
   1484 	  WM_T_I211,		WMP_F_COPPER },
   1485 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1486 	  "I217 V Ethernet Connection",
   1487 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1488 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1489 	  "I217 LM Ethernet Connection",
   1490 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1491 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1492 	  "I218 V Ethernet Connection",
   1493 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1494 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1495 	  "I218 V Ethernet Connection",
   1496 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1497 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1498 	  "I218 V Ethernet Connection",
   1499 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1500 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1501 	  "I218 LM Ethernet Connection",
   1502 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1503 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1504 	  "I218 LM Ethernet Connection",
   1505 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1506 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1507 	  "I218 LM Ethernet Connection",
   1508 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1509 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1510 	  "I219 V Ethernet Connection",
   1511 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1512 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1513 	  "I219 V Ethernet Connection",
   1514 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1515 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1516 	  "I219 V Ethernet Connection",
   1517 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1518 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1519 	  "I219 V Ethernet Connection",
   1520 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1521 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1522 	  "I219 LM Ethernet Connection",
   1523 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1524 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1525 	  "I219 LM Ethernet Connection",
   1526 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1527 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1528 	  "I219 LM Ethernet Connection",
   1529 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1530 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1531 	  "I219 LM Ethernet Connection",
   1532 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1533 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1534 	  "I219 LM Ethernet Connection",
   1535 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1536 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
   1537 	  "I219 V Ethernet Connection",
   1538 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1539 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
   1540 	  "I219 V Ethernet Connection",
   1541 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1542 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
   1543 	  "I219 LM Ethernet Connection",
   1544 	  WM_T_PCH_CNP,		WMP_F_COPPER },
   1545 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
   1546 	  "I219 LM Ethernet Connection",
   1547 	  WM_T_PCH_CNP,		WMP_F_COPPER },
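	/*
	 * Sentinel entry: wm_lookup() below stops at the first entry
	 * whose wmp_name is NULL.
	 */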
   1548 	{ 0,			0,
   1549 	  NULL,
   1550 	  0,			0 },
   1551 };
   1552 
   1553 /*
   1554  * Register read/write functions.
   1555  * Other than CSR_{READ|WRITE}().
   1556  */
   1557 
   1558 #if 0 /* Not currently used */
   1559 static inline uint32_t
   1560 wm_io_read(struct wm_softc *sc, int reg)
   1561 {
   1562 
   1563 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1564 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1565 }
   1566 #endif
   1567 
   1568 static inline void
   1569 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1570 {
   1571 
   1572 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1573 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1574 }
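
/*
 * Both helpers above use the chip's indirect I/O window: the target
 * register offset is written to the address port at I/O offset 0, and
 * the value is then read from or written to the data port at I/O
 * offset 4.
 */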
   1575 
   1576 static inline void
   1577 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1578     uint32_t data)
   1579 {
   1580 	uint32_t regval;
   1581 	int i;
   1582 
   1583 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1584 
   1585 	CSR_WRITE(sc, reg, regval);
   1586 
   1587 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1588 		delay(5);
   1589 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1590 			break;
   1591 	}
   1592 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1593 		aprint_error("%s: WARNING:"
   1594 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1595 		    device_xname(sc->sc_dev), reg);
   1596 	}
   1597 }
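
/*
 * The poll in wm_82575_write_8bit_ctlr_reg() waits at most
 * SCTL_CTL_POLL_TIMEOUT * 5 microseconds for the controller to latch
 * the 8-bit value; on timeout it only warns instead of failing the
 * operation.
 */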
   1598 
   1599 static inline void
   1600 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1601 {
   1602 	wa->wa_low = htole32(v & 0xffffffffU);
   1603 	if (sizeof(bus_addr_t) == 8)
   1604 		wa->wa_high = htole32((uint64_t) v >> 32);
   1605 	else
   1606 		wa->wa_high = 0;
   1607 }
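
/*
 * For example, an assumed 64-bit bus address v == 0x123456789 is stored
 * as wa_low = htole32(0x23456789) and wa_high = htole32(0x1); on
 * platforms with a 32-bit bus_addr_t, wa_high is simply zeroed.
 */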
   1608 
   1609 /*
   1610  * Descriptor sync/init functions.
   1611  */
   1612 static inline void
   1613 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1614 {
   1615 	struct wm_softc *sc = txq->txq_sc;
   1616 
   1617 	/* If it will wrap around, sync to the end of the ring. */
   1618 	if ((start + num) > WM_NTXDESC(txq)) {
   1619 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1620 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1621 		    (WM_NTXDESC(txq) - start), ops);
   1622 		num -= (WM_NTXDESC(txq) - start);
   1623 		start = 0;
   1624 	}
   1625 
   1626 	/* Now sync whatever is left. */
   1627 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1628 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1629 }
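
/*
 * Illustrative wrap-around case (ring size assumed): with
 * WM_NTXDESC(txq) == 256, start == 250 and num == 10, the first
 * bus_dmamap_sync() above covers descriptors 250..255 and the second
 * covers 0..3, so the whole range is synced even though it wraps past
 * the end of the ring.
 */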
   1630 
   1631 static inline void
   1632 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1633 {
   1634 	struct wm_softc *sc = rxq->rxq_sc;
   1635 
   1636 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1637 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1638 }
   1639 
   1640 static inline void
   1641 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1642 {
   1643 	struct wm_softc *sc = rxq->rxq_sc;
   1644 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1645 	struct mbuf *m = rxs->rxs_mbuf;
   1646 
   1647 	/*
   1648 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1649 	 * so that the payload after the Ethernet header is aligned
   1650 	 * to a 4-byte boundary.
    1651 	 *
   1652 	 * XXX BRAINDAMAGE ALERT!
   1653 	 * The stupid chip uses the same size for every buffer, which
   1654 	 * is set in the Receive Control register.  We are using the 2K
   1655 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1656 	 * reason, we can't "scoot" packets longer than the standard
   1657 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1658 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1659 	 * the upper layer copy the headers.
   1660 	 */
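	/*
	 * Illustrative numbers (assumed, not from a datasheet): with
	 * 2048-byte receive buffers and sc_align_tweak == 2, the largest
	 * frame that still fits is 2048 - 2 = 2046 bytes; larger frames
	 * force sc_align_tweak to 0 on strict-alignment machines.
	 */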
   1661 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1662 
   1663 	if (sc->sc_type == WM_T_82574) {
   1664 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1665 		rxd->erx_data.erxd_addr =
   1666 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1667 		rxd->erx_data.erxd_dd = 0;
   1668 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1669 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1670 
   1671 		rxd->nqrx_data.nrxd_paddr =
   1672 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1673 		/* Currently, split header is not supported. */
   1674 		rxd->nqrx_data.nrxd_haddr = 0;
   1675 	} else {
   1676 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1677 
   1678 		wm_set_dma_addr(&rxd->wrx_addr,
   1679 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1680 		rxd->wrx_len = 0;
   1681 		rxd->wrx_cksum = 0;
   1682 		rxd->wrx_status = 0;
   1683 		rxd->wrx_errors = 0;
   1684 		rxd->wrx_special = 0;
   1685 	}
   1686 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1687 
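	/*
	 * Writing the ring index to the RDT (receive descriptor tail)
	 * register hands this freshly initialized descriptor back to
	 * the hardware.
	 */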
   1688 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1689 }
   1690 
   1691 /*
   1692  * Device driver interface functions and commonly used functions.
   1693  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1694  */
   1695 
    1696 /* Look up a device in the supported device table */
   1697 static const struct wm_product *
   1698 wm_lookup(const struct pci_attach_args *pa)
   1699 {
   1700 	const struct wm_product *wmp;
   1701 
   1702 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1703 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1704 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1705 			return wmp;
   1706 	}
   1707 	return NULL;
   1708 }
   1709 
   1710 /* The match function (ca_match) */
   1711 static int
   1712 wm_match(device_t parent, cfdata_t cf, void *aux)
   1713 {
   1714 	struct pci_attach_args *pa = aux;
   1715 
   1716 	if (wm_lookup(pa) != NULL)
   1717 		return 1;
   1718 
   1719 	return 0;
   1720 }
   1721 
   1722 /* The attach function (ca_attach) */
   1723 static void
   1724 wm_attach(device_t parent, device_t self, void *aux)
   1725 {
   1726 	struct wm_softc *sc = device_private(self);
   1727 	struct pci_attach_args *pa = aux;
   1728 	prop_dictionary_t dict;
   1729 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1730 	pci_chipset_tag_t pc = pa->pa_pc;
   1731 	int counts[PCI_INTR_TYPE_SIZE];
   1732 	pci_intr_type_t max_type;
   1733 	const char *eetype, *xname;
   1734 	bus_space_tag_t memt;
   1735 	bus_space_handle_t memh;
   1736 	bus_size_t memsize;
   1737 	int memh_valid;
   1738 	int i, error;
   1739 	const struct wm_product *wmp;
   1740 	prop_data_t ea;
   1741 	prop_number_t pn;
   1742 	uint8_t enaddr[ETHER_ADDR_LEN];
   1743 	char buf[256];
   1744 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1745 	pcireg_t preg, memtype;
   1746 	uint16_t eeprom_data, apme_mask;
   1747 	bool force_clear_smbi;
   1748 	uint32_t link_mode;
   1749 	uint32_t reg;
   1750 
   1751 	sc->sc_dev = self;
   1752 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1753 	sc->sc_core_stopping = false;
   1754 
   1755 	wmp = wm_lookup(pa);
   1756 #ifdef DIAGNOSTIC
   1757 	if (wmp == NULL) {
   1758 		printf("\n");
   1759 		panic("wm_attach: impossible");
   1760 	}
   1761 #endif
   1762 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1763 
   1764 	sc->sc_pc = pa->pa_pc;
   1765 	sc->sc_pcitag = pa->pa_tag;
   1766 
   1767 	if (pci_dma64_available(pa))
   1768 		sc->sc_dmat = pa->pa_dmat64;
   1769 	else
   1770 		sc->sc_dmat = pa->pa_dmat;
   1771 
   1772 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1773 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1774 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1775 
   1776 	sc->sc_type = wmp->wmp_type;
   1777 
   1778 	/* Set default function pointers */
   1779 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1780 	sc->phy.release = sc->nvm.release = wm_put_null;
   1781 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1782 
   1783 	if (sc->sc_type < WM_T_82543) {
   1784 		if (sc->sc_rev < 2) {
   1785 			aprint_error_dev(sc->sc_dev,
   1786 			    "i82542 must be at least rev. 2\n");
   1787 			return;
   1788 		}
   1789 		if (sc->sc_rev < 3)
   1790 			sc->sc_type = WM_T_82542_2_0;
   1791 	}
   1792 
   1793 	/*
   1794 	 * Disable MSI for Errata:
   1795 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1796 	 *
   1797 	 *  82544: Errata 25
   1798 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1799 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1800 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1801 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1802 	 *
   1803 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1804 	 *
   1805 	 *  82571 & 82572: Errata 63
   1806 	 */
   1807 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1808 	    || (sc->sc_type == WM_T_82572))
   1809 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1810 
   1811 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1812 	    || (sc->sc_type == WM_T_82580)
   1813 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1814 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1815 		sc->sc_flags |= WM_F_NEWQUEUE;
   1816 
   1817 	/* Set device properties (mactype) */
   1818 	dict = device_properties(sc->sc_dev);
   1819 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1820 
   1821 	/*
    1822 	 * Map the device.  All devices support memory-mapped access,
   1823 	 * and it is really required for normal operation.
   1824 	 */
   1825 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1826 	switch (memtype) {
   1827 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1828 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1829 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1830 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1831 		break;
   1832 	default:
   1833 		memh_valid = 0;
   1834 		break;
   1835 	}
   1836 
   1837 	if (memh_valid) {
   1838 		sc->sc_st = memt;
   1839 		sc->sc_sh = memh;
   1840 		sc->sc_ss = memsize;
   1841 	} else {
   1842 		aprint_error_dev(sc->sc_dev,
   1843 		    "unable to map device registers\n");
   1844 		return;
   1845 	}
   1846 
   1847 	/*
   1848 	 * In addition, i82544 and later support I/O mapped indirect
   1849 	 * register access.  It is not desirable (nor supported in
   1850 	 * this driver) to use it for normal operation, though it is
   1851 	 * required to work around bugs in some chip versions.
   1852 	 */
   1853 	if (sc->sc_type >= WM_T_82544) {
   1854 		/* First we have to find the I/O BAR. */
   1855 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1856 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1857 			if (memtype == PCI_MAPREG_TYPE_IO)
   1858 				break;
   1859 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1860 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1861 				i += 4;	/* skip high bits, too */
   1862 		}
   1863 		if (i < PCI_MAPREG_END) {
   1864 			/*
    1865 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1866 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
    1867 			 * that's no problem, because the newer chips don't
    1868 			 * have this bug.
    1869 			 *
    1870 			 * The i8254x apparently doesn't respond when the
    1871 			 * I/O BAR is 0, which looks as if it hasn't been
    1872 			 * configured.
   1873 			 */
   1874 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1875 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1876 				aprint_error_dev(sc->sc_dev,
   1877 				    "WARNING: I/O BAR at zero.\n");
   1878 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1879 					0, &sc->sc_iot, &sc->sc_ioh,
   1880 					NULL, &sc->sc_ios) == 0) {
   1881 				sc->sc_flags |= WM_F_IOH_VALID;
   1882 			} else
   1883 				aprint_error_dev(sc->sc_dev,
   1884 				    "WARNING: unable to map I/O space\n");
   1885 		}
   1886 
   1887 	}
   1888 
   1889 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1890 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1891 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1892 	if (sc->sc_type < WM_T_82542_2_1)
   1893 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1894 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1895 
   1896 	/* power up chip */
   1897 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
   1898 	    && error != EOPNOTSUPP) {
   1899 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1900 		return;
   1901 	}
   1902 
   1903 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1904 	/*
    1905 	 *  Don't use MSI-X if we can use only one queue, to save
    1906 	 * interrupt resources.
   1907 	 */
   1908 	if (sc->sc_nqueues > 1) {
   1909 		max_type = PCI_INTR_TYPE_MSIX;
   1910 		/*
    1911 		 *  The 82583 has an MSI-X capability in the PCI configuration
    1912 		 * space but doesn't actually support it; at least the
    1913 		 * documentation doesn't say anything about MSI-X.
   1914 		 */
   1915 		counts[PCI_INTR_TYPE_MSIX]
   1916 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
   1917 	} else {
   1918 		max_type = PCI_INTR_TYPE_MSI;
   1919 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1920 	}
   1921 
   1922 	/* Allocation settings */
   1923 	counts[PCI_INTR_TYPE_MSI] = 1;
   1924 	counts[PCI_INTR_TYPE_INTX] = 1;
   1925 	/* overridden by disable flags */
   1926 	if (wm_disable_msi != 0) {
   1927 		counts[PCI_INTR_TYPE_MSI] = 0;
   1928 		if (wm_disable_msix != 0) {
   1929 			max_type = PCI_INTR_TYPE_INTX;
   1930 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1931 		}
   1932 	} else if (wm_disable_msix != 0) {
   1933 		max_type = PCI_INTR_TYPE_MSI;
   1934 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1935 	}
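
	/*
	 * The allocation below falls back in steps: MSI-X first (when
	 * multiqueue is possible and not disabled), then MSI, then INTx,
	 * releasing the previous allocation before each retry.
	 */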
   1936 
   1937 alloc_retry:
   1938 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1939 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1940 		return;
   1941 	}
   1942 
   1943 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1944 		error = wm_setup_msix(sc);
   1945 		if (error) {
   1946 			pci_intr_release(pc, sc->sc_intrs,
   1947 			    counts[PCI_INTR_TYPE_MSIX]);
   1948 
   1949 			/* Setup for MSI: Disable MSI-X */
   1950 			max_type = PCI_INTR_TYPE_MSI;
   1951 			counts[PCI_INTR_TYPE_MSI] = 1;
   1952 			counts[PCI_INTR_TYPE_INTX] = 1;
   1953 			goto alloc_retry;
   1954 		}
   1955 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1956 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1957 		error = wm_setup_legacy(sc);
   1958 		if (error) {
   1959 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1960 			    counts[PCI_INTR_TYPE_MSI]);
   1961 
   1962 			/* The next try is for INTx: Disable MSI */
   1963 			max_type = PCI_INTR_TYPE_INTX;
   1964 			counts[PCI_INTR_TYPE_INTX] = 1;
   1965 			goto alloc_retry;
   1966 		}
   1967 	} else {
   1968 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1969 		error = wm_setup_legacy(sc);
   1970 		if (error) {
   1971 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1972 			    counts[PCI_INTR_TYPE_INTX]);
   1973 			return;
   1974 		}
   1975 	}
   1976 
   1977 	/*
   1978 	 * Check the function ID (unit number of the chip).
   1979 	 */
   1980 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1981 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1982 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1983 	    || (sc->sc_type == WM_T_82580)
   1984 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1985 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1986 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1987 	else
   1988 		sc->sc_funcid = 0;
   1989 
   1990 	/*
   1991 	 * Determine a few things about the bus we're connected to.
   1992 	 */
   1993 	if (sc->sc_type < WM_T_82543) {
   1994 		/* We don't really know the bus characteristics here. */
   1995 		sc->sc_bus_speed = 33;
   1996 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1997 		/*
    1998 		 * CSA (Communication Streaming Architecture) is about as
    1999 		 * fast as a 32-bit 66MHz PCI bus.
   2000 		 */
   2001 		sc->sc_flags |= WM_F_CSA;
   2002 		sc->sc_bus_speed = 66;
   2003 		aprint_verbose_dev(sc->sc_dev,
   2004 		    "Communication Streaming Architecture\n");
   2005 		if (sc->sc_type == WM_T_82547) {
   2006 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   2007 			callout_setfunc(&sc->sc_txfifo_ch,
   2008 			    wm_82547_txfifo_stall, sc);
   2009 			aprint_verbose_dev(sc->sc_dev,
   2010 			    "using 82547 Tx FIFO stall work-around\n");
   2011 		}
   2012 	} else if (sc->sc_type >= WM_T_82571) {
   2013 		sc->sc_flags |= WM_F_PCIE;
   2014 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   2015 		    && (sc->sc_type != WM_T_ICH10)
   2016 		    && (sc->sc_type != WM_T_PCH)
   2017 		    && (sc->sc_type != WM_T_PCH2)
   2018 		    && (sc->sc_type != WM_T_PCH_LPT)
   2019 		    && (sc->sc_type != WM_T_PCH_SPT)
   2020 		    && (sc->sc_type != WM_T_PCH_CNP)) {
   2021 			/* ICH* and PCH* have no PCIe capability registers */
   2022 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2023 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   2024 				NULL) == 0)
   2025 				aprint_error_dev(sc->sc_dev,
   2026 				    "unable to find PCIe capability\n");
   2027 		}
   2028 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   2029 	} else {
   2030 		reg = CSR_READ(sc, WMREG_STATUS);
   2031 		if (reg & STATUS_BUS64)
   2032 			sc->sc_flags |= WM_F_BUS64;
   2033 		if ((reg & STATUS_PCIX_MODE) != 0) {
   2034 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   2035 
   2036 			sc->sc_flags |= WM_F_PCIX;
   2037 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   2038 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   2039 				aprint_error_dev(sc->sc_dev,
   2040 				    "unable to find PCIX capability\n");
   2041 			else if (sc->sc_type != WM_T_82545_3 &&
   2042 				 sc->sc_type != WM_T_82546_3) {
   2043 				/*
   2044 				 * Work around a problem caused by the BIOS
   2045 				 * setting the max memory read byte count
   2046 				 * incorrectly.
   2047 				 */
   2048 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2049 				    sc->sc_pcixe_capoff + PCIX_CMD);
   2050 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   2051 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   2052 
   2053 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   2054 				    PCIX_CMD_BYTECNT_SHIFT;
   2055 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   2056 				    PCIX_STATUS_MAXB_SHIFT;
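				/*
				 * Example with assumed values: bytecnt == 3
				 * encodes 512 << 3 == 4096 bytes; if maxb ==
				 * 2 (2048 bytes), MMRBC is clamped to 2048
				 * below.
				 */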
   2057 				if (bytecnt > maxb) {
   2058 					aprint_verbose_dev(sc->sc_dev,
   2059 					    "resetting PCI-X MMRBC: %d -> %d\n",
   2060 					    512 << bytecnt, 512 << maxb);
   2061 					pcix_cmd = (pcix_cmd &
   2062 					    ~PCIX_CMD_BYTECNT_MASK) |
   2063 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
   2064 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   2065 					    sc->sc_pcixe_capoff + PCIX_CMD,
   2066 					    pcix_cmd);
   2067 				}
   2068 			}
   2069 		}
   2070 		/*
   2071 		 * The quad port adapter is special; it has a PCIX-PCIX
   2072 		 * bridge on the board, and can run the secondary bus at
   2073 		 * a higher speed.
   2074 		 */
   2075 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2076 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2077 								      : 66;
   2078 		} else if (sc->sc_flags & WM_F_PCIX) {
   2079 			switch (reg & STATUS_PCIXSPD_MASK) {
   2080 			case STATUS_PCIXSPD_50_66:
   2081 				sc->sc_bus_speed = 66;
   2082 				break;
   2083 			case STATUS_PCIXSPD_66_100:
   2084 				sc->sc_bus_speed = 100;
   2085 				break;
   2086 			case STATUS_PCIXSPD_100_133:
   2087 				sc->sc_bus_speed = 133;
   2088 				break;
   2089 			default:
   2090 				aprint_error_dev(sc->sc_dev,
   2091 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2092 				    reg & STATUS_PCIXSPD_MASK);
   2093 				sc->sc_bus_speed = 66;
   2094 				break;
   2095 			}
   2096 		} else
   2097 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2098 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2099 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2100 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2101 	}
   2102 
   2103 	/* clear interesting stat counters */
   2104 	CSR_READ(sc, WMREG_COLC);
   2105 	CSR_READ(sc, WMREG_RXERRC);
   2106 
   2107 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2108 	    || (sc->sc_type >= WM_T_ICH8))
   2109 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2110 	if (sc->sc_type >= WM_T_ICH8)
   2111 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2112 
   2113 	/* Set PHY, NVM mutex related stuff */
   2114 	switch (sc->sc_type) {
   2115 	case WM_T_82542_2_0:
   2116 	case WM_T_82542_2_1:
   2117 	case WM_T_82543:
   2118 	case WM_T_82544:
   2119 		/* Microwire */
   2120 		sc->nvm.read = wm_nvm_read_uwire;
   2121 		sc->sc_nvm_wordsize = 64;
   2122 		sc->sc_nvm_addrbits = 6;
   2123 		break;
   2124 	case WM_T_82540:
   2125 	case WM_T_82545:
   2126 	case WM_T_82545_3:
   2127 	case WM_T_82546:
   2128 	case WM_T_82546_3:
   2129 		/* Microwire */
   2130 		sc->nvm.read = wm_nvm_read_uwire;
   2131 		reg = CSR_READ(sc, WMREG_EECD);
   2132 		if (reg & EECD_EE_SIZE) {
   2133 			sc->sc_nvm_wordsize = 256;
   2134 			sc->sc_nvm_addrbits = 8;
   2135 		} else {
   2136 			sc->sc_nvm_wordsize = 64;
   2137 			sc->sc_nvm_addrbits = 6;
   2138 		}
   2139 		sc->sc_flags |= WM_F_LOCK_EECD;
   2140 		sc->nvm.acquire = wm_get_eecd;
   2141 		sc->nvm.release = wm_put_eecd;
   2142 		break;
   2143 	case WM_T_82541:
   2144 	case WM_T_82541_2:
   2145 	case WM_T_82547:
   2146 	case WM_T_82547_2:
   2147 		reg = CSR_READ(sc, WMREG_EECD);
   2148 		/*
    2149 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on the
    2150 		 * 8254[17], so set the flags and functions before calling it.
   2151 		 */
   2152 		sc->sc_flags |= WM_F_LOCK_EECD;
   2153 		sc->nvm.acquire = wm_get_eecd;
   2154 		sc->nvm.release = wm_put_eecd;
   2155 		if (reg & EECD_EE_TYPE) {
   2156 			/* SPI */
   2157 			sc->nvm.read = wm_nvm_read_spi;
   2158 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2159 			wm_nvm_set_addrbits_size_eecd(sc);
   2160 		} else {
   2161 			/* Microwire */
   2162 			sc->nvm.read = wm_nvm_read_uwire;
   2163 			if ((reg & EECD_EE_ABITS) != 0) {
   2164 				sc->sc_nvm_wordsize = 256;
   2165 				sc->sc_nvm_addrbits = 8;
   2166 			} else {
   2167 				sc->sc_nvm_wordsize = 64;
   2168 				sc->sc_nvm_addrbits = 6;
   2169 			}
   2170 		}
   2171 		break;
   2172 	case WM_T_82571:
   2173 	case WM_T_82572:
   2174 		/* SPI */
   2175 		sc->nvm.read = wm_nvm_read_eerd;
    2176 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2177 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2178 		wm_nvm_set_addrbits_size_eecd(sc);
   2179 		sc->phy.acquire = wm_get_swsm_semaphore;
   2180 		sc->phy.release = wm_put_swsm_semaphore;
   2181 		sc->nvm.acquire = wm_get_nvm_82571;
   2182 		sc->nvm.release = wm_put_nvm_82571;
   2183 		break;
   2184 	case WM_T_82573:
   2185 	case WM_T_82574:
   2186 	case WM_T_82583:
   2187 		sc->nvm.read = wm_nvm_read_eerd;
    2188 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2189 		if (sc->sc_type == WM_T_82573) {
   2190 			sc->phy.acquire = wm_get_swsm_semaphore;
   2191 			sc->phy.release = wm_put_swsm_semaphore;
   2192 			sc->nvm.acquire = wm_get_nvm_82571;
   2193 			sc->nvm.release = wm_put_nvm_82571;
   2194 		} else {
   2195 			/* Both PHY and NVM use the same semaphore. */
   2196 			sc->phy.acquire = sc->nvm.acquire
   2197 			    = wm_get_swfwhw_semaphore;
   2198 			sc->phy.release = sc->nvm.release
   2199 			    = wm_put_swfwhw_semaphore;
   2200 		}
   2201 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2202 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2203 			sc->sc_nvm_wordsize = 2048;
   2204 		} else {
   2205 			/* SPI */
   2206 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2207 			wm_nvm_set_addrbits_size_eecd(sc);
   2208 		}
   2209 		break;
   2210 	case WM_T_82575:
   2211 	case WM_T_82576:
   2212 	case WM_T_82580:
   2213 	case WM_T_I350:
   2214 	case WM_T_I354:
   2215 	case WM_T_80003:
   2216 		/* SPI */
   2217 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2218 		wm_nvm_set_addrbits_size_eecd(sc);
   2219 		if ((sc->sc_type == WM_T_80003)
   2220 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2221 			sc->nvm.read = wm_nvm_read_eerd;
   2222 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2223 		} else {
   2224 			sc->nvm.read = wm_nvm_read_spi;
   2225 			sc->sc_flags |= WM_F_LOCK_EECD;
   2226 		}
   2227 		sc->phy.acquire = wm_get_phy_82575;
   2228 		sc->phy.release = wm_put_phy_82575;
   2229 		sc->nvm.acquire = wm_get_nvm_80003;
   2230 		sc->nvm.release = wm_put_nvm_80003;
   2231 		break;
   2232 	case WM_T_ICH8:
   2233 	case WM_T_ICH9:
   2234 	case WM_T_ICH10:
   2235 	case WM_T_PCH:
   2236 	case WM_T_PCH2:
   2237 	case WM_T_PCH_LPT:
   2238 		sc->nvm.read = wm_nvm_read_ich8;
   2239 		/* FLASH */
   2240 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2241 		sc->sc_nvm_wordsize = 2048;
   2242 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2243 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2244 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2245 			aprint_error_dev(sc->sc_dev,
   2246 			    "can't map FLASH registers\n");
   2247 			goto out;
   2248 		}
   2249 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2250 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2251 		    ICH_FLASH_SECTOR_SIZE;
   2252 		sc->sc_ich8_flash_bank_size =
   2253 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2254 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2255 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2256 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
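		/*
		 * Worked example with a hypothetical GFPREG value of
		 * 0x00070001 and 4KB flash sectors assumed: the base is
		 * sector 1, and the bank area spans (7 + 1) - 1 = 7
		 * sectors, i.e. 7 * 4096 / (2 * 2) = 7168 16-bit words
		 * per bank.
		 */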
   2257 		sc->sc_flashreg_offset = 0;
   2258 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2259 		sc->phy.release = wm_put_swflag_ich8lan;
   2260 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2261 		sc->nvm.release = wm_put_nvm_ich8lan;
   2262 		break;
   2263 	case WM_T_PCH_SPT:
   2264 	case WM_T_PCH_CNP:
   2265 		sc->nvm.read = wm_nvm_read_spt;
   2266 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2267 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2268 		sc->sc_flasht = sc->sc_st;
   2269 		sc->sc_flashh = sc->sc_sh;
   2270 		sc->sc_ich8_flash_base = 0;
   2271 		sc->sc_nvm_wordsize =
   2272 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2273 		    * NVM_SIZE_MULTIPLIER;
    2274 		/* It is the size in bytes; we want words */
   2275 		sc->sc_nvm_wordsize /= 2;
   2276 		/* assume 2 banks */
   2277 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
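		/*
		 * Worked example with a hypothetical STRAP size field of 7
		 * and NVM_SIZE_MULTIPLIER assumed to be 4096: (7 + 1) * 4096
		 * = 32768 bytes = 16384 words, so each of the two assumed
		 * banks holds 8192 words.
		 */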
   2278 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2279 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2280 		sc->phy.release = wm_put_swflag_ich8lan;
   2281 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2282 		sc->nvm.release = wm_put_nvm_ich8lan;
   2283 		break;
   2284 	case WM_T_I210:
   2285 	case WM_T_I211:
    2286 		/* Allow a single clear of the SW semaphore on I210 and newer */
   2287 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
   2288 		if (wm_nvm_flash_presence_i210(sc)) {
   2289 			sc->nvm.read = wm_nvm_read_eerd;
   2290 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2291 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2292 			wm_nvm_set_addrbits_size_eecd(sc);
   2293 		} else {
   2294 			sc->nvm.read = wm_nvm_read_invm;
   2295 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2296 			sc->sc_nvm_wordsize = INVM_SIZE;
   2297 		}
   2298 		sc->phy.acquire = wm_get_phy_82575;
   2299 		sc->phy.release = wm_put_phy_82575;
   2300 		sc->nvm.acquire = wm_get_nvm_80003;
   2301 		sc->nvm.release = wm_put_nvm_80003;
   2302 		break;
   2303 	default:
   2304 		break;
   2305 	}
   2306 
   2307 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2308 	switch (sc->sc_type) {
   2309 	case WM_T_82571:
   2310 	case WM_T_82572:
   2311 		reg = CSR_READ(sc, WMREG_SWSM2);
   2312 		if ((reg & SWSM2_LOCK) == 0) {
   2313 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2314 			force_clear_smbi = true;
   2315 		} else
   2316 			force_clear_smbi = false;
   2317 		break;
   2318 	case WM_T_82573:
   2319 	case WM_T_82574:
   2320 	case WM_T_82583:
   2321 		force_clear_smbi = true;
   2322 		break;
   2323 	default:
   2324 		force_clear_smbi = false;
   2325 		break;
   2326 	}
   2327 	if (force_clear_smbi) {
   2328 		reg = CSR_READ(sc, WMREG_SWSM);
   2329 		if ((reg & SWSM_SMBI) != 0)
   2330 			aprint_error_dev(sc->sc_dev,
   2331 			    "Please update the Bootagent\n");
   2332 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2333 	}
   2334 
   2335 	/*
    2336 	 * Defer printing the EEPROM type until after verifying the checksum.
   2337 	 * This allows the EEPROM type to be printed correctly in the case
   2338 	 * that no EEPROM is attached.
   2339 	 */
   2340 	/*
   2341 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2342 	 * this for later, so we can fail future reads from the EEPROM.
   2343 	 */
   2344 	if (wm_nvm_validate_checksum(sc)) {
   2345 		/*
    2346 		 * Validate again, because some PCI-e parts fail the
    2347 		 * first check due to the link being in a sleep state.
   2348 		 */
   2349 		if (wm_nvm_validate_checksum(sc))
   2350 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2351 	}
   2352 
   2353 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2354 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2355 	else {
   2356 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2357 		    sc->sc_nvm_wordsize);
   2358 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2359 			aprint_verbose("iNVM");
   2360 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2361 			aprint_verbose("FLASH(HW)");
   2362 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2363 			aprint_verbose("FLASH");
   2364 		else {
   2365 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2366 				eetype = "SPI";
   2367 			else
   2368 				eetype = "MicroWire";
   2369 			aprint_verbose("(%d address bits) %s EEPROM",
   2370 			    sc->sc_nvm_addrbits, eetype);
   2371 		}
   2372 	}
   2373 	wm_nvm_version(sc);
   2374 	aprint_verbose("\n");
   2375 
   2376 	/*
    2377 	 * XXX The first call to wm_gmii_setup_phytype. The result might be
   2378 	 * incorrect.
   2379 	 */
   2380 	wm_gmii_setup_phytype(sc, 0, 0);
   2381 
   2382 	/* Reset the chip to a known state. */
   2383 	wm_reset(sc);
   2384 
   2385 	/*
   2386 	 * Check for I21[01] PLL workaround.
   2387 	 *
   2388 	 * Three cases:
   2389 	 * a) Chip is I211.
   2390 	 * b) Chip is I210 and it uses INVM (not FLASH).
   2391 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
   2392 	 */
   2393 	if (sc->sc_type == WM_T_I211)
   2394 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2395 	if (sc->sc_type == WM_T_I210) {
   2396 		if (!wm_nvm_flash_presence_i210(sc))
   2397 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2398 		else if ((sc->sc_nvm_ver_major < 3)
   2399 		    || ((sc->sc_nvm_ver_major == 3)
   2400 			&& (sc->sc_nvm_ver_minor < 25))) {
   2401 			aprint_verbose_dev(sc->sc_dev,
   2402 			    "ROM image version %d.%d is older than 3.25\n",
   2403 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2404 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2405 		}
   2406 	}
   2407 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2408 		wm_pll_workaround_i210(sc);
   2409 
   2410 	wm_get_wakeup(sc);
   2411 
   2412 	/* Non-AMT based hardware can now take control from firmware */
   2413 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2414 		wm_get_hw_control(sc);
   2415 
   2416 	/*
    2417 	 * Read the Ethernet address from the EEPROM, unless it is found
    2418 	 * first in the device properties.
   2419 	 */
   2420 	ea = prop_dictionary_get(dict, "mac-address");
   2421 	if (ea != NULL) {
   2422 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2423 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2424 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2425 	} else {
   2426 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2427 			aprint_error_dev(sc->sc_dev,
   2428 			    "unable to read Ethernet address\n");
   2429 			goto out;
   2430 		}
   2431 	}
   2432 
   2433 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2434 	    ether_sprintf(enaddr));
   2435 
   2436 	/*
   2437 	 * Read the config info from the EEPROM, and set up various
   2438 	 * bits in the control registers based on their contents.
   2439 	 */
   2440 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2441 	if (pn != NULL) {
   2442 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2443 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2444 	} else {
   2445 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2446 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2447 			goto out;
   2448 		}
   2449 	}
   2450 
   2451 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2452 	if (pn != NULL) {
   2453 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2454 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2455 	} else {
   2456 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2457 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2458 			goto out;
   2459 		}
   2460 	}
   2461 
   2462 	/* check for WM_F_WOL */
   2463 	switch (sc->sc_type) {
   2464 	case WM_T_82542_2_0:
   2465 	case WM_T_82542_2_1:
   2466 	case WM_T_82543:
   2467 		/* dummy? */
   2468 		eeprom_data = 0;
   2469 		apme_mask = NVM_CFG3_APME;
   2470 		break;
   2471 	case WM_T_82544:
   2472 		apme_mask = NVM_CFG2_82544_APM_EN;
   2473 		eeprom_data = cfg2;
   2474 		break;
   2475 	case WM_T_82546:
   2476 	case WM_T_82546_3:
   2477 	case WM_T_82571:
   2478 	case WM_T_82572:
   2479 	case WM_T_82573:
   2480 	case WM_T_82574:
   2481 	case WM_T_82583:
   2482 	case WM_T_80003:
   2483 	default:
   2484 		apme_mask = NVM_CFG3_APME;
   2485 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2486 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2487 		break;
   2488 	case WM_T_82575:
   2489 	case WM_T_82576:
   2490 	case WM_T_82580:
   2491 	case WM_T_I350:
   2492 	case WM_T_I354: /* XXX ok? */
   2493 	case WM_T_ICH8:
   2494 	case WM_T_ICH9:
   2495 	case WM_T_ICH10:
   2496 	case WM_T_PCH:
   2497 	case WM_T_PCH2:
   2498 	case WM_T_PCH_LPT:
   2499 	case WM_T_PCH_SPT:
   2500 	case WM_T_PCH_CNP:
   2501 		/* XXX The funcid should be checked on some devices */
   2502 		apme_mask = WUC_APME;
   2503 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2504 		break;
   2505 	}
   2506 
    2507 	/* Check the WM_F_WOL flag after the EEPROM settings have been read */
   2508 	if ((eeprom_data & apme_mask) != 0)
   2509 		sc->sc_flags |= WM_F_WOL;
   2510 
   2511 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2512 		/* Check NVM for autonegotiation */
   2513 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2514 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2515 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2516 		}
   2517 	}
   2518 
   2519 	/*
    2520 	 * XXX need special handling for some multi-port cards
    2521 	 * to disable a particular port.
   2522 	 */
   2523 
   2524 	if (sc->sc_type >= WM_T_82544) {
   2525 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2526 		if (pn != NULL) {
   2527 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2528 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2529 		} else {
   2530 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2531 				aprint_error_dev(sc->sc_dev,
   2532 				    "unable to read SWDPIN\n");
   2533 				goto out;
   2534 			}
   2535 		}
   2536 	}
   2537 
   2538 	if (cfg1 & NVM_CFG1_ILOS)
   2539 		sc->sc_ctrl |= CTRL_ILOS;
   2540 
   2541 	/*
   2542 	 * XXX
    2543 	 * This code isn't correct because pins 2 and 3 are located
    2544 	 * in different positions on newer chips. Check all the datasheets.
    2545 	 *
    2546 	 * Until this problem is resolved, only handle chips up to the 82580.
   2547 	 */
   2548 	if (sc->sc_type <= WM_T_82580) {
   2549 		if (sc->sc_type >= WM_T_82544) {
   2550 			sc->sc_ctrl |=
   2551 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2552 			    CTRL_SWDPIO_SHIFT;
   2553 			sc->sc_ctrl |=
   2554 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2555 			    CTRL_SWDPINS_SHIFT;
   2556 		} else {
   2557 			sc->sc_ctrl |=
   2558 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2559 			    CTRL_SWDPIO_SHIFT;
   2560 		}
   2561 	}
   2562 
   2563 	/* XXX For other than 82580? */
   2564 	if (sc->sc_type == WM_T_82580) {
   2565 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2566 		if (nvmword & __BIT(13))
   2567 			sc->sc_ctrl |= CTRL_ILOS;
   2568 	}
   2569 
   2570 #if 0
   2571 	if (sc->sc_type >= WM_T_82544) {
   2572 		if (cfg1 & NVM_CFG1_IPS0)
   2573 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2574 		if (cfg1 & NVM_CFG1_IPS1)
   2575 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2576 		sc->sc_ctrl_ext |=
   2577 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2578 		    CTRL_EXT_SWDPIO_SHIFT;
   2579 		sc->sc_ctrl_ext |=
   2580 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2581 		    CTRL_EXT_SWDPINS_SHIFT;
   2582 	} else {
   2583 		sc->sc_ctrl_ext |=
   2584 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2585 		    CTRL_EXT_SWDPIO_SHIFT;
   2586 	}
   2587 #endif
   2588 
   2589 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2590 #if 0
   2591 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2592 #endif
   2593 
   2594 	if (sc->sc_type == WM_T_PCH) {
   2595 		uint16_t val;
   2596 
   2597 		/* Save the NVM K1 bit setting */
   2598 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2599 
   2600 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2601 			sc->sc_nvm_k1_enabled = 1;
   2602 		else
   2603 			sc->sc_nvm_k1_enabled = 0;
   2604 	}
   2605 
    2606 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2607 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2608 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2609 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2610 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
   2611 	    || sc->sc_type == WM_T_82573
   2612 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2613 		/* Copper only */
   2614 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2615 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2616 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2617 	    || (sc->sc_type == WM_T_I211)) {
   2618 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2619 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2620 		switch (link_mode) {
   2621 		case CTRL_EXT_LINK_MODE_1000KX:
   2622 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2623 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2624 			break;
   2625 		case CTRL_EXT_LINK_MODE_SGMII:
   2626 			if (wm_sgmii_uses_mdio(sc)) {
   2627 				aprint_verbose_dev(sc->sc_dev,
   2628 				    "SGMII(MDIO)\n");
   2629 				sc->sc_flags |= WM_F_SGMII;
   2630 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2631 				break;
   2632 			}
   2633 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2634 			/*FALLTHROUGH*/
   2635 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2636 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2637 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2638 				if (link_mode
   2639 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2640 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2641 					sc->sc_flags |= WM_F_SGMII;
   2642 				} else {
   2643 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2644 					aprint_verbose_dev(sc->sc_dev,
   2645 					    "SERDES\n");
   2646 				}
   2647 				break;
   2648 			}
   2649 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2650 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2651 
   2652 			/* Change current link mode setting */
   2653 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2654 			switch (sc->sc_mediatype) {
   2655 			case WM_MEDIATYPE_COPPER:
   2656 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2657 				break;
   2658 			case WM_MEDIATYPE_SERDES:
   2659 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2660 				break;
   2661 			default:
   2662 				break;
   2663 			}
   2664 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2665 			break;
   2666 		case CTRL_EXT_LINK_MODE_GMII:
   2667 		default:
   2668 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2669 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2670 			break;
   2671 		}
   2672 
    2674 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2675 			reg |= CTRL_EXT_I2C_ENA;
    2676 		else
    2677 			reg &= ~CTRL_EXT_I2C_ENA;
   2678 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2679 	} else if (sc->sc_type < WM_T_82543 ||
   2680 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2681 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2682 			aprint_error_dev(sc->sc_dev,
   2683 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2684 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2685 		}
   2686 	} else {
   2687 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2688 			aprint_error_dev(sc->sc_dev,
   2689 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2690 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2691 		}
   2692 	}
   2693 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2694 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2695 
   2696 	/* Set device properties (macflags) */
   2697 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2698 
   2699 	/* Initialize the media structures accordingly. */
   2700 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2701 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2702 	else
   2703 		wm_tbi_mediainit(sc); /* All others */
   2704 
   2705 	ifp = &sc->sc_ethercom.ec_if;
   2706 	xname = device_xname(sc->sc_dev);
   2707 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2708 	ifp->if_softc = sc;
   2709 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2710 #ifdef WM_MPSAFE
   2711 	ifp->if_extflags = IFEF_MPSAFE;
   2712 #endif
   2713 	ifp->if_ioctl = wm_ioctl;
   2714 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2715 		ifp->if_start = wm_nq_start;
   2716 		/*
    2717 		 * When the number of CPUs is one and the controller can use
    2718 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2719 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and the
    2720 		 * other for link status changes.
   2721 		 * In this situation, wm_nq_transmit() is disadvantageous
   2722 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2723 		 */
   2724 		if (wm_is_using_multiqueue(sc))
   2725 			ifp->if_transmit = wm_nq_transmit;
   2726 	} else {
   2727 		ifp->if_start = wm_start;
   2728 		/*
    2729 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2730 		 */
   2731 		if (wm_is_using_multiqueue(sc))
   2732 			ifp->if_transmit = wm_transmit;
   2733 	}
    2734 	/* wm(4) does not use ifp->if_watchdog; it uses wm_tick as a watchdog. */
   2735 	ifp->if_init = wm_init;
   2736 	ifp->if_stop = wm_stop;
   2737 	IFQ_SET_MAXLEN(&ifp->if_snd, uimax(WM_IFQUEUELEN, IFQ_MAXLEN));
   2738 	IFQ_SET_READY(&ifp->if_snd);
   2739 
   2740 	/* Check for jumbo frame */
   2741 	switch (sc->sc_type) {
   2742 	case WM_T_82573:
   2743 		/* XXX limited to 9234 if ASPM is disabled */
   2744 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2745 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2746 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2747 		break;
   2748 	case WM_T_82571:
   2749 	case WM_T_82572:
   2750 	case WM_T_82574:
   2751 	case WM_T_82583:
   2752 	case WM_T_82575:
   2753 	case WM_T_82576:
   2754 	case WM_T_82580:
   2755 	case WM_T_I350:
   2756 	case WM_T_I354:
   2757 	case WM_T_I210:
   2758 	case WM_T_I211:
   2759 	case WM_T_80003:
   2760 	case WM_T_ICH9:
   2761 	case WM_T_ICH10:
   2762 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2763 	case WM_T_PCH_LPT:
   2764 	case WM_T_PCH_SPT:
   2765 	case WM_T_PCH_CNP:
   2766 		/* XXX limited to 9234 */
   2767 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2768 		break;
   2769 	case WM_T_PCH:
   2770 		/* XXX limited to 4096 */
   2771 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2772 		break;
   2773 	case WM_T_82542_2_0:
   2774 	case WM_T_82542_2_1:
   2775 	case WM_T_ICH8:
   2776 		/* No support for jumbo frame */
   2777 		break;
   2778 	default:
   2779 		/* ETHER_MAX_LEN_JUMBO */
   2780 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2781 		break;
   2782 	}
   2783 
   2784 	/* If we're a i82543 or greater, we can support VLANs. */
   2785 	if (sc->sc_type >= WM_T_82543)
   2786 		sc->sc_ethercom.ec_capabilities |=
   2787 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2788 
   2789 	/*
    2790 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2791 	 * on i82543 and later.
   2792 	 */
   2793 	if (sc->sc_type >= WM_T_82543) {
   2794 		ifp->if_capabilities |=
   2795 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2796 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2797 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2798 		    IFCAP_CSUM_TCPv6_Tx |
   2799 		    IFCAP_CSUM_UDPv6_Tx;
   2800 	}
   2801 
   2802 	/*
   2803 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2804 	 *
   2805 	 *	82541GI (8086:1076) ... no
   2806 	 *	82572EI (8086:10b9) ... yes
   2807 	 */
   2808 	if (sc->sc_type >= WM_T_82571) {
   2809 		ifp->if_capabilities |=
   2810 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2811 	}
   2812 
   2813 	/*
   2814 	 * If we're a i82544 or greater (except i82547), we can do
   2815 	 * TCP segmentation offload.
   2816 	 */
   2817 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2818 		ifp->if_capabilities |= IFCAP_TSOv4;
   2819 	}
   2820 
   2821 	if (sc->sc_type >= WM_T_82571) {
   2822 		ifp->if_capabilities |= IFCAP_TSOv6;
   2823 	}
   2824 
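         	/*
         	 * Limits on how many Tx/Rx descriptors are processed per
         	 * softint call and per interrupt.
         	 */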
   2825 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
   2826 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
   2827 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2828 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2829 
   2830 #ifdef WM_MPSAFE
   2831 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2832 #else
   2833 	sc->sc_core_lock = NULL;
   2834 #endif
   2835 
   2836 	/* Attach the interface. */
   2837 	error = if_initialize(ifp);
   2838 	if (error != 0) {
   2839 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
   2840 		    error);
   2841 		return; /* Error */
   2842 	}
   2843 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2844 	ether_ifattach(ifp, enaddr);
   2845 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2846 	if_register(ifp);
   2847 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2848 	    RND_FLAG_DEFAULT);
   2849 
   2850 #ifdef WM_EVENT_COUNTERS
   2851 	/* Attach event counters. */
   2852 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2853 	    NULL, xname, "linkintr");
   2854 
   2855 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2856 	    NULL, xname, "tx_xoff");
   2857 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2858 	    NULL, xname, "tx_xon");
   2859 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2860 	    NULL, xname, "rx_xoff");
   2861 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2862 	    NULL, xname, "rx_xon");
   2863 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2864 	    NULL, xname, "rx_macctl");
   2865 #endif /* WM_EVENT_COUNTERS */
   2866 
   2867 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2868 		pmf_class_network_register(self, ifp);
   2869 	else
   2870 		aprint_error_dev(self, "couldn't establish power handler\n");
   2871 
   2872 	sc->sc_flags |= WM_F_ATTACHED;
   2873  out:
   2874 	return;
   2875 }
   2876 
   2877 /* The detach function (ca_detach) */
   2878 static int
   2879 wm_detach(device_t self, int flags __unused)
   2880 {
   2881 	struct wm_softc *sc = device_private(self);
   2882 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2883 	int i;
   2884 
   2885 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2886 		return 0;
   2887 
    2888 	/* Stop the interface; callouts are halted inside wm_stop(). */
   2889 	wm_stop(ifp, 1);
   2890 
   2891 	pmf_device_deregister(self);
   2892 
   2893 #ifdef WM_EVENT_COUNTERS
   2894 	evcnt_detach(&sc->sc_ev_linkintr);
   2895 
   2896 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2897 	evcnt_detach(&sc->sc_ev_tx_xon);
   2898 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2899 	evcnt_detach(&sc->sc_ev_rx_xon);
   2900 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2901 #endif /* WM_EVENT_COUNTERS */
   2902 
   2903 	/* Tell the firmware about the release */
   2904 	WM_CORE_LOCK(sc);
   2905 	wm_release_manageability(sc);
   2906 	wm_release_hw_control(sc);
   2907 	wm_enable_wakeup(sc);
   2908 	WM_CORE_UNLOCK(sc);
   2909 
   2910 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2911 
   2912 	/* Delete all remaining media. */
   2913 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2914 
   2915 	ether_ifdetach(ifp);
   2916 	if_detach(ifp);
   2917 	if_percpuq_destroy(sc->sc_ipq);
   2918 
   2919 	/* Unload RX dmamaps and free mbufs */
   2920 	for (i = 0; i < sc->sc_nqueues; i++) {
   2921 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2922 		mutex_enter(rxq->rxq_lock);
   2923 		wm_rxdrain(rxq);
   2924 		mutex_exit(rxq->rxq_lock);
   2925 	}
   2926 	/* Must unlock here */
   2927 
   2928 	/* Disestablish the interrupt handler */
   2929 	for (i = 0; i < sc->sc_nintrs; i++) {
   2930 		if (sc->sc_ihs[i] != NULL) {
   2931 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2932 			sc->sc_ihs[i] = NULL;
   2933 		}
   2934 	}
   2935 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2936 
   2937 	wm_free_txrx_queues(sc);
   2938 
   2939 	/* Unmap the registers */
   2940 	if (sc->sc_ss) {
   2941 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2942 		sc->sc_ss = 0;
   2943 	}
   2944 	if (sc->sc_ios) {
   2945 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2946 		sc->sc_ios = 0;
   2947 	}
   2948 	if (sc->sc_flashs) {
   2949 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2950 		sc->sc_flashs = 0;
   2951 	}
   2952 
   2953 	if (sc->sc_core_lock)
   2954 		mutex_obj_free(sc->sc_core_lock);
   2955 	if (sc->sc_ich_phymtx)
   2956 		mutex_obj_free(sc->sc_ich_phymtx);
   2957 	if (sc->sc_ich_nvmmtx)
   2958 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2959 
   2960 	return 0;
   2961 }
   2962 
   2963 static bool
   2964 wm_suspend(device_t self, const pmf_qual_t *qual)
   2965 {
   2966 	struct wm_softc *sc = device_private(self);
   2967 
   2968 	wm_release_manageability(sc);
   2969 	wm_release_hw_control(sc);
   2970 	wm_enable_wakeup(sc);
   2971 
   2972 	return true;
   2973 }
   2974 
   2975 static bool
   2976 wm_resume(device_t self, const pmf_qual_t *qual)
   2977 {
   2978 	struct wm_softc *sc = device_private(self);
   2979 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2980 
   2981 	if (sc->sc_type >= WM_T_PCH2)
   2982 		wm_resume_workarounds_pchlan(sc);
   2983 	if ((ifp->if_flags & IFF_UP) == 0) {
   2984 		wm_reset(sc);
   2985 		/* Non-AMT based hardware can now take control from firmware */
   2986 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2987 			wm_get_hw_control(sc);
   2988 		wm_init_manageability(sc);
   2989 	} else {
   2990 		/*
   2991 		 * We called pmf_class_network_register(), so if_init() is
   2992 		 * automatically called when IFF_UP. wm_reset(),
   2993 		 * wm_get_hw_control() and wm_init_manageability() are called
   2994 		 * via wm_init().
   2995 		 */
   2996 	}
   2997 
   2998 	return true;
   2999 }
   3000 
   3001 /*
   3002  * wm_watchdog:		[ifnet interface function]
   3003  *
   3004  *	Watchdog timer handler.
   3005  */
   3006 static void
   3007 wm_watchdog(struct ifnet *ifp)
   3008 {
   3009 	int qid;
   3010 	struct wm_softc *sc = ifp->if_softc;
    3011 	uint16_t hang_queue = 0; /* Bitmap of hung queues; max is 16 (82576) */
   3012 
   3013 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   3014 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   3015 
   3016 		wm_watchdog_txq(ifp, txq, &hang_queue);
   3017 	}
   3018 
   3019 	/*
    3020 	 * If any of the queues hung up, reset the interface.
   3021 	 */
   3022 	if (hang_queue != 0) {
   3023 		(void) wm_init(ifp);
   3024 
   3025 		/*
    3026 		 * Some upper layer processing still calls
    3027 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   3028 		 */
   3029 		/* Try to get more packets going. */
   3030 		ifp->if_start(ifp);
   3031 	}
   3032 }
   3033 
   3034 
   3035 static void
   3036 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
   3037 {
   3038 
   3039 	mutex_enter(txq->txq_lock);
   3040 	if (txq->txq_sending &&
   3041 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
   3042 		wm_watchdog_txq_locked(ifp, txq, hang);
   3043 	}
   3044 	mutex_exit(txq->txq_lock);
   3045 }
   3046 
   3047 static void
   3048 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   3049     uint16_t *hang)
   3050 {
   3051 	struct wm_softc *sc = ifp->if_softc;
   3052 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   3053 
   3054 	KASSERT(mutex_owned(txq->txq_lock));
   3055 
   3056 	/*
   3057 	 * Since we're using delayed interrupts, sweep up
   3058 	 * before we report an error.
   3059 	 */
   3060 	wm_txeof(txq, UINT_MAX);
   3061 
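         	/* Still pending after the sweep: mark this queue as hung. */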
   3062 	if (txq->txq_sending)
   3063 		*hang |= __BIT(wmq->wmq_id);
   3064 
   3065 	if (txq->txq_free == WM_NTXDESC(txq)) {
   3066 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
   3067 		    device_xname(sc->sc_dev));
   3068 	} else {
   3069 #ifdef WM_DEBUG
   3070 		int i, j;
   3071 		struct wm_txsoft *txs;
   3072 #endif
   3073 		log(LOG_ERR,
   3074 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   3075 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   3076 		    txq->txq_next);
   3077 		ifp->if_oerrors++;
   3078 #ifdef WM_DEBUG
   3079 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   3080 		    i = WM_NEXTTXS(txq, i)) {
   3081 		    txs = &txq->txq_soft[i];
   3082 		    printf("txs %d tx %d -> %d\n",
   3083 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   3084 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
   3085 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3086 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3087 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   3088 				    printf("\t %#08x%08x\n",
   3089 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   3090 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   3091 			    } else {
   3092 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   3093 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
   3094 					txq->txq_descs[j].wtx_addr.wa_low);
   3095 				    printf("\t %#04x%02x%02x%08x\n",
   3096 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
   3097 					txq->txq_descs[j].wtx_fields.wtxu_options,
   3098 					txq->txq_descs[j].wtx_fields.wtxu_status,
   3099 					txq->txq_descs[j].wtx_cmdlen);
   3100 			    }
   3101 			if (j == txs->txs_lastdesc)
   3102 				break;
   3103 			}
   3104 		}
   3105 #endif
   3106 	}
   3107 }
   3108 
   3109 /*
   3110  * wm_tick:
   3111  *
   3112  *	One second timer, used to check link status, sweep up
   3113  *	completed transmit jobs, etc.
   3114  */
   3115 static void
   3116 wm_tick(void *arg)
   3117 {
   3118 	struct wm_softc *sc = arg;
   3119 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3120 #ifndef WM_MPSAFE
   3121 	int s = splnet();
   3122 #endif
   3123 
   3124 	WM_CORE_LOCK(sc);
   3125 
   3126 	if (sc->sc_core_stopping) {
   3127 		WM_CORE_UNLOCK(sc);
   3128 #ifndef WM_MPSAFE
   3129 		splx(s);
   3130 #endif
   3131 		return;
   3132 	}
   3133 
   3134 	if (sc->sc_type >= WM_T_82542_2_1) {
   3135 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   3136 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   3137 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   3138 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   3139 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   3140 	}
   3141 
   3142 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   3143 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   3144 	    + CSR_READ(sc, WMREG_CRCERRS)
   3145 	    + CSR_READ(sc, WMREG_ALGNERRC)
   3146 	    + CSR_READ(sc, WMREG_SYMERRC)
   3147 	    + CSR_READ(sc, WMREG_RXERRC)
   3148 	    + CSR_READ(sc, WMREG_SEC)
   3149 	    + CSR_READ(sc, WMREG_CEXTERR)
   3150 	    + CSR_READ(sc, WMREG_RLEC);
   3151 	/*
    3152 	 * WMREG_RNBC is incremented when there are no available buffers in host
    3153 	 * memory. It is not the number of dropped packets, because the
    3154 	 * ethernet controller can still receive packets in that case as long
    3155 	 * as there is space in the PHY's FIFO.
    3156 	 *
    3157 	 * If you want to count WMREG_RNBC events, you should use your own
    3158 	 * EVCNT instead of if_iqdrops.
   3159 	 */
   3160 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3161 
   3162 	if (sc->sc_flags & WM_F_HAS_MII)
   3163 		mii_tick(&sc->sc_mii);
   3164 	else if ((sc->sc_type >= WM_T_82575)
   3165 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3166 		wm_serdes_tick(sc);
   3167 	else
   3168 		wm_tbi_tick(sc);
   3169 
   3170 	WM_CORE_UNLOCK(sc);
   3171 
   3172 	wm_watchdog(ifp);
   3173 
   3174 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3175 }
   3176 
   3177 static int
   3178 wm_ifflags_cb(struct ethercom *ec)
   3179 {
   3180 	struct ifnet *ifp = &ec->ec_if;
   3181 	struct wm_softc *sc = ifp->if_softc;
   3182 	int rc = 0;
   3183 
   3184 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3185 		device_xname(sc->sc_dev), __func__));
   3186 
   3187 	WM_CORE_LOCK(sc);
   3188 
   3189 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3190 	sc->sc_if_flags = ifp->if_flags;
   3191 
   3192 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3193 		rc = ENETRESET;
   3194 		goto out;
   3195 	}
   3196 
   3197 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3198 		wm_set_filter(sc);
   3199 
   3200 	wm_set_vlan(sc);
   3201 
   3202 out:
   3203 	WM_CORE_UNLOCK(sc);
   3204 
   3205 	return rc;
   3206 }
   3207 
   3208 /*
   3209  * wm_ioctl:		[ifnet interface function]
   3210  *
   3211  *	Handle control requests from the operator.
   3212  */
   3213 static int
   3214 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3215 {
   3216 	struct wm_softc *sc = ifp->if_softc;
   3217 	struct ifreq *ifr = (struct ifreq *) data;
   3218 	struct ifaddr *ifa = (struct ifaddr *)data;
   3219 	struct sockaddr_dl *sdl;
   3220 	int s, error;
   3221 
   3222 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3223 		device_xname(sc->sc_dev), __func__));
   3224 
   3225 #ifndef WM_MPSAFE
   3226 	s = splnet();
   3227 #endif
   3228 	switch (cmd) {
   3229 	case SIOCSIFMEDIA:
   3230 	case SIOCGIFMEDIA:
   3231 		WM_CORE_LOCK(sc);
   3232 		/* Flow control requires full-duplex mode. */
   3233 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3234 		    (ifr->ifr_media & IFM_FDX) == 0)
   3235 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3236 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3237 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3238 				/* We can do both TXPAUSE and RXPAUSE. */
   3239 				ifr->ifr_media |=
   3240 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3241 			}
   3242 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3243 		}
   3244 		WM_CORE_UNLOCK(sc);
   3245 #ifdef WM_MPSAFE
   3246 		s = splnet();
   3247 #endif
   3248 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3249 #ifdef WM_MPSAFE
   3250 		splx(s);
   3251 #endif
   3252 		break;
   3253 	case SIOCINITIFADDR:
   3254 		WM_CORE_LOCK(sc);
   3255 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3256 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3257 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3258 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3259 			/* unicast address is first multicast entry */
   3260 			wm_set_filter(sc);
   3261 			error = 0;
   3262 			WM_CORE_UNLOCK(sc);
   3263 			break;
   3264 		}
   3265 		WM_CORE_UNLOCK(sc);
   3266 		/*FALLTHROUGH*/
   3267 	default:
   3268 #ifdef WM_MPSAFE
   3269 		s = splnet();
   3270 #endif
   3271 		/* It may call wm_start, so unlock here */
   3272 		error = ether_ioctl(ifp, cmd, data);
   3273 #ifdef WM_MPSAFE
   3274 		splx(s);
   3275 #endif
   3276 		if (error != ENETRESET)
   3277 			break;
   3278 
   3279 		error = 0;
   3280 
   3281 		if (cmd == SIOCSIFCAP)
   3282 			error = (*ifp->if_init)(ifp);
   3283 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3284 			;
   3285 		else if (ifp->if_flags & IFF_RUNNING) {
   3286 			/*
   3287 			 * Multicast list has changed; set the hardware filter
   3288 			 * accordingly.
   3289 			 */
   3290 			WM_CORE_LOCK(sc);
   3291 			wm_set_filter(sc);
   3292 			WM_CORE_UNLOCK(sc);
   3293 		}
   3294 		break;
   3295 	}
   3296 
   3297 #ifndef WM_MPSAFE
   3298 	splx(s);
   3299 #endif
   3300 	return error;
   3301 }
   3302 
   3303 /* MAC address related */
   3304 
   3305 /*
    3306  * Get the offset of the MAC address and return it.
    3307  * If an error occurs, use offset 0.
   3308  */
   3309 static uint16_t
   3310 wm_check_alt_mac_addr(struct wm_softc *sc)
   3311 {
   3312 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3313 	uint16_t offset = NVM_OFF_MACADDR;
   3314 
   3315 	/* Try to read alternative MAC address pointer */
   3316 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3317 		return 0;
   3318 
    3319 	/* Check whether the pointer is valid. */
   3320 	if ((offset == 0x0000) || (offset == 0xffff))
   3321 		return 0;
   3322 
   3323 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3324 	/*
    3325 	 * Check whether the alternative MAC address is valid.
    3326 	 * Some cards have a non-0xffff pointer but don't actually
    3327 	 * use an alternative MAC address.
   3328 	 *
   3329 	 * Check whether the broadcast bit is set or not.
   3330 	 */
   3331 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3332 		if (((myea[0] & 0xff) & 0x01) == 0)
   3333 			return offset; /* Found */
   3334 
   3335 	/* Not found */
   3336 	return 0;
   3337 }
   3338 
   3339 static int
   3340 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3341 {
   3342 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3343 	uint16_t offset = NVM_OFF_MACADDR;
   3344 	int do_invert = 0;
   3345 
   3346 	switch (sc->sc_type) {
   3347 	case WM_T_82580:
   3348 	case WM_T_I350:
   3349 	case WM_T_I354:
   3350 		/* EEPROM Top Level Partitioning */
   3351 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3352 		break;
   3353 	case WM_T_82571:
   3354 	case WM_T_82575:
   3355 	case WM_T_82576:
   3356 	case WM_T_80003:
   3357 	case WM_T_I210:
   3358 	case WM_T_I211:
   3359 		offset = wm_check_alt_mac_addr(sc);
   3360 		if (offset == 0)
   3361 			if ((sc->sc_funcid & 0x01) == 1)
   3362 				do_invert = 1;
   3363 		break;
   3364 	default:
   3365 		if ((sc->sc_funcid & 0x01) == 1)
   3366 			do_invert = 1;
   3367 		break;
   3368 	}
   3369 
   3370 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3371 		goto bad;
   3372 
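         	/* The NVM stores the MAC address as three little-endian 16-bit words. */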
   3373 	enaddr[0] = myea[0] & 0xff;
   3374 	enaddr[1] = myea[0] >> 8;
   3375 	enaddr[2] = myea[1] & 0xff;
   3376 	enaddr[3] = myea[1] >> 8;
   3377 	enaddr[4] = myea[2] & 0xff;
   3378 	enaddr[5] = myea[2] >> 8;
   3379 
   3380 	/*
   3381 	 * Toggle the LSB of the MAC address on the second port
   3382 	 * of some dual port cards.
   3383 	 */
   3384 	if (do_invert != 0)
   3385 		enaddr[5] ^= 1;
   3386 
   3387 	return 0;
   3388 
   3389  bad:
   3390 	return -1;
   3391 }
   3392 
   3393 /*
   3394  * wm_set_ral:
   3395  *
    3396  *	Set an entry in the receive address list.
   3397  */
   3398 static void
   3399 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3400 {
   3401 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3402 	uint32_t wlock_mac;
   3403 	int rv;
   3404 
   3405 	if (enaddr != NULL) {
   3406 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3407 		    (enaddr[3] << 24);
   3408 		ral_hi = enaddr[4] | (enaddr[5] << 8);
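         		/* Mark the entry valid */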
   3409 		ral_hi |= RAL_AV;
   3410 	} else {
   3411 		ral_lo = 0;
   3412 		ral_hi = 0;
   3413 	}
   3414 
   3415 	switch (sc->sc_type) {
   3416 	case WM_T_82542_2_0:
   3417 	case WM_T_82542_2_1:
   3418 	case WM_T_82543:
   3419 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3420 		CSR_WRITE_FLUSH(sc);
   3421 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3422 		CSR_WRITE_FLUSH(sc);
   3423 		break;
   3424 	case WM_T_PCH2:
   3425 	case WM_T_PCH_LPT:
   3426 	case WM_T_PCH_SPT:
   3427 	case WM_T_PCH_CNP:
   3428 		if (idx == 0) {
   3429 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3430 			CSR_WRITE_FLUSH(sc);
   3431 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3432 			CSR_WRITE_FLUSH(sc);
   3433 			return;
   3434 		}
   3435 		if (sc->sc_type != WM_T_PCH2) {
   3436 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3437 			    FWSM_WLOCK_MAC);
   3438 			addrl = WMREG_SHRAL(idx - 1);
   3439 			addrh = WMREG_SHRAH(idx - 1);
   3440 		} else {
   3441 			wlock_mac = 0;
   3442 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3443 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3444 		}
   3445 
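         		/* Write the entry only if it is not protected by WLOCK_MAC */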
   3446 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3447 			rv = wm_get_swflag_ich8lan(sc);
   3448 			if (rv != 0)
   3449 				return;
   3450 			CSR_WRITE(sc, addrl, ral_lo);
   3451 			CSR_WRITE_FLUSH(sc);
   3452 			CSR_WRITE(sc, addrh, ral_hi);
   3453 			CSR_WRITE_FLUSH(sc);
   3454 			wm_put_swflag_ich8lan(sc);
   3455 		}
   3456 
   3457 		break;
   3458 	default:
   3459 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3460 		CSR_WRITE_FLUSH(sc);
   3461 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3462 		CSR_WRITE_FLUSH(sc);
   3463 		break;
   3464 	}
   3465 }
   3466 
   3467 /*
   3468  * wm_mchash:
   3469  *
   3470  *	Compute the hash of the multicast address for the 4096-bit
   3471  *	multicast filter.
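          *
          *	The hash is taken from bits of the last two bytes of the
          *	address, selected by sc_mchash_type; ICH/PCH chips use a
          *	10-bit hash (1024-bit table), others a 12-bit hash.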
   3472  */
   3473 static uint32_t
   3474 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3475 {
   3476 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3477 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3478 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3479 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3480 	uint32_t hash;
   3481 
   3482 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3483 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3484 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3485 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   3486 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3487 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3488 		return (hash & 0x3ff);
   3489 	}
   3490 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3491 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3492 
   3493 	return (hash & 0xfff);
   3494 }
   3495 
   3496 /*
   3497  * wm_set_filter:
   3498  *
   3499  *	Set up the receive filter.
   3500  */
   3501 static void
   3502 wm_set_filter(struct wm_softc *sc)
   3503 {
   3504 	struct ethercom *ec = &sc->sc_ethercom;
   3505 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3506 	struct ether_multi *enm;
   3507 	struct ether_multistep step;
   3508 	bus_addr_t mta_reg;
   3509 	uint32_t hash, reg, bit;
   3510 	int i, size, ralmax;
   3511 
   3512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3513 		device_xname(sc->sc_dev), __func__));
   3514 
   3515 	if (sc->sc_type >= WM_T_82544)
   3516 		mta_reg = WMREG_CORDOVA_MTA;
   3517 	else
   3518 		mta_reg = WMREG_MTA;
   3519 
   3520 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3521 
   3522 	if (ifp->if_flags & IFF_BROADCAST)
   3523 		sc->sc_rctl |= RCTL_BAM;
   3524 	if (ifp->if_flags & IFF_PROMISC) {
   3525 		sc->sc_rctl |= RCTL_UPE;
   3526 		goto allmulti;
   3527 	}
   3528 
   3529 	/*
   3530 	 * Set the station address in the first RAL slot, and
   3531 	 * clear the remaining slots.
   3532 	 */
   3533 	if (sc->sc_type == WM_T_ICH8)
    3534 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3535 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3536 	    || (sc->sc_type == WM_T_PCH))
   3537 		size = WM_RAL_TABSIZE_ICH8;
   3538 	else if (sc->sc_type == WM_T_PCH2)
   3539 		size = WM_RAL_TABSIZE_PCH2;
   3540 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3541 	    || (sc->sc_type == WM_T_PCH_CNP))
   3542 		size = WM_RAL_TABSIZE_PCH_LPT;
   3543 	else if (sc->sc_type == WM_T_82575)
   3544 		size = WM_RAL_TABSIZE_82575;
   3545 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3546 		size = WM_RAL_TABSIZE_82576;
   3547 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3548 		size = WM_RAL_TABSIZE_I350;
   3549 	else
   3550 		size = WM_RAL_TABSIZE;
   3551 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3552 
   3553 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   3554 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   3555 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3556 		switch (i) {
   3557 		case 0:
   3558 			/* We can use all entries */
   3559 			ralmax = size;
   3560 			break;
   3561 		case 1:
   3562 			/* Only RAR[0] */
   3563 			ralmax = 1;
   3564 			break;
   3565 		default:
    3566 			/* Available SHRA registers + RAR[0] */
   3567 			ralmax = i + 1;
   3568 		}
   3569 	} else
   3570 		ralmax = size;
   3571 	for (i = 1; i < size; i++) {
   3572 		if (i < ralmax)
   3573 			wm_set_ral(sc, NULL, i);
   3574 	}
   3575 
   3576 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3577 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3578 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3579 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
   3580 		size = WM_ICH8_MC_TABSIZE;
   3581 	else
   3582 		size = WM_MC_TABSIZE;
   3583 	/* Clear out the multicast table. */
   3584 	for (i = 0; i < size; i++) {
   3585 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3586 		CSR_WRITE_FLUSH(sc);
   3587 	}
   3588 
   3589 	ETHER_LOCK(ec);
   3590 	ETHER_FIRST_MULTI(step, ec, enm);
   3591 	while (enm != NULL) {
   3592 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3593 			ETHER_UNLOCK(ec);
   3594 			/*
   3595 			 * We must listen to a range of multicast addresses.
   3596 			 * For now, just accept all multicasts, rather than
   3597 			 * trying to set only those filter bits needed to match
   3598 			 * the range.  (At this time, the only use of address
   3599 			 * ranges is for IP multicast routing, for which the
   3600 			 * range is big enough to require all bits set.)
   3601 			 */
   3602 			goto allmulti;
   3603 		}
   3604 
   3605 		hash = wm_mchash(sc, enm->enm_addrlo);
   3606 
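         		/*
         		 * The upper bits of the hash select one of the 32-bit
         		 * MTA registers; the low 5 bits select the bit in it.
         		 */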
   3607 		reg = (hash >> 5);
   3608 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3609 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3610 		    || (sc->sc_type == WM_T_PCH2)
   3611 		    || (sc->sc_type == WM_T_PCH_LPT)
   3612 		    || (sc->sc_type == WM_T_PCH_SPT)
   3613 		    || (sc->sc_type == WM_T_PCH_CNP))
   3614 			reg &= 0x1f;
   3615 		else
   3616 			reg &= 0x7f;
   3617 		bit = hash & 0x1f;
   3618 
   3619 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3620 		hash |= 1U << bit;
   3621 
   3622 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3623 			/*
   3624 			 * 82544 Errata 9: Certain register cannot be written
   3625 			 * with particular alignments in PCI-X bus operation
   3626 			 * (FCAH, MTA and VFTA).
   3627 			 */
   3628 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3629 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3630 			CSR_WRITE_FLUSH(sc);
   3631 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3632 			CSR_WRITE_FLUSH(sc);
   3633 		} else {
   3634 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3635 			CSR_WRITE_FLUSH(sc);
   3636 		}
   3637 
   3638 		ETHER_NEXT_MULTI(step, enm);
   3639 	}
   3640 	ETHER_UNLOCK(ec);
   3641 
   3642 	ifp->if_flags &= ~IFF_ALLMULTI;
   3643 	goto setit;
   3644 
   3645  allmulti:
   3646 	ifp->if_flags |= IFF_ALLMULTI;
   3647 	sc->sc_rctl |= RCTL_MPE;
   3648 
   3649  setit:
   3650 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3651 }
   3652 
   3653 /* Reset and init related */
   3654 
   3655 static void
   3656 wm_set_vlan(struct wm_softc *sc)
   3657 {
   3658 
   3659 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3660 		device_xname(sc->sc_dev), __func__));
   3661 
   3662 	/* Deal with VLAN enables. */
   3663 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3664 		sc->sc_ctrl |= CTRL_VME;
   3665 	else
   3666 		sc->sc_ctrl &= ~CTRL_VME;
   3667 
   3668 	/* Write the control registers. */
   3669 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3670 }
   3671 
   3672 static void
   3673 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3674 {
   3675 	uint32_t gcr;
   3676 	pcireg_t ctrl2;
   3677 
   3678 	gcr = CSR_READ(sc, WMREG_GCR);
   3679 
   3680 	/* Only take action if timeout value is defaulted to 0 */
   3681 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3682 		goto out;
   3683 
   3684 	if ((gcr & GCR_CAP_VER2) == 0) {
   3685 		gcr |= GCR_CMPL_TMOUT_10MS;
   3686 		goto out;
   3687 	}
   3688 
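         	/* Capability version 2: set a 16ms completion timeout via PCIe DCSR2 */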
   3689 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3690 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3691 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3692 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3693 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3694 
   3695 out:
   3696 	/* Disable completion timeout resend */
   3697 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3698 
   3699 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3700 }
   3701 
   3702 void
   3703 wm_get_auto_rd_done(struct wm_softc *sc)
   3704 {
   3705 	int i;
   3706 
    3707 	/* Wait for EEPROM to reload */
   3708 	switch (sc->sc_type) {
   3709 	case WM_T_82571:
   3710 	case WM_T_82572:
   3711 	case WM_T_82573:
   3712 	case WM_T_82574:
   3713 	case WM_T_82583:
   3714 	case WM_T_82575:
   3715 	case WM_T_82576:
   3716 	case WM_T_82580:
   3717 	case WM_T_I350:
   3718 	case WM_T_I354:
   3719 	case WM_T_I210:
   3720 	case WM_T_I211:
   3721 	case WM_T_80003:
   3722 	case WM_T_ICH8:
   3723 	case WM_T_ICH9:
   3724 		for (i = 0; i < 10; i++) {
   3725 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3726 				break;
   3727 			delay(1000);
   3728 		}
   3729 		if (i == 10) {
   3730 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3731 			    "complete\n", device_xname(sc->sc_dev));
   3732 		}
   3733 		break;
   3734 	default:
   3735 		break;
   3736 	}
   3737 }
   3738 
   3739 void
   3740 wm_lan_init_done(struct wm_softc *sc)
   3741 {
   3742 	uint32_t reg = 0;
   3743 	int i;
   3744 
   3745 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3746 		device_xname(sc->sc_dev), __func__));
   3747 
   3748 	/* Wait for eeprom to reload */
   3749 	switch (sc->sc_type) {
   3750 	case WM_T_ICH10:
   3751 	case WM_T_PCH:
   3752 	case WM_T_PCH2:
   3753 	case WM_T_PCH_LPT:
   3754 	case WM_T_PCH_SPT:
   3755 	case WM_T_PCH_CNP:
   3756 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3757 			reg = CSR_READ(sc, WMREG_STATUS);
   3758 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3759 				break;
   3760 			delay(100);
   3761 		}
   3762 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3763 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3764 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3765 		}
   3766 		break;
   3767 	default:
   3768 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3769 		    __func__);
   3770 		break;
   3771 	}
   3772 
   3773 	reg &= ~STATUS_LAN_INIT_DONE;
   3774 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3775 }
   3776 
   3777 void
   3778 wm_get_cfg_done(struct wm_softc *sc)
   3779 {
   3780 	int mask;
   3781 	uint32_t reg;
   3782 	int i;
   3783 
   3784 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3785 		device_xname(sc->sc_dev), __func__));
   3786 
   3787 	/* Wait for eeprom to reload */
   3788 	switch (sc->sc_type) {
   3789 	case WM_T_82542_2_0:
   3790 	case WM_T_82542_2_1:
   3791 		/* null */
   3792 		break;
   3793 	case WM_T_82543:
   3794 	case WM_T_82544:
   3795 	case WM_T_82540:
   3796 	case WM_T_82545:
   3797 	case WM_T_82545_3:
   3798 	case WM_T_82546:
   3799 	case WM_T_82546_3:
   3800 	case WM_T_82541:
   3801 	case WM_T_82541_2:
   3802 	case WM_T_82547:
   3803 	case WM_T_82547_2:
   3804 	case WM_T_82573:
   3805 	case WM_T_82574:
   3806 	case WM_T_82583:
   3807 		/* generic */
   3808 		delay(10*1000);
   3809 		break;
   3810 	case WM_T_80003:
   3811 	case WM_T_82571:
   3812 	case WM_T_82572:
   3813 	case WM_T_82575:
   3814 	case WM_T_82576:
   3815 	case WM_T_82580:
   3816 	case WM_T_I350:
   3817 	case WM_T_I354:
   3818 	case WM_T_I210:
   3819 	case WM_T_I211:
   3820 		if (sc->sc_type == WM_T_82571) {
    3821 			/* On 82571, all functions share the port 0 bit */
   3822 			mask = EEMNGCTL_CFGDONE_0;
   3823 		} else
   3824 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3825 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3826 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3827 				break;
   3828 			delay(1000);
   3829 		}
   3830 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3831 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3832 				device_xname(sc->sc_dev), __func__));
   3833 		}
   3834 		break;
   3835 	case WM_T_ICH8:
   3836 	case WM_T_ICH9:
   3837 	case WM_T_ICH10:
   3838 	case WM_T_PCH:
   3839 	case WM_T_PCH2:
   3840 	case WM_T_PCH_LPT:
   3841 	case WM_T_PCH_SPT:
   3842 	case WM_T_PCH_CNP:
   3843 		delay(10*1000);
   3844 		if (sc->sc_type >= WM_T_ICH10)
   3845 			wm_lan_init_done(sc);
   3846 		else
   3847 			wm_get_auto_rd_done(sc);
   3848 
   3849 		/* Clear PHY Reset Asserted bit */
   3850 		reg = CSR_READ(sc, WMREG_STATUS);
   3851 		if ((reg & STATUS_PHYRA) != 0)
   3852 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3853 		break;
   3854 	default:
   3855 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3856 		    __func__);
   3857 		break;
   3858 	}
   3859 }
   3860 
   3861 void
   3862 wm_phy_post_reset(struct wm_softc *sc)
   3863 {
   3864 	uint32_t reg;
   3865 
   3866 	/* This function is only for ICH8 and newer. */
   3867 	if (sc->sc_type < WM_T_ICH8)
   3868 		return;
   3869 
   3870 	if (wm_phy_resetisblocked(sc)) {
   3871 		/* XXX */
   3872 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3873 		return;
   3874 	}
   3875 
   3876 	/* Allow time for h/w to get to quiescent state after reset */
   3877 	delay(10*1000);
   3878 
   3879 	/* Perform any necessary post-reset workarounds */
   3880 	if (sc->sc_type == WM_T_PCH)
   3881 		wm_hv_phy_workaround_ich8lan(sc);
   3882 	else if (sc->sc_type == WM_T_PCH2)
   3883 		wm_lv_phy_workaround_ich8lan(sc);
   3884 
   3885 	/* Clear the host wakeup bit after lcd reset */
   3886 	if (sc->sc_type >= WM_T_PCH) {
   3887 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3888 		    BM_PORT_GEN_CFG);
   3889 		reg &= ~BM_WUC_HOST_WU_BIT;
   3890 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3891 		    BM_PORT_GEN_CFG, reg);
   3892 	}
   3893 
   3894 	/* Configure the LCD with the extended configuration region in NVM */
   3895 	wm_init_lcd_from_nvm(sc);
   3896 
   3897 	/* Configure the LCD with the OEM bits in NVM */
   3898 	wm_oem_bits_config_ich8lan(sc, true);
   3899 
   3900 	if (sc->sc_type == WM_T_PCH2) {
   3901 		/* Ungate automatic PHY configuration on non-managed 82579 */
   3902 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   3903 			delay(10 * 1000);
   3904 			wm_gate_hw_phy_config_ich8lan(sc, false);
   3905 		}
   3906 		/* XXX Set EEE LPI Update Timer to 200usec */
   3907 	}
   3908 }
   3909 
   3910 /* Only for PCH and newer */
   3911 static int
   3912 wm_write_smbus_addr(struct wm_softc *sc)
   3913 {
   3914 	uint32_t strap, freq;
   3915 	uint16_t phy_data;
   3916 	int rv;
   3917 
   3918 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3919 		device_xname(sc->sc_dev), __func__));
   3920 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   3921 
   3922 	strap = CSR_READ(sc, WMREG_STRAP);
   3923 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3924 
   3925 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR, &phy_data);
   3926 	if (rv != 0)
   3927 		return -1;
   3928 
   3929 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3930 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3931 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3932 
   3933 	if (sc->sc_phytype == WMPHY_I217) {
   3934 		/* Restore SMBus frequency */
    3935 		if (freq--) {
   3936 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3937 			    | HV_SMB_ADDR_FREQ_HIGH);
   3938 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3939 			    HV_SMB_ADDR_FREQ_LOW);
   3940 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3941 			    HV_SMB_ADDR_FREQ_HIGH);
   3942 		} else {
   3943 			DPRINTF(WM_DEBUG_INIT,
   3944 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3945 				device_xname(sc->sc_dev), __func__));
   3946 		}
   3947 	}
   3948 
   3949 	return wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR,
   3950 	    phy_data);
   3951 }
   3952 
   3953 void
   3954 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3955 {
   3956 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3957 	uint16_t phy_page = 0;
   3958 
   3959 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3960 		device_xname(sc->sc_dev), __func__));
   3961 
   3962 	switch (sc->sc_type) {
   3963 	case WM_T_ICH8:
   3964 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3965 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3966 			return;
   3967 
   3968 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3969 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3970 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3971 			break;
   3972 		}
   3973 		/* FALLTHROUGH */
   3974 	case WM_T_PCH:
   3975 	case WM_T_PCH2:
   3976 	case WM_T_PCH_LPT:
   3977 	case WM_T_PCH_SPT:
   3978 	case WM_T_PCH_CNP:
   3979 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3980 		break;
   3981 	default:
   3982 		return;
   3983 	}
   3984 
   3985 	sc->phy.acquire(sc);
   3986 
   3987 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3988 	if ((reg & sw_cfg_mask) == 0)
   3989 		goto release;
   3990 
   3991 	/*
   3992 	 * Make sure HW does not configure LCD from PHY extended configuration
   3993 	 * before SW configuration
   3994 	 */
   3995 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3996 	if ((sc->sc_type < WM_T_PCH2)
   3997 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3998 		goto release;
   3999 
   4000 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   4001 		device_xname(sc->sc_dev), __func__));
   4002 	/* word_addr is in DWORD */
   4003 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   4004 
   4005 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   4006 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   4007 	if (cnf_size == 0)
   4008 		goto release;
   4009 
   4010 	if (((sc->sc_type == WM_T_PCH)
   4011 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   4012 	    || (sc->sc_type > WM_T_PCH)) {
   4013 		/*
   4014 		 * HW configures the SMBus address and LEDs when the OEM and
   4015 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   4016 		 * are cleared, SW will configure them instead.
   4017 		 */
   4018 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   4019 			device_xname(sc->sc_dev), __func__));
   4020 		wm_write_smbus_addr(sc);
   4021 
   4022 		reg = CSR_READ(sc, WMREG_LEDCTL);
   4023 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   4024 	}
   4025 
   4026 	/* Configure LCD from extended configuration region. */
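         	/* Each entry is a pair of 16-bit NVM words: data, then register address. */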
   4027 	for (i = 0; i < cnf_size; i++) {
   4028 		uint16_t reg_data, reg_addr;
   4029 
   4030 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   4031 			goto release;
   4032 
   4033 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   4034 			goto release;
   4035 
   4036 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   4037 			phy_page = reg_data;
   4038 
   4039 		reg_addr &= IGPHY_MAXREGADDR;
   4040 		reg_addr |= phy_page;
   4041 
   4042 		KASSERT(sc->phy.writereg_locked != NULL);
   4043 		sc->phy.writereg_locked(sc->sc_dev, 1, reg_addr, reg_data);
   4044 	}
   4045 
   4046 release:
   4047 	sc->phy.release(sc);
   4048 	return;
   4049 }
   4050 
   4051 /*
   4052  *  wm_oem_bits_config_ich8lan - SW-based LCD Configuration
   4053  *  @sc:       pointer to the HW structure
   4054  *  @d0_state: boolean if entering d0 or d3 device state
   4055  *
   4056  *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
   4057  *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
   4058  *  in NVM determines whether HW should configure LPLU and Gbe Disable.
   4059  */
   4060 int
   4061 wm_oem_bits_config_ich8lan(struct wm_softc *sc, bool d0_state)
   4062 {
   4063 	uint32_t mac_reg;
   4064 	uint16_t oem_reg;
   4065 	int rv;
   4066 
   4067 	if (sc->sc_type < WM_T_PCH)
   4068 		return 0;
   4069 
   4070 	rv = sc->phy.acquire(sc);
   4071 	if (rv != 0)
   4072 		return rv;
   4073 
   4074 	if (sc->sc_type == WM_T_PCH) {
   4075 		mac_reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   4076 		if ((mac_reg & EXTCNFCTR_OEM_WRITE_ENABLE) != 0)
   4077 			goto release;
   4078 	}
   4079 
   4080 	mac_reg = CSR_READ(sc, WMREG_FEXTNVM);
   4081 	if ((mac_reg & FEXTNVM_SW_CONFIG_ICH8M) == 0)
   4082 		goto release;
   4083 
   4084 	mac_reg = CSR_READ(sc, WMREG_PHY_CTRL);
   4085 
   4086 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 1, HV_OEM_BITS, &oem_reg);
   4087 	if (rv != 0)
   4088 		goto release;
   4089 	oem_reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   4090 
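         	/*
         	 * Mirror the MAC's PHY_CTRL GbE-disable and LPLU settings
         	 * into the PHY's OEM bits.
         	 */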
   4091 	if (d0_state) {
   4092 		if ((mac_reg & PHY_CTRL_GBE_DIS) != 0)
   4093 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4094 		if ((mac_reg & PHY_CTRL_D0A_LPLU) != 0)
   4095 			oem_reg |= HV_OEM_BITS_LPLU;
   4096 	} else {
   4097 		if ((mac_reg & (PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS))
   4098 		    != 0)
   4099 			oem_reg |= HV_OEM_BITS_A1KDIS;
   4100 		if ((mac_reg & (PHY_CTRL_D0A_LPLU | PHY_CTRL_NOND0A_LPLU))
   4101 		    != 0)
   4102 			oem_reg |= HV_OEM_BITS_LPLU;
   4103 	}
   4104 
   4105 	/* Set Restart auto-neg to activate the bits */
   4106 	if ((d0_state || (sc->sc_type != WM_T_PCH))
   4107 	    && (wm_phy_resetisblocked(sc) == false))
   4108 		oem_reg |= HV_OEM_BITS_ANEGNOW;
   4109 
   4110 	rv = wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_OEM_BITS, oem_reg);
   4111 
   4112 release:
   4113 	sc->phy.release(sc);
   4114 
   4115 	return rv;
   4116 }
   4117 
   4118 /* Init hardware bits */
   4119 void
   4120 wm_initialize_hardware_bits(struct wm_softc *sc)
   4121 {
   4122 	uint32_t tarc0, tarc1, reg;
   4123 
   4124 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4125 		device_xname(sc->sc_dev), __func__));
   4126 
   4127 	/* For 82571 variant, 80003 and ICHs */
   4128 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   4129 	    || (sc->sc_type >= WM_T_80003)) {
   4130 
   4131 		/* Transmit Descriptor Control 0 */
   4132 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   4133 		reg |= TXDCTL_COUNT_DESC;
   4134 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   4135 
   4136 		/* Transmit Descriptor Control 1 */
   4137 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   4138 		reg |= TXDCTL_COUNT_DESC;
   4139 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   4140 
   4141 		/* TARC0 */
   4142 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   4143 		switch (sc->sc_type) {
   4144 		case WM_T_82571:
   4145 		case WM_T_82572:
   4146 		case WM_T_82573:
   4147 		case WM_T_82574:
   4148 		case WM_T_82583:
   4149 		case WM_T_80003:
   4150 			/* Clear bits 30..27 */
   4151 			tarc0 &= ~__BITS(30, 27);
   4152 			break;
   4153 		default:
   4154 			break;
   4155 		}
   4156 
   4157 		switch (sc->sc_type) {
   4158 		case WM_T_82571:
   4159 		case WM_T_82572:
   4160 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   4161 
   4162 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4163 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   4164 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   4165 			/* 8257[12] Errata No.7 */
    4166 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   4167 
   4168 			/* TARC1 bit 28 */
   4169 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4170 				tarc1 &= ~__BIT(28);
   4171 			else
   4172 				tarc1 |= __BIT(28);
   4173 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4174 
   4175 			/*
   4176 			 * 8257[12] Errata No.13
    4177 			 * Disable Dynamic Clock Gating.
   4178 			 */
   4179 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4180 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   4181 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4182 			break;
   4183 		case WM_T_82573:
   4184 		case WM_T_82574:
   4185 		case WM_T_82583:
   4186 			if ((sc->sc_type == WM_T_82574)
   4187 			    || (sc->sc_type == WM_T_82583))
   4188 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   4189 
   4190 			/* Extended Device Control */
   4191 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4192 			reg &= ~__BIT(23);	/* Clear bit 23 */
   4193 			reg |= __BIT(22);	/* Set bit 22 */
   4194 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4195 
   4196 			/* Device Control */
   4197 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   4198 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4199 
   4200 			/* PCIe Control Register */
   4201 			/*
   4202 			 * 82573 Errata (unknown).
   4203 			 *
   4204 			 * 82574 Errata 25 and 82583 Errata 12
   4205 			 * "Dropped Rx Packets":
    4206 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   4207 			 */
   4208 			reg = CSR_READ(sc, WMREG_GCR);
   4209 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   4210 			CSR_WRITE(sc, WMREG_GCR, reg);
   4211 
   4212 			if ((sc->sc_type == WM_T_82574)
   4213 			    || (sc->sc_type == WM_T_82583)) {
   4214 				/*
   4215 				 * Document says this bit must be set for
   4216 				 * proper operation.
   4217 				 */
   4218 				reg = CSR_READ(sc, WMREG_GCR);
   4219 				reg |= __BIT(22);
   4220 				CSR_WRITE(sc, WMREG_GCR, reg);
   4221 
   4222 				/*
    4223 				 * Apply a workaround for hardware errata
    4224 				 * documented in the errata docs.  It fixes an
    4225 				 * issue where some error-prone or unreliable
    4226 				 * PCIe completions occur, particularly with
    4227 				 * ASPM enabled.  Without the fix, the issue
    4228 				 * can cause Tx timeouts.
   4229 				 */
   4230 				reg = CSR_READ(sc, WMREG_GCR2);
   4231 				reg |= __BIT(0);
   4232 				CSR_WRITE(sc, WMREG_GCR2, reg);
   4233 			}
   4234 			break;
   4235 		case WM_T_80003:
   4236 			/* TARC0 */
   4237 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4238 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4239 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4240 
   4241 			/* TARC1 bit 28 */
   4242 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4243 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4244 				tarc1 &= ~__BIT(28);
   4245 			else
   4246 				tarc1 |= __BIT(28);
   4247 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4248 			break;
   4249 		case WM_T_ICH8:
   4250 		case WM_T_ICH9:
   4251 		case WM_T_ICH10:
   4252 		case WM_T_PCH:
   4253 		case WM_T_PCH2:
   4254 		case WM_T_PCH_LPT:
   4255 		case WM_T_PCH_SPT:
   4256 		case WM_T_PCH_CNP:
   4257 			/* TARC0 */
   4258 			if (sc->sc_type == WM_T_ICH8) {
   4259 				/* Set TARC0 bits 29 and 28 */
   4260 				tarc0 |= __BITS(29, 28);
   4261 			} else if (sc->sc_type == WM_T_PCH_SPT) {
   4262 				tarc0 |= __BIT(29);
   4263 				/*
   4264 				 *  Drop bit 28. From Linux.
   4265 				 * See I218/I219 spec update
   4266 				 * "5. Buffer Overrun While the I219 is
   4267 				 * Processing DMA Transactions"
   4268 				 */
   4269 				tarc0 &= ~__BIT(28);
   4270 			}
   4271 			/* Set TARC0 bits 23,24,26,27 */
   4272 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4273 
   4274 			/* CTRL_EXT */
   4275 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4276 			reg |= __BIT(22);	/* Set bit 22 */
   4277 			/*
   4278 			 * Enable PHY low-power state when MAC is at D3
   4279 			 * w/o WoL
   4280 			 */
   4281 			if (sc->sc_type >= WM_T_PCH)
   4282 				reg |= CTRL_EXT_PHYPDEN;
   4283 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4284 
   4285 			/* TARC1 */
   4286 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4287 			/* bit 28 */
   4288 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4289 				tarc1 &= ~__BIT(28);
   4290 			else
   4291 				tarc1 |= __BIT(28);
   4292 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4293 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4294 
   4295 			/* Device Status */
   4296 			if (sc->sc_type == WM_T_ICH8) {
   4297 				reg = CSR_READ(sc, WMREG_STATUS);
   4298 				reg &= ~__BIT(31);
   4299 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4300 
   4301 			}
   4302 
   4303 			/* IOSFPC */
   4304 			if (sc->sc_type == WM_T_PCH_SPT) {
   4305 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4306 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4307 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4308 			}
   4309 			/*
   4310 			 * Work-around descriptor data corruption issue during
   4311 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4312 			 * capability.
   4313 			 */
   4314 			reg = CSR_READ(sc, WMREG_RFCTL);
   4315 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4316 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4317 			break;
   4318 		default:
   4319 			break;
   4320 		}
   4321 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4322 
   4323 		switch (sc->sc_type) {
   4324 		/*
   4325 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4326 		 * Avoid RSS Hash Value bug.
   4327 		 */
   4328 		case WM_T_82571:
   4329 		case WM_T_82572:
   4330 		case WM_T_82573:
   4331 		case WM_T_80003:
   4332 		case WM_T_ICH8:
   4333 			reg = CSR_READ(sc, WMREG_RFCTL);
    4334 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4335 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4336 			break;
   4337 		case WM_T_82574:
    4338 			/* Use extended Rx descriptors. */
   4339 			reg = CSR_READ(sc, WMREG_RFCTL);
   4340 			reg |= WMREG_RFCTL_EXSTEN;
   4341 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4342 			break;
   4343 		default:
   4344 			break;
   4345 		}
   4346 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4347 		/*
   4348 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4349 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4350 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4351 		 * Correctly by the Device"
   4352 		 *
   4353 		 * I354(C2000) Errata AVR53:
   4354 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4355 		 * Hang"
   4356 		 */
   4357 		reg = CSR_READ(sc, WMREG_RFCTL);
   4358 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4359 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4360 	}
   4361 }
   4362 
   4363 static uint32_t
   4364 wm_rxpbs_adjust_82580(uint32_t val)
   4365 {
   4366 	uint32_t rv = 0;
   4367 
   4368 	if (val < __arraycount(wm_82580_rxpbs_table))
   4369 		rv = wm_82580_rxpbs_table[val];
   4370 
   4371 	return rv;
   4372 }
   4373 
   4374 /*
   4375  * wm_reset_phy:
   4376  *
   4377  *	generic PHY reset function.
   4378  *	Same as e1000_phy_hw_reset_generic()
   4379  */
   4380 static int
   4381 wm_reset_phy(struct wm_softc *sc)
   4382 {
   4383 	uint32_t reg;
   4384 
   4385 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4386 		device_xname(sc->sc_dev), __func__));
   4387 	if (wm_phy_resetisblocked(sc))
   4388 		return -1;
   4389 
   4390 	sc->phy.acquire(sc);
   4391 
   4392 	reg = CSR_READ(sc, WMREG_CTRL);
   4393 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4394 	CSR_WRITE_FLUSH(sc);
   4395 
   4396 	delay(sc->phy.reset_delay_us);
   4397 
   4398 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4399 	CSR_WRITE_FLUSH(sc);
   4400 
   4401 	delay(150);
   4402 
   4403 	sc->phy.release(sc);
   4404 
   4405 	wm_get_cfg_done(sc);
   4406 	wm_phy_post_reset(sc);
   4407 
   4408 	return 0;
   4409 }
   4410 
   4411 /*
   4412  * Only used by WM_T_PCH_SPT which does not use multiqueue,
   4413  * so it is enough to check sc->sc_queue[0] only.
   4414  */
   4415 static void
   4416 wm_flush_desc_rings(struct wm_softc *sc)
   4417 {
   4418 	pcireg_t preg;
   4419 	uint32_t reg;
   4420 	struct wm_txqueue *txq;
   4421 	wiseman_txdesc_t *txd;
   4422 	int nexttx;
   4423 	uint32_t rctl;
   4424 
   4425 	/* First, disable MULR fix in FEXTNVM11 */
   4426 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4427 	reg |= FEXTNVM11_DIS_MULRFIX;
   4428 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4429 
   4430 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4431 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4432 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4433 		return;
   4434 
   4435 	/* TX */
   4436 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4437 	    device_xname(sc->sc_dev), preg, reg);
   4438 	reg = CSR_READ(sc, WMREG_TCTL);
   4439 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4440 
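         	/*
         	 * Queue a single dummy descriptor (IFCS, 512 bytes) and bump
         	 * TDT so the hardware can complete the pending flush request.
         	 */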
   4441 	txq = &sc->sc_queue[0].wmq_txq;
   4442 	nexttx = txq->txq_next;
   4443 	txd = &txq->txq_descs[nexttx];
   4444 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
   4445 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4446 	txd->wtx_fields.wtxu_status = 0;
   4447 	txd->wtx_fields.wtxu_options = 0;
   4448 	txd->wtx_fields.wtxu_vlan = 0;
   4449 
   4450 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4451 	    BUS_SPACE_BARRIER_WRITE);
   4452 
   4453 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4454 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4455 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4456 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4457 	delay(250);
   4458 
   4459 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4460 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4461 		return;
   4462 
   4463 	/* RX */
   4464 	printf("%s: Need RX flush (reg = %08x)\n",
   4465 	    device_xname(sc->sc_dev), preg);
   4466 	rctl = CSR_READ(sc, WMREG_RCTL);
   4467 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4468 	CSR_WRITE_FLUSH(sc);
   4469 	delay(150);
   4470 
   4471 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4472 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4473 	reg &= 0xffffc000;
   4474 	/*
   4475 	 * update thresholds: prefetch threshold to 31, host threshold
   4476 	 * to 1 and make sure the granularity is "descriptors" and not
   4477 	 * "cache lines"
   4478 	 */
   4479 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4480 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4481 
   4482 	/*
   4483 	 * momentarily enable the RX ring for the changes to take
   4484 	 * effect
   4485 	 */
   4486 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4487 	CSR_WRITE_FLUSH(sc);
   4488 	delay(150);
   4489 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4490 }
   4491 
   4492 /*
   4493  * wm_reset:
   4494  *
   4495  *	Reset the i82542 chip.
   4496  */
   4497 static void
   4498 wm_reset(struct wm_softc *sc)
   4499 {
   4500 	int phy_reset = 0;
   4501 	int i, error = 0;
   4502 	uint32_t reg;
   4503 	uint16_t kmreg;
   4504 	int rv;
   4505 
   4506 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4507 		device_xname(sc->sc_dev), __func__));
   4508 	KASSERT(sc->sc_type != 0);
   4509 
   4510 	/*
   4511 	 * Allocate on-chip memory according to the MTU size.
   4512 	 * The Packet Buffer Allocation register must be written
   4513 	 * before the chip is reset.
   4514 	 */
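         	/*
         	 * Illustrative example for the 82547 case below (assuming the
         	 * PBA_* constants are in KB units): with a standard MTU,
         	 * sc_pba = PBA_30K, so 30KB of packet buffer goes to RX and the
         	 * remaining (PBA_40K - PBA_30K) = 10KB backs the TX FIFO.
         	 */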
   4515 	switch (sc->sc_type) {
   4516 	case WM_T_82547:
   4517 	case WM_T_82547_2:
   4518 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4519 		    PBA_22K : PBA_30K;
   4520 		for (i = 0; i < sc->sc_nqueues; i++) {
   4521 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4522 			txq->txq_fifo_head = 0;
   4523 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4524 			txq->txq_fifo_size =
   4525 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4526 			txq->txq_fifo_stall = 0;
   4527 		}
   4528 		break;
   4529 	case WM_T_82571:
   4530 	case WM_T_82572:
   4531 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4532 	case WM_T_80003:
   4533 		sc->sc_pba = PBA_32K;
   4534 		break;
   4535 	case WM_T_82573:
   4536 		sc->sc_pba = PBA_12K;
   4537 		break;
   4538 	case WM_T_82574:
   4539 	case WM_T_82583:
   4540 		sc->sc_pba = PBA_20K;
   4541 		break;
   4542 	case WM_T_82576:
   4543 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4544 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4545 		break;
   4546 	case WM_T_82580:
   4547 	case WM_T_I350:
   4548 	case WM_T_I354:
   4549 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4550 		break;
   4551 	case WM_T_I210:
   4552 	case WM_T_I211:
   4553 		sc->sc_pba = PBA_34K;
   4554 		break;
   4555 	case WM_T_ICH8:
   4556 		/* Workaround for a bit corruption issue in FIFO memory */
   4557 		sc->sc_pba = PBA_8K;
   4558 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4559 		break;
   4560 	case WM_T_ICH9:
   4561 	case WM_T_ICH10:
   4562 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4563 		    PBA_14K : PBA_10K;
   4564 		break;
   4565 	case WM_T_PCH:
   4566 	case WM_T_PCH2:	/* XXX 14K? */
   4567 	case WM_T_PCH_LPT:
   4568 	case WM_T_PCH_SPT:
   4569 	case WM_T_PCH_CNP:
   4570 		sc->sc_pba = PBA_26K;
   4571 		break;
   4572 	default:
   4573 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4574 		    PBA_40K : PBA_48K;
   4575 		break;
   4576 	}
   4577 	/*
   4578 	 * Only old or non-multiqueue devices have the PBA register
   4579 	 * XXX Need special handling for 82575.
   4580 	 */
   4581 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4582 	    || (sc->sc_type == WM_T_82575))
   4583 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4584 
   4585 	/* Prevent the PCI-E bus from sticking */
   4586 	if (sc->sc_flags & WM_F_PCIE) {
   4587 		int timeout = 800;
   4588 
   4589 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4590 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4591 
   4592 		while (timeout--) {
   4593 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4594 			    == 0)
   4595 				break;
   4596 			delay(100);
   4597 		}
   4598 		if (timeout == 0)
   4599 			device_printf(sc->sc_dev,
   4600 			    "failed to disable busmastering\n");
   4601 	}
   4602 
   4603 	/* Set the completion timeout for interface */
   4604 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4605 	    || (sc->sc_type == WM_T_82580)
   4606 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4607 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4608 		wm_set_pcie_completion_timeout(sc);
   4609 
   4610 	/* Clear interrupt */
   4611 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4612 	if (wm_is_using_msix(sc)) {
   4613 		if (sc->sc_type != WM_T_82574) {
   4614 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4615 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4616 		} else
   4617 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4618 	}
   4619 
   4620 	/* Stop the transmit and receive processes. */
   4621 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4622 	sc->sc_rctl &= ~RCTL_EN;
   4623 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4624 	CSR_WRITE_FLUSH(sc);
   4625 
   4626 	/* XXX set_tbi_sbp_82543() */
   4627 
   4628 	delay(10*1000);
   4629 
   4630 	/* Must acquire the MDIO ownership before MAC reset */
   4631 	switch (sc->sc_type) {
   4632 	case WM_T_82573:
   4633 	case WM_T_82574:
   4634 	case WM_T_82583:
   4635 		error = wm_get_hw_semaphore_82573(sc);
   4636 		break;
   4637 	default:
   4638 		break;
   4639 	}
   4640 
   4641 	/*
   4642 	 * 82541 Errata 29? & 82547 Errata 28?
   4643 	 * See also the description about PHY_RST bit in CTRL register
   4644 	 * in 8254x_GBe_SDM.pdf.
   4645 	 */
   4646 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4647 		CSR_WRITE(sc, WMREG_CTRL,
   4648 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4649 		CSR_WRITE_FLUSH(sc);
   4650 		delay(5000);
   4651 	}
   4652 
   4653 	switch (sc->sc_type) {
   4654 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4655 	case WM_T_82541:
   4656 	case WM_T_82541_2:
   4657 	case WM_T_82547:
   4658 	case WM_T_82547_2:
   4659 		/*
   4660 		 * On some chipsets, a reset through a memory-mapped write
   4661 		 * cycle can cause the chip to reset before completing the
    4662 		 * write cycle. This causes major headaches that can be avoided
   4663 		 * by issuing the reset via indirect register writes through
   4664 		 * I/O space.
   4665 		 *
   4666 		 * So, if we successfully mapped the I/O BAR at attach time,
   4667 		 * use that. Otherwise, try our luck with a memory-mapped
   4668 		 * reset.
   4669 		 */
   4670 		if (sc->sc_flags & WM_F_IOH_VALID)
   4671 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4672 		else
   4673 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4674 		break;
   4675 	case WM_T_82545_3:
   4676 	case WM_T_82546_3:
   4677 		/* Use the shadow control register on these chips. */
   4678 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4679 		break;
   4680 	case WM_T_80003:
   4681 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4682 		sc->phy.acquire(sc);
   4683 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4684 		sc->phy.release(sc);
   4685 		break;
   4686 	case WM_T_ICH8:
   4687 	case WM_T_ICH9:
   4688 	case WM_T_ICH10:
   4689 	case WM_T_PCH:
   4690 	case WM_T_PCH2:
   4691 	case WM_T_PCH_LPT:
   4692 	case WM_T_PCH_SPT:
   4693 	case WM_T_PCH_CNP:
   4694 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4695 		if (wm_phy_resetisblocked(sc) == false) {
   4696 			/*
   4697 			 * Gate automatic PHY configuration by hardware on
   4698 			 * non-managed 82579
   4699 			 */
   4700 			if ((sc->sc_type == WM_T_PCH2)
   4701 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4702 				== 0))
   4703 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4704 
   4705 			reg |= CTRL_PHY_RESET;
   4706 			phy_reset = 1;
   4707 		} else
   4708 			printf("XXX reset is blocked!!!\n");
   4709 		sc->phy.acquire(sc);
   4710 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4711 		/* Don't insert a completion barrier during reset */
   4712 		delay(20*1000);
   4713 		mutex_exit(sc->sc_ich_phymtx);
   4714 		break;
   4715 	case WM_T_82580:
   4716 	case WM_T_I350:
   4717 	case WM_T_I354:
   4718 	case WM_T_I210:
   4719 	case WM_T_I211:
   4720 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4721 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4722 			CSR_WRITE_FLUSH(sc);
   4723 		delay(5000);
   4724 		break;
   4725 	case WM_T_82542_2_0:
   4726 	case WM_T_82542_2_1:
   4727 	case WM_T_82543:
   4728 	case WM_T_82540:
   4729 	case WM_T_82545:
   4730 	case WM_T_82546:
   4731 	case WM_T_82571:
   4732 	case WM_T_82572:
   4733 	case WM_T_82573:
   4734 	case WM_T_82574:
   4735 	case WM_T_82575:
   4736 	case WM_T_82576:
   4737 	case WM_T_82583:
   4738 	default:
   4739 		/* Everything else can safely use the documented method. */
   4740 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4741 		break;
   4742 	}
   4743 
   4744 	/* Must release the MDIO ownership after MAC reset */
   4745 	switch (sc->sc_type) {
   4746 	case WM_T_82573:
   4747 	case WM_T_82574:
   4748 	case WM_T_82583:
   4749 		if (error == 0)
   4750 			wm_put_hw_semaphore_82573(sc);
   4751 		break;
   4752 	default:
   4753 		break;
   4754 	}
   4755 
   4756 	/* Set Phy Config Counter to 50msec */
   4757 	if (sc->sc_type == WM_T_PCH2) {
   4758 		reg = CSR_READ(sc, WMREG_FEXTNVM3);
   4759 		reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   4760 		reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   4761 		CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   4762 	}
   4763 
   4764 	if (phy_reset != 0)
   4765 		wm_get_cfg_done(sc);
   4766 
   4767 	/* reload EEPROM */
   4768 	switch (sc->sc_type) {
   4769 	case WM_T_82542_2_0:
   4770 	case WM_T_82542_2_1:
   4771 	case WM_T_82543:
   4772 	case WM_T_82544:
   4773 		delay(10);
   4774 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4776 		CSR_WRITE_FLUSH(sc);
   4777 		delay(2000);
   4778 		break;
   4779 	case WM_T_82540:
   4780 	case WM_T_82545:
   4781 	case WM_T_82545_3:
   4782 	case WM_T_82546:
   4783 	case WM_T_82546_3:
   4784 		delay(5*1000);
   4785 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4786 		break;
   4787 	case WM_T_82541:
   4788 	case WM_T_82541_2:
   4789 	case WM_T_82547:
   4790 	case WM_T_82547_2:
   4791 		delay(20000);
   4792 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4793 		break;
   4794 	case WM_T_82571:
   4795 	case WM_T_82572:
   4796 	case WM_T_82573:
   4797 	case WM_T_82574:
   4798 	case WM_T_82583:
   4799 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4800 			delay(10);
   4801 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4802 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4803 			CSR_WRITE_FLUSH(sc);
   4804 		}
   4805 		/* check EECD_EE_AUTORD */
   4806 		wm_get_auto_rd_done(sc);
   4807 		/*
   4808 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4809 		 * is set.
   4810 		 */
   4811 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4812 		    || (sc->sc_type == WM_T_82583))
   4813 			delay(25*1000);
   4814 		break;
   4815 	case WM_T_82575:
   4816 	case WM_T_82576:
   4817 	case WM_T_82580:
   4818 	case WM_T_I350:
   4819 	case WM_T_I354:
   4820 	case WM_T_I210:
   4821 	case WM_T_I211:
   4822 	case WM_T_80003:
   4823 		/* check EECD_EE_AUTORD */
   4824 		wm_get_auto_rd_done(sc);
   4825 		break;
   4826 	case WM_T_ICH8:
   4827 	case WM_T_ICH9:
   4828 	case WM_T_ICH10:
   4829 	case WM_T_PCH:
   4830 	case WM_T_PCH2:
   4831 	case WM_T_PCH_LPT:
   4832 	case WM_T_PCH_SPT:
   4833 	case WM_T_PCH_CNP:
   4834 		break;
   4835 	default:
   4836 		panic("%s: unknown type\n", __func__);
   4837 	}
   4838 
   4839 	/* Check whether EEPROM is present or not */
   4840 	switch (sc->sc_type) {
   4841 	case WM_T_82575:
   4842 	case WM_T_82576:
   4843 	case WM_T_82580:
   4844 	case WM_T_I350:
   4845 	case WM_T_I354:
   4846 	case WM_T_ICH8:
   4847 	case WM_T_ICH9:
   4848 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4849 			/* Not found */
   4850 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4851 			if (sc->sc_type == WM_T_82575)
   4852 				wm_reset_init_script_82575(sc);
   4853 		}
   4854 		break;
   4855 	default:
   4856 		break;
   4857 	}
   4858 
   4859 	if (phy_reset != 0)
   4860 		wm_phy_post_reset(sc);
   4861 
   4862 	if ((sc->sc_type == WM_T_82580)
   4863 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4864 		/* clear global device reset status bit */
   4865 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4866 	}
   4867 
   4868 	/* Clear any pending interrupt events. */
   4869 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4870 	reg = CSR_READ(sc, WMREG_ICR);
   4871 	if (wm_is_using_msix(sc)) {
   4872 		if (sc->sc_type != WM_T_82574) {
   4873 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4874 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4875 		} else
   4876 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4877 	}
   4878 
   4879 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4880 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4881 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4882 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
   4883 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4884 		reg |= KABGTXD_BGSQLBIAS;
   4885 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4886 	}
   4887 
   4888 	/* reload sc_ctrl */
   4889 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4890 
   4891 	if (sc->sc_type == WM_T_I354) {
   4892 #if 0
   4893 		/* I354 uses an external PHY */
   4894 		wm_set_eee_i354(sc);
   4895 #endif
   4896 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4897 		wm_set_eee_i350(sc);
   4898 
   4899 	/*
   4900 	 * For PCH, this write will make sure that any noise will be detected
   4901 	 * as a CRC error and be dropped rather than show up as a bad packet
   4902 	 * to the DMA engine
   4903 	 */
   4904 	if (sc->sc_type == WM_T_PCH)
   4905 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4906 
   4907 	if (sc->sc_type >= WM_T_82544)
   4908 		CSR_WRITE(sc, WMREG_WUC, 0);
   4909 
   4910 	if (sc->sc_type < WM_T_82575)
   4911 		wm_disable_aspm(sc);
   4912 
   4913 	wm_reset_mdicnfg_82580(sc);
   4914 
   4915 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4916 		wm_pll_workaround_i210(sc);
   4917 
   4918 	if (sc->sc_type == WM_T_80003) {
   4919 		/* default to TRUE to enable the MDIC W/A */
   4920 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4921 
   4922 		rv = wm_kmrn_readreg(sc,
   4923 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4924 		if (rv == 0) {
   4925 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4926 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4927 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4928 			else
   4929 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4930 		}
   4931 	}
   4932 }
   4933 
   4934 /*
   4935  * wm_add_rxbuf:
   4936  *
    4937  *	Add a receive buffer to the indicated descriptor.
   4938  */
   4939 static int
   4940 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4941 {
   4942 	struct wm_softc *sc = rxq->rxq_sc;
   4943 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4944 	struct mbuf *m;
   4945 	int error;
   4946 
   4947 	KASSERT(mutex_owned(rxq->rxq_lock));
   4948 
   4949 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4950 	if (m == NULL)
   4951 		return ENOBUFS;
   4952 
   4953 	MCLGET(m, M_DONTWAIT);
   4954 	if ((m->m_flags & M_EXT) == 0) {
   4955 		m_freem(m);
   4956 		return ENOBUFS;
   4957 	}
   4958 
   4959 	if (rxs->rxs_mbuf != NULL)
   4960 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4961 
   4962 	rxs->rxs_mbuf = m;
   4963 
   4964 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4965 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4966 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4967 	if (error) {
   4968 		/* XXX XXX XXX */
   4969 		aprint_error_dev(sc->sc_dev,
   4970 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
   4971 		panic("wm_add_rxbuf");
   4972 	}
   4973 
   4974 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4975 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4976 
   4977 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4978 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4979 			wm_init_rxdesc(rxq, idx);
   4980 	} else
   4981 		wm_init_rxdesc(rxq, idx);
   4982 
   4983 	return 0;
   4984 }
   4985 
   4986 /*
   4987  * wm_rxdrain:
   4988  *
   4989  *	Drain the receive queue.
   4990  */
   4991 static void
   4992 wm_rxdrain(struct wm_rxqueue *rxq)
   4993 {
   4994 	struct wm_softc *sc = rxq->rxq_sc;
   4995 	struct wm_rxsoft *rxs;
   4996 	int i;
   4997 
   4998 	KASSERT(mutex_owned(rxq->rxq_lock));
   4999 
   5000 	for (i = 0; i < WM_NRXDESC; i++) {
   5001 		rxs = &rxq->rxq_soft[i];
   5002 		if (rxs->rxs_mbuf != NULL) {
   5003 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   5004 			m_freem(rxs->rxs_mbuf);
   5005 			rxs->rxs_mbuf = NULL;
   5006 		}
   5007 	}
   5008 }
   5009 
   5010 /*
   5011  * Setup registers for RSS.
   5012  *
    5013  * XXX VMDq is not supported yet.
   5014  */
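         /*
          * Illustrative example: with sc_nqueues == 4, the loop below fills
          * the redirection table so that entry i maps to queue (i % 4),
          * i.e. 0,1,2,3,0,1,2,3,..., spreading RSS hash buckets evenly
          * across the queues.
          */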
   5015 static void
   5016 wm_init_rss(struct wm_softc *sc)
   5017 {
   5018 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   5019 	int i;
   5020 
   5021 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
   5022 
   5023 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   5024 		int qid, reta_ent;
   5025 
   5026 		qid  = i % sc->sc_nqueues;
   5027 		switch (sc->sc_type) {
   5028 		case WM_T_82574:
   5029 			reta_ent = __SHIFTIN(qid,
   5030 			    RETA_ENT_QINDEX_MASK_82574);
   5031 			break;
   5032 		case WM_T_82575:
   5033 			reta_ent = __SHIFTIN(qid,
   5034 			    RETA_ENT_QINDEX1_MASK_82575);
   5035 			break;
   5036 		default:
   5037 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   5038 			break;
   5039 		}
   5040 
   5041 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   5042 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   5043 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   5044 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   5045 	}
   5046 
   5047 	rss_getkey((uint8_t *)rss_key);
   5048 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   5049 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   5050 
   5051 	if (sc->sc_type == WM_T_82574)
   5052 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   5053 	else
   5054 		mrqc = MRQC_ENABLE_RSS_MQ;
   5055 
   5056 	/*
   5057 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   5058 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   5059 	 */
   5060 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   5061 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   5062 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   5063 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   5064 
   5065 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   5066 }
   5067 
   5068 /*
    5069  * Adjust the number of TX and RX queues which the system actually uses.
    5070  *
    5071  * The numbers are affected by the following parameters:
    5072  *     - The number of hardware queues
   5073  *     - The number of MSI-X vectors (= "nvectors" argument)
   5074  *     - ncpu
   5075  */
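         /*
          * Worked example (illustrative): on an 82576 (16 hardware queues)
          * with nvectors == 5 and ncpu == 8, one vector is reserved for the
          * link interrupt, so sc_nqueues = min(16, 5 - 1, 8) = 4.
          */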
   5076 static void
   5077 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   5078 {
   5079 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   5080 
   5081 	if (nvectors < 2) {
   5082 		sc->sc_nqueues = 1;
   5083 		return;
   5084 	}
   5085 
   5086 	switch (sc->sc_type) {
   5087 	case WM_T_82572:
   5088 		hw_ntxqueues = 2;
   5089 		hw_nrxqueues = 2;
   5090 		break;
   5091 	case WM_T_82574:
   5092 		hw_ntxqueues = 2;
   5093 		hw_nrxqueues = 2;
   5094 		break;
   5095 	case WM_T_82575:
   5096 		hw_ntxqueues = 4;
   5097 		hw_nrxqueues = 4;
   5098 		break;
   5099 	case WM_T_82576:
   5100 		hw_ntxqueues = 16;
   5101 		hw_nrxqueues = 16;
   5102 		break;
   5103 	case WM_T_82580:
   5104 	case WM_T_I350:
   5105 	case WM_T_I354:
   5106 		hw_ntxqueues = 8;
   5107 		hw_nrxqueues = 8;
   5108 		break;
   5109 	case WM_T_I210:
   5110 		hw_ntxqueues = 4;
   5111 		hw_nrxqueues = 4;
   5112 		break;
   5113 	case WM_T_I211:
   5114 		hw_ntxqueues = 2;
   5115 		hw_nrxqueues = 2;
   5116 		break;
   5117 		/*
    5118 		 * As the ethernet controllers below do not support MSI-X,
    5119 		 * this driver does not use multiqueue on them:
   5120 		 *     - WM_T_80003
   5121 		 *     - WM_T_ICH8
   5122 		 *     - WM_T_ICH9
   5123 		 *     - WM_T_ICH10
   5124 		 *     - WM_T_PCH
   5125 		 *     - WM_T_PCH2
   5126 		 *     - WM_T_PCH_LPT
   5127 		 */
   5128 	default:
   5129 		hw_ntxqueues = 1;
   5130 		hw_nrxqueues = 1;
   5131 		break;
   5132 	}
   5133 
   5134 	hw_nqueues = uimin(hw_ntxqueues, hw_nrxqueues);
   5135 
   5136 	/*
    5137 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    5138 	 * the number of queues actually used.
   5139 	 */
   5140 	if (nvectors < hw_nqueues + 1)
   5141 		sc->sc_nqueues = nvectors - 1;
   5142 	else
   5143 		sc->sc_nqueues = hw_nqueues;
   5144 
   5145 	/*
    5146 	 * As more queues than CPUs cannot improve scaling, we limit
    5147 	 * the number of queues actually used.
   5148 	 */
   5149 	if (ncpu < sc->sc_nqueues)
   5150 		sc->sc_nqueues = ncpu;
   5151 }
   5152 
   5153 static inline bool
   5154 wm_is_using_msix(struct wm_softc *sc)
   5155 {
   5156 
   5157 	return (sc->sc_nintrs > 1);
   5158 }
   5159 
   5160 static inline bool
   5161 wm_is_using_multiqueue(struct wm_softc *sc)
   5162 {
   5163 
   5164 	return (sc->sc_nqueues > 1);
   5165 }
   5166 
   5167 static int
   5168 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   5169 {
   5170 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   5171 	wmq->wmq_id = qidx;
   5172 	wmq->wmq_intr_idx = intr_idx;
   5173 	wmq->wmq_si = softint_establish(SOFTINT_NET
   5174 #ifdef WM_MPSAFE
   5175 	    | SOFTINT_MPSAFE
   5176 #endif
   5177 	    , wm_handle_queue, wmq);
   5178 	if (wmq->wmq_si != NULL)
   5179 		return 0;
   5180 
   5181 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   5182 	    wmq->wmq_id);
   5183 
   5184 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5185 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5186 	return ENOMEM;
   5187 }
   5188 
   5189 /*
    5190  * Both single-interrupt MSI and INTx can use this function.
   5191  */
   5192 static int
   5193 wm_setup_legacy(struct wm_softc *sc)
   5194 {
   5195 	pci_chipset_tag_t pc = sc->sc_pc;
   5196 	const char *intrstr = NULL;
   5197 	char intrbuf[PCI_INTRSTR_LEN];
   5198 	int error;
   5199 
   5200 	error = wm_alloc_txrx_queues(sc);
   5201 	if (error) {
   5202 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5203 		    error);
   5204 		return ENOMEM;
   5205 	}
   5206 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   5207 	    sizeof(intrbuf));
   5208 #ifdef WM_MPSAFE
   5209 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   5210 #endif
   5211 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   5212 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   5213 	if (sc->sc_ihs[0] == NULL) {
   5214 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   5215 		    (pci_intr_type(pc, sc->sc_intrs[0])
   5216 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   5217 		return ENOMEM;
   5218 	}
   5219 
   5220 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   5221 	sc->sc_nintrs = 1;
   5222 
   5223 	return wm_softint_establish(sc, 0, 0);
   5224 }
   5225 
   5226 static int
   5227 wm_setup_msix(struct wm_softc *sc)
   5228 {
   5229 	void *vih;
   5230 	kcpuset_t *affinity;
   5231 	int qidx, error, intr_idx, txrx_established;
   5232 	pci_chipset_tag_t pc = sc->sc_pc;
   5233 	const char *intrstr = NULL;
   5234 	char intrbuf[PCI_INTRSTR_LEN];
   5235 	char intr_xname[INTRDEVNAMEBUF];
   5236 
   5237 	if (sc->sc_nqueues < ncpu) {
   5238 		/*
   5239 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5240 		 * interrupts starts from CPU#1.
   5241 		 */
   5242 		sc->sc_affinity_offset = 1;
   5243 	} else {
   5244 		/*
    5245 		 * In this case, this device uses all CPUs, so we unify the
    5246 		 * affinitized cpu_index with the MSI-X vector number for readability.
   5247 		 */
   5248 		sc->sc_affinity_offset = 0;
   5249 	}
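         	/*
         	 * With the offset chosen above, queue i's TX/RX interrupt is
         	 * pinned to CPU (sc_affinity_offset + i) % ncpu in the loop
         	 * below.
         	 */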
   5250 
   5251 	error = wm_alloc_txrx_queues(sc);
   5252 	if (error) {
   5253 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5254 		    error);
   5255 		return ENOMEM;
   5256 	}
   5257 
   5258 	kcpuset_create(&affinity, false);
   5259 	intr_idx = 0;
   5260 
   5261 	/*
   5262 	 * TX and RX
   5263 	 */
   5264 	txrx_established = 0;
   5265 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5266 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5267 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5268 
   5269 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5270 		    sizeof(intrbuf));
   5271 #ifdef WM_MPSAFE
   5272 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5273 		    PCI_INTR_MPSAFE, true);
   5274 #endif
   5275 		memset(intr_xname, 0, sizeof(intr_xname));
   5276 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5277 		    device_xname(sc->sc_dev), qidx);
   5278 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5279 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5280 		if (vih == NULL) {
   5281 			aprint_error_dev(sc->sc_dev,
   5282 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5283 			    intrstr ? " at " : "",
   5284 			    intrstr ? intrstr : "");
   5285 
   5286 			goto fail;
   5287 		}
   5288 		kcpuset_zero(affinity);
   5289 		/* Round-robin affinity */
   5290 		kcpuset_set(affinity, affinity_to);
   5291 		error = interrupt_distribute(vih, affinity, NULL);
   5292 		if (error == 0) {
   5293 			aprint_normal_dev(sc->sc_dev,
   5294 			    "for TX and RX interrupting at %s affinity to %u\n",
   5295 			    intrstr, affinity_to);
   5296 		} else {
   5297 			aprint_normal_dev(sc->sc_dev,
   5298 			    "for TX and RX interrupting at %s\n", intrstr);
   5299 		}
   5300 		sc->sc_ihs[intr_idx] = vih;
   5301 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5302 			goto fail;
   5303 		txrx_established++;
   5304 		intr_idx++;
   5305 	}
   5306 
   5307 	/*
   5308 	 * LINK
   5309 	 */
   5310 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5311 	    sizeof(intrbuf));
   5312 #ifdef WM_MPSAFE
   5313 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5314 #endif
   5315 	memset(intr_xname, 0, sizeof(intr_xname));
   5316 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5317 	    device_xname(sc->sc_dev));
   5318 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5319 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5320 	if (vih == NULL) {
   5321 		aprint_error_dev(sc->sc_dev,
   5322 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5323 		    intrstr ? " at " : "",
   5324 		    intrstr ? intrstr : "");
   5325 
   5326 		goto fail;
   5327 	}
    5328 	/* Keep the default affinity for the LINK interrupt */
   5329 	aprint_normal_dev(sc->sc_dev,
   5330 	    "for LINK interrupting at %s\n", intrstr);
   5331 	sc->sc_ihs[intr_idx] = vih;
   5332 	sc->sc_link_intr_idx = intr_idx;
   5333 
   5334 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5335 	kcpuset_destroy(affinity);
   5336 	return 0;
   5337 
   5338  fail:
   5339 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5340 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5341 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   5342 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5343 	}
   5344 
   5345 	kcpuset_destroy(affinity);
   5346 	return ENOMEM;
   5347 }
   5348 
   5349 static void
   5350 wm_unset_stopping_flags(struct wm_softc *sc)
   5351 {
   5352 	int i;
   5353 
   5354 	KASSERT(WM_CORE_LOCKED(sc));
   5355 
   5356 	/*
   5357 	 * must unset stopping flags in ascending order.
   5358 	 */
   5359 	for (i = 0; i < sc->sc_nqueues; i++) {
   5360 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5361 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5362 
   5363 		mutex_enter(txq->txq_lock);
   5364 		txq->txq_stopping = false;
   5365 		mutex_exit(txq->txq_lock);
   5366 
   5367 		mutex_enter(rxq->rxq_lock);
   5368 		rxq->rxq_stopping = false;
   5369 		mutex_exit(rxq->rxq_lock);
   5370 	}
   5371 
   5372 	sc->sc_core_stopping = false;
   5373 }
   5374 
   5375 static void
   5376 wm_set_stopping_flags(struct wm_softc *sc)
   5377 {
   5378 	int i;
   5379 
   5380 	KASSERT(WM_CORE_LOCKED(sc));
   5381 
   5382 	sc->sc_core_stopping = true;
   5383 
   5384 	/*
   5385 	 * must set stopping flags in ascending order.
   5386 	 */
   5387 	for (i = 0; i < sc->sc_nqueues; i++) {
   5388 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5389 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5390 
   5391 		mutex_enter(rxq->rxq_lock);
   5392 		rxq->rxq_stopping = true;
   5393 		mutex_exit(rxq->rxq_lock);
   5394 
   5395 		mutex_enter(txq->txq_lock);
   5396 		txq->txq_stopping = true;
   5397 		mutex_exit(txq->txq_lock);
   5398 	}
   5399 }
   5400 
   5401 /*
    5402  * Write the interrupt interval value to the ITR or EITR register.
   5403  */
   5404 static void
   5405 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5406 {
   5407 
   5408 	if (!wmq->wmq_set_itr)
   5409 		return;
   5410 
   5411 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5412 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5413 
   5414 		/*
    5415 		 * The 82575 doesn't have the CNT_INGR field,
    5416 		 * so overwrite the counter field in software.
   5417 		 */
   5418 		if (sc->sc_type == WM_T_82575)
   5419 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5420 		else
   5421 			eitr |= EITR_CNT_INGR;
   5422 
   5423 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5424 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5425 		/*
   5426 		 * 82574 has both ITR and EITR. SET EITR when we use
    5427 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5428 		 * the multiqueue function with MSI-X.
   5429 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5430 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5431 	} else {
   5432 		KASSERT(wmq->wmq_id == 0);
   5433 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5434 	}
   5435 
   5436 	wmq->wmq_set_itr = false;
   5437 }
   5438 
   5439 /*
   5440  * TODO
    5441  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5442  * but it does not fit wm(4) well, so AIM stays disabled until we find
    5443  * an appropriate ITR calculation.
   5444  */
   5445 /*
    5446  * Calculate the interrupt interval value to be written to the register
    5447  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
   5448  */
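         /*
          * Worked example for the NOTYET code below (illustrative): for an
          * average frame of 800 bytes, avg_size = 800 + 24 = 824; since
          * 300 < 824 < 1200, new_itr = 824 / 3 = 274, which is then
          * multiplied by 4 on most NEWQUEUE controllers, giving about 1096.
          */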
   5449 static void
   5450 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5451 {
   5452 #ifdef NOTYET
   5453 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5454 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5455 	uint32_t avg_size = 0;
   5456 	uint32_t new_itr;
   5457 
   5458 	if (rxq->rxq_packets)
   5459 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   5460 	if (txq->txq_packets)
   5461 		avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
   5462 
   5463 	if (avg_size == 0) {
   5464 		new_itr = 450; /* restore default value */
   5465 		goto out;
   5466 	}
   5467 
   5468 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5469 	avg_size += 24;
   5470 
   5471 	/* Don't starve jumbo frames */
   5472 	avg_size = uimin(avg_size, 3000);
   5473 
   5474 	/* Give a little boost to mid-size frames */
   5475 	if ((avg_size > 300) && (avg_size < 1200))
   5476 		new_itr = avg_size / 3;
   5477 	else
   5478 		new_itr = avg_size / 2;
   5479 
   5480 out:
   5481 	/*
    5482 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5483 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5484 	 */
   5485 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5486 		new_itr *= 4;
   5487 
   5488 	if (new_itr != wmq->wmq_itr) {
   5489 		wmq->wmq_itr = new_itr;
   5490 		wmq->wmq_set_itr = true;
   5491 	} else
   5492 		wmq->wmq_set_itr = false;
   5493 
   5494 	rxq->rxq_packets = 0;
   5495 	rxq->rxq_bytes = 0;
   5496 	txq->txq_packets = 0;
   5497 	txq->txq_bytes = 0;
   5498 #endif
   5499 }
   5500 
   5501 /*
   5502  * wm_init:		[ifnet interface function]
   5503  *
   5504  *	Initialize the interface.
   5505  */
   5506 static int
   5507 wm_init(struct ifnet *ifp)
   5508 {
   5509 	struct wm_softc *sc = ifp->if_softc;
   5510 	int ret;
   5511 
   5512 	WM_CORE_LOCK(sc);
   5513 	ret = wm_init_locked(ifp);
   5514 	WM_CORE_UNLOCK(sc);
   5515 
   5516 	return ret;
   5517 }
   5518 
   5519 static int
   5520 wm_init_locked(struct ifnet *ifp)
   5521 {
   5522 	struct wm_softc *sc = ifp->if_softc;
   5523 	int i, j, trynum, error = 0;
   5524 	uint32_t reg;
   5525 
   5526 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5527 		device_xname(sc->sc_dev), __func__));
   5528 	KASSERT(WM_CORE_LOCKED(sc));
   5529 
   5530 	/*
    5531 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5532 	 * There is a small but measurable benefit to avoiding the adjustment
   5533 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5534 	 * on such platforms.  One possibility is that the DMA itself is
   5535 	 * slightly more efficient if the front of the entire packet (instead
   5536 	 * of the front of the headers) is aligned.
   5537 	 *
   5538 	 * Note we must always set align_tweak to 0 if we are using
   5539 	 * jumbo frames.
   5540 	 */
   5541 #ifdef __NO_STRICT_ALIGNMENT
   5542 	sc->sc_align_tweak = 0;
   5543 #else
   5544 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5545 		sc->sc_align_tweak = 0;
   5546 	else
   5547 		sc->sc_align_tweak = 2;
   5548 #endif /* __NO_STRICT_ALIGNMENT */
   5549 
   5550 	/* Cancel any pending I/O. */
   5551 	wm_stop_locked(ifp, 0);
   5552 
   5553 	/* update statistics before reset */
   5554 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5555 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5556 
   5557 	/* PCH_SPT hardware workaround */
   5558 	if (sc->sc_type == WM_T_PCH_SPT)
   5559 		wm_flush_desc_rings(sc);
   5560 
   5561 	/* Reset the chip to a known state. */
   5562 	wm_reset(sc);
   5563 
   5564 	/*
   5565 	 * AMT based hardware can now take control from firmware
   5566 	 * Do this after reset.
   5567 	 */
   5568 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5569 		wm_get_hw_control(sc);
   5570 
   5571 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
   5572 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5573 		wm_legacy_irq_quirk_spt(sc);
   5574 
   5575 	/* Init hardware bits */
   5576 	wm_initialize_hardware_bits(sc);
   5577 
   5578 	/* Reset the PHY. */
   5579 	if (sc->sc_flags & WM_F_HAS_MII)
   5580 		wm_gmii_reset(sc);
   5581 
   5582 	if (sc->sc_type >= WM_T_ICH8) {
   5583 		reg = CSR_READ(sc, WMREG_GCR);
   5584 		/*
   5585 		 * ICH8 No-snoop bits are opposite polarity. Set to snoop by
   5586 		 * default after reset.
   5587 		 */
   5588 		if (sc->sc_type == WM_T_ICH8)
   5589 			reg |= GCR_NO_SNOOP_ALL;
   5590 		else
   5591 			reg &= ~GCR_NO_SNOOP_ALL;
   5592 		CSR_WRITE(sc, WMREG_GCR, reg);
   5593 	}
   5594 	if ((sc->sc_type >= WM_T_ICH8)
   5595 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER)
   5596 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3)) {
   5597 
   5598 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5599 		reg |= CTRL_EXT_RO_DIS;
   5600 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5601 	}
   5602 
   5603 	/* Calculate (E)ITR value */
   5604 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5605 		/*
   5606 		 * For NEWQUEUE's EITR (except for 82575).
    5607 		 * 82575's EITR should be set to the same throttling value as other
   5608 		 * old controllers' ITR because the interrupt/sec calculation
   5609 		 * is the same, that is, 1,000,000,000 / (N * 256).
   5610 		 *
    5611 		 * 82574's EITR should be set to the same throttling value as ITR.
   5612 		 *
   5613 		 * For N interrupts/sec, set this value to:
    5614 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5615 		 */
   5616 		sc->sc_itr_init = 450;
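         		/* Per the formula above, 450 gives about 1,000,000 / 450 = ~2222 interrupts/sec */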
   5617 	} else if (sc->sc_type >= WM_T_82543) {
   5618 		/*
   5619 		 * Set up the interrupt throttling register (units of 256ns)
   5620 		 * Note that a footnote in Intel's documentation says this
   5621 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5622 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5623 		 * that that is also true for the 1024ns units of the other
   5624 		 * interrupt-related timer registers -- so, really, we ought
   5625 		 * to divide this value by 4 when the link speed is low.
   5626 		 *
   5627 		 * XXX implement this division at link speed change!
   5628 		 */
   5629 
   5630 		/*
   5631 		 * For N interrupts/sec, set this value to:
   5632 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5633 		 * absolute and packet timer values to this value
   5634 		 * divided by 4 to get "simple timer" behavior.
   5635 		 */
   5636 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5637 	}
   5638 
   5639 	error = wm_init_txrx_queues(sc);
   5640 	if (error)
   5641 		goto out;
   5642 
   5643 	/*
   5644 	 * Clear out the VLAN table -- we don't use it (yet).
   5645 	 */
   5646 	CSR_WRITE(sc, WMREG_VET, 0);
   5647 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5648 		trynum = 10; /* Due to hw errata */
   5649 	else
   5650 		trynum = 1;
   5651 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5652 		for (j = 0; j < trynum; j++)
   5653 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5654 
   5655 	/*
   5656 	 * Set up flow-control parameters.
   5657 	 *
   5658 	 * XXX Values could probably stand some tuning.
   5659 	 */
   5660 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5661 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5662 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5663 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
   5664 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5665 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5666 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5667 	}
   5668 
   5669 	sc->sc_fcrtl = FCRTL_DFLT;
   5670 	if (sc->sc_type < WM_T_82543) {
   5671 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5672 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5673 	} else {
   5674 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5675 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5676 	}
   5677 
   5678 	if (sc->sc_type == WM_T_80003)
   5679 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5680 	else
   5681 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5682 
   5683 	/* Writes the control register. */
   5684 	wm_set_vlan(sc);
   5685 
   5686 	if (sc->sc_flags & WM_F_HAS_MII) {
   5687 		uint16_t kmreg;
   5688 
   5689 		switch (sc->sc_type) {
   5690 		case WM_T_80003:
   5691 		case WM_T_ICH8:
   5692 		case WM_T_ICH9:
   5693 		case WM_T_ICH10:
   5694 		case WM_T_PCH:
   5695 		case WM_T_PCH2:
   5696 		case WM_T_PCH_LPT:
   5697 		case WM_T_PCH_SPT:
   5698 		case WM_T_PCH_CNP:
   5699 			/*
    5700 			 * Set the MAC to wait the maximum time between
    5701 			 * iterations and increase the max iterations when
   5702 			 * polling the phy; this fixes erroneous timeouts at
   5703 			 * 10Mbps.
   5704 			 */
   5705 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5706 			    0xFFFF);
   5707 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5708 			    &kmreg);
   5709 			kmreg |= 0x3F;
   5710 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5711 			    kmreg);
   5712 			break;
   5713 		default:
   5714 			break;
   5715 		}
   5716 
   5717 		if (sc->sc_type == WM_T_80003) {
   5718 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5719 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5720 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5721 
   5722 			/* Bypass RX and TX FIFO's */
   5723 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5724 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5725 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5726 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5727 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5728 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5729 		}
   5730 	}
   5731 #if 0
   5732 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5733 #endif
   5734 
   5735 	/* Set up checksum offload parameters. */
   5736 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5737 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5738 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5739 		reg |= RXCSUM_IPOFL;
   5740 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5741 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5742 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5743 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5744 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5745 
   5746 	/* Set registers about MSI-X */
   5747 	if (wm_is_using_msix(sc)) {
   5748 		uint32_t ivar;
   5749 		struct wm_queue *wmq;
   5750 		int qid, qintr_idx;
   5751 
   5752 		if (sc->sc_type == WM_T_82575) {
   5753 			/* Interrupt control */
   5754 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5755 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5756 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5757 
   5758 			/* TX and RX */
   5759 			for (i = 0; i < sc->sc_nqueues; i++) {
   5760 				wmq = &sc->sc_queue[i];
   5761 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5762 				    EITR_TX_QUEUE(wmq->wmq_id)
   5763 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5764 			}
   5765 			/* Link status */
   5766 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5767 			    EITR_OTHER);
   5768 		} else if (sc->sc_type == WM_T_82574) {
   5769 			/* Interrupt control */
   5770 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5771 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5772 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5773 
   5774 			/*
    5775 			 * Work around an issue with spurious interrupts
    5776 			 * in MSI-X mode.
    5777 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5778 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5779 			 */
   5780 			reg = CSR_READ(sc, WMREG_RFCTL);
   5781 			reg |= WMREG_RFCTL_ACKDIS;
   5782 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5783 
   5784 			ivar = 0;
   5785 			/* TX and RX */
   5786 			for (i = 0; i < sc->sc_nqueues; i++) {
   5787 				wmq = &sc->sc_queue[i];
   5788 				qid = wmq->wmq_id;
   5789 				qintr_idx = wmq->wmq_intr_idx;
   5790 
   5791 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5792 				    IVAR_TX_MASK_Q_82574(qid));
   5793 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5794 				    IVAR_RX_MASK_Q_82574(qid));
   5795 			}
   5796 			/* Link status */
   5797 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5798 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5799 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5800 		} else {
   5801 			/* Interrupt control */
   5802 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5803 			    | GPIE_EIAME | GPIE_PBA);
   5804 
   5805 			switch (sc->sc_type) {
   5806 			case WM_T_82580:
   5807 			case WM_T_I350:
   5808 			case WM_T_I354:
   5809 			case WM_T_I210:
   5810 			case WM_T_I211:
   5811 				/* TX and RX */
   5812 				for (i = 0; i < sc->sc_nqueues; i++) {
   5813 					wmq = &sc->sc_queue[i];
   5814 					qid = wmq->wmq_id;
   5815 					qintr_idx = wmq->wmq_intr_idx;
   5816 
   5817 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5818 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5819 					ivar |= __SHIFTIN((qintr_idx
   5820 						| IVAR_VALID),
   5821 					    IVAR_TX_MASK_Q(qid));
   5822 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5823 					ivar |= __SHIFTIN((qintr_idx
   5824 						| IVAR_VALID),
   5825 					    IVAR_RX_MASK_Q(qid));
   5826 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5827 				}
   5828 				break;
   5829 			case WM_T_82576:
   5830 				/* TX and RX */
   5831 				for (i = 0; i < sc->sc_nqueues; i++) {
   5832 					wmq = &sc->sc_queue[i];
   5833 					qid = wmq->wmq_id;
   5834 					qintr_idx = wmq->wmq_intr_idx;
   5835 
   5836 					ivar = CSR_READ(sc,
   5837 					    WMREG_IVAR_Q_82576(qid));
   5838 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5839 					ivar |= __SHIFTIN((qintr_idx
   5840 						| IVAR_VALID),
   5841 					    IVAR_TX_MASK_Q_82576(qid));
   5842 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5843 					ivar |= __SHIFTIN((qintr_idx
   5844 						| IVAR_VALID),
   5845 					    IVAR_RX_MASK_Q_82576(qid));
   5846 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5847 					    ivar);
   5848 				}
   5849 				break;
   5850 			default:
   5851 				break;
   5852 			}
   5853 
   5854 			/* Link status */
   5855 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5856 			    IVAR_MISC_OTHER);
   5857 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5858 		}
   5859 
   5860 		if (wm_is_using_multiqueue(sc)) {
   5861 			wm_init_rss(sc);
   5862 
   5863 			/*
    5864 			 * NOTE: Receive Full-Packet Checksum Offload
    5865 			 * is mutually exclusive with Multiqueue. However,
    5866 			 * this is not the same as TCP/IP checksums, which
    5867 			 * still work.
   5868 			*/
   5869 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5870 			reg |= RXCSUM_PCSD;
   5871 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5872 		}
   5873 	}
   5874 
   5875 	/* Set up the interrupt registers. */
   5876 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5877 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5878 	    ICR_RXO | ICR_RXT0;
   5879 	if (wm_is_using_msix(sc)) {
   5880 		uint32_t mask;
   5881 		struct wm_queue *wmq;
   5882 
   5883 		switch (sc->sc_type) {
   5884 		case WM_T_82574:
   5885 			mask = 0;
   5886 			for (i = 0; i < sc->sc_nqueues; i++) {
   5887 				wmq = &sc->sc_queue[i];
   5888 				mask |= ICR_TXQ(wmq->wmq_id);
   5889 				mask |= ICR_RXQ(wmq->wmq_id);
   5890 			}
   5891 			mask |= ICR_OTHER;
   5892 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5893 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5894 			break;
   5895 		default:
   5896 			if (sc->sc_type == WM_T_82575) {
   5897 				mask = 0;
   5898 				for (i = 0; i < sc->sc_nqueues; i++) {
   5899 					wmq = &sc->sc_queue[i];
   5900 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5901 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5902 				}
   5903 				mask |= EITR_OTHER;
   5904 			} else {
   5905 				mask = 0;
   5906 				for (i = 0; i < sc->sc_nqueues; i++) {
   5907 					wmq = &sc->sc_queue[i];
   5908 					mask |= 1 << wmq->wmq_intr_idx;
   5909 				}
   5910 				mask |= 1 << sc->sc_link_intr_idx;
   5911 			}
   5912 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5913 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5914 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5915 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5916 			break;
   5917 		}
   5918 	} else
   5919 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5920 
   5921 	/* Set up the inter-packet gap. */
   5922 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5923 
   5924 	if (sc->sc_type >= WM_T_82543) {
   5925 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5926 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5927 			wm_itrs_writereg(sc, wmq);
   5928 		}
   5929 		/*
   5930 		 * Link interrupts occur much less than TX
   5931 		 * interrupts and RX interrupts. So, we don't
   5932 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5933 		 * FreeBSD's if_igb.
   5934 		 */
   5935 	}
   5936 
   5937 	/* Set the VLAN ethernetype. */
   5938 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5939 
   5940 	/*
   5941 	 * Set up the transmit control register; we start out with
    5942 	 * a collision distance suitable for FDX, but update it when
   5943 	 * we resolve the media type.
   5944 	 */
   5945 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5946 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5947 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5948 	if (sc->sc_type >= WM_T_82571)
   5949 		sc->sc_tctl |= TCTL_MULR;
   5950 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5951 
   5952 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5953 		/* Write TDT after TCTL.EN is set. See the document. */
   5954 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5955 	}
   5956 
   5957 	if (sc->sc_type == WM_T_80003) {
   5958 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5959 		reg &= ~TCTL_EXT_GCEX_MASK;
   5960 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5961 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5962 	}
   5963 
   5964 	/* Set the media. */
   5965 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5966 		goto out;
   5967 
   5968 	/* Configure for OS presence */
   5969 	wm_init_manageability(sc);
   5970 
   5971 	/*
   5972 	 * Set up the receive control register; we actually program the
   5973 	 * register when we set the receive filter. Use multicast address
   5974 	 * offset type 0.
   5975 	 *
   5976 	 * Only the i82544 has the ability to strip the incoming CRC, so we
   5977 	 * don't enable that feature.
   5978 	 */
   5979 	sc->sc_mchash_type = 0;
   5980 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5981 	    | RCTL_MO(sc->sc_mchash_type);
   5982 
   5983 	/*
    5984 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5985 	 */
   5986 	if (sc->sc_type == WM_T_82574)
   5987 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5988 
   5989 	/*
   5990 	 * The I350 has a bug where it always strips the CRC whether
    5991 	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
   5992 	 */
   5993 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5994 	    || (sc->sc_type == WM_T_I210))
   5995 		sc->sc_rctl |= RCTL_SECRC;
   5996 
   5997 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5998 	    && (ifp->if_mtu > ETHERMTU)) {
   5999 		sc->sc_rctl |= RCTL_LPE;
   6000 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6001 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   6002 	}
   6003 
   6004 	if (MCLBYTES == 2048)
   6005 		sc->sc_rctl |= RCTL_2k;
   6006 	else {
   6007 		if (sc->sc_type >= WM_T_82543) {
   6008 			switch (MCLBYTES) {
   6009 			case 4096:
   6010 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   6011 				break;
   6012 			case 8192:
   6013 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   6014 				break;
   6015 			case 16384:
   6016 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   6017 				break;
   6018 			default:
   6019 				panic("wm_init: MCLBYTES %d unsupported",
   6020 				    MCLBYTES);
   6021 				break;
   6022 			}
   6023 		} else
   6024 			panic("wm_init: i82542 requires MCLBYTES = 2048");
   6025 	}
   6026 
   6027 	/* Enable ECC */
   6028 	switch (sc->sc_type) {
   6029 	case WM_T_82571:
   6030 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   6031 		reg |= PBA_ECC_CORR_EN;
   6032 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   6033 		break;
   6034 	case WM_T_PCH_LPT:
   6035 	case WM_T_PCH_SPT:
   6036 	case WM_T_PCH_CNP:
   6037 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   6038 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   6039 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   6040 
   6041 		sc->sc_ctrl |= CTRL_MEHE;
   6042 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   6043 		break;
   6044 	default:
   6045 		break;
   6046 	}
   6047 
   6048 	/*
   6049 	 * Set the receive filter.
   6050 	 *
   6051 	 * For 82575 and 82576, the RX descriptors must be initialized after
   6052 	 * the setting of RCTL.EN in wm_set_filter()
   6053 	 */
   6054 	wm_set_filter(sc);
   6055 
   6056 	/* On 575 and later set RDT only if RX enabled */
   6057 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6058 		int qidx;
   6059 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6060 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   6061 			for (i = 0; i < WM_NRXDESC; i++) {
   6062 				mutex_enter(rxq->rxq_lock);
   6063 				wm_init_rxdesc(rxq, i);
   6064 				mutex_exit(rxq->rxq_lock);
   6065 
   6066 			}
   6067 		}
   6068 	}
   6069 
   6070 	wm_unset_stopping_flags(sc);
   6071 
   6072 	/* Start the one second link check clock. */
   6073 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   6074 
   6075 	/* ...all done! */
   6076 	ifp->if_flags |= IFF_RUNNING;
   6077 	ifp->if_flags &= ~IFF_OACTIVE;
   6078 
   6079  out:
   6080 	sc->sc_if_flags = ifp->if_flags;
   6081 	if (error)
   6082 		log(LOG_ERR, "%s: interface not running\n",
   6083 		    device_xname(sc->sc_dev));
   6084 	return error;
   6085 }
   6086 
   6087 /*
   6088  * wm_stop:		[ifnet interface function]
   6089  *
   6090  *	Stop transmission on the interface.
   6091  */
   6092 static void
   6093 wm_stop(struct ifnet *ifp, int disable)
   6094 {
   6095 	struct wm_softc *sc = ifp->if_softc;
   6096 
   6097 	WM_CORE_LOCK(sc);
   6098 	wm_stop_locked(ifp, disable);
   6099 	WM_CORE_UNLOCK(sc);
   6100 }
   6101 
   6102 static void
   6103 wm_stop_locked(struct ifnet *ifp, int disable)
   6104 {
   6105 	struct wm_softc *sc = ifp->if_softc;
   6106 	struct wm_txsoft *txs;
   6107 	int i, qidx;
   6108 
   6109 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6110 		device_xname(sc->sc_dev), __func__));
   6111 	KASSERT(WM_CORE_LOCKED(sc));
   6112 
   6113 	wm_set_stopping_flags(sc);
   6114 
   6115 	/* Stop the one second clock. */
   6116 	callout_stop(&sc->sc_tick_ch);
   6117 
   6118 	/* Stop the 82547 Tx FIFO stall check timer. */
   6119 	if (sc->sc_type == WM_T_82547)
   6120 		callout_stop(&sc->sc_txfifo_ch);
   6121 
   6122 	if (sc->sc_flags & WM_F_HAS_MII) {
   6123 		/* Down the MII. */
   6124 		mii_down(&sc->sc_mii);
   6125 	} else {
   6126 #if 0
   6127 		/* Should we clear PHY's status properly? */
   6128 		wm_reset(sc);
   6129 #endif
   6130 	}
   6131 
   6132 	/* Stop the transmit and receive processes. */
   6133 	CSR_WRITE(sc, WMREG_TCTL, 0);
   6134 	CSR_WRITE(sc, WMREG_RCTL, 0);
   6135 	sc->sc_rctl &= ~RCTL_EN;
   6136 
   6137 	/*
   6138 	 * Clear the interrupt mask to ensure the device cannot assert its
   6139 	 * interrupt line.
   6140 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   6141 	 * service any currently pending or shared interrupt.
   6142 	 */
   6143 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   6144 	sc->sc_icr = 0;
   6145 	if (wm_is_using_msix(sc)) {
   6146 		if (sc->sc_type != WM_T_82574) {
   6147 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   6148 			CSR_WRITE(sc, WMREG_EIAC, 0);
   6149 		} else
   6150 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   6151 	}
   6152 
   6153 	/* Release any queued transmit buffers. */
   6154 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   6155 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   6156 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6157 		mutex_enter(txq->txq_lock);
   6158 		txq->txq_sending = false; /* ensure watchdog disabled */
   6159 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6160 			txs = &txq->txq_soft[i];
   6161 			if (txs->txs_mbuf != NULL) {
   6162 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   6163 				m_freem(txs->txs_mbuf);
   6164 				txs->txs_mbuf = NULL;
   6165 			}
   6166 		}
   6167 		mutex_exit(txq->txq_lock);
   6168 	}
   6169 
   6170 	/* Mark the interface as down and cancel the watchdog timer. */
   6171 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   6172 
   6173 	if (disable) {
   6174 		for (i = 0; i < sc->sc_nqueues; i++) {
   6175 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6176 			mutex_enter(rxq->rxq_lock);
   6177 			wm_rxdrain(rxq);
   6178 			mutex_exit(rxq->rxq_lock);
   6179 		}
   6180 	}
   6181 
   6182 #if 0 /* notyet */
   6183 	if (sc->sc_type >= WM_T_82544)
   6184 		CSR_WRITE(sc, WMREG_WUC, 0);
   6185 #endif
   6186 }
   6187 
   6188 static void
   6189 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   6190 {
   6191 	struct mbuf *m;
   6192 	int i;
   6193 
   6194 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   6195 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   6196 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   6197 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   6198 		    m->m_data, m->m_len, m->m_flags);
   6199 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   6200 	    i, i == 1 ? "" : "s");
   6201 }
   6202 
   6203 /*
   6204  * wm_82547_txfifo_stall:
   6205  *
   6206  *	Callout used to wait for the 82547 Tx FIFO to drain,
   6207  *	reset the FIFO pointers, and restart packet transmission.
   6208  */
   6209 static void
   6210 wm_82547_txfifo_stall(void *arg)
   6211 {
   6212 	struct wm_softc *sc = arg;
   6213 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6214 
   6215 	mutex_enter(txq->txq_lock);
   6216 
   6217 	if (txq->txq_stopping)
   6218 		goto out;
   6219 
   6220 	if (txq->txq_fifo_stall) {
   6221 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   6222 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   6223 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   6224 			/*
   6225 			 * Packets have drained.  Stop transmitter, reset
   6226 			 * FIFO pointers, restart transmitter, and kick
   6227 			 * the packet queue.
   6228 			 */
   6229 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   6230 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   6231 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   6232 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   6233 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   6234 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   6235 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   6236 			CSR_WRITE_FLUSH(sc);
   6237 
   6238 			txq->txq_fifo_head = 0;
   6239 			txq->txq_fifo_stall = 0;
   6240 			wm_start_locked(&sc->sc_ethercom.ec_if);
   6241 		} else {
   6242 			/*
   6243 			 * Still waiting for packets to drain; try again in
   6244 			 * another tick.
   6245 			 */
   6246 			callout_schedule(&sc->sc_txfifo_ch, 1);
   6247 		}
   6248 	}
   6249 
   6250 out:
   6251 	mutex_exit(txq->txq_lock);
   6252 }
   6253 
   6254 /*
   6255  * wm_82547_txfifo_bugchk:
   6256  *
   6257  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   6258  *	prevent enqueueing a packet that would wrap around the end
    6259 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   6260  *
   6261  *	We do this by checking the amount of space before the end
   6262  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
   6263  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6264  *	the internal FIFO pointers to the beginning, and restart
   6265  *	transmission on the interface.
   6266  */
   6267 #define	WM_FIFO_HDR		0x10
   6268 #define	WM_82547_PAD_LEN	0x3e0
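         /*
          * A worked example of the space check below, with illustrative
          * numbers (not from a real trace): a 1514-byte frame consumes
          * len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 0x600 bytes
          * of FIFO space.  If only space = 0x100 bytes remain before the
          * end of the FIFO, then len >= WM_82547_PAD_LEN + space
          * (0x600 >= 0x4e0), so the packet would wrap and we stall
          * instead of sending it.
          */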
   6269 static int
   6270 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6271 {
   6272 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6273 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6274 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6275 
   6276 	/* Just return if already stalled. */
   6277 	if (txq->txq_fifo_stall)
   6278 		return 1;
   6279 
   6280 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6281 		/* Stall only occurs in half-duplex mode. */
   6282 		goto send_packet;
   6283 	}
   6284 
   6285 	if (len >= WM_82547_PAD_LEN + space) {
   6286 		txq->txq_fifo_stall = 1;
   6287 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6288 		return 1;
   6289 	}
   6290 
   6291  send_packet:
   6292 	txq->txq_fifo_head += len;
   6293 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6294 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6295 
   6296 	return 0;
   6297 }
   6298 
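         /*
          * wm_alloc_tx_descs:
          *
          *	Allocate the Tx descriptor ring for one queue, map it into
          *	kernel virtual address space and load it into a DMA map.
          */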
   6299 static int
   6300 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6301 {
   6302 	int error;
   6303 
   6304 	/*
   6305 	 * Allocate the control data structures, and create and load the
   6306 	 * DMA map for it.
   6307 	 *
   6308 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6309 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6310 	 * both sets within the same 4G segment.
   6311 	 */
   6312 	if (sc->sc_type < WM_T_82544)
   6313 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6314 	else
   6315 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6316 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6317 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6318 	else
   6319 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6320 
   6321 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6322 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6323 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6324 		aprint_error_dev(sc->sc_dev,
   6325 		    "unable to allocate TX control data, error = %d\n",
   6326 		    error);
   6327 		goto fail_0;
   6328 	}
   6329 
   6330 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6331 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6332 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6333 		aprint_error_dev(sc->sc_dev,
   6334 		    "unable to map TX control data, error = %d\n", error);
   6335 		goto fail_1;
   6336 	}
   6337 
   6338 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6339 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6340 		aprint_error_dev(sc->sc_dev,
   6341 		    "unable to create TX control data DMA map, error = %d\n",
   6342 		    error);
   6343 		goto fail_2;
   6344 	}
   6345 
   6346 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6347 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6348 		aprint_error_dev(sc->sc_dev,
   6349 		    "unable to load TX control data DMA map, error = %d\n",
   6350 		    error);
   6351 		goto fail_3;
   6352 	}
   6353 
   6354 	return 0;
   6355 
   6356  fail_3:
   6357 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6358  fail_2:
   6359 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6360 	    WM_TXDESCS_SIZE(txq));
   6361  fail_1:
   6362 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6363  fail_0:
   6364 	return error;
   6365 }
   6366 
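         /*
          * wm_free_tx_descs:
          *
          *	Undo wm_alloc_tx_descs(): unload and destroy the DMA map,
          *	then unmap and free the descriptor memory.
          */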
   6367 static void
   6368 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6369 {
   6370 
   6371 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6372 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6373 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6374 	    WM_TXDESCS_SIZE(txq));
   6375 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6376 }
   6377 
   6378 static int
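         /*
          * wm_alloc_rx_descs:
          *
          *	Allocate the Rx descriptor ring for one queue.  The descriptor
          *	format (and therefore its size) depends on the controller type.
          */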
   6379 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6380 {
   6381 	int error;
   6382 	size_t rxq_descs_size;
   6383 
   6384 	/*
   6385 	 * Allocate the control data structures, and create and load the
   6386 	 * DMA map for it.
   6387 	 *
   6388 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6389 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6390 	 * both sets within the same 4G segment.
   6391 	 */
   6392 	rxq->rxq_ndesc = WM_NRXDESC;
   6393 	if (sc->sc_type == WM_T_82574)
   6394 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6395 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6396 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6397 	else
   6398 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6399 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6400 
   6401 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6402 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6403 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6404 		aprint_error_dev(sc->sc_dev,
   6405 		    "unable to allocate RX control data, error = %d\n",
   6406 		    error);
   6407 		goto fail_0;
   6408 	}
   6409 
   6410 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6411 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6412 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6413 		aprint_error_dev(sc->sc_dev,
   6414 		    "unable to map RX control data, error = %d\n", error);
   6415 		goto fail_1;
   6416 	}
   6417 
   6418 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6419 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6420 		aprint_error_dev(sc->sc_dev,
   6421 		    "unable to create RX control data DMA map, error = %d\n",
   6422 		    error);
   6423 		goto fail_2;
   6424 	}
   6425 
   6426 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6427 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6428 		aprint_error_dev(sc->sc_dev,
   6429 		    "unable to load RX control data DMA map, error = %d\n",
   6430 		    error);
   6431 		goto fail_3;
   6432 	}
   6433 
   6434 	return 0;
   6435 
   6436  fail_3:
   6437 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6438  fail_2:
   6439 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6440 	    rxq_descs_size);
   6441  fail_1:
   6442 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6443  fail_0:
   6444 	return error;
   6445 }
   6446 
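         /*
          * wm_free_rx_descs:
          *
          *	Undo wm_alloc_rx_descs(): unload and destroy the DMA map,
          *	then unmap and free the descriptor memory.
          */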
   6447 static void
   6448 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6449 {
   6450 
   6451 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6452 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6453 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6454 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6455 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6456 }
   6457 
   6458 
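         /*
          * wm_alloc_tx_buffer:
          *
          *	Create the per-job Tx buffer DMA maps.  The 82547 variants
          *	use a shorter job queue (WM_TXQUEUELEN_MAX_82547).
          */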
   6459 static int
   6460 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6461 {
   6462 	int i, error;
   6463 
   6464 	/* Create the transmit buffer DMA maps. */
   6465 	WM_TXQUEUELEN(txq) =
   6466 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6467 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6468 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6469 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6470 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6471 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6472 			aprint_error_dev(sc->sc_dev,
   6473 			    "unable to create Tx DMA map %d, error = %d\n",
   6474 			    i, error);
   6475 			goto fail;
   6476 		}
   6477 	}
   6478 
   6479 	return 0;
   6480 
   6481  fail:
   6482 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6483 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6484 			bus_dmamap_destroy(sc->sc_dmat,
   6485 			    txq->txq_soft[i].txs_dmamap);
   6486 	}
   6487 	return error;
   6488 }
   6489 
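         /*
          * wm_free_tx_buffer:
          *
          *	Destroy the per-job Tx buffer DMA maps.
          */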
   6490 static void
   6491 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6492 {
   6493 	int i;
   6494 
   6495 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6496 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6497 			bus_dmamap_destroy(sc->sc_dmat,
   6498 			    txq->txq_soft[i].txs_dmamap);
   6499 	}
   6500 }
   6501 
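         /*
          * wm_alloc_rx_buffer:
          *
          *	Create one Rx buffer DMA map (one mbuf cluster each) per
          *	Rx descriptor.
          */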
   6502 static int
   6503 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6504 {
   6505 	int i, error;
   6506 
   6507 	/* Create the receive buffer DMA maps. */
   6508 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6509 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6510 			    MCLBYTES, 0, 0,
   6511 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6512 			aprint_error_dev(sc->sc_dev,
   6513 			    "unable to create Rx DMA map %d error = %d\n",
   6514 			    i, error);
   6515 			goto fail;
   6516 		}
   6517 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6518 	}
   6519 
   6520 	return 0;
   6521 
   6522  fail:
   6523 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6524 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6525 			bus_dmamap_destroy(sc->sc_dmat,
   6526 			    rxq->rxq_soft[i].rxs_dmamap);
   6527 	}
   6528 	return error;
   6529 }
   6530 
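         /*
          * wm_free_rx_buffer:
          *
          *	Destroy the per-descriptor Rx buffer DMA maps.
          */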
   6531 static void
   6532 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6533 {
   6534 	int i;
   6535 
   6536 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6537 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6538 			bus_dmamap_destroy(sc->sc_dmat,
   6539 			    rxq->rxq_soft[i].rxs_dmamap);
   6540 	}
   6541 }
   6542 
   6543 /*
    6544  * wm_alloc_txrx_queues:
   6545  *	Allocate {tx,rx}descs and {tx,rx} buffers
   6546  */
   6547 static int
   6548 wm_alloc_txrx_queues(struct wm_softc *sc)
   6549 {
   6550 	int i, error, tx_done, rx_done;
   6551 
   6552 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6553 	    KM_SLEEP);
   6554 	if (sc->sc_queue == NULL) {
    6555 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6556 		error = ENOMEM;
   6557 		goto fail_0;
   6558 	}
   6559 
   6560 	/*
   6561 	 * For transmission
   6562 	 */
   6563 	error = 0;
   6564 	tx_done = 0;
   6565 	for (i = 0; i < sc->sc_nqueues; i++) {
   6566 #ifdef WM_EVENT_COUNTERS
   6567 		int j;
   6568 		const char *xname;
   6569 #endif
   6570 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6571 		txq->txq_sc = sc;
   6572 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6573 
   6574 		error = wm_alloc_tx_descs(sc, txq);
   6575 		if (error)
   6576 			break;
   6577 		error = wm_alloc_tx_buffer(sc, txq);
   6578 		if (error) {
   6579 			wm_free_tx_descs(sc, txq);
   6580 			break;
   6581 		}
   6582 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6583 		if (txq->txq_interq == NULL) {
   6584 			wm_free_tx_descs(sc, txq);
   6585 			wm_free_tx_buffer(sc, txq);
   6586 			error = ENOMEM;
   6587 			break;
   6588 		}
   6589 
   6590 #ifdef WM_EVENT_COUNTERS
   6591 		xname = device_xname(sc->sc_dev);
   6592 
   6593 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6594 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6595 		WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
   6596 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6597 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6598 		WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
   6599 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
   6600 		WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
   6601 		WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
   6602 		WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
   6603 		WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
   6604 
   6605 		for (j = 0; j < WM_NTXSEGS; j++) {
   6606 			snprintf(txq->txq_txseg_evcnt_names[j],
   6607 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6608 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6609 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6610 		}
   6611 
   6612 		WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
   6613 		WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
   6614 		WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
   6615 		WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
   6616 		WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
   6617 #endif /* WM_EVENT_COUNTERS */
   6618 
   6619 		tx_done++;
   6620 	}
   6621 	if (error)
   6622 		goto fail_1;
   6623 
   6624 	/*
    6625 	 * For receive
   6626 	 */
   6627 	error = 0;
   6628 	rx_done = 0;
   6629 	for (i = 0; i < sc->sc_nqueues; i++) {
   6630 #ifdef WM_EVENT_COUNTERS
   6631 		const char *xname;
   6632 #endif
   6633 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6634 		rxq->rxq_sc = sc;
   6635 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6636 
   6637 		error = wm_alloc_rx_descs(sc, rxq);
   6638 		if (error)
   6639 			break;
   6640 
   6641 		error = wm_alloc_rx_buffer(sc, rxq);
   6642 		if (error) {
   6643 			wm_free_rx_descs(sc, rxq);
   6644 			break;
   6645 		}
   6646 
   6647 #ifdef WM_EVENT_COUNTERS
   6648 		xname = device_xname(sc->sc_dev);
   6649 
   6650 		WM_Q_INTR_EVCNT_ATTACH(rxq, intr, rxq, i, xname);
   6651 		WM_Q_INTR_EVCNT_ATTACH(rxq, defer, rxq, i, xname);
   6652 
   6653 		WM_Q_MISC_EVCNT_ATTACH(rxq, ipsum, rxq, i, xname);
   6654 		WM_Q_MISC_EVCNT_ATTACH(rxq, tusum, rxq, i, xname);
   6655 #endif /* WM_EVENT_COUNTERS */
   6656 
   6657 		rx_done++;
   6658 	}
   6659 	if (error)
   6660 		goto fail_2;
   6661 
   6662 	return 0;
   6663 
   6664  fail_2:
   6665 	for (i = 0; i < rx_done; i++) {
   6666 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6667 		wm_free_rx_buffer(sc, rxq);
   6668 		wm_free_rx_descs(sc, rxq);
   6669 		if (rxq->rxq_lock)
   6670 			mutex_obj_free(rxq->rxq_lock);
   6671 	}
   6672  fail_1:
   6673 	for (i = 0; i < tx_done; i++) {
   6674 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6675 		pcq_destroy(txq->txq_interq);
   6676 		wm_free_tx_buffer(sc, txq);
   6677 		wm_free_tx_descs(sc, txq);
   6678 		if (txq->txq_lock)
   6679 			mutex_obj_free(txq->txq_lock);
   6680 	}
   6681 
   6682 	kmem_free(sc->sc_queue,
   6683 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6684  fail_0:
   6685 	return error;
   6686 }
   6687 
   6688 /*
    6689  * wm_free_txrx_queues:
   6690  *	Free {tx,rx}descs and {tx,rx} buffers
   6691  */
   6692 static void
   6693 wm_free_txrx_queues(struct wm_softc *sc)
   6694 {
   6695 	int i;
   6696 
   6697 	for (i = 0; i < sc->sc_nqueues; i++) {
   6698 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6699 
   6700 #ifdef WM_EVENT_COUNTERS
   6701 		WM_Q_EVCNT_DETACH(rxq, intr, rxq, i);
   6702 		WM_Q_EVCNT_DETACH(rxq, defer, rxq, i);
   6703 		WM_Q_EVCNT_DETACH(rxq, ipsum, rxq, i);
   6704 		WM_Q_EVCNT_DETACH(rxq, tusum, rxq, i);
   6705 #endif /* WM_EVENT_COUNTERS */
   6706 
   6707 		wm_free_rx_buffer(sc, rxq);
   6708 		wm_free_rx_descs(sc, rxq);
   6709 		if (rxq->rxq_lock)
   6710 			mutex_obj_free(rxq->rxq_lock);
   6711 	}
   6712 
   6713 	for (i = 0; i < sc->sc_nqueues; i++) {
   6714 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6715 		struct mbuf *m;
   6716 #ifdef WM_EVENT_COUNTERS
   6717 		int j;
   6718 
   6719 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6720 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6721 		WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
   6722 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6723 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6724 		WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
   6725 		WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
   6726 		WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
   6727 		WM_Q_EVCNT_DETACH(txq, tso, txq, i);
   6728 		WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
   6729 		WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
   6730 
   6731 		for (j = 0; j < WM_NTXSEGS; j++)
   6732 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6733 
   6734 		WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
   6735 		WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
   6736 		WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
   6737 		WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
   6738 		WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
   6739 #endif /* WM_EVENT_COUNTERS */
   6740 
   6741 		/* drain txq_interq */
   6742 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6743 			m_freem(m);
   6744 		pcq_destroy(txq->txq_interq);
   6745 
   6746 		wm_free_tx_buffer(sc, txq);
   6747 		wm_free_tx_descs(sc, txq);
   6748 		if (txq->txq_lock)
   6749 			mutex_obj_free(txq->txq_lock);
   6750 	}
   6751 
   6752 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6753 }
   6754 
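         /*
          * wm_init_tx_descs:
          *
          *	Zero the Tx descriptor ring and reset the free-descriptor
          *	count and the next-descriptor index.
          */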
   6755 static void
   6756 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6757 {
   6758 
   6759 	KASSERT(mutex_owned(txq->txq_lock));
   6760 
   6761 	/* Initialize the transmit descriptor ring. */
   6762 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6763 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6764 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6765 	txq->txq_free = WM_NTXDESC(txq);
   6766 	txq->txq_next = 0;
   6767 }
   6768 
   6769 static void
   6770 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6771     struct wm_txqueue *txq)
   6772 {
   6773 
   6774 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6775 		device_xname(sc->sc_dev), __func__));
   6776 	KASSERT(mutex_owned(txq->txq_lock));
   6777 
   6778 	if (sc->sc_type < WM_T_82543) {
   6779 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6780 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6781 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6782 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6783 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6784 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6785 	} else {
   6786 		int qid = wmq->wmq_id;
   6787 
   6788 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6789 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6790 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6791 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6792 
   6793 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6794 			/*
   6795 			 * Don't write TDT before TCTL.EN is set.
    6796 			 * See the documentation.
   6797 			 */
   6798 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6799 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6800 			    | TXDCTL_WTHRESH(0));
   6801 		else {
   6802 			/* XXX should update with AIM? */
   6803 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6804 			if (sc->sc_type >= WM_T_82540) {
    6805 				/* TADV should hold the same value as TIDV */
   6806 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6807 			}
   6808 
   6809 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6810 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6811 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6812 		}
   6813 	}
   6814 }
   6815 
   6816 static void
   6817 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6818 {
   6819 	int i;
   6820 
   6821 	KASSERT(mutex_owned(txq->txq_lock));
   6822 
   6823 	/* Initialize the transmit job descriptors. */
   6824 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6825 		txq->txq_soft[i].txs_mbuf = NULL;
   6826 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6827 	txq->txq_snext = 0;
   6828 	txq->txq_sdirty = 0;
   6829 }
   6830 
   6831 static void
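         /*
          * wm_init_tx_queue:
          *
          *	Initialize one Tx queue: register offsets, descriptor ring,
          *	chip registers and software state.
          */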
   6832 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6833     struct wm_txqueue *txq)
   6834 {
   6835 
   6836 	KASSERT(mutex_owned(txq->txq_lock));
   6837 
   6838 	/*
   6839 	 * Set up some register offsets that are different between
   6840 	 * the i82542 and the i82543 and later chips.
   6841 	 */
   6842 	if (sc->sc_type < WM_T_82543)
   6843 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6844 	else
   6845 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6846 
   6847 	wm_init_tx_descs(sc, txq);
   6848 	wm_init_tx_regs(sc, wmq, txq);
   6849 	wm_init_tx_buffer(sc, txq);
   6850 
   6851 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
   6852 	txq->txq_sending = false;
   6853 }
   6854 
   6855 static void
   6856 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6857     struct wm_rxqueue *rxq)
   6858 {
   6859 
   6860 	KASSERT(mutex_owned(rxq->rxq_lock));
   6861 
   6862 	/*
   6863 	 * Initialize the receive descriptor and receive job
   6864 	 * descriptor rings.
   6865 	 */
   6866 	if (sc->sc_type < WM_T_82543) {
   6867 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6868 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6869 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6870 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6871 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6872 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6873 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6874 
   6875 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6876 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6877 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6878 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6879 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6880 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6881 	} else {
   6882 		int qid = wmq->wmq_id;
   6883 
   6884 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6885 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6886 		CSR_WRITE(sc, WMREG_RDLEN(qid),
   6887 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6888 
   6889 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6890 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6891 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6892 
    6893 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6894 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6895 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6896 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6897 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6898 			    | RXDCTL_WTHRESH(1));
   6899 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6900 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6901 		} else {
   6902 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6903 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6904 			/* XXX should update with AIM? */
   6905 			CSR_WRITE(sc, WMREG_RDTR,
   6906 			    (wmq->wmq_itr / 4) | RDTR_FPD);
    6907 			/* RADV MUST hold the same value as RDTR */
   6908 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6909 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6910 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6911 		}
   6912 	}
   6913 }
   6914 
   6915 static int
   6916 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6917 {
   6918 	struct wm_rxsoft *rxs;
   6919 	int error, i;
   6920 
   6921 	KASSERT(mutex_owned(rxq->rxq_lock));
   6922 
   6923 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6924 		rxs = &rxq->rxq_soft[i];
   6925 		if (rxs->rxs_mbuf == NULL) {
   6926 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6927 				log(LOG_ERR, "%s: unable to allocate or map "
   6928 				    "rx buffer %d, error = %d\n",
   6929 				    device_xname(sc->sc_dev), i, error);
   6930 				/*
   6931 				 * XXX Should attempt to run with fewer receive
   6932 				 * XXX buffers instead of just failing.
   6933 				 */
   6934 				wm_rxdrain(rxq);
   6935 				return ENOMEM;
   6936 			}
   6937 		} else {
   6938 			/*
   6939 			 * For 82575 and 82576, the RX descriptors must be
   6940 			 * initialized after the setting of RCTL.EN in
   6941 			 * wm_set_filter()
   6942 			 */
   6943 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6944 				wm_init_rxdesc(rxq, i);
   6945 		}
   6946 	}
   6947 	rxq->rxq_ptr = 0;
   6948 	rxq->rxq_discard = 0;
   6949 	WM_RXCHAIN_RESET(rxq);
   6950 
   6951 	return 0;
   6952 }
   6953 
   6954 static int
   6955 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6956     struct wm_rxqueue *rxq)
   6957 {
   6958 
   6959 	KASSERT(mutex_owned(rxq->rxq_lock));
   6960 
   6961 	/*
   6962 	 * Set up some register offsets that are different between
   6963 	 * the i82542 and the i82543 and later chips.
   6964 	 */
   6965 	if (sc->sc_type < WM_T_82543)
   6966 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6967 	else
   6968 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6969 
   6970 	wm_init_rx_regs(sc, wmq, rxq);
   6971 	return wm_init_rx_buffer(sc, rxq);
   6972 }
   6973 
   6974 /*
    6975  * wm_init_txrx_queues:
   6976  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6977  */
   6978 static int
   6979 wm_init_txrx_queues(struct wm_softc *sc)
   6980 {
   6981 	int i, error = 0;
   6982 
   6983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6984 		device_xname(sc->sc_dev), __func__));
   6985 
   6986 	for (i = 0; i < sc->sc_nqueues; i++) {
   6987 		struct wm_queue *wmq = &sc->sc_queue[i];
   6988 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6989 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6990 
   6991 		/*
   6992 		 * TODO
    6993 		 * Currently, use a constant value instead of AIM (Adaptive
    6994 		 * Interrupt Moderation).  Furthermore, the interrupt interval
    6995 		 * used for multiqueue, which runs in polling mode, is lower
    6996 		 * than the default value.  More tuning and AIM are required.
   6997 		 */
   6998 		if (wm_is_using_multiqueue(sc))
   6999 			wmq->wmq_itr = 50;
   7000 		else
   7001 			wmq->wmq_itr = sc->sc_itr_init;
   7002 		wmq->wmq_set_itr = true;
   7003 
   7004 		mutex_enter(txq->txq_lock);
   7005 		wm_init_tx_queue(sc, wmq, txq);
   7006 		mutex_exit(txq->txq_lock);
   7007 
   7008 		mutex_enter(rxq->rxq_lock);
   7009 		error = wm_init_rx_queue(sc, wmq, rxq);
   7010 		mutex_exit(rxq->rxq_lock);
   7011 		if (error)
   7012 			break;
   7013 	}
   7014 
   7015 	return error;
   7016 }
   7017 
   7018 /*
   7019  * wm_tx_offload:
   7020  *
   7021  *	Set up TCP/IP checksumming parameters for the
   7022  *	specified packet.
   7023  */
   7024 static int
   7025 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7026     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   7027 {
   7028 	struct mbuf *m0 = txs->txs_mbuf;
   7029 	struct livengood_tcpip_ctxdesc *t;
   7030 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   7031 	uint32_t ipcse;
   7032 	struct ether_header *eh;
   7033 	int offset, iphl;
   7034 	uint8_t fields;
   7035 
   7036 	/*
   7037 	 * XXX It would be nice if the mbuf pkthdr had offset
   7038 	 * fields for the protocol headers.
   7039 	 */
   7040 
   7041 	eh = mtod(m0, struct ether_header *);
   7042 	switch (htons(eh->ether_type)) {
   7043 	case ETHERTYPE_IP:
   7044 	case ETHERTYPE_IPV6:
   7045 		offset = ETHER_HDR_LEN;
   7046 		break;
   7047 
   7048 	case ETHERTYPE_VLAN:
   7049 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7050 		break;
   7051 
   7052 	default:
   7053 		/*
   7054 		 * Don't support this protocol or encapsulation.
   7055 		 */
   7056 		*fieldsp = 0;
   7057 		*cmdp = 0;
   7058 		return 0;
   7059 	}
   7060 
   7061 	if ((m0->m_pkthdr.csum_flags &
   7062 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7063 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7064 	} else
   7065 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7066 
   7067 	ipcse = offset + iphl - 1;
   7068 
   7069 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   7070 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   7071 	seg = 0;
   7072 	fields = 0;
   7073 
   7074 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7075 		int hlen = offset + iphl;
   7076 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7077 
   7078 		if (__predict_false(m0->m_len <
   7079 				    (hlen + sizeof(struct tcphdr)))) {
   7080 			/*
   7081 			 * TCP/IP headers are not in the first mbuf; we need
   7082 			 * to do this the slow and painful way. Let's just
   7083 			 * hope this doesn't happen very often.
   7084 			 */
   7085 			struct tcphdr th;
   7086 
   7087 			WM_Q_EVCNT_INCR(txq, tsopain);
   7088 
   7089 			m_copydata(m0, hlen, sizeof(th), &th);
   7090 			if (v4) {
   7091 				struct ip ip;
   7092 
   7093 				m_copydata(m0, offset, sizeof(ip), &ip);
   7094 				ip.ip_len = 0;
   7095 				m_copyback(m0,
   7096 				    offset + offsetof(struct ip, ip_len),
   7097 				    sizeof(ip.ip_len), &ip.ip_len);
   7098 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7099 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7100 			} else {
   7101 				struct ip6_hdr ip6;
   7102 
   7103 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7104 				ip6.ip6_plen = 0;
   7105 				m_copyback(m0,
   7106 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7107 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7108 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7109 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7110 			}
   7111 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7112 			    sizeof(th.th_sum), &th.th_sum);
   7113 
   7114 			hlen += th.th_off << 2;
   7115 		} else {
   7116 			/*
   7117 			 * TCP/IP headers are in the first mbuf; we can do
   7118 			 * this the easy way.
   7119 			 */
   7120 			struct tcphdr *th;
   7121 
   7122 			if (v4) {
   7123 				struct ip *ip =
   7124 				    (void *)(mtod(m0, char *) + offset);
   7125 				th = (void *)(mtod(m0, char *) + hlen);
   7126 
   7127 				ip->ip_len = 0;
   7128 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7129 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7130 			} else {
   7131 				struct ip6_hdr *ip6 =
   7132 				    (void *)(mtod(m0, char *) + offset);
   7133 				th = (void *)(mtod(m0, char *) + hlen);
   7134 
   7135 				ip6->ip6_plen = 0;
   7136 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7137 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7138 			}
   7139 			hlen += th->th_off << 2;
   7140 		}
   7141 
   7142 		if (v4) {
   7143 			WM_Q_EVCNT_INCR(txq, tso);
   7144 			cmdlen |= WTX_TCPIP_CMD_IP;
   7145 		} else {
   7146 			WM_Q_EVCNT_INCR(txq, tso6);
   7147 			ipcse = 0;
   7148 		}
   7149 		cmd |= WTX_TCPIP_CMD_TSE;
   7150 		cmdlen |= WTX_TCPIP_CMD_TSE |
   7151 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   7152 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   7153 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   7154 	}
   7155 
   7156 	/*
   7157 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   7158 	 * offload feature, if we load the context descriptor, we
   7159 	 * MUST provide valid values for IPCSS and TUCSS fields.
   7160 	 */
   7161 
   7162 	ipcs = WTX_TCPIP_IPCSS(offset) |
   7163 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   7164 	    WTX_TCPIP_IPCSE(ipcse);
   7165 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   7166 		WM_Q_EVCNT_INCR(txq, ipsum);
   7167 		fields |= WTX_IXSM;
   7168 	}
   7169 
   7170 	offset += iphl;
   7171 
   7172 	if (m0->m_pkthdr.csum_flags &
   7173 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   7174 		WM_Q_EVCNT_INCR(txq, tusum);
   7175 		fields |= WTX_TXSM;
   7176 		tucs = WTX_TCPIP_TUCSS(offset) |
   7177 		    WTX_TCPIP_TUCSO(offset +
   7178 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   7179 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7180 	} else if ((m0->m_pkthdr.csum_flags &
   7181 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   7182 		WM_Q_EVCNT_INCR(txq, tusum6);
   7183 		fields |= WTX_TXSM;
   7184 		tucs = WTX_TCPIP_TUCSS(offset) |
   7185 		    WTX_TCPIP_TUCSO(offset +
   7186 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   7187 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7188 	} else {
   7189 		/* Just initialize it to a valid TCP context. */
   7190 		tucs = WTX_TCPIP_TUCSS(offset) |
   7191 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   7192 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   7193 	}
   7194 
   7195 	/*
    7196 	 * We don't have to write a context descriptor for every packet,
    7197 	 * except on the 82574: there, a context descriptor must be written
    7198 	 * for every packet when two descriptor queues are used.
    7199 	 * Writing a context descriptor for every packet adds overhead,
    7200 	 * but it does not cause problems.
   7201 	 */
   7202 	/* Fill in the context descriptor. */
   7203 	t = (struct livengood_tcpip_ctxdesc *)
   7204 	    &txq->txq_descs[txq->txq_next];
   7205 	t->tcpip_ipcs = htole32(ipcs);
   7206 	t->tcpip_tucs = htole32(tucs);
   7207 	t->tcpip_cmdlen = htole32(cmdlen);
   7208 	t->tcpip_seg = htole32(seg);
   7209 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7210 
   7211 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7212 	txs->txs_ndesc++;
   7213 
   7214 	*cmdp = cmd;
   7215 	*fieldsp = fields;
   7216 
   7217 	return 0;
   7218 }
   7219 
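         /*
          * wm_select_txqueue:
          *
          *	Map the current CPU to a Tx queue index.  For example (made-up
          *	numbers): with ncpu = 8, sc_affinity_offset = 0 and
          *	sc_nqueues = 4, CPU 5 maps to queue (5 + 8 - 0) % 4 = 1.
          */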
   7220 static inline int
   7221 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   7222 {
   7223 	struct wm_softc *sc = ifp->if_softc;
   7224 	u_int cpuid = cpu_index(curcpu());
   7225 
   7226 	/*
    7227 	 * Currently, a simple distribution strategy.
    7228 	 * TODO:
    7229 	 * distribute by flowid (RSS hash value).
   7230 	 */
   7231 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   7232 }
   7233 
   7234 /*
   7235  * wm_start:		[ifnet interface function]
   7236  *
   7237  *	Start packet transmission on the interface.
   7238  */
   7239 static void
   7240 wm_start(struct ifnet *ifp)
   7241 {
   7242 	struct wm_softc *sc = ifp->if_softc;
   7243 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7244 
   7245 #ifdef WM_MPSAFE
   7246 	KASSERT(if_is_mpsafe(ifp));
   7247 #endif
   7248 	/*
   7249 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7250 	 */
   7251 
   7252 	mutex_enter(txq->txq_lock);
   7253 	if (!txq->txq_stopping)
   7254 		wm_start_locked(ifp);
   7255 	mutex_exit(txq->txq_lock);
   7256 }
   7257 
   7258 static void
   7259 wm_start_locked(struct ifnet *ifp)
   7260 {
   7261 	struct wm_softc *sc = ifp->if_softc;
   7262 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7263 
   7264 	wm_send_common_locked(ifp, txq, false);
   7265 }
   7266 
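         /*
          * wm_transmit:		[ifnet interface function]
          *
          *	Select a Tx queue, enqueue the packet on that queue's pcq(9),
          *	and start transmission if the queue lock is uncontended.
          */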
   7267 static int
   7268 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   7269 {
   7270 	int qid;
   7271 	struct wm_softc *sc = ifp->if_softc;
   7272 	struct wm_txqueue *txq;
   7273 
   7274 	qid = wm_select_txqueue(ifp, m);
   7275 	txq = &sc->sc_queue[qid].wmq_txq;
   7276 
   7277 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7278 		m_freem(m);
   7279 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7280 		return ENOBUFS;
   7281 	}
   7282 
   7283 	/*
   7284 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7285 	 */
   7286 	ifp->if_obytes += m->m_pkthdr.len;
   7287 	if (m->m_flags & M_MCAST)
   7288 		ifp->if_omcasts++;
   7289 
   7290 	if (mutex_tryenter(txq->txq_lock)) {
   7291 		if (!txq->txq_stopping)
   7292 			wm_transmit_locked(ifp, txq);
   7293 		mutex_exit(txq->txq_lock);
   7294 	}
   7295 
   7296 	return 0;
   7297 }
   7298 
   7299 static void
   7300 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7301 {
   7302 
   7303 	wm_send_common_locked(ifp, txq, true);
   7304 }
   7305 
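         /*
          * wm_send_common_locked:
          *
          *	Transmit path shared by wm_start_locked() and
          *	wm_transmit_locked(): dequeue packets, load their DMA maps
          *	and fill Tx descriptors until the input queue or the
          *	descriptor ring is exhausted.
          */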
   7306 static void
   7307 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7308     bool is_transmit)
   7309 {
   7310 	struct wm_softc *sc = ifp->if_softc;
   7311 	struct mbuf *m0;
   7312 	struct wm_txsoft *txs;
   7313 	bus_dmamap_t dmamap;
   7314 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7315 	bus_addr_t curaddr;
   7316 	bus_size_t seglen, curlen;
   7317 	uint32_t cksumcmd;
   7318 	uint8_t cksumfields;
   7319 	bool remap = true;
   7320 
   7321 	KASSERT(mutex_owned(txq->txq_lock));
   7322 
   7323 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7324 		return;
   7325 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7326 		return;
   7327 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7328 		return;
   7329 
   7330 	/* Remember the previous number of free descriptors. */
   7331 	ofree = txq->txq_free;
   7332 
   7333 	/*
   7334 	 * Loop through the send queue, setting up transmit descriptors
   7335 	 * until we drain the queue, or use up all available transmit
   7336 	 * descriptors.
   7337 	 */
   7338 	for (;;) {
   7339 		m0 = NULL;
   7340 
   7341 		/* Get a work queue entry. */
   7342 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7343 			wm_txeof(txq, UINT_MAX);
   7344 			if (txq->txq_sfree == 0) {
   7345 				DPRINTF(WM_DEBUG_TX,
   7346 				    ("%s: TX: no free job descriptors\n",
   7347 					device_xname(sc->sc_dev)));
   7348 				WM_Q_EVCNT_INCR(txq, txsstall);
   7349 				break;
   7350 			}
   7351 		}
   7352 
   7353 		/* Grab a packet off the queue. */
   7354 		if (is_transmit)
   7355 			m0 = pcq_get(txq->txq_interq);
   7356 		else
   7357 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7358 		if (m0 == NULL)
   7359 			break;
   7360 
   7361 		DPRINTF(WM_DEBUG_TX,
   7362 		    ("%s: TX: have packet to transmit: %p\n",
   7363 			device_xname(sc->sc_dev), m0));
   7364 
   7365 		txs = &txq->txq_soft[txq->txq_snext];
   7366 		dmamap = txs->txs_dmamap;
   7367 
   7368 		use_tso = (m0->m_pkthdr.csum_flags &
   7369 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7370 
   7371 		/*
   7372 		 * So says the Linux driver:
   7373 		 * The controller does a simple calculation to make sure
   7374 		 * there is enough room in the FIFO before initiating the
   7375 		 * DMA for each buffer. The calc is:
   7376 		 *	4 = ceil(buffer len / MSS)
   7377 		 * To make sure we don't overrun the FIFO, adjust the max
   7378 		 * buffer len if the MSS drops.
   7379 		 */
   7380 		dmamap->dm_maxsegsz =
   7381 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7382 		    ? m0->m_pkthdr.segsz << 2
   7383 		    : WTX_MAX_LEN;
   7384 
   7385 		/*
   7386 		 * Load the DMA map.  If this fails, the packet either
   7387 		 * didn't fit in the allotted number of segments, or we
   7388 		 * were short on resources.  For the too-many-segments
   7389 		 * case, we simply report an error and drop the packet,
   7390 		 * since we can't sanely copy a jumbo packet to a single
   7391 		 * buffer.
   7392 		 */
   7393 retry:
   7394 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7395 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7396 		if (__predict_false(error)) {
   7397 			if (error == EFBIG) {
   7398 				if (remap == true) {
   7399 					struct mbuf *m;
   7400 
   7401 					remap = false;
   7402 					m = m_defrag(m0, M_NOWAIT);
   7403 					if (m != NULL) {
   7404 						WM_Q_EVCNT_INCR(txq, defrag);
   7405 						m0 = m;
   7406 						goto retry;
   7407 					}
   7408 				}
   7409 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   7410 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7411 				    "DMA segments, dropping...\n",
   7412 				    device_xname(sc->sc_dev));
   7413 				wm_dump_mbuf_chain(sc, m0);
   7414 				m_freem(m0);
   7415 				continue;
   7416 			}
    7417 			/* Short on resources, just stop for now. */
   7418 			DPRINTF(WM_DEBUG_TX,
   7419 			    ("%s: TX: dmamap load failed: %d\n",
   7420 				device_xname(sc->sc_dev), error));
   7421 			break;
   7422 		}
   7423 
   7424 		segs_needed = dmamap->dm_nsegs;
   7425 		if (use_tso) {
   7426 			/* For sentinel descriptor; see below. */
   7427 			segs_needed++;
   7428 		}
   7429 
   7430 		/*
   7431 		 * Ensure we have enough descriptors free to describe
   7432 		 * the packet. Note, we always reserve one descriptor
   7433 		 * at the end of the ring due to the semantics of the
   7434 		 * TDT register, plus one more in the event we need
   7435 		 * to load offload context.
   7436 		 */
   7437 		if (segs_needed > txq->txq_free - 2) {
   7438 			/*
   7439 			 * Not enough free descriptors to transmit this
   7440 			 * packet.  We haven't committed anything yet,
   7441 			 * so just unload the DMA map, put the packet
    7442 			 * back on the queue, and punt. Notify the upper
   7443 			 * layer that there are no more slots left.
   7444 			 */
   7445 			DPRINTF(WM_DEBUG_TX,
   7446 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7447 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7448 				segs_needed, txq->txq_free - 1));
   7449 			if (!is_transmit)
   7450 				ifp->if_flags |= IFF_OACTIVE;
   7451 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7452 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7453 			WM_Q_EVCNT_INCR(txq, txdstall);
   7454 			break;
   7455 		}
   7456 
   7457 		/*
   7458 		 * Check for 82547 Tx FIFO bug. We need to do this
   7459 		 * once we know we can transmit the packet, since we
   7460 		 * do some internal FIFO space accounting here.
   7461 		 */
   7462 		if (sc->sc_type == WM_T_82547 &&
   7463 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7464 			DPRINTF(WM_DEBUG_TX,
   7465 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7466 				device_xname(sc->sc_dev)));
   7467 			if (!is_transmit)
   7468 				ifp->if_flags |= IFF_OACTIVE;
   7469 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7470 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7471 			WM_Q_EVCNT_INCR(txq, fifo_stall);
   7472 			break;
   7473 		}
   7474 
   7475 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7476 
   7477 		DPRINTF(WM_DEBUG_TX,
   7478 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7479 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7480 
   7481 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7482 
   7483 		/*
   7484 		 * Store a pointer to the packet so that we can free it
   7485 		 * later.
   7486 		 *
    7487 		 * Initially, we consider the number of descriptors the
    7488 		 * packet uses to be the number of DMA segments.  This may be
   7489 		 * incremented by 1 if we do checksum offload (a descriptor
   7490 		 * is used to set the checksum context).
   7491 		 */
   7492 		txs->txs_mbuf = m0;
   7493 		txs->txs_firstdesc = txq->txq_next;
   7494 		txs->txs_ndesc = segs_needed;
   7495 
   7496 		/* Set up offload parameters for this packet. */
   7497 		if (m0->m_pkthdr.csum_flags &
   7498 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7499 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7500 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7501 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7502 					  &cksumfields) != 0) {
   7503 				/* Error message already displayed. */
   7504 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7505 				continue;
   7506 			}
   7507 		} else {
   7508 			cksumcmd = 0;
   7509 			cksumfields = 0;
   7510 		}
   7511 
   7512 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7513 
   7514 		/* Sync the DMA map. */
   7515 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7516 		    BUS_DMASYNC_PREWRITE);
   7517 
   7518 		/* Initialize the transmit descriptor. */
   7519 		for (nexttx = txq->txq_next, seg = 0;
   7520 		     seg < dmamap->dm_nsegs; seg++) {
   7521 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7522 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7523 			     seglen != 0;
   7524 			     curaddr += curlen, seglen -= curlen,
   7525 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7526 				curlen = seglen;
   7527 
   7528 				/*
   7529 				 * So says the Linux driver:
   7530 				 * Work around for premature descriptor
   7531 				 * write-backs in TSO mode.  Append a
   7532 				 * 4-byte sentinel descriptor.
   7533 				 */
   7534 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7535 				    curlen > 8)
   7536 					curlen -= 4;
   7537 
   7538 				wm_set_dma_addr(
   7539 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7540 				txq->txq_descs[nexttx].wtx_cmdlen
   7541 				    = htole32(cksumcmd | curlen);
   7542 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7543 				    = 0;
   7544 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7545 				    = cksumfields;
   7546 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7547 				lasttx = nexttx;
   7548 
   7549 				DPRINTF(WM_DEBUG_TX,
   7550 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7551 					"len %#04zx\n",
   7552 					device_xname(sc->sc_dev), nexttx,
   7553 					(uint64_t)curaddr, curlen));
   7554 			}
   7555 		}
   7556 
   7557 		KASSERT(lasttx != -1);
   7558 
   7559 		/*
   7560 		 * Set up the command byte on the last descriptor of
   7561 		 * the packet. If we're in the interrupt delay window,
   7562 		 * delay the interrupt.
   7563 		 */
   7564 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7565 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7566 
   7567 		/*
   7568 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7569 		 * up the descriptor to encapsulate the packet for us.
   7570 		 *
   7571 		 * This is only valid on the last descriptor of the packet.
   7572 		 */
   7573 		if (vlan_has_tag(m0)) {
   7574 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7575 			    htole32(WTX_CMD_VLE);
   7576 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7577 			    = htole16(vlan_get_tag(m0));
   7578 		}
   7579 
   7580 		txs->txs_lastdesc = lasttx;
   7581 
   7582 		DPRINTF(WM_DEBUG_TX,
   7583 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7584 			device_xname(sc->sc_dev),
   7585 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7586 
   7587 		/* Sync the descriptors we're using. */
   7588 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7589 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7590 
   7591 		/* Give the packet to the chip. */
   7592 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7593 
   7594 		DPRINTF(WM_DEBUG_TX,
   7595 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7596 
   7597 		DPRINTF(WM_DEBUG_TX,
   7598 		    ("%s: TX: finished transmitting packet, job %d\n",
   7599 			device_xname(sc->sc_dev), txq->txq_snext));
   7600 
   7601 		/* Advance the tx pointer. */
   7602 		txq->txq_free -= txs->txs_ndesc;
   7603 		txq->txq_next = nexttx;
   7604 
   7605 		txq->txq_sfree--;
   7606 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7607 
   7608 		/* Pass the packet to any BPF listeners. */
   7609 		bpf_mtap(ifp, m0, BPF_D_OUT);
   7610 	}
   7611 
   7612 	if (m0 != NULL) {
   7613 		if (!is_transmit)
   7614 			ifp->if_flags |= IFF_OACTIVE;
   7615 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7616 		WM_Q_EVCNT_INCR(txq, descdrop);
   7617 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7618 			__func__));
   7619 		m_freem(m0);
   7620 	}
   7621 
   7622 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7623 		/* No more slots; notify upper layer. */
   7624 		if (!is_transmit)
   7625 			ifp->if_flags |= IFF_OACTIVE;
   7626 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7627 	}
   7628 
   7629 	if (txq->txq_free != ofree) {
   7630 		/* Set a watchdog timer in case the chip flakes out. */
   7631 		txq->txq_lastsent = time_uptime;
   7632 		txq->txq_sending = true;
   7633 	}
   7634 }
   7635 
   7636 /*
   7637  * wm_nq_tx_offload:
   7638  *
   7639  *	Set up TCP/IP checksumming parameters for the
   7640  *	specified packet, for NEWQUEUE devices
   7641  */
   7642 static int
   7643 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7644     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7645 {
   7646 	struct mbuf *m0 = txs->txs_mbuf;
   7647 	uint32_t vl_len, mssidx, cmdc;
   7648 	struct ether_header *eh;
   7649 	int offset, iphl;
   7650 
   7651 	/*
   7652 	 * XXX It would be nice if the mbuf pkthdr had offset
   7653 	 * fields for the protocol headers.
   7654 	 */
   7655 	*cmdlenp = 0;
   7656 	*fieldsp = 0;
   7657 
   7658 	eh = mtod(m0, struct ether_header *);
   7659 	switch (htons(eh->ether_type)) {
   7660 	case ETHERTYPE_IP:
   7661 	case ETHERTYPE_IPV6:
   7662 		offset = ETHER_HDR_LEN;
   7663 		break;
   7664 
   7665 	case ETHERTYPE_VLAN:
   7666 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7667 		break;
   7668 
   7669 	default:
   7670 		/* Don't support this protocol or encapsulation. */
   7671 		*do_csum = false;
   7672 		return 0;
   7673 	}
   7674 	*do_csum = true;
   7675 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7676 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7677 
   7678 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7679 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7680 
   7681 	if ((m0->m_pkthdr.csum_flags &
   7682 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7683 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7684 	} else {
   7685 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
   7686 	}
   7687 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7688 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7689 
   7690 	if (vlan_has_tag(m0)) {
   7691 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
   7692 		    << NQTXC_VLLEN_VLAN_SHIFT);
   7693 		*cmdlenp |= NQTX_CMD_VLE;
   7694 	}
   7695 
   7696 	mssidx = 0;
   7697 
   7698 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7699 		int hlen = offset + iphl;
   7700 		int tcp_hlen;
   7701 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7702 
   7703 		if (__predict_false(m0->m_len <
   7704 				    (hlen + sizeof(struct tcphdr)))) {
   7705 			/*
   7706 			 * TCP/IP headers are not in the first mbuf; we need
   7707 			 * to do this the slow and painful way. Let's just
   7708 			 * hope this doesn't happen very often.
   7709 			 */
   7710 			struct tcphdr th;
   7711 
   7712 			WM_Q_EVCNT_INCR(txq, tsopain);
   7713 
   7714 			m_copydata(m0, hlen, sizeof(th), &th);
   7715 			if (v4) {
   7716 				struct ip ip;
   7717 
   7718 				m_copydata(m0, offset, sizeof(ip), &ip);
   7719 				ip.ip_len = 0;
   7720 				m_copyback(m0,
   7721 				    offset + offsetof(struct ip, ip_len),
   7722 				    sizeof(ip.ip_len), &ip.ip_len);
   7723 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7724 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7725 			} else {
   7726 				struct ip6_hdr ip6;
   7727 
   7728 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7729 				ip6.ip6_plen = 0;
   7730 				m_copyback(m0,
   7731 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7732 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7733 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7734 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7735 			}
   7736 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7737 			    sizeof(th.th_sum), &th.th_sum);
   7738 
   7739 			tcp_hlen = th.th_off << 2;
   7740 		} else {
   7741 			/*
   7742 			 * TCP/IP headers are in the first mbuf; we can do
   7743 			 * this the easy way.
   7744 			 */
   7745 			struct tcphdr *th;
   7746 
   7747 			if (v4) {
   7748 				struct ip *ip =
   7749 				    (void *)(mtod(m0, char *) + offset);
   7750 				th = (void *)(mtod(m0, char *) + hlen);
   7751 
   7752 				ip->ip_len = 0;
   7753 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7754 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7755 			} else {
   7756 				struct ip6_hdr *ip6 =
   7757 				    (void *)(mtod(m0, char *) + offset);
   7758 				th = (void *)(mtod(m0, char *) + hlen);
   7759 
   7760 				ip6->ip6_plen = 0;
   7761 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7762 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7763 			}
   7764 			tcp_hlen = th->th_off << 2;
   7765 		}
   7766 		hlen += tcp_hlen;
   7767 		*cmdlenp |= NQTX_CMD_TSE;
   7768 
   7769 		if (v4) {
   7770 			WM_Q_EVCNT_INCR(txq, tso);
   7771 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7772 		} else {
   7773 			WM_Q_EVCNT_INCR(txq, tso6);
   7774 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7775 		}
   7776 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7777 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7778 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7779 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7780 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7781 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7782 	} else {
   7783 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7784 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7785 	}
   7786 
   7787 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7788 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7789 		cmdc |= NQTXC_CMD_IP4;
   7790 	}
   7791 
   7792 	if (m0->m_pkthdr.csum_flags &
   7793 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7794 		WM_Q_EVCNT_INCR(txq, tusum);
   7795 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4))
   7796 			cmdc |= NQTXC_CMD_TCP;
   7797 		else
   7798 			cmdc |= NQTXC_CMD_UDP;
   7799 
   7800 		cmdc |= NQTXC_CMD_IP4;
   7801 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7802 	}
   7803 	if (m0->m_pkthdr.csum_flags &
   7804 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7805 		WM_Q_EVCNT_INCR(txq, tusum6);
   7806 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6))
   7807 			cmdc |= NQTXC_CMD_TCP;
   7808 		else
   7809 			cmdc |= NQTXC_CMD_UDP;
   7810 
   7811 		cmdc |= NQTXC_CMD_IP6;
   7812 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7813 	}
   7814 
   7815 	/*
    7816 	 * We don't have to write a context descriptor for every packet on
    7817 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
    7818 	 * I210 and I211.  Writing one context descriptor per Tx queue is
    7819 	 * enough on these controllers.
    7820 	 * Writing a context descriptor for every packet adds overhead,
    7821 	 * but it does not cause problems.
   7822 	 */
   7823 	/* Fill in the context descriptor. */
   7824 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7825 	    htole32(vl_len);
   7826 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7827 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7828 	    htole32(cmdc);
   7829 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7830 	    htole32(mssidx);
   7831 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7832 	DPRINTF(WM_DEBUG_TX,
   7833 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7834 		txq->txq_next, 0, vl_len));
   7835 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7836 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7837 	txs->txs_ndesc++;
   7838 	return 0;
   7839 }
   7840 
   7841 /*
   7842  * wm_nq_start:		[ifnet interface function]
   7843  *
   7844  *	Start packet transmission on the interface for NEWQUEUE devices
   7845  */
   7846 static void
   7847 wm_nq_start(struct ifnet *ifp)
   7848 {
   7849 	struct wm_softc *sc = ifp->if_softc;
   7850 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7851 
   7852 #ifdef WM_MPSAFE
   7853 	KASSERT(if_is_mpsafe(ifp));
   7854 #endif
   7855 	/*
   7856 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7857 	 */
   7858 
   7859 	mutex_enter(txq->txq_lock);
   7860 	if (!txq->txq_stopping)
   7861 		wm_nq_start_locked(ifp);
   7862 	mutex_exit(txq->txq_lock);
   7863 }
   7864 
   7865 static void
   7866 wm_nq_start_locked(struct ifnet *ifp)
   7867 {
   7868 	struct wm_softc *sc = ifp->if_softc;
   7869 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7870 
   7871 	wm_nq_send_common_locked(ifp, txq, false);
   7872 }
   7873 
   7874 static int
   7875 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7876 {
   7877 	int qid;
   7878 	struct wm_softc *sc = ifp->if_softc;
   7879 	struct wm_txqueue *txq;
   7880 
   7881 	qid = wm_select_txqueue(ifp, m);
   7882 	txq = &sc->sc_queue[qid].wmq_txq;
   7883 
   7884 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7885 		m_freem(m);
   7886 		WM_Q_EVCNT_INCR(txq, pcqdrop);
   7887 		return ENOBUFS;
   7888 	}
   7889 
   7890 	/*
   7891 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7892 	 */
   7893 	ifp->if_obytes += m->m_pkthdr.len;
   7894 	if (m->m_flags & M_MCAST)
   7895 		ifp->if_omcasts++;
   7896 
    7897 	/*
    7898 	 * There are two situations in which this mutex_tryenter() can fail
    7899 	 * at run time:
    7900 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
    7901 	 *     (2) contention with the deferred if_start softint
    7902 	 *	   (wm_handle_queue())
    7903 	 * In case (1), the last packet enqueued to txq->txq_interq is
    7904 	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
    7905 	 * In case (2), the last packet is likewise dequeued by
    7906 	 * wm_deferred_start_locked(), so it does not get stuck, either.
    7907 	 */
   7908 	if (mutex_tryenter(txq->txq_lock)) {
   7909 		if (!txq->txq_stopping)
   7910 			wm_nq_transmit_locked(ifp, txq);
   7911 		mutex_exit(txq->txq_lock);
   7912 	}
   7913 
   7914 	return 0;
   7915 }
   7916 
   7917 static void
   7918 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7919 {
   7920 
   7921 	wm_nq_send_common_locked(ifp, txq, true);
   7922 }
   7923 
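         /*
          * wm_nq_send_common_locked:
          *
          *	Common transmit loop for NEWQUEUE devices, shared by
          *	wm_nq_start_locked() (is_transmit == false) and
          *	wm_nq_transmit_locked() (is_transmit == true).  Called with
          *	txq->txq_lock held.
          */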
   7924 static void
   7925 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7926     bool is_transmit)
   7927 {
   7928 	struct wm_softc *sc = ifp->if_softc;
   7929 	struct mbuf *m0;
   7930 	struct wm_txsoft *txs;
   7931 	bus_dmamap_t dmamap;
   7932 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7933 	bool do_csum, sent;
   7934 	bool remap = true;
   7935 
   7936 	KASSERT(mutex_owned(txq->txq_lock));
   7937 
   7938 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7939 		return;
   7940 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7941 		return;
   7942 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7943 		return;
   7944 
   7945 	sent = false;
   7946 
   7947 	/*
   7948 	 * Loop through the send queue, setting up transmit descriptors
   7949 	 * until we drain the queue, or use up all available transmit
   7950 	 * descriptors.
   7951 	 */
   7952 	for (;;) {
   7953 		m0 = NULL;
   7954 
   7955 		/* Get a work queue entry. */
   7956 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7957 			wm_txeof(txq, UINT_MAX);
   7958 			if (txq->txq_sfree == 0) {
   7959 				DPRINTF(WM_DEBUG_TX,
   7960 				    ("%s: TX: no free job descriptors\n",
   7961 					device_xname(sc->sc_dev)));
   7962 				WM_Q_EVCNT_INCR(txq, txsstall);
   7963 				break;
   7964 			}
   7965 		}
   7966 
   7967 		/* Grab a packet off the queue. */
   7968 		if (is_transmit)
   7969 			m0 = pcq_get(txq->txq_interq);
   7970 		else
   7971 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7972 		if (m0 == NULL)
   7973 			break;
   7974 
   7975 		DPRINTF(WM_DEBUG_TX,
   7976 		    ("%s: TX: have packet to transmit: %p\n",
   7977 		    device_xname(sc->sc_dev), m0));
   7978 
   7979 		txs = &txq->txq_soft[txq->txq_snext];
   7980 		dmamap = txs->txs_dmamap;
   7981 
   7982 		/*
   7983 		 * Load the DMA map.  If this fails, the packet either
   7984 		 * didn't fit in the allotted number of segments, or we
   7985 		 * were short on resources.  For the too-many-segments
   7986 		 * case, we simply report an error and drop the packet,
   7987 		 * since we can't sanely copy a jumbo packet to a single
   7988 		 * buffer.
   7989 		 */
   7990 retry:
   7991 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7992 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7993 		if (__predict_false(error)) {
   7994 			if (error == EFBIG) {
   7995 				if (remap == true) {
   7996 					struct mbuf *m;
   7997 
   7998 					remap = false;
   7999 					m = m_defrag(m0, M_NOWAIT);
   8000 					if (m != NULL) {
   8001 						WM_Q_EVCNT_INCR(txq, defrag);
   8002 						m0 = m;
   8003 						goto retry;
   8004 					}
   8005 				}
   8006 				WM_Q_EVCNT_INCR(txq, toomanyseg);
   8007 				log(LOG_ERR, "%s: Tx packet consumes too many "
   8008 				    "DMA segments, dropping...\n",
   8009 				    device_xname(sc->sc_dev));
   8010 				wm_dump_mbuf_chain(sc, m0);
   8011 				m_freem(m0);
   8012 				continue;
   8013 			}
   8014 			/* Short on resources, just stop for now. */
   8015 			DPRINTF(WM_DEBUG_TX,
   8016 			    ("%s: TX: dmamap load failed: %d\n",
   8017 				device_xname(sc->sc_dev), error));
   8018 			break;
   8019 		}
   8020 
   8021 		segs_needed = dmamap->dm_nsegs;
   8022 
   8023 		/*
   8024 		 * Ensure we have enough descriptors free to describe
   8025 		 * the packet. Note, we always reserve one descriptor
   8026 		 * at the end of the ring due to the semantics of the
   8027 		 * TDT register, plus one more in the event we need
   8028 		 * to load offload context.
   8029 		 */
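         		/*
         		 * Illustrative example: with txq_free == 5, a packet
         		 * needing 4 segments fails the check below (4 > 5 - 2)
         		 * and stalls, even though 4 descriptors are nominally
         		 * free, because the TDT slot and a possible context
         		 * descriptor are reserved.
         		 */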
   8030 		if (segs_needed > txq->txq_free - 2) {
   8031 			/*
   8032 			 * Not enough free descriptors to transmit this
   8033 			 * packet.  We haven't committed anything yet,
   8034 			 * so just unload the DMA map, put the packet
    8035 			 * back on the queue, and punt. Notify the upper
   8036 			 * layer that there are no more slots left.
   8037 			 */
   8038 			DPRINTF(WM_DEBUG_TX,
   8039 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   8040 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
   8041 				segs_needed, txq->txq_free - 1));
   8042 			if (!is_transmit)
   8043 				ifp->if_flags |= IFF_OACTIVE;
   8044 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   8045 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   8046 			WM_Q_EVCNT_INCR(txq, txdstall);
   8047 			break;
   8048 		}
   8049 
   8050 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   8051 
   8052 		DPRINTF(WM_DEBUG_TX,
   8053 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   8054 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   8055 
   8056 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   8057 
   8058 		/*
   8059 		 * Store a pointer to the packet so that we can free it
   8060 		 * later.
   8061 		 *
    8062 		 * Initially, we take the number of descriptors the
    8063 		 * packet uses to be the number of DMA segments.  This may be
   8064 		 * incremented by 1 if we do checksum offload (a descriptor
   8065 		 * is used to set the checksum context).
   8066 		 */
   8067 		txs->txs_mbuf = m0;
   8068 		txs->txs_firstdesc = txq->txq_next;
   8069 		txs->txs_ndesc = segs_needed;
   8070 
   8071 		/* Set up offload parameters for this packet. */
   8072 		uint32_t cmdlen, fields, dcmdlen;
   8073 		if (m0->m_pkthdr.csum_flags &
   8074 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   8075 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8076 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   8077 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   8078 			    &do_csum) != 0) {
   8079 				/* Error message already displayed. */
   8080 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   8081 				continue;
   8082 			}
   8083 		} else {
   8084 			do_csum = false;
   8085 			cmdlen = 0;
   8086 			fields = 0;
   8087 		}
   8088 
   8089 		/* Sync the DMA map. */
   8090 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   8091 		    BUS_DMASYNC_PREWRITE);
   8092 
   8093 		/* Initialize the first transmit descriptor. */
   8094 		nexttx = txq->txq_next;
   8095 		if (!do_csum) {
   8096 			/* setup a legacy descriptor */
   8097 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   8098 			    dmamap->dm_segs[0].ds_addr);
   8099 			txq->txq_descs[nexttx].wtx_cmdlen =
   8100 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   8101 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   8102 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   8103 			if (vlan_has_tag(m0)) {
   8104 				txq->txq_descs[nexttx].wtx_cmdlen |=
   8105 				    htole32(WTX_CMD_VLE);
   8106 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   8107 				    htole16(vlan_get_tag(m0));
   8108 			} else
    8109 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   8110 
   8111 			dcmdlen = 0;
   8112 		} else {
   8113 			/* setup an advanced data descriptor */
   8114 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8115 			    htole64(dmamap->dm_segs[0].ds_addr);
   8116 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   8117 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    8118 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   8119 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   8120 			    htole32(fields);
   8121 			DPRINTF(WM_DEBUG_TX,
   8122 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   8123 				device_xname(sc->sc_dev), nexttx,
   8124 				(uint64_t)dmamap->dm_segs[0].ds_addr));
   8125 			DPRINTF(WM_DEBUG_TX,
   8126 			    ("\t 0x%08x%08x\n", fields,
   8127 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   8128 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   8129 		}
   8130 
   8131 		lasttx = nexttx;
   8132 		nexttx = WM_NEXTTX(txq, nexttx);
   8133 		/*
    8134 		 * Fill in the next descriptors. The layout is the same here
    8135 		 * for the legacy and advanced formats.
   8136 		 */
   8137 		for (seg = 1; seg < dmamap->dm_nsegs;
   8138 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   8139 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   8140 			    htole64(dmamap->dm_segs[seg].ds_addr);
   8141 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   8142 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   8143 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   8144 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   8145 			lasttx = nexttx;
   8146 
   8147 			DPRINTF(WM_DEBUG_TX,
   8148 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
   8149 				device_xname(sc->sc_dev), nexttx,
   8150 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
   8151 				dmamap->dm_segs[seg].ds_len));
   8152 		}
   8153 
   8154 		KASSERT(lasttx != -1);
   8155 
   8156 		/*
   8157 		 * Set up the command byte on the last descriptor of
   8158 		 * the packet. If we're in the interrupt delay window,
   8159 		 * delay the interrupt.
   8160 		 */
   8161 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   8162 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   8163 		txq->txq_descs[lasttx].wtx_cmdlen |=
   8164 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   8165 
   8166 		txs->txs_lastdesc = lasttx;
   8167 
   8168 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   8169 		    device_xname(sc->sc_dev),
   8170 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   8171 
   8172 		/* Sync the descriptors we're using. */
   8173 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   8174 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   8175 
   8176 		/* Give the packet to the chip. */
   8177 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   8178 		sent = true;
   8179 
   8180 		DPRINTF(WM_DEBUG_TX,
   8181 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   8182 
   8183 		DPRINTF(WM_DEBUG_TX,
   8184 		    ("%s: TX: finished transmitting packet, job %d\n",
   8185 			device_xname(sc->sc_dev), txq->txq_snext));
   8186 
   8187 		/* Advance the tx pointer. */
   8188 		txq->txq_free -= txs->txs_ndesc;
   8189 		txq->txq_next = nexttx;
   8190 
   8191 		txq->txq_sfree--;
   8192 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   8193 
   8194 		/* Pass the packet to any BPF listeners. */
   8195 		bpf_mtap(ifp, m0, BPF_D_OUT);
   8196 	}
   8197 
   8198 	if (m0 != NULL) {
   8199 		if (!is_transmit)
   8200 			ifp->if_flags |= IFF_OACTIVE;
   8201 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8202 		WM_Q_EVCNT_INCR(txq, descdrop);
   8203 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   8204 			__func__));
   8205 		m_freem(m0);
   8206 	}
   8207 
   8208 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   8209 		/* No more slots; notify upper layer. */
   8210 		if (!is_transmit)
   8211 			ifp->if_flags |= IFF_OACTIVE;
   8212 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   8213 	}
   8214 
   8215 	if (sent) {
   8216 		/* Set a watchdog timer in case the chip flakes out. */
   8217 		txq->txq_lastsent = time_uptime;
   8218 		txq->txq_sending = true;
   8219 	}
   8220 }
   8221 
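         /*
          * wm_deferred_start_locked:
          *
          *	Restart transmission deferred from interrupt context; called
          *	from wm_handle_queue() with txq->txq_lock held.
          */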
   8222 static void
   8223 wm_deferred_start_locked(struct wm_txqueue *txq)
   8224 {
   8225 	struct wm_softc *sc = txq->txq_sc;
   8226 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8227 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8228 	int qid = wmq->wmq_id;
   8229 
   8230 	KASSERT(mutex_owned(txq->txq_lock));
   8231 
   8232 	if (txq->txq_stopping) {
   8233 		mutex_exit(txq->txq_lock);
   8234 		return;
   8235 	}
   8236 
   8237 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    8238 		/* XXX Needed for ALTQ or single-CPU systems. */
   8239 		if (qid == 0)
   8240 			wm_nq_start_locked(ifp);
   8241 		wm_nq_transmit_locked(ifp, txq);
   8242 	} else {
    8243 		/* XXX Needed for ALTQ or single-CPU systems. */
   8244 		if (qid == 0)
   8245 			wm_start_locked(ifp);
   8246 		wm_transmit_locked(ifp, txq);
   8247 	}
   8248 }
   8249 
   8250 /* Interrupt */
   8251 
   8252 /*
   8253  * wm_txeof:
   8254  *
   8255  *	Helper; handle transmit interrupts.
   8256  */
   8257 static bool
   8258 wm_txeof(struct wm_txqueue *txq, u_int limit)
   8259 {
   8260 	struct wm_softc *sc = txq->txq_sc;
   8261 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8262 	struct wm_txsoft *txs;
   8263 	int count = 0;
   8264 	int i;
   8265 	uint8_t status;
   8266 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   8267 	bool more = false;
   8268 
   8269 	KASSERT(mutex_owned(txq->txq_lock));
   8270 
   8271 	if (txq->txq_stopping)
   8272 		return false;
   8273 
   8274 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    8275 	/* For ALTQ and legacy (non-multiqueue) Ethernet controllers. */
   8276 	if (wmq->wmq_id == 0)
   8277 		ifp->if_flags &= ~IFF_OACTIVE;
   8278 
   8279 	/*
   8280 	 * Go through the Tx list and free mbufs for those
   8281 	 * frames which have been transmitted.
   8282 	 */
   8283 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   8284 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   8285 		if (limit-- == 0) {
   8286 			more = true;
   8287 			DPRINTF(WM_DEBUG_TX,
   8288 			    ("%s: TX: loop limited, job %d is not processed\n",
   8289 				device_xname(sc->sc_dev), i));
   8290 			break;
   8291 		}
   8292 
   8293 		txs = &txq->txq_soft[i];
   8294 
   8295 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   8296 			device_xname(sc->sc_dev), i));
   8297 
   8298 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   8299 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8300 
   8301 		status =
   8302 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   8303 		if ((status & WTX_ST_DD) == 0) {
   8304 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8305 			    BUS_DMASYNC_PREREAD);
   8306 			break;
   8307 		}
   8308 
   8309 		count++;
   8310 		DPRINTF(WM_DEBUG_TX,
   8311 		    ("%s: TX: job %d done: descs %d..%d\n",
   8312 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8313 		    txs->txs_lastdesc));
   8314 
   8315 		/*
   8316 		 * XXX We should probably be using the statistics
   8317 		 * XXX registers, but I don't know if they exist
   8318 		 * XXX on chips before the i82544.
   8319 		 */
   8320 
   8321 #ifdef WM_EVENT_COUNTERS
   8322 		if (status & WTX_ST_TU)
   8323 			WM_Q_EVCNT_INCR(txq, underrun);
   8324 #endif /* WM_EVENT_COUNTERS */
   8325 
   8326 		/*
    8327 		 * The documents for 82574 and newer say the status field has
    8328 		 * neither an EC (Excessive Collision) bit nor an LC (Late
    8329 		 * Collision) bit (both are reserved). See the "PCIe GbE
    8330 		 * Controller Open Source Software Developer's Manual", the
    8331 		 * 82574 datasheet and newer ones.
    8332 		 *
    8333 		 * XXX The LC bit was observed set on an I218 even on full-
    8334 		 * duplex media, so it might have some other, undocumented meaning.
   8335 		 */
   8336 
   8337 		if (((status & (WTX_ST_EC | WTX_ST_LC)) != 0)
   8338 		    && ((sc->sc_type < WM_T_82574)
   8339 			|| (sc->sc_type == WM_T_80003))) {
   8340 			ifp->if_oerrors++;
   8341 			if (status & WTX_ST_LC)
   8342 				log(LOG_WARNING, "%s: late collision\n",
   8343 				    device_xname(sc->sc_dev));
   8344 			else if (status & WTX_ST_EC) {
   8345 				ifp->if_collisions +=
   8346 				    TX_COLLISION_THRESHOLD + 1;
   8347 				log(LOG_WARNING, "%s: excessive collisions\n",
   8348 				    device_xname(sc->sc_dev));
   8349 			}
   8350 		} else
   8351 			ifp->if_opackets++;
   8352 
   8353 		txq->txq_packets++;
   8354 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8355 
   8356 		txq->txq_free += txs->txs_ndesc;
   8357 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8358 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8359 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8360 		m_freem(txs->txs_mbuf);
   8361 		txs->txs_mbuf = NULL;
   8362 	}
   8363 
   8364 	/* Update the dirty transmit buffer pointer. */
   8365 	txq->txq_sdirty = i;
   8366 	DPRINTF(WM_DEBUG_TX,
   8367 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8368 
   8369 	if (count != 0)
   8370 		rnd_add_uint32(&sc->rnd_source, count);
   8371 
   8372 	/*
   8373 	 * If there are no more pending transmissions, cancel the watchdog
   8374 	 * timer.
   8375 	 */
   8376 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8377 		txq->txq_sending = false;
   8378 
   8379 	return more;
   8380 }
   8381 
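         /*
          * Receive descriptor accessors.
          *
          * The hardware uses three Rx descriptor formats: the legacy format
          * (wrx_*), the 82574 extended format (erx_*) and the advanced
          * format (nqrx_*) of the NEWQUEUE (82575 and newer) controllers.
          * The inline helpers below hide those differences behind a common
          * interface, dispatching on sc_type and the WM_F_NEWQUEUE flag.
          */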
   8382 static inline uint32_t
   8383 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8384 {
   8385 	struct wm_softc *sc = rxq->rxq_sc;
   8386 
   8387 	if (sc->sc_type == WM_T_82574)
   8388 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8389 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8390 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8391 	else
   8392 		return rxq->rxq_descs[idx].wrx_status;
   8393 }
   8394 
   8395 static inline uint32_t
   8396 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8397 {
   8398 	struct wm_softc *sc = rxq->rxq_sc;
   8399 
   8400 	if (sc->sc_type == WM_T_82574)
   8401 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8402 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8403 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8404 	else
   8405 		return rxq->rxq_descs[idx].wrx_errors;
   8406 }
   8407 
   8408 static inline uint16_t
   8409 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8410 {
   8411 	struct wm_softc *sc = rxq->rxq_sc;
   8412 
   8413 	if (sc->sc_type == WM_T_82574)
   8414 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8415 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8416 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8417 	else
   8418 		return rxq->rxq_descs[idx].wrx_special;
   8419 }
   8420 
   8421 static inline int
   8422 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8423 {
   8424 	struct wm_softc *sc = rxq->rxq_sc;
   8425 
   8426 	if (sc->sc_type == WM_T_82574)
   8427 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8428 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8429 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8430 	else
   8431 		return rxq->rxq_descs[idx].wrx_len;
   8432 }
   8433 
   8434 #ifdef WM_DEBUG
   8435 static inline uint32_t
   8436 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8437 {
   8438 	struct wm_softc *sc = rxq->rxq_sc;
   8439 
   8440 	if (sc->sc_type == WM_T_82574)
   8441 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8442 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8443 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8444 	else
   8445 		return 0;
   8446 }
   8447 
   8448 static inline uint8_t
   8449 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8450 {
   8451 	struct wm_softc *sc = rxq->rxq_sc;
   8452 
   8453 	if (sc->sc_type == WM_T_82574)
   8454 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8455 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8456 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8457 	else
   8458 		return 0;
   8459 }
   8460 #endif /* WM_DEBUG */
   8461 
   8462 static inline bool
   8463 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8464     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8465 {
   8466 
   8467 	if (sc->sc_type == WM_T_82574)
   8468 		return (status & ext_bit) != 0;
   8469 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8470 		return (status & nq_bit) != 0;
   8471 	else
   8472 		return (status & legacy_bit) != 0;
   8473 }
   8474 
   8475 static inline bool
   8476 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8477     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8478 {
   8479 
   8480 	if (sc->sc_type == WM_T_82574)
   8481 		return (error & ext_bit) != 0;
   8482 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8483 		return (error & nq_bit) != 0;
   8484 	else
   8485 		return (error & legacy_bit) != 0;
   8486 }
   8487 
   8488 static inline bool
   8489 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8490 {
   8491 
   8492 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8493 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8494 		return true;
   8495 	else
   8496 		return false;
   8497 }
   8498 
   8499 static inline bool
   8500 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8501 {
   8502 	struct wm_softc *sc = rxq->rxq_sc;
   8503 
   8504 	/* XXXX missing error bit for newqueue? */
   8505 	if (wm_rxdesc_is_set_error(sc, errors,
   8506 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
   8507 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
   8508 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   8509 		NQRXC_ERROR_RXE)) {
   8510 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
   8511 		    EXTRXC_ERROR_SE, 0))
   8512 			log(LOG_WARNING, "%s: symbol error\n",
   8513 			    device_xname(sc->sc_dev));
   8514 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
   8515 		    EXTRXC_ERROR_SEQ, 0))
   8516 			log(LOG_WARNING, "%s: receive sequence error\n",
   8517 			    device_xname(sc->sc_dev));
   8518 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
   8519 		    EXTRXC_ERROR_CE, 0))
   8520 			log(LOG_WARNING, "%s: CRC error\n",
   8521 			    device_xname(sc->sc_dev));
   8522 		return true;
   8523 	}
   8524 
   8525 	return false;
   8526 }
   8527 
   8528 static inline bool
   8529 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8530 {
   8531 	struct wm_softc *sc = rxq->rxq_sc;
   8532 
   8533 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8534 		NQRXC_STATUS_DD)) {
   8535 		/* We have processed all of the receive descriptors. */
   8536 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8537 		return false;
   8538 	}
   8539 
   8540 	return true;
   8541 }
   8542 
   8543 static inline bool
   8544 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
   8545     uint16_t vlantag, struct mbuf *m)
   8546 {
   8547 
   8548 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8549 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8550 		vlan_set_tag(m, le16toh(vlantag));
   8551 	}
   8552 
   8553 	return true;
   8554 }
   8555 
   8556 static inline void
   8557 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8558     uint32_t errors, struct mbuf *m)
   8559 {
   8560 	struct wm_softc *sc = rxq->rxq_sc;
   8561 
   8562 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8563 		if (wm_rxdesc_is_set_status(sc, status,
   8564 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8565 			WM_Q_EVCNT_INCR(rxq, ipsum);
   8566 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8567 			if (wm_rxdesc_is_set_error(sc, errors,
   8568 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8569 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
   8570 		}
   8571 		if (wm_rxdesc_is_set_status(sc, status,
   8572 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8573 			/*
   8574 			 * Note: we don't know if this was TCP or UDP,
   8575 			 * so we just set both bits, and expect the
   8576 			 * upper layers to deal.
   8577 			 */
   8578 			WM_Q_EVCNT_INCR(rxq, tusum);
   8579 			m->m_pkthdr.csum_flags |=
   8580 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8581 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8582 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
   8583 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8584 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
   8585 		}
   8586 	}
   8587 }
   8588 
   8589 /*
   8590  * wm_rxeof:
   8591  *
   8592  *	Helper; handle receive interrupts.
   8593  */
   8594 static bool
   8595 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8596 {
   8597 	struct wm_softc *sc = rxq->rxq_sc;
   8598 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8599 	struct wm_rxsoft *rxs;
   8600 	struct mbuf *m;
   8601 	int i, len;
   8602 	int count = 0;
   8603 	uint32_t status, errors;
   8604 	uint16_t vlantag;
   8605 	bool more = false;
   8606 
   8607 	KASSERT(mutex_owned(rxq->rxq_lock));
   8608 
   8609 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8610 		if (limit-- == 0) {
   8611 			rxq->rxq_ptr = i;
   8612 			more = true;
   8613 			DPRINTF(WM_DEBUG_RX,
   8614 			    ("%s: RX: loop limited, descriptor %d is not processed\n",
   8615 				device_xname(sc->sc_dev), i));
   8616 			break;
   8617 		}
   8618 
   8619 		rxs = &rxq->rxq_soft[i];
   8620 
   8621 		DPRINTF(WM_DEBUG_RX,
   8622 		    ("%s: RX: checking descriptor %d\n",
   8623 			device_xname(sc->sc_dev), i));
   8624 		wm_cdrxsync(rxq, i,
   8625 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8626 
   8627 		status = wm_rxdesc_get_status(rxq, i);
   8628 		errors = wm_rxdesc_get_errors(rxq, i);
   8629 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8630 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8631 #ifdef WM_DEBUG
   8632 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8633 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8634 #endif
   8635 
   8636 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8637 			/*
    8638 			 * Update the receive pointer while holding rxq_lock
    8639 			 * so it stays consistent with the counters.
   8640 			 */
   8641 			rxq->rxq_ptr = i;
   8642 			break;
   8643 		}
   8644 
   8645 		count++;
   8646 		if (__predict_false(rxq->rxq_discard)) {
   8647 			DPRINTF(WM_DEBUG_RX,
   8648 			    ("%s: RX: discarding contents of descriptor %d\n",
   8649 				device_xname(sc->sc_dev), i));
   8650 			wm_init_rxdesc(rxq, i);
   8651 			if (wm_rxdesc_is_eop(rxq, status)) {
   8652 				/* Reset our state. */
   8653 				DPRINTF(WM_DEBUG_RX,
   8654 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8655 					device_xname(sc->sc_dev)));
   8656 				rxq->rxq_discard = 0;
   8657 			}
   8658 			continue;
   8659 		}
   8660 
   8661 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8662 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8663 
   8664 		m = rxs->rxs_mbuf;
   8665 
   8666 		/*
   8667 		 * Add a new receive buffer to the ring, unless of
   8668 		 * course the length is zero. Treat the latter as a
   8669 		 * failed mapping.
   8670 		 */
   8671 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8672 			/*
   8673 			 * Failed, throw away what we've done so
   8674 			 * far, and discard the rest of the packet.
   8675 			 */
   8676 			ifp->if_ierrors++;
   8677 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8678 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8679 			wm_init_rxdesc(rxq, i);
   8680 			if (!wm_rxdesc_is_eop(rxq, status))
   8681 				rxq->rxq_discard = 1;
   8682 			if (rxq->rxq_head != NULL)
   8683 				m_freem(rxq->rxq_head);
   8684 			WM_RXCHAIN_RESET(rxq);
   8685 			DPRINTF(WM_DEBUG_RX,
   8686 			    ("%s: RX: Rx buffer allocation failed, "
   8687 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8688 				rxq->rxq_discard ? " (discard)" : ""));
   8689 			continue;
   8690 		}
   8691 
   8692 		m->m_len = len;
   8693 		rxq->rxq_len += len;
   8694 		DPRINTF(WM_DEBUG_RX,
   8695 		    ("%s: RX: buffer at %p len %d\n",
   8696 			device_xname(sc->sc_dev), m->m_data, len));
   8697 
   8698 		/* If this is not the end of the packet, keep looking. */
   8699 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8700 			WM_RXCHAIN_LINK(rxq, m);
   8701 			DPRINTF(WM_DEBUG_RX,
   8702 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8703 				device_xname(sc->sc_dev), rxq->rxq_len));
   8704 			continue;
   8705 		}
   8706 
    8707 		/*
    8708 		 * Okay, we have the entire packet now. The chip is
    8709 		 * configured to include the FCS except on I350, I354 and
    8710 		 * I21[01] (not all chips can be configured to strip it),
    8711 		 * so we need to trim it. We may also need to adjust the
    8712 		 * length of the previous mbuf in the chain if the current
    8713 		 * mbuf is too short.
    8714 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    8715 		 * is always set on I350, so we don't trim the FCS there.
    8716 		 */
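         		/*
         		 * Illustrative example: with ETHER_CRC_LEN == 4, if the
         		 * last mbuf holds only 1 byte (all of it FCS), its length
         		 * becomes 0 and the remaining 3 FCS bytes are shaved off
         		 * the previous mbuf in the chain.
         		 */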
   8717 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8718 		    && (sc->sc_type != WM_T_I210)
   8719 		    && (sc->sc_type != WM_T_I211)) {
   8720 			if (m->m_len < ETHER_CRC_LEN) {
   8721 				rxq->rxq_tail->m_len
   8722 				    -= (ETHER_CRC_LEN - m->m_len);
   8723 				m->m_len = 0;
   8724 			} else
   8725 				m->m_len -= ETHER_CRC_LEN;
   8726 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8727 		} else
   8728 			len = rxq->rxq_len;
   8729 
   8730 		WM_RXCHAIN_LINK(rxq, m);
   8731 
   8732 		*rxq->rxq_tailp = NULL;
   8733 		m = rxq->rxq_head;
   8734 
   8735 		WM_RXCHAIN_RESET(rxq);
   8736 
   8737 		DPRINTF(WM_DEBUG_RX,
   8738 		    ("%s: RX: have entire packet, len -> %d\n",
   8739 			device_xname(sc->sc_dev), len));
   8740 
   8741 		/* If an error occurred, update stats and drop the packet. */
   8742 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8743 			m_freem(m);
   8744 			continue;
   8745 		}
   8746 
   8747 		/* No errors.  Receive the packet. */
   8748 		m_set_rcvif(m, ifp);
   8749 		m->m_pkthdr.len = len;
   8750 		/*
    8751 		 * TODO:
    8752 		 * rsshash and rsstype should be saved to this mbuf.
   8753 		 */
   8754 		DPRINTF(WM_DEBUG_RX,
   8755 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8756 			device_xname(sc->sc_dev), rsstype, rsshash));
   8757 
   8758 		/*
   8759 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8760 		 * for us.  Associate the tag with the packet.
   8761 		 */
   8762 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8763 			continue;
   8764 
   8765 		/* Set up checksum info for this packet. */
   8766 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8767 		/*
    8768 		 * Update the receive pointer while holding rxq_lock so it
    8769 		 * stays consistent with the counters.
   8770 		 */
   8771 		rxq->rxq_ptr = i;
   8772 		rxq->rxq_packets++;
   8773 		rxq->rxq_bytes += len;
   8774 		mutex_exit(rxq->rxq_lock);
   8775 
   8776 		/* Pass it on. */
   8777 		if_percpuq_enqueue(sc->sc_ipq, m);
   8778 
   8779 		mutex_enter(rxq->rxq_lock);
   8780 
   8781 		if (rxq->rxq_stopping)
   8782 			break;
   8783 	}
   8784 
   8785 	if (count != 0)
   8786 		rnd_add_uint32(&sc->rnd_source, count);
   8787 
   8788 	DPRINTF(WM_DEBUG_RX,
   8789 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8790 
   8791 	return more;
   8792 }
   8793 
   8794 /*
   8795  * wm_linkintr_gmii:
   8796  *
   8797  *	Helper; handle link interrupts for GMII.
   8798  */
   8799 static void
   8800 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8801 {
   8802 
   8803 	KASSERT(WM_CORE_LOCKED(sc));
   8804 
   8805 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8806 		__func__));
   8807 
   8808 	if (icr & ICR_LSC) {
   8809 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8810 		uint32_t reg;
   8811 		bool link;
   8812 
   8813 		link = status & STATUS_LU;
   8814 		if (link) {
   8815 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8816 				device_xname(sc->sc_dev),
   8817 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8818 		} else {
   8819 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8820 				device_xname(sc->sc_dev)));
   8821 		}
   8822 		if ((sc->sc_type == WM_T_ICH8) && (link == false))
   8823 			wm_gig_downshift_workaround_ich8lan(sc);
   8824 
   8825 		if ((sc->sc_type == WM_T_ICH8)
   8826 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8827 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8828 		}
   8829 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8830 			device_xname(sc->sc_dev)));
   8831 		mii_pollstat(&sc->sc_mii);
   8832 		if (sc->sc_type == WM_T_82543) {
   8833 			int miistatus, active;
   8834 
   8835 			/*
   8836 			 * With 82543, we need to force speed and
   8837 			 * duplex on the MAC equal to what the PHY
   8838 			 * speed and duplex configuration is.
   8839 			 */
   8840 			miistatus = sc->sc_mii.mii_media_status;
   8841 
   8842 			if (miistatus & IFM_ACTIVE) {
   8843 				active = sc->sc_mii.mii_media_active;
   8844 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8845 				switch (IFM_SUBTYPE(active)) {
   8846 				case IFM_10_T:
   8847 					sc->sc_ctrl |= CTRL_SPEED_10;
   8848 					break;
   8849 				case IFM_100_TX:
   8850 					sc->sc_ctrl |= CTRL_SPEED_100;
   8851 					break;
   8852 				case IFM_1000_T:
   8853 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8854 					break;
   8855 				default:
   8856 					/*
   8857 					 * fiber?
    8858 					 * Should not enter here.
   8859 					 */
   8860 					printf("unknown media (%x)\n", active);
   8861 					break;
   8862 				}
   8863 				if (active & IFM_FDX)
   8864 					sc->sc_ctrl |= CTRL_FD;
   8865 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8866 			}
   8867 		} else if (sc->sc_type == WM_T_PCH) {
   8868 			wm_k1_gig_workaround_hv(sc,
   8869 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8870 		}
   8871 
   8872 		/*
    8873 		 * I217 packet loss issue:
    8874 		 * ensure that the FEXTNVM4 Beacon Duration is set correctly
    8875 		 * on power-up.
    8876 		 * Set the Beacon Duration for I217 to 8 usec.
   8877 		 */
   8878 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8879 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8880 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8881 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8882 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8883 		}
   8884 
   8885 		/* Work-around I218 hang issue */
   8886 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM) ||
   8887 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V) ||
   8888 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM3) ||
   8889 		    (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V3))
   8890 			wm_k1_workaround_lpt_lp(sc, link);
   8891 
   8892 		if (sc->sc_type >= WM_T_PCH_LPT) {
   8893 			/*
   8894 			 * Set platform power management values for Latency
   8895 			 * Tolerance Reporting (LTR)
   8896 			 */
   8897 			wm_platform_pm_pch_lpt(sc,
   8898 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8899 		}
   8900 
   8901 		/* FEXTNVM6 K1-off workaround */
   8902 		if (sc->sc_type == WM_T_PCH_SPT) {
   8903 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8904 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8905 			    & FEXTNVM6_K1_OFF_ENABLE)
   8906 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8907 			else
   8908 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8909 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8910 		}
   8911 
   8912 		if (!link)
   8913 			return;
   8914 
   8915 		switch (sc->sc_type) {
   8916 		case WM_T_PCH2:
   8917 			wm_k1_workaround_lv(sc);
   8918 			/* FALLTHROUGH */
   8919 		case WM_T_PCH:
   8920 			if (sc->sc_phytype == WMPHY_82578)
   8921 				wm_link_stall_workaround_hv(sc);
   8922 			break;
   8923 		default:
   8924 			break;
   8925 		}
   8926 	} else if (icr & ICR_RXSEQ) {
    8927 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8928 			device_xname(sc->sc_dev)));
   8929 	}
   8930 }
   8931 
   8932 /*
   8933  * wm_linkintr_tbi:
   8934  *
   8935  *	Helper; handle link interrupts for TBI mode.
   8936  */
   8937 static void
   8938 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8939 {
   8940 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8941 	uint32_t status;
   8942 
   8943 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8944 		__func__));
   8945 
   8946 	status = CSR_READ(sc, WMREG_STATUS);
   8947 	if (icr & ICR_LSC) {
   8948 		wm_check_for_link(sc);
   8949 		if (status & STATUS_LU) {
   8950 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8951 				device_xname(sc->sc_dev),
   8952 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8953 			/*
   8954 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8955 			 * so we should update sc->sc_ctrl
   8956 			 */
   8957 
   8958 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8959 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8960 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8961 			if (status & STATUS_FD)
   8962 				sc->sc_tctl |=
   8963 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8964 			else
   8965 				sc->sc_tctl |=
   8966 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8967 			if (sc->sc_ctrl & CTRL_TFCE)
   8968 				sc->sc_fcrtl |= FCRTL_XONE;
   8969 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8970 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8971 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   8972 			sc->sc_tbi_linkup = 1;
   8973 			if_link_state_change(ifp, LINK_STATE_UP);
   8974 		} else {
   8975 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8976 				device_xname(sc->sc_dev)));
   8977 			sc->sc_tbi_linkup = 0;
   8978 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8979 		}
   8980 		/* Update LED */
   8981 		wm_tbi_serdes_set_linkled(sc);
   8982 	} else if (icr & ICR_RXSEQ) {
   8983 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8984 			device_xname(sc->sc_dev)));
   8985 	}
   8986 }
   8987 
   8988 /*
   8989  * wm_linkintr_serdes:
   8990  *
    8991  *	Helper; handle link interrupts for SERDES mode.
   8992  */
   8993 static void
   8994 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8995 {
   8996 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8997 	struct mii_data *mii = &sc->sc_mii;
   8998 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8999 	uint32_t pcs_adv, pcs_lpab, reg;
   9000 
   9001 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   9002 		__func__));
   9003 
   9004 	if (icr & ICR_LSC) {
   9005 		/* Check PCS */
   9006 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9007 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   9008 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   9009 				device_xname(sc->sc_dev)));
   9010 			mii->mii_media_status |= IFM_ACTIVE;
   9011 			sc->sc_tbi_linkup = 1;
   9012 			if_link_state_change(ifp, LINK_STATE_UP);
   9013 		} else {
   9014 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   9015 				device_xname(sc->sc_dev)));
   9016 			mii->mii_media_status |= IFM_NONE;
   9017 			sc->sc_tbi_linkup = 0;
   9018 			if_link_state_change(ifp, LINK_STATE_DOWN);
   9019 			wm_tbi_serdes_set_linkled(sc);
   9020 			return;
   9021 		}
   9022 		mii->mii_media_active |= IFM_1000_SX;
   9023 		if ((reg & PCS_LSTS_FDX) != 0)
   9024 			mii->mii_media_active |= IFM_FDX;
   9025 		else
   9026 			mii->mii_media_active |= IFM_HDX;
   9027 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9028 			/* Check flow */
   9029 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9030 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9031 				DPRINTF(WM_DEBUG_LINK,
   9032 				    ("XXX LINKOK but not ACOMP\n"));
   9033 				return;
   9034 			}
   9035 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9036 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9037 			DPRINTF(WM_DEBUG_LINK,
   9038 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
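         			/*
         			 * Resolve flow control following the usual
         			 * symmetric/asymmetric PAUSE rules (cf. IEEE
         			 * 802.3 Annex 28B), as implemented below:
         			 *   both sides SYM              -> Tx+Rx pause
         			 *   we ASYM only, peer SYM+ASYM -> Tx pause only
         			 *   we SYM+ASYM, peer ASYM only -> Rx pause only
         			 */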
   9039 			if ((pcs_adv & TXCW_SYM_PAUSE)
   9040 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9041 				mii->mii_media_active |= IFM_FLOW
   9042 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9043 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9044 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9045 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   9046 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9047 				mii->mii_media_active |= IFM_FLOW
   9048 				    | IFM_ETH_TXPAUSE;
   9049 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   9050 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   9051 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9052 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   9053 				mii->mii_media_active |= IFM_FLOW
   9054 				    | IFM_ETH_RXPAUSE;
   9055 		}
   9056 		/* Update LED */
   9057 		wm_tbi_serdes_set_linkled(sc);
   9058 	} else {
   9059 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   9060 		    device_xname(sc->sc_dev)));
   9061 	}
   9062 }
   9063 
   9064 /*
   9065  * wm_linkintr:
   9066  *
   9067  *	Helper; handle link interrupts.
   9068  */
   9069 static void
   9070 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   9071 {
   9072 
   9073 	KASSERT(WM_CORE_LOCKED(sc));
   9074 
   9075 	if (sc->sc_flags & WM_F_HAS_MII)
   9076 		wm_linkintr_gmii(sc, icr);
   9077 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9078 	    && (sc->sc_type >= WM_T_82575))
   9079 		wm_linkintr_serdes(sc, icr);
   9080 	else
   9081 		wm_linkintr_tbi(sc, icr);
   9082 }
   9083 
   9084 /*
   9085  * wm_intr_legacy:
   9086  *
   9087  *	Interrupt service routine for INTx and MSI.
   9088  */
   9089 static int
   9090 wm_intr_legacy(void *arg)
   9091 {
   9092 	struct wm_softc *sc = arg;
   9093 	struct wm_queue *wmq = &sc->sc_queue[0];
   9094 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9095 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9096 	uint32_t icr, rndval = 0;
   9097 	int handled = 0;
   9098 
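         	/*
         	 * Note: reading ICR clears the asserted interrupt causes on
         	 * these devices, so we keep looping until no cause we
         	 * registered for (sc_icr) remains set.
         	 */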
   9099 	while (1 /* CONSTCOND */) {
   9100 		icr = CSR_READ(sc, WMREG_ICR);
   9101 		if ((icr & sc->sc_icr) == 0)
   9102 			break;
   9103 		if (handled == 0) {
   9104 			DPRINTF(WM_DEBUG_TX,
    9105 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   9106 		}
   9107 		if (rndval == 0)
   9108 			rndval = icr;
   9109 
   9110 		mutex_enter(rxq->rxq_lock);
   9111 
   9112 		if (rxq->rxq_stopping) {
   9113 			mutex_exit(rxq->rxq_lock);
   9114 			break;
   9115 		}
   9116 
   9117 		handled = 1;
   9118 
   9119 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9120 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   9121 			DPRINTF(WM_DEBUG_RX,
   9122 			    ("%s: RX: got Rx intr 0x%08x\n",
   9123 				device_xname(sc->sc_dev),
   9124 				icr & (ICR_RXDMT0 | ICR_RXT0)));
   9125 			WM_Q_EVCNT_INCR(rxq, intr);
   9126 		}
   9127 #endif
   9128 		/*
   9129 		 * wm_rxeof() does *not* call upper layer functions directly,
    9130 		 * as if_percpuq_enqueue() just calls softint_schedule().
   9131 		 * So, we can call wm_rxeof() in interrupt context.
   9132 		 */
   9133 		wm_rxeof(rxq, UINT_MAX);
   9134 
   9135 		mutex_exit(rxq->rxq_lock);
   9136 		mutex_enter(txq->txq_lock);
   9137 
   9138 		if (txq->txq_stopping) {
   9139 			mutex_exit(txq->txq_lock);
   9140 			break;
   9141 		}
   9142 
   9143 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   9144 		if (icr & ICR_TXDW) {
   9145 			DPRINTF(WM_DEBUG_TX,
   9146 			    ("%s: TX: got TXDW interrupt\n",
   9147 				device_xname(sc->sc_dev)));
   9148 			WM_Q_EVCNT_INCR(txq, txdw);
   9149 		}
   9150 #endif
   9151 		wm_txeof(txq, UINT_MAX);
   9152 
   9153 		mutex_exit(txq->txq_lock);
   9154 		WM_CORE_LOCK(sc);
   9155 
   9156 		if (sc->sc_core_stopping) {
   9157 			WM_CORE_UNLOCK(sc);
   9158 			break;
   9159 		}
   9160 
   9161 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   9162 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9163 			wm_linkintr(sc, icr);
   9164 		}
   9165 
   9166 		WM_CORE_UNLOCK(sc);
   9167 
   9168 		if (icr & ICR_RXO) {
   9169 #if defined(WM_DEBUG)
   9170 			log(LOG_WARNING, "%s: Receive overrun\n",
   9171 			    device_xname(sc->sc_dev));
   9172 #endif /* defined(WM_DEBUG) */
   9173 		}
   9174 	}
   9175 
   9176 	rnd_add_uint32(&sc->rnd_source, rndval);
   9177 
   9178 	if (handled) {
   9179 		/* Try to get more packets going. */
   9180 		softint_schedule(wmq->wmq_si);
   9181 	}
   9182 
   9183 	return handled;
   9184 }
   9185 
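         /*
          * wm_txrxintr_disable/wm_txrxintr_enable:
          *
          *	Mask and unmask the Tx/Rx interrupts of one queue pair.  The
          *	mask register depends on the chip: IMC/IMS on 82574, EIMC/EIMS
          *	with per-queue bits on 82575, and EIMC/EIMS with one bit per
          *	MSI-X vector on newer controllers.
          */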
   9186 static inline void
   9187 wm_txrxintr_disable(struct wm_queue *wmq)
   9188 {
   9189 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9190 
   9191 	if (sc->sc_type == WM_T_82574)
   9192 		CSR_WRITE(sc, WMREG_IMC,
   9193 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   9194 	else if (sc->sc_type == WM_T_82575)
   9195 		CSR_WRITE(sc, WMREG_EIMC,
   9196 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9197 	else
   9198 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   9199 }
   9200 
   9201 static inline void
   9202 wm_txrxintr_enable(struct wm_queue *wmq)
   9203 {
   9204 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   9205 
   9206 	wm_itrs_calculate(sc, wmq);
   9207 
   9208 	/*
    9209 	 * ICR_OTHER, which was disabled in wm_linkintr_msix(), is re-enabled
    9210 	 * here. It does not matter which of RXQ(0) and RXQ(1) enables
    9211 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
    9212 	 * while its wm_handle_queue(wmq) is running.
   9213 	 */
   9214 	if (sc->sc_type == WM_T_82574)
   9215 		CSR_WRITE(sc, WMREG_IMS,
   9216 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
   9217 	else if (sc->sc_type == WM_T_82575)
   9218 		CSR_WRITE(sc, WMREG_EIMS,
   9219 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   9220 	else
   9221 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   9222 }
   9223 
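         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx MSI-X vector of one
          *	queue pair.  Work is bounded by the per-interrupt process
          *	limits; leftover work is deferred to wm_handle_queue() via
          *	softint, with this queue's interrupts kept disabled until
          *	the backlog is drained.
          */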
   9224 static int
   9225 wm_txrxintr_msix(void *arg)
   9226 {
   9227 	struct wm_queue *wmq = arg;
   9228 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9229 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9230 	struct wm_softc *sc = txq->txq_sc;
   9231 	u_int txlimit = sc->sc_tx_intr_process_limit;
   9232 	u_int rxlimit = sc->sc_rx_intr_process_limit;
   9233 	bool txmore;
   9234 	bool rxmore;
   9235 
   9236 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   9237 
   9238 	DPRINTF(WM_DEBUG_TX,
   9239 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   9240 
   9241 	wm_txrxintr_disable(wmq);
   9242 
   9243 	mutex_enter(txq->txq_lock);
   9244 
   9245 	if (txq->txq_stopping) {
   9246 		mutex_exit(txq->txq_lock);
   9247 		return 0;
   9248 	}
   9249 
   9250 	WM_Q_EVCNT_INCR(txq, txdw);
   9251 	txmore = wm_txeof(txq, txlimit);
    9252 	/* wm_deferred_start_locked() is done in wm_handle_queue(). */
   9253 	mutex_exit(txq->txq_lock);
   9254 
   9255 	DPRINTF(WM_DEBUG_RX,
   9256 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   9257 	mutex_enter(rxq->rxq_lock);
   9258 
   9259 	if (rxq->rxq_stopping) {
   9260 		mutex_exit(rxq->rxq_lock);
   9261 		return 0;
   9262 	}
   9263 
   9264 	WM_Q_EVCNT_INCR(rxq, intr);
   9265 	rxmore = wm_rxeof(rxq, rxlimit);
   9266 	mutex_exit(rxq->rxq_lock);
   9267 
   9268 	wm_itrs_writereg(sc, wmq);
   9269 
   9270 	if (txmore || rxmore)
   9271 		softint_schedule(wmq->wmq_si);
   9272 	else
   9273 		wm_txrxintr_enable(wmq);
   9274 
   9275 	return 1;
   9276 }
   9277 
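         /*
          * wm_handle_queue:
          *
          *	Softint handler that continues the Tx/Rx processing deferred
          *	from wm_txrxintr_msix() (or wm_intr_legacy()) when the
          *	interrupt-time work limits were reached, and restarts
          *	transmission via wm_deferred_start_locked().
          */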
   9278 static void
   9279 wm_handle_queue(void *arg)
   9280 {
   9281 	struct wm_queue *wmq = arg;
   9282 	struct wm_txqueue *txq = &wmq->wmq_txq;
   9283 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   9284 	struct wm_softc *sc = txq->txq_sc;
   9285 	u_int txlimit = sc->sc_tx_process_limit;
   9286 	u_int rxlimit = sc->sc_rx_process_limit;
   9287 	bool txmore;
   9288 	bool rxmore;
   9289 
   9290 	mutex_enter(txq->txq_lock);
   9291 	if (txq->txq_stopping) {
   9292 		mutex_exit(txq->txq_lock);
   9293 		return;
   9294 	}
   9295 	txmore = wm_txeof(txq, txlimit);
   9296 	wm_deferred_start_locked(txq);
   9297 	mutex_exit(txq->txq_lock);
   9298 
   9299 	mutex_enter(rxq->rxq_lock);
   9300 	if (rxq->rxq_stopping) {
   9301 		mutex_exit(rxq->rxq_lock);
   9302 		return;
   9303 	}
   9304 	WM_Q_EVCNT_INCR(rxq, defer);
   9305 	rxmore = wm_rxeof(rxq, rxlimit);
   9306 	mutex_exit(rxq->rxq_lock);
   9307 
   9308 	if (txmore || rxmore)
   9309 		softint_schedule(wmq->wmq_si);
   9310 	else
   9311 		wm_txrxintr_enable(wmq);
   9312 }
   9313 
   9314 /*
   9315  * wm_linkintr_msix:
   9316  *
   9317  *	Interrupt service routine for link status change for MSI-X.
   9318  */
   9319 static int
   9320 wm_linkintr_msix(void *arg)
   9321 {
   9322 	struct wm_softc *sc = arg;
   9323 	uint32_t reg;
   9324 	bool has_rxo;
   9325 
   9326 	DPRINTF(WM_DEBUG_LINK,
   9327 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   9328 
   9329 	reg = CSR_READ(sc, WMREG_ICR);
   9330 	WM_CORE_LOCK(sc);
   9331 	if (sc->sc_core_stopping)
   9332 		goto out;
   9333 
   9334 	if ((reg & ICR_LSC) != 0) {
   9335 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   9336 		wm_linkintr(sc, ICR_LSC);
   9337 	}
   9338 
   9339 	/*
   9340 	 * XXX 82574 MSI-X mode workaround
   9341 	 *
    9342 	 * In 82574 MSI-X mode a receive overrun (RXO) interrupt arrives on
    9343 	 * the ICR_OTHER MSI-X vector and triggers neither the ICR_RXQ(0) nor
    9344 	 * the ICR_RXQ(1) vector. So, we generate ICR_RXQ(0) and ICR_RXQ(1)
    9345 	 * interrupts by writing WMREG_ICS to process received packets.
   9346 	 */
   9347 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
   9348 #if defined(WM_DEBUG)
   9349 		log(LOG_WARNING, "%s: Receive overrun\n",
   9350 		    device_xname(sc->sc_dev));
   9351 #endif /* defined(WM_DEBUG) */
   9352 
   9353 		has_rxo = true;
   9354 		/*
    9355 		 * The RXO interrupt fires at a very high rate when receive
    9356 		 * traffic is heavy, so we use polling mode for ICR_OTHER, as
    9357 		 * for the Tx/Rx interrupts. ICR_OTHER will be re-enabled at the
    9358 		 * end of wm_txrxintr_msix(), which is kicked by both the
    9359 		 * ICR_RXQ(0) and ICR_RXQ(1) interrupts.
   9360 		 */
   9361 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
   9362 
   9363 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
   9364 	}
    9365 
   9368 out:
   9369 	WM_CORE_UNLOCK(sc);
   9370 
   9371 	if (sc->sc_type == WM_T_82574) {
   9372 		if (!has_rxo)
   9373 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   9374 		else
   9375 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   9376 	} else if (sc->sc_type == WM_T_82575)
   9377 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   9378 	else
   9379 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   9380 
   9381 	return 1;
   9382 }
   9383 
   9384 /*
   9385  * Media related.
   9386  * GMII, SGMII, TBI (and SERDES)
   9387  */
   9388 
   9389 /* Common */
   9390 
   9391 /*
   9392  * wm_tbi_serdes_set_linkled:
   9393  *
   9394  *	Update the link LED on TBI and SERDES devices.
   9395  */
   9396 static void
   9397 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9398 {
   9399 
   9400 	if (sc->sc_tbi_linkup)
   9401 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9402 	else
   9403 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9404 
   9405 	/* 82540 or newer devices are active low */
   9406 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9407 
   9408 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9409 }
   9410 
   9411 /* GMII related */
   9412 
   9413 /*
   9414  * wm_gmii_reset:
   9415  *
   9416  *	Reset the PHY.
   9417  */
   9418 static void
   9419 wm_gmii_reset(struct wm_softc *sc)
   9420 {
   9421 	uint32_t reg;
   9422 	int rv;
   9423 
   9424 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9425 		device_xname(sc->sc_dev), __func__));
   9426 
   9427 	rv = sc->phy.acquire(sc);
   9428 	if (rv != 0) {
   9429 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9430 		    __func__);
   9431 		return;
   9432 	}
   9433 
   9434 	switch (sc->sc_type) {
   9435 	case WM_T_82542_2_0:
   9436 	case WM_T_82542_2_1:
   9437 		/* null */
   9438 		break;
   9439 	case WM_T_82543:
   9440 		/*
   9441 		 * With 82543, we need to force speed and duplex on the MAC
   9442 		 * equal to what the PHY speed and duplex configuration is.
   9443 		 * In addition, we need to perform a hardware reset on the PHY
   9444 		 * to take it out of reset.
   9445 		 */
   9446 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9447 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9448 
   9449 		/* The PHY reset pin is active-low. */
   9450 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9451 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9452 		    CTRL_EXT_SWDPIN(4));
   9453 		reg |= CTRL_EXT_SWDPIO(4);
   9454 
   9455 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9456 		CSR_WRITE_FLUSH(sc);
   9457 		delay(10*1000);
   9458 
   9459 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9460 		CSR_WRITE_FLUSH(sc);
   9461 		delay(150);
   9462 #if 0
   9463 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9464 #endif
   9465 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9466 		break;
   9467 	case WM_T_82544:	/* reset 10000us */
   9468 	case WM_T_82540:
   9469 	case WM_T_82545:
   9470 	case WM_T_82545_3:
   9471 	case WM_T_82546:
   9472 	case WM_T_82546_3:
   9473 	case WM_T_82541:
   9474 	case WM_T_82541_2:
   9475 	case WM_T_82547:
   9476 	case WM_T_82547_2:
   9477 	case WM_T_82571:	/* reset 100us */
   9478 	case WM_T_82572:
   9479 	case WM_T_82573:
   9480 	case WM_T_82574:
   9481 	case WM_T_82575:
   9482 	case WM_T_82576:
   9483 	case WM_T_82580:
   9484 	case WM_T_I350:
   9485 	case WM_T_I354:
   9486 	case WM_T_I210:
   9487 	case WM_T_I211:
   9488 	case WM_T_82583:
   9489 	case WM_T_80003:
   9490 		/* generic reset */
   9491 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9492 		CSR_WRITE_FLUSH(sc);
   9493 		delay(20000);
   9494 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9495 		CSR_WRITE_FLUSH(sc);
   9496 		delay(20000);
   9497 
   9498 		if ((sc->sc_type == WM_T_82541)
   9499 		    || (sc->sc_type == WM_T_82541_2)
   9500 		    || (sc->sc_type == WM_T_82547)
   9501 		    || (sc->sc_type == WM_T_82547_2)) {
    9502 			/* Workarounds for IGP are done in igp_reset(). */
   9503 			/* XXX add code to set LED after phy reset */
   9504 		}
   9505 		break;
   9506 	case WM_T_ICH8:
   9507 	case WM_T_ICH9:
   9508 	case WM_T_ICH10:
   9509 	case WM_T_PCH:
   9510 	case WM_T_PCH2:
   9511 	case WM_T_PCH_LPT:
   9512 	case WM_T_PCH_SPT:
   9513 	case WM_T_PCH_CNP:
   9514 		/* generic reset */
   9515 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9516 		CSR_WRITE_FLUSH(sc);
   9517 		delay(100);
   9518 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9519 		CSR_WRITE_FLUSH(sc);
   9520 		delay(150);
   9521 		break;
   9522 	default:
   9523 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9524 		    __func__);
   9525 		break;
   9526 	}
   9527 
   9528 	sc->phy.release(sc);
   9529 
   9530 	/* get_cfg_done */
   9531 	wm_get_cfg_done(sc);
   9532 
   9533 	/* extra setup */
   9534 	switch (sc->sc_type) {
   9535 	case WM_T_82542_2_0:
   9536 	case WM_T_82542_2_1:
   9537 	case WM_T_82543:
   9538 	case WM_T_82544:
   9539 	case WM_T_82540:
   9540 	case WM_T_82545:
   9541 	case WM_T_82545_3:
   9542 	case WM_T_82546:
   9543 	case WM_T_82546_3:
   9544 	case WM_T_82541_2:
   9545 	case WM_T_82547_2:
   9546 	case WM_T_82571:
   9547 	case WM_T_82572:
   9548 	case WM_T_82573:
   9549 	case WM_T_82574:
   9550 	case WM_T_82583:
   9551 	case WM_T_82575:
   9552 	case WM_T_82576:
   9553 	case WM_T_82580:
   9554 	case WM_T_I350:
   9555 	case WM_T_I354:
   9556 	case WM_T_I210:
   9557 	case WM_T_I211:
   9558 	case WM_T_80003:
   9559 		/* null */
   9560 		break;
   9561 	case WM_T_82541:
   9562 	case WM_T_82547:
    9563 		/* XXX Actively configure the LED after PHY reset */
   9564 		break;
   9565 	case WM_T_ICH8:
   9566 	case WM_T_ICH9:
   9567 	case WM_T_ICH10:
   9568 	case WM_T_PCH:
   9569 	case WM_T_PCH2:
   9570 	case WM_T_PCH_LPT:
   9571 	case WM_T_PCH_SPT:
   9572 	case WM_T_PCH_CNP:
   9573 		wm_phy_post_reset(sc);
   9574 		break;
   9575 	default:
   9576 		panic("%s: unknown type\n", __func__);
   9577 		break;
   9578 	}
   9579 }
   9580 
    9581 /*
    9582  * Set up sc_phytype and mii_{read|write}reg.
    9583  *
    9584  *  To identify the PHY type, the correct read/write functions must be
    9585  * selected first, and selecting them requires the PCI ID or the MAC
    9586  * type, because no PHY register can be accessed yet.
    9587  *
    9588  *  On the first call of this function, the PHY ID is not known yet, so
    9589  * check the PCI ID or the MAC type. The list of PCI IDs may not be
    9590  * complete, so the result might be incorrect.
    9591  *
    9592  *  On the second call, the PHY OUI and model are used to identify the
    9593  * PHY type. This might still not be perfect because of missing table
    9594  * entries, but it should be better than the first call.
    9595  *
    9596  *  If the newly detected result differs from the previous assumption,
    9597  * a diagnostic message is printed.
    9598  */
   9599 static void
   9600 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9601     uint16_t phy_model)
   9602 {
   9603 	device_t dev = sc->sc_dev;
   9604 	struct mii_data *mii = &sc->sc_mii;
   9605 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9606 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9607 	mii_readreg_t new_readreg;
   9608 	mii_writereg_t new_writereg;
   9609 
   9610 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9611 		device_xname(sc->sc_dev), __func__));
   9612 
   9613 	if (mii->mii_readreg == NULL) {
   9614 		/*
   9615 		 *  This is the first call of this function. For ICH and PCH
   9616 		 * variants, it's difficult to determine the PHY access method
   9617 		 * by sc_type, so use the PCI product ID for some devices.
   9618 		 */
   9619 
   9620 		switch (sc->sc_pcidevid) {
   9621 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9622 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9623 			/* 82577 */
   9624 			new_phytype = WMPHY_82577;
   9625 			break;
   9626 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9627 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9628 			/* 82578 */
   9629 			new_phytype = WMPHY_82578;
   9630 			break;
   9631 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9632 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9633 			/* 82579 */
   9634 			new_phytype = WMPHY_82579;
   9635 			break;
   9636 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9637 		case PCI_PRODUCT_INTEL_82801I_BM:
   9638 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9639 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9640 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9641 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9642 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9643 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9644 			/* ICH8, 9, 10 with 82567 */
   9645 			new_phytype = WMPHY_BM;
   9646 			break;
   9647 		default:
   9648 			break;
   9649 		}
   9650 	} else {
   9651 		/* It's not the first call. Use PHY OUI and model */
   9652 		switch (phy_oui) {
   9653 		case MII_OUI_ATTANSIC: /* XXX ??? */
   9654 			switch (phy_model) {
   9655 			case 0x0004: /* XXX */
   9656 				new_phytype = WMPHY_82578;
   9657 				break;
   9658 			default:
   9659 				break;
   9660 			}
   9661 			break;
   9662 		case MII_OUI_xxMARVELL:
   9663 			switch (phy_model) {
   9664 			case MII_MODEL_xxMARVELL_I210:
   9665 				new_phytype = WMPHY_I210;
   9666 				break;
   9667 			case MII_MODEL_xxMARVELL_E1011:
   9668 			case MII_MODEL_xxMARVELL_E1000_3:
   9669 			case MII_MODEL_xxMARVELL_E1000_5:
   9670 			case MII_MODEL_xxMARVELL_E1112:
   9671 				new_phytype = WMPHY_M88;
   9672 				break;
   9673 			case MII_MODEL_xxMARVELL_E1149:
   9674 				new_phytype = WMPHY_BM;
   9675 				break;
   9676 			case MII_MODEL_xxMARVELL_E1111:
   9677 			case MII_MODEL_xxMARVELL_I347:
   9678 			case MII_MODEL_xxMARVELL_E1512:
   9679 			case MII_MODEL_xxMARVELL_E1340M:
   9680 			case MII_MODEL_xxMARVELL_E1543:
   9681 				new_phytype = WMPHY_M88;
   9682 				break;
   9683 			case MII_MODEL_xxMARVELL_I82563:
   9684 				new_phytype = WMPHY_GG82563;
   9685 				break;
   9686 			default:
   9687 				break;
   9688 			}
   9689 			break;
   9690 		case MII_OUI_INTEL:
   9691 			switch (phy_model) {
   9692 			case MII_MODEL_INTEL_I82577:
   9693 				new_phytype = WMPHY_82577;
   9694 				break;
   9695 			case MII_MODEL_INTEL_I82579:
   9696 				new_phytype = WMPHY_82579;
   9697 				break;
   9698 			case MII_MODEL_INTEL_I217:
   9699 				new_phytype = WMPHY_I217;
   9700 				break;
   9701 			case MII_MODEL_INTEL_I82580:
   9702 			case MII_MODEL_INTEL_I350:
   9703 				new_phytype = WMPHY_82580;
   9704 				break;
   9705 			default:
   9706 				break;
   9707 			}
   9708 			break;
   9709 		case MII_OUI_yyINTEL:
   9710 			switch (phy_model) {
   9711 			case MII_MODEL_yyINTEL_I82562G:
   9712 			case MII_MODEL_yyINTEL_I82562EM:
   9713 			case MII_MODEL_yyINTEL_I82562ET:
   9714 				new_phytype = WMPHY_IFE;
   9715 				break;
   9716 			case MII_MODEL_yyINTEL_IGP01E1000:
   9717 				new_phytype = WMPHY_IGP;
   9718 				break;
   9719 			case MII_MODEL_yyINTEL_I82566:
   9720 				new_phytype = WMPHY_IGP_3;
   9721 				break;
   9722 			default:
   9723 				break;
   9724 			}
   9725 			break;
   9726 		default:
   9727 			break;
   9728 		}
   9729 		if (new_phytype == WMPHY_UNKNOWN)
   9730 			aprint_verbose_dev(dev,
   9731 			    "%s: unknown PHY model. OUI=%06x, model=%04x\n",
   9732 			    __func__, phy_oui, phy_model);
   9733 
    9734 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9735 		    && (sc->sc_phytype != new_phytype)) {
    9736 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9737 			    "was incorrect. PHY type from PHY ID = %u\n",
    9738 			    sc->sc_phytype, new_phytype);
   9739 		}
   9740 	}
   9741 
   9742 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9743 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9744 		/* SGMII */
   9745 		new_readreg = wm_sgmii_readreg;
   9746 		new_writereg = wm_sgmii_writereg;
   9747 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9748 		/* BM2 (phyaddr == 1) */
   9749 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9750 		    && (new_phytype != WMPHY_BM)
   9751 		    && (new_phytype != WMPHY_UNKNOWN))
   9752 			doubt_phytype = new_phytype;
   9753 		new_phytype = WMPHY_BM;
   9754 		new_readreg = wm_gmii_bm_readreg;
   9755 		new_writereg = wm_gmii_bm_writereg;
   9756 	} else if (sc->sc_type >= WM_T_PCH) {
   9757 		/* All PCH* use _hv_ */
   9758 		new_readreg = wm_gmii_hv_readreg;
   9759 		new_writereg = wm_gmii_hv_writereg;
   9760 	} else if (sc->sc_type >= WM_T_ICH8) {
   9761 		/* non-82567 ICH8, 9 and 10 */
   9762 		new_readreg = wm_gmii_i82544_readreg;
   9763 		new_writereg = wm_gmii_i82544_writereg;
   9764 	} else if (sc->sc_type >= WM_T_80003) {
   9765 		/* 80003 */
   9766 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9767 		    && (new_phytype != WMPHY_GG82563)
   9768 		    && (new_phytype != WMPHY_UNKNOWN))
   9769 			doubt_phytype = new_phytype;
   9770 		new_phytype = WMPHY_GG82563;
   9771 		new_readreg = wm_gmii_i80003_readreg;
   9772 		new_writereg = wm_gmii_i80003_writereg;
   9773 	} else if (sc->sc_type >= WM_T_I210) {
   9774 		/* I210 and I211 */
   9775 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9776 		    && (new_phytype != WMPHY_I210)
   9777 		    && (new_phytype != WMPHY_UNKNOWN))
   9778 			doubt_phytype = new_phytype;
   9779 		new_phytype = WMPHY_I210;
   9780 		new_readreg = wm_gmii_gs40g_readreg;
   9781 		new_writereg = wm_gmii_gs40g_writereg;
   9782 	} else if (sc->sc_type >= WM_T_82580) {
   9783 		/* 82580, I350 and I354 */
   9784 		new_readreg = wm_gmii_82580_readreg;
   9785 		new_writereg = wm_gmii_82580_writereg;
   9786 	} else if (sc->sc_type >= WM_T_82544) {
    9787 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9788 		new_readreg = wm_gmii_i82544_readreg;
   9789 		new_writereg = wm_gmii_i82544_writereg;
   9790 	} else {
   9791 		new_readreg = wm_gmii_i82543_readreg;
   9792 		new_writereg = wm_gmii_i82543_writereg;
   9793 	}
   9794 
   9795 	if (new_phytype == WMPHY_BM) {
   9796 		/* All BM use _bm_ */
   9797 		new_readreg = wm_gmii_bm_readreg;
   9798 		new_writereg = wm_gmii_bm_writereg;
   9799 	}
   9800 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
   9801 		/* All PCH* use _hv_ */
   9802 		new_readreg = wm_gmii_hv_readreg;
   9803 		new_writereg = wm_gmii_hv_writereg;
   9804 	}
   9805 
   9806 	/* Diag output */
   9807 	if (doubt_phytype != WMPHY_UNKNOWN)
   9808 		aprint_error_dev(dev, "Assumed new PHY type was "
   9809 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9810 		    new_phytype);
    9811 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9812 	    && (sc->sc_phytype != new_phytype))
    9813 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9814 		    "was incorrect. New PHY type = %u\n",
    9815 		    sc->sc_phytype, new_phytype);
   9816 
   9817 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9818 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9819 
   9820 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9821 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9822 		    "function was incorrect.\n");
   9823 
   9824 	/* Update now */
   9825 	sc->sc_phytype = new_phytype;
   9826 	mii->mii_readreg = new_readreg;
   9827 	mii->mii_writereg = new_writereg;
   9828 	if (new_readreg == wm_gmii_hv_readreg) {
   9829 		sc->phy.readreg_locked = wm_gmii_hv_readreg_locked;
   9830 		sc->phy.writereg_locked = wm_gmii_hv_writereg_locked;
   9831 	} else if (new_readreg == wm_gmii_i82544_readreg) {
   9832 		sc->phy.readreg_locked = wm_gmii_i82544_readreg_locked;
   9833 		sc->phy.writereg_locked = wm_gmii_i82544_writereg_locked;
   9834 	}
   9835 }
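
/*
 * A minimal sketch (illustrative only, not compiled) of the two-call
 * pattern described above: the first call can only guess, since no PHY
 * register is readable yet; the second call verifies the guess once
 * mii_attach() has found a child PHY, exactly as wm_gmii_mediainit()
 * does below.
 */
#if 0
	wm_gmii_setup_phytype(sc, 0, 0);	/* 1st call: guess */
	/* ... mii_attach() probes the bus ... */
	child = LIST_FIRST(&sc->sc_mii.mii_phys);
	if (child != NULL)
		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
		    child->mii_mpd_model);	/* 2nd call: verify */
#endif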
   9836 
   9837 /*
   9838  * wm_get_phy_id_82575:
   9839  *
    9840  * Return the PHY ID, or -1 on failure.
   9841  */
   9842 static int
   9843 wm_get_phy_id_82575(struct wm_softc *sc)
   9844 {
   9845 	uint32_t reg;
   9846 	int phyid = -1;
   9847 
   9848 	/* XXX */
   9849 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9850 		return -1;
   9851 
   9852 	if (wm_sgmii_uses_mdio(sc)) {
   9853 		switch (sc->sc_type) {
   9854 		case WM_T_82575:
   9855 		case WM_T_82576:
   9856 			reg = CSR_READ(sc, WMREG_MDIC);
   9857 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9858 			break;
   9859 		case WM_T_82580:
   9860 		case WM_T_I350:
   9861 		case WM_T_I354:
   9862 		case WM_T_I210:
   9863 		case WM_T_I211:
   9864 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9865 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9866 			break;
   9867 		default:
   9868 			return -1;
   9869 		}
   9870 	}
   9871 
   9872 	return phyid;
   9873 }
   9874 
   9875 
   9876 /*
   9877  * wm_gmii_mediainit:
   9878  *
   9879  *	Initialize media for use on 1000BASE-T devices.
   9880  */
   9881 static void
   9882 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9883 {
   9884 	device_t dev = sc->sc_dev;
   9885 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9886 	struct mii_data *mii = &sc->sc_mii;
   9887 	uint32_t reg;
   9888 
   9889 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9890 		device_xname(sc->sc_dev), __func__));
   9891 
   9892 	/* We have GMII. */
   9893 	sc->sc_flags |= WM_F_HAS_MII;
   9894 
   9895 	if (sc->sc_type == WM_T_80003)
   9896 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9897 	else
   9898 		sc->sc_tipg = TIPG_1000T_DFLT;
   9899 
   9900 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9901 	if ((sc->sc_type == WM_T_82580)
   9902 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9903 	    || (sc->sc_type == WM_T_I211)) {
   9904 		reg = CSR_READ(sc, WMREG_PHPM);
   9905 		reg &= ~PHPM_GO_LINK_D;
   9906 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9907 	}
   9908 
   9909 	/*
   9910 	 * Let the chip set speed/duplex on its own based on
   9911 	 * signals from the PHY.
   9912 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9913 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9914 	 */
   9915 	sc->sc_ctrl |= CTRL_SLU;
   9916 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9917 
   9918 	/* Initialize our media structures and probe the GMII. */
   9919 	mii->mii_ifp = ifp;
   9920 
   9921 	mii->mii_statchg = wm_gmii_statchg;
   9922 
    9923 	/* Get PHY control from SMBus back to PCIe */
   9924 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9925 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   9926 	    || (sc->sc_type == WM_T_PCH_CNP))
   9927 		wm_init_phy_workarounds_pchlan(sc);
   9928 
   9929 	wm_gmii_reset(sc);
   9930 
   9931 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9932 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9933 	    wm_gmii_mediastatus);
   9934 
   9935 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9936 	    || (sc->sc_type == WM_T_82580)
   9937 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9938 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9939 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9940 			/* Attach only one port */
   9941 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9942 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9943 		} else {
   9944 			int i, id;
   9945 			uint32_t ctrl_ext;
   9946 
   9947 			id = wm_get_phy_id_82575(sc);
   9948 			if (id != -1) {
   9949 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9950 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9951 			}
   9952 			if ((id == -1)
   9953 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9954 				/* Power on the SGMII PHY if it is disabled */
   9955 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9956 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9957 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9958 				CSR_WRITE_FLUSH(sc);
   9959 				delay(300*1000); /* XXX too long */
   9960 
    9961 				/* Try PHY addresses 1 through 7 */
   9962 				for (i = 1; i < 8; i++)
   9963 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9964 					    0xffffffff, i, MII_OFFSET_ANY,
   9965 					    MIIF_DOPAUSE);
   9966 
    9967 				/* Restore the previous SFP cage power state */
   9968 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9969 			}
   9970 		}
   9971 	} else
   9972 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9973 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9974 
   9975 	/*
   9976 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9977 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9978 	 */
   9979 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   9980 		|| (sc->sc_type == WM_T_PCH_SPT)
   9981 		|| (sc->sc_type == WM_T_PCH_CNP))
   9982 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9983 		wm_set_mdio_slow_mode_hv(sc);
   9984 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9985 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9986 	}
   9987 
   9988 	/*
   9989 	 * (For ICH8 variants)
   9990 	 * If PHY detection failed, use BM's r/w function and retry.
   9991 	 */
   9992 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9993 		/* if failed, retry with *_bm_* */
   9994 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9995 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9996 		    sc->sc_phytype);
   9997 		sc->sc_phytype = WMPHY_BM;
   9998 		mii->mii_readreg = wm_gmii_bm_readreg;
   9999 		mii->mii_writereg = wm_gmii_bm_writereg;
   10000 
   10001 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   10002 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   10003 	}
   10004 
   10005 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    10006 		/* No PHY was found */
   10007 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   10008 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   10009 		sc->sc_phytype = WMPHY_NONE;
   10010 	} else {
   10011 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   10012 
   10013 		/*
    10014 		 * A PHY was found. Check the PHY type again via the second
    10015 		 * call of wm_gmii_setup_phytype().
   10016 		 */
   10017 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   10018 		    child->mii_mpd_model);
   10019 
   10020 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   10021 	}
   10022 }
   10023 
   10024 /*
   10025  * wm_gmii_mediachange:	[ifmedia interface function]
   10026  *
   10027  *	Set hardware to newly-selected media on a 1000BASE-T device.
   10028  */
   10029 static int
   10030 wm_gmii_mediachange(struct ifnet *ifp)
   10031 {
   10032 	struct wm_softc *sc = ifp->if_softc;
   10033 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10034 	int rc;
   10035 
   10036 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10037 		device_xname(sc->sc_dev), __func__));
   10038 	if ((ifp->if_flags & IFF_UP) == 0)
   10039 		return 0;
   10040 
   10041 	/* Disable D0 LPLU. */
   10042 	wm_lplu_d0_disable(sc);
   10043 
   10044 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   10045 	sc->sc_ctrl |= CTRL_SLU;
   10046 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10047 	    || (sc->sc_type > WM_T_82543)) {
   10048 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   10049 	} else {
   10050 		sc->sc_ctrl &= ~CTRL_ASDE;
   10051 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   10052 		if (ife->ifm_media & IFM_FDX)
   10053 			sc->sc_ctrl |= CTRL_FD;
   10054 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   10055 		case IFM_10_T:
   10056 			sc->sc_ctrl |= CTRL_SPEED_10;
   10057 			break;
   10058 		case IFM_100_TX:
   10059 			sc->sc_ctrl |= CTRL_SPEED_100;
   10060 			break;
   10061 		case IFM_1000_T:
   10062 			sc->sc_ctrl |= CTRL_SPEED_1000;
   10063 			break;
   10064 		default:
   10065 			panic("wm_gmii_mediachange: bad media 0x%x",
   10066 			    ife->ifm_media);
   10067 		}
   10068 	}
   10069 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10070 	CSR_WRITE_FLUSH(sc);
   10071 	if (sc->sc_type <= WM_T_82543)
   10072 		wm_gmii_reset(sc);
   10073 
   10074 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   10075 		return 0;
   10076 	return rc;
   10077 }
   10078 
   10079 /*
   10080  * wm_gmii_mediastatus:	[ifmedia interface function]
   10081  *
   10082  *	Get the current interface media status on a 1000BASE-T device.
   10083  */
   10084 static void
   10085 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10086 {
   10087 	struct wm_softc *sc = ifp->if_softc;
   10088 
   10089 	ether_mediastatus(ifp, ifmr);
   10090 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10091 	    | sc->sc_flowflags;
   10092 }
   10093 
   10094 #define	MDI_IO		CTRL_SWDPIN(2)
   10095 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   10096 #define	MDI_CLK		CTRL_SWDPIN(3)
   10097 
   10098 static void
   10099 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   10100 {
   10101 	uint32_t i, v;
   10102 
   10103 	v = CSR_READ(sc, WMREG_CTRL);
   10104 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10105 	v |= MDI_DIR | CTRL_SWDPIO(3);
   10106 
   10107 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   10108 		if (data & i)
   10109 			v |= MDI_IO;
   10110 		else
   10111 			v &= ~MDI_IO;
   10112 		CSR_WRITE(sc, WMREG_CTRL, v);
   10113 		CSR_WRITE_FLUSH(sc);
   10114 		delay(10);
   10115 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10116 		CSR_WRITE_FLUSH(sc);
   10117 		delay(10);
   10118 		CSR_WRITE(sc, WMREG_CTRL, v);
   10119 		CSR_WRITE_FLUSH(sc);
   10120 		delay(10);
   10121 	}
   10122 }
   10123 
   10124 static uint32_t
   10125 wm_i82543_mii_recvbits(struct wm_softc *sc)
   10126 {
   10127 	uint32_t v, i, data = 0;
   10128 
   10129 	v = CSR_READ(sc, WMREG_CTRL);
   10130 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   10131 	v |= CTRL_SWDPIO(3);
   10132 
   10133 	CSR_WRITE(sc, WMREG_CTRL, v);
   10134 	CSR_WRITE_FLUSH(sc);
   10135 	delay(10);
   10136 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10137 	CSR_WRITE_FLUSH(sc);
   10138 	delay(10);
   10139 	CSR_WRITE(sc, WMREG_CTRL, v);
   10140 	CSR_WRITE_FLUSH(sc);
   10141 	delay(10);
   10142 
   10143 	for (i = 0; i < 16; i++) {
   10144 		data <<= 1;
   10145 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10146 		CSR_WRITE_FLUSH(sc);
   10147 		delay(10);
   10148 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   10149 			data |= 1;
   10150 		CSR_WRITE(sc, WMREG_CTRL, v);
   10151 		CSR_WRITE_FLUSH(sc);
   10152 		delay(10);
   10153 	}
   10154 
   10155 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   10156 	CSR_WRITE_FLUSH(sc);
   10157 	delay(10);
   10158 	CSR_WRITE(sc, WMREG_CTRL, v);
   10159 	CSR_WRITE_FLUSH(sc);
   10160 	delay(10);
   10161 
   10162 	return data;
   10163 }
   10164 
   10165 #undef MDI_IO
   10166 #undef MDI_DIR
   10167 #undef MDI_CLK
   10168 
   10169 /*
   10170  * wm_gmii_i82543_readreg:	[mii interface function]
   10171  *
   10172  *	Read a PHY register on the GMII (i82543 version).
   10173  */
   10174 static int
   10175 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   10176 {
   10177 	struct wm_softc *sc = device_private(dev);
   10178 	int rv;
   10179 
   10180 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10181 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   10182 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   10183 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   10184 
   10185 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   10186 		device_xname(dev), phy, reg, rv));
   10187 
   10188 	return rv;
   10189 }
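
/*
 * The 14 bits sent above form the header of an IEEE 802.3 clause 22
 * management frame, clocked out MSB first after the 32-bit preamble of
 * ones.  A worked example (illustrative only), assuming the standard
 * encodings MII_COMMAND_START == 0x1 and MII_COMMAND_READ == 0x2: with
 * phy = 1 and reg = 2, the header word is
 *
 *	2 | (1 << 5) | (2 << 10) | (1 << 12) == 0x1822,
 *
 * i.e. the bit string 01 10 00001 00010: start (01), opcode (10,
 * read), PHY address (00001), register (00010).  The turnaround and
 * the 16 data bits are then handled by wm_i82543_mii_recvbits().
 */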
   10190 
   10191 /*
   10192  * wm_gmii_i82543_writereg:	[mii interface function]
   10193  *
   10194  *	Write a PHY register on the GMII (i82543 version).
   10195  */
   10196 static void
   10197 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   10198 {
   10199 	struct wm_softc *sc = device_private(dev);
   10200 
   10201 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   10202 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   10203 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   10204 	    (MII_COMMAND_START << 30), 32);
   10205 }
   10206 
   10207 /*
   10208  * wm_gmii_mdic_readreg:	[mii interface function]
   10209  *
   10210  *	Read a PHY register on the GMII.
   10211  */
   10212 static int
   10213 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   10214 {
   10215 	struct wm_softc *sc = device_private(dev);
   10216 	uint32_t mdic = 0;
   10217 	int i, rv;
   10218 
   10219 	if (reg > MII_ADDRMASK) {
   10220 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10221 		    __func__, sc->sc_phytype, reg);
   10222 		reg &= MII_ADDRMASK;
   10223 	}
   10224 
   10225 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   10226 	    MDIC_REGADD(reg));
   10227 
   10228 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10229 		delay(50);
   10230 		mdic = CSR_READ(sc, WMREG_MDIC);
   10231 		if (mdic & MDIC_READY)
   10232 			break;
   10233 	}
   10234 
   10235 	if ((mdic & MDIC_READY) == 0) {
   10236 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   10237 		    device_xname(dev), phy, reg);
   10238 		return 0;
   10239 	} else if (mdic & MDIC_E) {
   10240 #if 0 /* This is normal if no PHY is present. */
   10241 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   10242 		    device_xname(dev), phy, reg);
   10243 #endif
   10244 		return 0;
   10245 	} else {
   10246 		rv = MDIC_DATA(mdic);
   10247 		if (rv == 0xffff)
   10248 			rv = 0;
   10249 	}
   10250 
   10251 	/*
   10252 	 * Allow some time after each MDIC transaction to avoid
   10253 	 * reading duplicate data in the next MDIC transaction.
   10254 	 */
   10255 	if (sc->sc_type == WM_T_PCH2)
   10256 		delay(100);
   10257 
   10258 	return rv;
   10259 }
   10260 
   10261 /*
   10262  * wm_gmii_mdic_writereg:	[mii interface function]
   10263  *
   10264  *	Write a PHY register on the GMII.
   10265  */
   10266 static void
   10267 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   10268 {
   10269 	struct wm_softc *sc = device_private(dev);
   10270 	uint32_t mdic = 0;
   10271 	int i;
   10272 
   10273 	if (reg > MII_ADDRMASK) {
   10274 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10275 		    __func__, sc->sc_phytype, reg);
   10276 		reg &= MII_ADDRMASK;
   10277 	}
   10278 
   10279 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   10280 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   10281 
   10282 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   10283 		delay(50);
   10284 		mdic = CSR_READ(sc, WMREG_MDIC);
   10285 		if (mdic & MDIC_READY)
   10286 			break;
   10287 	}
   10288 
   10289 	if ((mdic & MDIC_READY) == 0) {
   10290 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   10291 		    device_xname(dev), phy, reg);
   10292 		return;
   10293 	} else if (mdic & MDIC_E) {
   10294 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   10295 		    device_xname(dev), phy, reg);
   10296 		return;
   10297 	}
   10298 
   10299 	/*
   10300 	 * Allow some time after each MDIC transaction to avoid
   10301 	 * reading duplicate data in the next MDIC transaction.
   10302 	 */
   10303 	if (sc->sc_type == WM_T_PCH2)
   10304 		delay(100);
   10305 }
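
/*
 * A minimal sketch (illustrative only, not compiled) of a
 * read-modify-write helper built on the two MDIC primitives above;
 * the name wm_gmii_mdic_modifyreg is hypothetical and not part of this
 * driver.
 */
#if 0
static void
wm_gmii_mdic_modifyreg(device_t dev, int phy, int reg, int clear, int set)
{
	int val;

	val = wm_gmii_mdic_readreg(dev, phy, reg);
	val = (val & ~clear) | set;
	wm_gmii_mdic_writereg(dev, phy, reg, val);
}
#endif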
   10306 
   10307 /*
   10308  * wm_gmii_i82544_readreg:	[mii interface function]
   10309  *
   10310  *	Read a PHY register on the GMII.
   10311  */
   10312 static int
   10313 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   10314 {
   10315 	struct wm_softc *sc = device_private(dev);
   10316 	uint16_t val;
   10317 
   10318 	if (sc->phy.acquire(sc)) {
   10319 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10320 		return 0;
   10321 	}
   10322 
   10323 	wm_gmii_i82544_readreg_locked(dev, phy, reg, &val);
   10324 
   10325 	sc->phy.release(sc);
   10326 
   10327 	return val;
   10328 }
   10329 
   10330 static int
   10331 wm_gmii_i82544_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10332 {
   10333 	struct wm_softc *sc = device_private(dev);
   10334 
   10335 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10336 		switch (sc->sc_phytype) {
   10337 		case WMPHY_IGP:
   10338 		case WMPHY_IGP_2:
   10339 		case WMPHY_IGP_3:
   10340 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10341 			    reg);
   10342 			break;
   10343 		default:
   10344 #ifdef WM_DEBUG
   10345 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   10346 			    __func__, sc->sc_phytype, reg);
   10347 #endif
   10348 			break;
   10349 		}
   10350 	}
   10351 
   10352 	*val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10353 
   10354 	return 0;
   10355 }
   10356 
   10357 /*
   10358  * wm_gmii_i82544_writereg:	[mii interface function]
   10359  *
   10360  *	Write a PHY register on the GMII.
   10361  */
   10362 static void
   10363 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   10364 {
   10365 	struct wm_softc *sc = device_private(dev);
   10366 
   10367 	if (sc->phy.acquire(sc)) {
   10368 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10369 		return;
   10370 	}
   10371 
   10372 	wm_gmii_i82544_writereg_locked(dev, phy, reg & MII_ADDRMASK, val);
   10373 	sc->phy.release(sc);
   10374 }
   10375 
   10376 static int
   10377 wm_gmii_i82544_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10378 {
   10379 	struct wm_softc *sc = device_private(dev);
   10380 
   10381 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10382 		switch (sc->sc_phytype) {
   10383 		case WMPHY_IGP:
   10384 		case WMPHY_IGP_2:
   10385 		case WMPHY_IGP_3:
   10386 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
   10387 			    reg);
   10388 			break;
   10389 		default:
   10390 #ifdef WM_DEBUG
   10391 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
   10392 			    __func__, sc->sc_phytype, reg);
   10393 #endif
   10394 			break;
   10395 		}
   10396 	}
   10397 
   10398 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10399 
   10400 	return 0;
   10401 }
   10402 
   10403 /*
   10404  * wm_gmii_i80003_readreg:	[mii interface function]
   10405  *
    10406  *	Read a PHY register on the Kumeran interface (80003).
    10407  * This could be handled by the PHY layer if we didn't have to lock the
    10408  * resource ...
   10409  */
   10410 static int
   10411 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   10412 {
   10413 	struct wm_softc *sc = device_private(dev);
   10414 	int page_select, temp;
   10415 	int rv;
   10416 
   10417 	if (phy != 1) /* only one PHY on kumeran bus */
   10418 		return 0;
   10419 
   10420 	if (sc->phy.acquire(sc)) {
   10421 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10422 		return 0;
   10423 	}
   10424 
   10425 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10426 		page_select = GG82563_PHY_PAGE_SELECT;
   10427 	else {
   10428 		/*
   10429 		 * Use Alternative Page Select register to access registers
   10430 		 * 30 and 31.
   10431 		 */
   10432 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10433 	}
   10434 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10435 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10436 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10437 		/*
    10438 		 * Wait an extra 200us to work around a bug in the ready bit
    10439 		 * of the MDIC register.
   10440 		 */
   10441 		delay(200);
   10442 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10443 			device_printf(dev, "%s failed\n", __func__);
   10444 			rv = 0; /* XXX */
   10445 			goto out;
   10446 		}
   10447 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10448 		delay(200);
   10449 	} else
   10450 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10451 
   10452 out:
   10453 	sc->phy.release(sc);
   10454 	return rv;
   10455 }
   10456 
   10457 /*
   10458  * wm_gmii_i80003_writereg:	[mii interface function]
   10459  *
    10460  *	Write a PHY register on the Kumeran interface (80003).
    10461  * This could be handled by the PHY layer if we didn't have to lock the
    10462  * resource ...
   10463  */
   10464 static void
   10465 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10466 {
   10467 	struct wm_softc *sc = device_private(dev);
   10468 	int page_select, temp;
   10469 
   10470 	if (phy != 1) /* only one PHY on kumeran bus */
   10471 		return;
   10472 
   10473 	if (sc->phy.acquire(sc)) {
   10474 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10475 		return;
   10476 	}
   10477 
   10478 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10479 		page_select = GG82563_PHY_PAGE_SELECT;
   10480 	else {
   10481 		/*
   10482 		 * Use Alternative Page Select register to access registers
   10483 		 * 30 and 31.
   10484 		 */
   10485 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10486 	}
   10487 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10488 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10489 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10490 		/*
    10491 		 * Wait an extra 200us to work around a bug in the ready bit
    10492 		 * of the MDIC register.
   10493 		 */
   10494 		delay(200);
   10495 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10496 			device_printf(dev, "%s failed\n", __func__);
   10497 			goto out;
   10498 		}
   10499 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10500 		delay(200);
   10501 	} else
   10502 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10503 
   10504 out:
   10505 	sc->phy.release(sc);
   10506 }
   10507 
   10508 /*
   10509  * wm_gmii_bm_readreg:	[mii interface function]
   10510  *
    10511  *	Read a PHY register on the BM PHY.
    10512  * This could be handled by the PHY layer if we didn't have to lock the
    10513  * resource ...
   10514  */
   10515 static int
   10516 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10517 {
   10518 	struct wm_softc *sc = device_private(dev);
   10519 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10520 	uint16_t val;
   10521 	int rv;
   10522 
   10523 	if (sc->phy.acquire(sc)) {
   10524 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10525 		return 0;
   10526 	}
   10527 
   10528 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10529 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10530 		    || (reg == 31)) ? 1 : phy;
   10531 	/* Page 800 works differently than the rest so it has its own func */
   10532 	if (page == BM_WUC_PAGE) {
   10533 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10534 		rv = val;
   10535 		goto release;
   10536 	}
   10537 
   10538 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10539 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10540 		    && (sc->sc_type != WM_T_82583))
   10541 			wm_gmii_mdic_writereg(dev, phy,
   10542 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10543 		else
   10544 			wm_gmii_mdic_writereg(dev, phy,
   10545 			    BME1000_PHY_PAGE_SELECT, page);
   10546 	}
   10547 
   10548 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10549 
   10550 release:
   10551 	sc->phy.release(sc);
   10552 	return rv;
   10553 }
   10554 
   10555 /*
   10556  * wm_gmii_bm_writereg:	[mii interface function]
   10557  *
    10558  *	Write a PHY register on the BM PHY.
    10559  * This could be handled by the PHY layer if we didn't have to lock the
    10560  * resource ...
   10561  */
   10562 static void
   10563 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10564 {
   10565 	struct wm_softc *sc = device_private(dev);
   10566 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10567 
   10568 	if (sc->phy.acquire(sc)) {
   10569 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10570 		return;
   10571 	}
   10572 
   10573 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10574 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10575 		    || (reg == 31)) ? 1 : phy;
   10576 	/* Page 800 works differently than the rest so it has its own func */
   10577 	if (page == BM_WUC_PAGE) {
   10578 		uint16_t tmp;
   10579 
   10580 		tmp = val;
   10581 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10582 		goto release;
   10583 	}
   10584 
   10585 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10586 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10587 		    && (sc->sc_type != WM_T_82583))
   10588 			wm_gmii_mdic_writereg(dev, phy,
   10589 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10590 		else
   10591 			wm_gmii_mdic_writereg(dev, phy,
   10592 			    BME1000_PHY_PAGE_SELECT, page);
   10593 	}
   10594 
   10595 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10596 
   10597 release:
   10598 	sc->phy.release(sc);
   10599 }
   10600 
   10601 static void
   10602 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
   10603 {
   10604 	struct wm_softc *sc = device_private(dev);
   10605 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10606 	uint16_t wuce, reg;
   10607 
   10608 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10609 		device_xname(dev), __func__));
   10610 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10611 	if (sc->sc_type == WM_T_PCH) {
    10612 		/* XXX The e1000 driver does nothing here... why? */
   10613 	}
   10614 
   10615 	/*
   10616 	 * 1) Enable PHY wakeup register first.
   10617 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10618 	 */
   10619 
   10620 	/* Set page 769 */
   10621 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10622 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10623 
   10624 	/* Read WUCE and save it */
   10625 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10626 
   10627 	reg = wuce | BM_WUC_ENABLE_BIT;
   10628 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10629 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10630 
   10631 	/* Select page 800 */
   10632 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10633 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10634 
   10635 	/*
   10636 	 * 2) Access PHY wakeup register.
   10637 	 * See e1000_access_phy_wakeup_reg_bm.
   10638 	 */
   10639 
   10640 	/* Write page 800 */
   10641 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10642 
   10643 	if (rd)
   10644 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10645 	else
   10646 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10647 
   10648 	/*
   10649 	 * 3) Disable PHY wakeup register.
   10650 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10651 	 */
   10652 	/* Set page 769 */
   10653 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10654 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10655 
   10656 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10657 }
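
/*
 * A minimal usage sketch for the helper above (illustrative only, not
 * compiled); BM_EXAMPLE_REG and the bit being set are hypothetical.
 * The "rd" argument selects the direction; the value travels through
 * *val either way.
 */
#if 0
	int16_t v;

	wm_access_phy_wakeup_reg_bm(dev, BM_EXAMPLE_REG, &v, 1);  /* read */
	v |= 0x0001;				/* hypothetical bit */
	wm_access_phy_wakeup_reg_bm(dev, BM_EXAMPLE_REG, &v, 0);  /* write */
#endif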
   10658 
   10659 /*
   10660  * wm_gmii_hv_readreg:	[mii interface function]
   10661  *
    10662  *	Read a PHY register on the HV (PCH and newer) PHY.
    10663  * This could be handled by the PHY layer if we didn't have to lock the
    10664  * resource ...
   10665  */
   10666 static int
   10667 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10668 {
   10669 	struct wm_softc *sc = device_private(dev);
   10670 	uint16_t val;
   10671 
   10672 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10673 		device_xname(dev), __func__));
   10674 	if (sc->phy.acquire(sc)) {
   10675 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10676 		return 0;
   10677 	}
   10678 
   10679 	wm_gmii_hv_readreg_locked(dev, phy, reg, &val);
   10680 	sc->phy.release(sc);
   10681 	return val;
   10682 }
   10683 
   10684 static int
   10685 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg, uint16_t *val)
   10686 {
   10687 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10688 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10689 
   10690 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10691 
   10692 	/* Page 800 works differently than the rest so it has its own func */
   10693 	if (page == BM_WUC_PAGE) {
   10694 		wm_access_phy_wakeup_reg_bm(dev, reg, val, 1);
   10695 		return 0;
   10696 	}
   10697 
   10698 	/*
    10699 	 * Pages lower than 768 work differently from the rest, so they
    10700 	 * would need their own function (not implemented here).
   10701 	 */
   10702 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10703 		printf("gmii_hv_readreg!!!\n");
   10704 		return 0;
   10705 	}
   10706 
   10707 	/*
   10708 	 * XXX I21[789] documents say that the SMBus Address register is at
   10709 	 * PHY address 01, Page 0 (not 768), Register 26.
   10710 	 */
   10711 	if (page == HV_INTC_FC_PAGE_START)
   10712 		page = 0;
   10713 
   10714 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10715 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10716 		    page << BME1000_PAGE_SHIFT);
   10717 	}
   10718 
   10719 	*val = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10720 	return 0;
   10721 }
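
/*
 * Note that the "reg" argument of the HV access functions packs both a
 * PHY page and a register number; BM_PHY_REG_PAGE() and
 * BM_PHY_REG_NUM() split it apart, as above.  Pages at or above
 * HV_INTC_FC_PAGE_START are always reached through PHY address 1,
 * which is why "phy" is overridden at the top of this function.
 */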
   10722 
   10723 /*
   10724  * wm_gmii_hv_writereg:	[mii interface function]
   10725  *
    10726  *	Write a PHY register on the HV (PCH and newer) PHY.
    10727  * This could be handled by the PHY layer if we didn't have to lock the
    10728  * resource ...
   10729  */
   10730 static void
   10731 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10732 {
   10733 	struct wm_softc *sc = device_private(dev);
   10734 
   10735 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10736 		device_xname(dev), __func__));
   10737 
   10738 	if (sc->phy.acquire(sc)) {
   10739 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10740 		return;
   10741 	}
   10742 
   10743 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10744 	sc->phy.release(sc);
   10745 }
   10746 
   10747 static int
   10748 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, uint16_t val)
   10749 {
   10750 	struct wm_softc *sc = device_private(dev);
   10751 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10752 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10753 
   10754 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10755 
   10756 	/* Page 800 works differently than the rest so it has its own func */
   10757 	if (page == BM_WUC_PAGE) {
   10758 		uint16_t tmp;
   10759 
   10760 		tmp = val;
   10761 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10762 		return 0;
   10763 	}
   10764 
   10765 	/*
    10766 	 * Pages lower than 768 work differently from the rest, so they
    10767 	 * would need their own function (not implemented here).
   10768 	 */
   10769 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10770 		printf("gmii_hv_writereg!!!\n");
   10771 		return -1;
   10772 	}
   10773 
   10774 	{
   10775 		/*
   10776 		 * XXX I21[789] documents say that the SMBus Address register
   10777 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10778 		 */
   10779 		if (page == HV_INTC_FC_PAGE_START)
   10780 			page = 0;
   10781 
   10782 		/*
   10783 		 * XXX Workaround MDIO accesses being disabled after entering
   10784 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10785 		 * register is set)
   10786 		 */
   10787 		if (sc->sc_phytype == WMPHY_82578) {
   10788 			struct mii_softc *child;
   10789 
   10790 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10791 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10792 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10793 			    && ((val & (1 << 11)) != 0)) {
   10794 				printf("XXX need workaround\n");
   10795 			}
   10796 		}
   10797 
   10798 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10799 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10800 			    page << BME1000_PAGE_SHIFT);
   10801 		}
   10802 	}
   10803 
   10804 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10805 
   10806 	return 0;
   10807 }
   10808 
   10809 /*
   10810  * wm_gmii_82580_readreg:	[mii interface function]
   10811  *
   10812  *	Read a PHY register on the 82580 and I350.
   10813  * This could be handled by the PHY layer if we didn't have to lock the
    10814  * resource ...
   10815  */
   10816 static int
   10817 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10818 {
   10819 	struct wm_softc *sc = device_private(dev);
   10820 	int rv;
   10821 
   10822 	if (sc->phy.acquire(sc) != 0) {
   10823 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10824 		return 0;
   10825 	}
   10826 
   10827 #ifdef DIAGNOSTIC
   10828 	if (reg > MII_ADDRMASK) {
   10829 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10830 		    __func__, sc->sc_phytype, reg);
   10831 		reg &= MII_ADDRMASK;
   10832 	}
   10833 #endif
   10834 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10835 
   10836 	sc->phy.release(sc);
   10837 	return rv;
   10838 }
   10839 
   10840 /*
   10841  * wm_gmii_82580_writereg:	[mii interface function]
   10842  *
   10843  *	Write a PHY register on the 82580 and I350.
   10844  * This could be handled by the PHY layer if we didn't have to lock the
    10845  * resource ...
   10846  */
   10847 static void
   10848 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10849 {
   10850 	struct wm_softc *sc = device_private(dev);
   10851 
   10852 	if (sc->phy.acquire(sc) != 0) {
   10853 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10854 		return;
   10855 	}
   10856 
   10857 #ifdef DIAGNOSTIC
   10858 	if (reg > MII_ADDRMASK) {
   10859 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10860 		    __func__, sc->sc_phytype, reg);
   10861 		reg &= MII_ADDRMASK;
   10862 	}
   10863 #endif
   10864 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10865 
   10866 	sc->phy.release(sc);
   10867 }
   10868 
   10869 /*
   10870  * wm_gmii_gs40g_readreg:	[mii interface function]
   10871  *
    10872  *	Read a PHY register on the I210 and I211.
    10873  * This could be handled by the PHY layer if we didn't have to lock the
    10874  * resource ...
   10875  */
   10876 static int
   10877 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10878 {
   10879 	struct wm_softc *sc = device_private(dev);
   10880 	int page, offset;
   10881 	int rv;
   10882 
   10883 	/* Acquire semaphore */
   10884 	if (sc->phy.acquire(sc)) {
   10885 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10886 		return 0;
   10887 	}
   10888 
   10889 	/* Page select */
   10890 	page = reg >> GS40G_PAGE_SHIFT;
   10891 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10892 
   10893 	/* Read reg */
   10894 	offset = reg & GS40G_OFFSET_MASK;
   10895 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10896 
   10897 	sc->phy.release(sc);
   10898 	return rv;
   10899 }
   10900 
   10901 /*
   10902  * wm_gmii_gs40g_writereg:	[mii interface function]
   10903  *
   10904  *	Write a PHY register on the I210 and I211.
   10905  * This could be handled by the PHY layer if we didn't have to lock the
    10906  * resource ...
   10907  */
   10908 static void
   10909 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10910 {
   10911 	struct wm_softc *sc = device_private(dev);
   10912 	int page, offset;
   10913 
   10914 	/* Acquire semaphore */
   10915 	if (sc->phy.acquire(sc)) {
   10916 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10917 		return;
   10918 	}
   10919 
   10920 	/* Page select */
   10921 	page = reg >> GS40G_PAGE_SHIFT;
   10922 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10923 
   10924 	/* Write reg */
   10925 	offset = reg & GS40G_OFFSET_MASK;
   10926 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10927 
   10928 	/* Release semaphore */
   10929 	sc->phy.release(sc);
   10930 }
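
/*
 * A sketch (illustrative only) of how callers pack a GS40G register
 * argument; it is simply the inverse of the decoding performed in the
 * two functions above.
 */
#if 0
	reg = (page << GS40G_PAGE_SHIFT) | (offset & GS40G_OFFSET_MASK);
#endif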
   10931 
   10932 /*
   10933  * wm_gmii_statchg:	[mii interface function]
   10934  *
   10935  *	Callback from MII layer when media changes.
   10936  */
   10937 static void
   10938 wm_gmii_statchg(struct ifnet *ifp)
   10939 {
   10940 	struct wm_softc *sc = ifp->if_softc;
   10941 	struct mii_data *mii = &sc->sc_mii;
   10942 
   10943 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10944 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10945 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10946 
   10947 	/*
   10948 	 * Get flow control negotiation result.
   10949 	 */
   10950 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10951 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10952 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10953 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10954 	}
   10955 
   10956 	if (sc->sc_flowflags & IFM_FLOW) {
   10957 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10958 			sc->sc_ctrl |= CTRL_TFCE;
   10959 			sc->sc_fcrtl |= FCRTL_XONE;
   10960 		}
   10961 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10962 			sc->sc_ctrl |= CTRL_RFCE;
   10963 	}
   10964 
   10965 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10966 		DPRINTF(WM_DEBUG_LINK,
   10967 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10968 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10969 	} else {
   10970 		DPRINTF(WM_DEBUG_LINK,
   10971 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10972 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10973 	}
   10974 
   10975 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10976 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10977 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10978 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10979 	if (sc->sc_type == WM_T_80003) {
   10980 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10981 		case IFM_1000_T:
   10982 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10983 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10984 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10985 			break;
   10986 		default:
   10987 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10988 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10989 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10990 			break;
   10991 		}
   10992 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10993 	}
   10994 }
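
/*
 * For reference, the negotiated flow-control flags above map onto the
 * MAC as follows:
 *
 *	IFM_ETH_TXPAUSE -> CTRL_TFCE + FCRTL_XONE (transmit PAUSE frames)
 *	IFM_ETH_RXPAUSE -> CTRL_RFCE (honour received PAUSE frames)
 */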
   10995 
   10996 /* kumeran related (80003, ICH* and PCH*) */
   10997 
   10998 /*
   10999  * wm_kmrn_readreg:
   11000  *
   11001  *	Read a kumeran register
   11002  */
   11003 static int
   11004 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   11005 {
   11006 	int rv;
   11007 
   11008 	if (sc->sc_type == WM_T_80003)
   11009 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11010 	else
   11011 		rv = sc->phy.acquire(sc);
   11012 	if (rv != 0) {
   11013 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11014 		    __func__);
   11015 		return rv;
   11016 	}
   11017 
   11018 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   11019 
   11020 	if (sc->sc_type == WM_T_80003)
   11021 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11022 	else
   11023 		sc->phy.release(sc);
   11024 
   11025 	return rv;
   11026 }
   11027 
   11028 static int
   11029 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   11030 {
   11031 
   11032 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11033 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   11034 	    KUMCTRLSTA_REN);
   11035 	CSR_WRITE_FLUSH(sc);
   11036 	delay(2);
   11037 
   11038 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   11039 
   11040 	return 0;
   11041 }
   11042 
   11043 /*
   11044  * wm_kmrn_writereg:
   11045  *
   11046  *	Write a kumeran register
   11047  */
   11048 static int
   11049 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   11050 {
   11051 	int rv;
   11052 
   11053 	if (sc->sc_type == WM_T_80003)
   11054 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11055 	else
   11056 		rv = sc->phy.acquire(sc);
   11057 	if (rv != 0) {
   11058 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   11059 		    __func__);
   11060 		return rv;
   11061 	}
   11062 
   11063 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   11064 
   11065 	if (sc->sc_type == WM_T_80003)
   11066 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   11067 	else
   11068 		sc->phy.release(sc);
   11069 
   11070 	return rv;
   11071 }
   11072 
   11073 static int
   11074 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   11075 {
   11076 
   11077 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   11078 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   11079 
   11080 	return 0;
   11081 }
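
/*
 * A minimal read-modify-write sketch using the two Kumeran wrappers
 * above (illustrative only, not compiled); the offset is real but the
 * bit being set is a hypothetical example.
 */
#if 0
	uint16_t v;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &v) == 0) {
		v |= 0x0001;		/* hypothetical bit */
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, v);
	}
#endif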
   11082 
   11083 /* SGMII related */
   11084 
   11085 /*
   11086  * wm_sgmii_uses_mdio
   11087  *
   11088  * Check whether the transaction is to the internal PHY or the external
   11089  * MDIO interface. Return true if it's MDIO.
   11090  */
   11091 static bool
   11092 wm_sgmii_uses_mdio(struct wm_softc *sc)
   11093 {
   11094 	uint32_t reg;
   11095 	bool ismdio = false;
   11096 
   11097 	switch (sc->sc_type) {
   11098 	case WM_T_82575:
   11099 	case WM_T_82576:
   11100 		reg = CSR_READ(sc, WMREG_MDIC);
   11101 		ismdio = ((reg & MDIC_DEST) != 0);
   11102 		break;
   11103 	case WM_T_82580:
   11104 	case WM_T_I350:
   11105 	case WM_T_I354:
   11106 	case WM_T_I210:
   11107 	case WM_T_I211:
   11108 		reg = CSR_READ(sc, WMREG_MDICNFG);
   11109 		ismdio = ((reg & MDICNFG_DEST) != 0);
   11110 		break;
   11111 	default:
   11112 		break;
   11113 	}
   11114 
   11115 	return ismdio;
   11116 }
   11117 
   11118 /*
   11119  * wm_sgmii_readreg:	[mii interface function]
   11120  *
   11121  *	Read a PHY register on the SGMII
   11122  * This could be handled by the PHY layer if we didn't have to lock the
    11123  * resource ...
   11124  */
   11125 static int
   11126 wm_sgmii_readreg(device_t dev, int phy, int reg)
   11127 {
   11128 	struct wm_softc *sc = device_private(dev);
   11129 	uint32_t i2ccmd;
   11130 	int i, rv;
   11131 
   11132 	if (sc->phy.acquire(sc)) {
   11133 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11134 		return 0;
   11135 	}
   11136 
   11137 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11138 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11139 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11140 
   11141 	/* Poll the ready bit */
   11142 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11143 		delay(50);
   11144 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11145 		if (i2ccmd & I2CCMD_READY)
   11146 			break;
   11147 	}
   11148 	if ((i2ccmd & I2CCMD_READY) == 0)
   11149 		device_printf(dev, "I2CCMD Read did not complete\n");
   11150 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11151 		device_printf(dev, "I2CCMD Error bit set\n");
   11152 
   11153 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   11154 
   11155 	sc->phy.release(sc);
   11156 	return rv;
   11157 }
   11158 
   11159 /*
   11160  * wm_sgmii_writereg:	[mii interface function]
   11161  *
   11162  *	Write a PHY register on the SGMII.
   11163  * This could be handled by the PHY layer if we didn't have to lock the
    11164  * resource ...
   11165  */
   11166 static void
   11167 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   11168 {
   11169 	struct wm_softc *sc = device_private(dev);
   11170 	uint32_t i2ccmd;
   11171 	int i;
   11172 	int swapdata;
   11173 
   11174 	if (sc->phy.acquire(sc) != 0) {
   11175 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   11176 		return;
   11177 	}
   11178 	/* Swap the data bytes for the I2C interface */
   11179 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   11180 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   11181 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
   11182 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11183 
   11184 	/* Poll the ready bit */
   11185 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11186 		delay(50);
   11187 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11188 		if (i2ccmd & I2CCMD_READY)
   11189 			break;
   11190 	}
   11191 	if ((i2ccmd & I2CCMD_READY) == 0)
   11192 		device_printf(dev, "I2CCMD Write did not complete\n");
   11193 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11194 		device_printf(dev, "I2CCMD Error bit set\n");
   11195 
   11196 	sc->phy.release(sc);
   11197 }
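
/*
 * Worked example of the I2C byte swap used above (illustrative only):
 * writing val == 0x1234 places swapdata == 0x3412 into I2CCMD, and the
 * read path applies the same swap in reverse when extracting rv.
 */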
   11198 
   11199 /* TBI related */
   11200 
   11201 static bool
   11202 wm_tbi_havesignal(struct wm_softc *sc, uint32_t ctrl)
   11203 {
   11204 	bool sig;
   11205 
   11206 	sig = ctrl & CTRL_SWDPIN(1);
   11207 
   11208 	/*
   11209 	 * On 82543 and 82544, the CTRL_SWDPIN(1) bit will be 0 if the optics
   11210 	 * detect a signal, 1 if they don't.
   11211 	 */
   11212 	if ((sc->sc_type == WM_T_82543) || (sc->sc_type == WM_T_82544))
   11213 		sig = !sig;
   11214 
   11215 	return sig;
   11216 }
   11217 
   11218 /*
   11219  * wm_tbi_mediainit:
   11220  *
   11221  *	Initialize media for use on 1000BASE-X devices.
   11222  */
   11223 static void
   11224 wm_tbi_mediainit(struct wm_softc *sc)
   11225 {
   11226 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11227 	const char *sep = "";
   11228 
   11229 	if (sc->sc_type < WM_T_82543)
   11230 		sc->sc_tipg = TIPG_WM_DFLT;
   11231 	else
   11232 		sc->sc_tipg = TIPG_LG_DFLT;
   11233 
   11234 	sc->sc_tbi_serdes_anegticks = 5;
   11235 
   11236 	/* Initialize our media structures */
   11237 	sc->sc_mii.mii_ifp = ifp;
   11238 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   11239 
   11240 	if ((sc->sc_type >= WM_T_82575)
   11241 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   11242 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11243 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   11244 	else
   11245 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   11246 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   11247 
   11248 	/*
   11249 	 * SWD Pins:
   11250 	 *
   11251 	 *	0 = Link LED (output)
   11252 	 *	1 = Loss Of Signal (input)
   11253 	 */
   11254 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   11255 
   11256 	/* XXX Perhaps this is only for TBI */
   11257 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11258 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   11259 
   11260 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   11261 		sc->sc_ctrl &= ~CTRL_LRST;
   11262 
   11263 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11264 
   11265 #define	ADD(ss, mm, dd)							\
   11266 do {									\
   11267 	aprint_normal("%s%s", sep, ss);					\
   11268 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   11269 	sep = ", ";							\
   11270 } while (/*CONSTCOND*/0)
   11271 
   11272 	aprint_normal_dev(sc->sc_dev, "");
   11273 
   11274 	if (sc->sc_type == WM_T_I354) {
   11275 		uint32_t status;
   11276 
   11277 		status = CSR_READ(sc, WMREG_STATUS);
   11278 		if (((status & STATUS_2P5_SKU) != 0)
   11279 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    11280 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    11281 		} else
    11282 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   11283 	} else if (sc->sc_type == WM_T_82545) {
   11284 		/* Only 82545 is LX (XXX except SFP) */
   11285 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   11286 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   11287 	} else {
   11288 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   11289 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   11290 	}
   11291 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   11292 	aprint_normal("\n");
   11293 
   11294 #undef ADD
   11295 
   11296 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   11297 }
   11298 
   11299 /*
   11300  * wm_tbi_mediachange:	[ifmedia interface function]
   11301  *
   11302  *	Set hardware to newly-selected media on a 1000BASE-X device.
   11303  */
   11304 static int
   11305 wm_tbi_mediachange(struct ifnet *ifp)
   11306 {
   11307 	struct wm_softc *sc = ifp->if_softc;
   11308 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11309 	uint32_t status, ctrl;
   11310 	bool signal;
   11311 	int i;
   11312 
   11313 	KASSERT(sc->sc_mediatype != WM_MEDIATYPE_COPPER);
   11314 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11315 		/* XXX need some work for >= 82571 and < 82575 */
   11316 		if (sc->sc_type < WM_T_82575)
   11317 			return 0;
   11318 	}
   11319 
   11320 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11321 	    || (sc->sc_type >= WM_T_82575))
   11322 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11323 
   11324 	sc->sc_ctrl &= ~CTRL_LRST;
   11325 	sc->sc_txcw = TXCW_ANE;
   11326 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11327 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   11328 	else if (ife->ifm_media & IFM_FDX)
   11329 		sc->sc_txcw |= TXCW_FD;
   11330 	else
   11331 		sc->sc_txcw |= TXCW_HD;
   11332 
   11333 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   11334 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   11335 
    11336 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
   11337 		device_xname(sc->sc_dev), sc->sc_txcw));
   11338 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11339 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11340 	CSR_WRITE_FLUSH(sc);
   11341 	delay(1000);
   11342 
    11343 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11344 	signal = wm_tbi_havesignal(sc, ctrl);
   11345 
   11346 	DPRINTF(WM_DEBUG_LINK, ("%s: signal = %d\n", device_xname(sc->sc_dev),
   11347 		signal));
   11348 
   11349 	if (signal) {
   11350 		/* Have signal; wait for the link to come up. */
   11351 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   11352 			delay(10000);
   11353 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   11354 				break;
   11355 		}
   11356 
    11357 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
    11358 			device_xname(sc->sc_dev), i));
   11359 
   11360 		status = CSR_READ(sc, WMREG_STATUS);
   11361 		DPRINTF(WM_DEBUG_LINK,
   11362 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
    11363 			device_xname(sc->sc_dev), status, STATUS_LU));
   11364 		if (status & STATUS_LU) {
   11365 			/* Link is up. */
   11366 			DPRINTF(WM_DEBUG_LINK,
   11367 			    ("%s: LINK: set media -> link up %s\n",
   11368 				device_xname(sc->sc_dev),
   11369 				(status & STATUS_FD) ? "FDX" : "HDX"));
   11370 
   11371 			/*
    11372 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11373 			 * automatically, so re-read CTRL into sc->sc_ctrl.
   11374 			 */
   11375 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   11376 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   11377 			sc->sc_fcrtl &= ~FCRTL_XONE;
   11378 			if (status & STATUS_FD)
   11379 				sc->sc_tctl |=
   11380 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   11381 			else
   11382 				sc->sc_tctl |=
   11383 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   11384 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   11385 				sc->sc_fcrtl |= FCRTL_XONE;
   11386 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   11387 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   11388 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
   11389 			sc->sc_tbi_linkup = 1;
   11390 		} else {
   11391 			if (i == WM_LINKUP_TIMEOUT)
   11392 				wm_check_for_link(sc);
   11393 			/* Link is down. */
   11394 			DPRINTF(WM_DEBUG_LINK,
   11395 			    ("%s: LINK: set media -> link down\n",
   11396 				device_xname(sc->sc_dev)));
   11397 			sc->sc_tbi_linkup = 0;
   11398 		}
   11399 	} else {
   11400 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   11401 			device_xname(sc->sc_dev)));
   11402 		sc->sc_tbi_linkup = 0;
   11403 	}
   11404 
   11405 	wm_tbi_serdes_set_linkled(sc);
   11406 
   11407 	return 0;
   11408 }
   11409 
   11410 /*
   11411  * wm_tbi_mediastatus:	[ifmedia interface function]
   11412  *
   11413  *	Get the current interface media status on a 1000BASE-X device.
   11414  */
   11415 static void
   11416 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11417 {
   11418 	struct wm_softc *sc = ifp->if_softc;
   11419 	uint32_t ctrl, status;
   11420 
   11421 	ifmr->ifm_status = IFM_AVALID;
   11422 	ifmr->ifm_active = IFM_ETHER;
   11423 
   11424 	status = CSR_READ(sc, WMREG_STATUS);
   11425 	if ((status & STATUS_LU) == 0) {
   11426 		ifmr->ifm_active |= IFM_NONE;
   11427 		return;
   11428 	}
   11429 
   11430 	ifmr->ifm_status |= IFM_ACTIVE;
   11431 	/* Only 82545 is LX */
   11432 	if (sc->sc_type == WM_T_82545)
   11433 		ifmr->ifm_active |= IFM_1000_LX;
   11434 	else
   11435 		ifmr->ifm_active |= IFM_1000_SX;
   11436 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   11437 		ifmr->ifm_active |= IFM_FDX;
   11438 	else
   11439 		ifmr->ifm_active |= IFM_HDX;
   11440 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11441 	if (ctrl & CTRL_RFCE)
   11442 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   11443 	if (ctrl & CTRL_TFCE)
   11444 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   11445 }
   11446 
   11447 /* XXX TBI only */
   11448 static int
   11449 wm_check_for_link(struct wm_softc *sc)
   11450 {
   11451 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11452 	uint32_t rxcw;
   11453 	uint32_t ctrl;
   11454 	uint32_t status;
   11455 	bool signal;
   11456 
   11457 	DPRINTF(WM_DEBUG_LINK, ("%s: %s called\n",
   11458 		device_xname(sc->sc_dev), __func__));
   11459 
   11460 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11461 		/* XXX need some work for >= 82571 */
   11462 		if (sc->sc_type >= WM_T_82571) {
   11463 			sc->sc_tbi_linkup = 1;
   11464 			return 0;
   11465 		}
   11466 	}
   11467 
   11468 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11469 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11470 	status = CSR_READ(sc, WMREG_STATUS);
   11471 	signal = wm_tbi_havesignal(sc, ctrl);
   11472 
   11473 	DPRINTF(WM_DEBUG_LINK,
   11474 	    ("%s: %s: signal = %d, status_lu = %d, rxcw_c = %d\n",
   11475 		device_xname(sc->sc_dev), __func__, signal,
   11476 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11477 
   11478 	/*
   11479 	 * SWDPIN   LU RXCW
   11480 	 *	0    0	  0
   11481 	 *	0    0	  1	(should not happen)
   11482 	 *	0    1	  0	(should not happen)
   11483 	 *	0    1	  1	(should not happen)
    11484 	 *	1    0	  0	Disable autonegotiation and force link up
    11485 	 *	1    0	  1	Got /C/ but no link yet
    11486 	 *	1    1	  0	(link up)
    11487 	 *	1    1	  1	If IFM_AUTO, go back to autonegotiation
    11488 	 *
   11489 	 */
   11490 	if (signal && ((status & STATUS_LU) == 0) && ((rxcw & RXCW_C) == 0)) {
   11491 		DPRINTF(WM_DEBUG_LINK,
    11492 		    ("%s: %s: force link up and full-duplex\n",
   11493 			device_xname(sc->sc_dev), __func__));
   11494 		sc->sc_tbi_linkup = 0;
   11495 		/* Disable auto-negotiation in the TXCW register */
   11496 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11497 
   11498 		/*
   11499 		 * Force link-up and also force full-duplex.
   11500 		 *
    11501 		 * NOTE: the hardware has updated TFCE and RFCE in CTRL
    11502 		 * automatically, so keep sc->sc_ctrl in sync with it.
   11503 		 */
   11504 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11505 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11506 	} else if (((status & STATUS_LU) != 0)
   11507 	    && ((rxcw & RXCW_C) != 0)
   11508 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11509 		sc->sc_tbi_linkup = 1;
    11510 		DPRINTF(WM_DEBUG_LINK,
    11511 		    ("%s: %s: go back to autonegotiation\n",
    11512 			device_xname(sc->sc_dev), __func__));
   11513 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11514 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11515 	} else if (signal && ((rxcw & RXCW_C) != 0)) {
    11516 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: /C/\n",
   11517 			device_xname(sc->sc_dev), __func__));
   11518 	} else {
   11519 		DPRINTF(WM_DEBUG_LINK, ("%s: %s: linkup %08x,%08x,%08x\n",
   11520 			device_xname(sc->sc_dev), __func__, rxcw, ctrl,
   11521 			status));
   11522 	}
   11523 
   11524 	return 0;
   11525 }
   11526 
   11527 /*
   11528  * wm_tbi_tick:
   11529  *
   11530  *	Check the link on TBI devices.
   11531  *	This function acts as mii_tick().
   11532  */
   11533 static void
   11534 wm_tbi_tick(struct wm_softc *sc)
   11535 {
   11536 	struct mii_data *mii = &sc->sc_mii;
   11537 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11538 	uint32_t status;
   11539 
   11540 	KASSERT(WM_CORE_LOCKED(sc));
   11541 
   11542 	status = CSR_READ(sc, WMREG_STATUS);
   11543 
   11544 	/* XXX is this needed? */
   11545 	(void)CSR_READ(sc, WMREG_RXCW);
   11546 	(void)CSR_READ(sc, WMREG_CTRL);
   11547 
   11548 	/* set link status */
   11549 	if ((status & STATUS_LU) == 0) {
   11550 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
   11551 			device_xname(sc->sc_dev)));
   11552 		sc->sc_tbi_linkup = 0;
   11553 	} else if (sc->sc_tbi_linkup == 0) {
   11554 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
   11555 			device_xname(sc->sc_dev),
   11556 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11557 		sc->sc_tbi_linkup = 1;
   11558 		sc->sc_tbi_serdes_ticks = 0;
   11559 	}
   11560 
   11561 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11562 		goto setled;
   11563 
   11564 	if ((status & STATUS_LU) == 0) {
   11565 		sc->sc_tbi_linkup = 0;
   11566 		/* If the timer expired, retry autonegotiation */
   11567 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11568 		    && (++sc->sc_tbi_serdes_ticks
   11569 			>= sc->sc_tbi_serdes_anegticks)) {
   11570 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11571 			sc->sc_tbi_serdes_ticks = 0;
   11572 			/*
   11573 			 * Reset the link, and let autonegotiation do
   11574 			 * its thing
   11575 			 */
   11576 			sc->sc_ctrl |= CTRL_LRST;
   11577 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11578 			CSR_WRITE_FLUSH(sc);
   11579 			delay(1000);
   11580 			sc->sc_ctrl &= ~CTRL_LRST;
   11581 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11582 			CSR_WRITE_FLUSH(sc);
   11583 			delay(1000);
   11584 			CSR_WRITE(sc, WMREG_TXCW,
   11585 			    sc->sc_txcw & ~TXCW_ANE);
   11586 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11587 		}
   11588 	}
   11589 
   11590 setled:
   11591 	wm_tbi_serdes_set_linkled(sc);
   11592 }
   11593 
   11594 /* SERDES related */
   11595 static void
   11596 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11597 {
   11598 	uint32_t reg;
   11599 
   11600 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11601 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11602 		return;
   11603 
   11604 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11605 	reg |= PCS_CFG_PCS_EN;
   11606 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11607 
   11608 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11609 	reg &= ~CTRL_EXT_SWDPIN(3);
   11610 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11611 	CSR_WRITE_FLUSH(sc);
   11612 }
   11613 
   11614 static int
   11615 wm_serdes_mediachange(struct ifnet *ifp)
   11616 {
   11617 	struct wm_softc *sc = ifp->if_softc;
   11618 	bool pcs_autoneg = true; /* XXX */
   11619 	uint32_t ctrl_ext, pcs_lctl, reg;
   11620 
   11621 	/* XXX Currently, this function is not called on 8257[12] */
   11622 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11623 	    || (sc->sc_type >= WM_T_82575))
   11624 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11625 
   11626 	wm_serdes_power_up_link_82575(sc);
   11627 
   11628 	sc->sc_ctrl |= CTRL_SLU;
   11629 
   11630 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11631 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11632 
   11633 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11634 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11635 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11636 	case CTRL_EXT_LINK_MODE_SGMII:
   11637 		pcs_autoneg = true;
   11638 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11639 		break;
   11640 	case CTRL_EXT_LINK_MODE_1000KX:
   11641 		pcs_autoneg = false;
   11642 		/* FALLTHROUGH */
   11643 	default:
   11644 		if ((sc->sc_type == WM_T_82575)
   11645 		    || (sc->sc_type == WM_T_82576)) {
   11646 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11647 				pcs_autoneg = false;
   11648 		}
   11649 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11650 		    | CTRL_FRCFDX;
   11651 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11652 	}
   11653 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11654 
   11655 	if (pcs_autoneg) {
   11656 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11657 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11658 
   11659 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11660 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11661 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11662 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11663 	} else
   11664 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11665 
   11666 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11667 
   11669 	return 0;
   11670 }
   11671 
   11672 static void
   11673 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11674 {
   11675 	struct wm_softc *sc = ifp->if_softc;
   11676 	struct mii_data *mii = &sc->sc_mii;
   11677 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11678 	uint32_t pcs_adv, pcs_lpab, reg;
   11679 
   11680 	ifmr->ifm_status = IFM_AVALID;
   11681 	ifmr->ifm_active = IFM_ETHER;
   11682 
   11683 	/* Check PCS */
   11684 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11685 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11686 		ifmr->ifm_active |= IFM_NONE;
   11687 		sc->sc_tbi_linkup = 0;
   11688 		goto setled;
   11689 	}
   11690 
   11691 	sc->sc_tbi_linkup = 1;
   11692 	ifmr->ifm_status |= IFM_ACTIVE;
   11693 	if (sc->sc_type == WM_T_I354) {
   11694 		uint32_t status;
   11695 
   11696 		status = CSR_READ(sc, WMREG_STATUS);
   11697 		if (((status & STATUS_2P5_SKU) != 0)
   11698 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11699 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11700 		} else
   11701 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11702 	} else {
   11703 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11704 		case PCS_LSTS_SPEED_10:
   11705 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11706 			break;
   11707 		case PCS_LSTS_SPEED_100:
   11708 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11709 			break;
   11710 		case PCS_LSTS_SPEED_1000:
   11711 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11712 			break;
   11713 		default:
   11714 			device_printf(sc->sc_dev, "Unknown speed\n");
   11715 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11716 			break;
   11717 		}
   11718 	}
   11719 	if ((reg & PCS_LSTS_FDX) != 0)
   11720 		ifmr->ifm_active |= IFM_FDX;
   11721 	else
   11722 		ifmr->ifm_active |= IFM_HDX;
   11723 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11724 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11725 		/* Check flow */
   11726 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11727 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11728 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11729 			goto setled;
   11730 		}
   11731 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11732 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11733 		DPRINTF(WM_DEBUG_LINK,
   11734 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11735 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11736 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11737 			mii->mii_media_active |= IFM_FLOW
   11738 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11739 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11740 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11741 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11742 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11743 			mii->mii_media_active |= IFM_FLOW
   11744 			    | IFM_ETH_TXPAUSE;
   11745 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11746 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11747 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11748 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11749 			mii->mii_media_active |= IFM_FLOW
   11750 			    | IFM_ETH_RXPAUSE;
   11751 		}
   11752 	}
   11753 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11754 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11755 setled:
   11756 	wm_tbi_serdes_set_linkled(sc);
   11757 }
   11758 
   11759 /*
   11760  * wm_serdes_tick:
   11761  *
   11762  *	Check the link on serdes devices.
   11763  */
   11764 static void
   11765 wm_serdes_tick(struct wm_softc *sc)
   11766 {
   11767 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11768 	struct mii_data *mii = &sc->sc_mii;
   11769 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11770 	uint32_t reg;
   11771 
   11772 	KASSERT(WM_CORE_LOCKED(sc));
   11773 
   11774 	mii->mii_media_status = IFM_AVALID;
   11775 	mii->mii_media_active = IFM_ETHER;
   11776 
   11777 	/* Check PCS */
   11778 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11779 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11780 		mii->mii_media_status |= IFM_ACTIVE;
   11781 		sc->sc_tbi_linkup = 1;
   11782 		sc->sc_tbi_serdes_ticks = 0;
   11783 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11784 		if ((reg & PCS_LSTS_FDX) != 0)
   11785 			mii->mii_media_active |= IFM_FDX;
   11786 		else
   11787 			mii->mii_media_active |= IFM_HDX;
   11788 	} else {
    11789 		mii->mii_media_active |= IFM_NONE;
   11790 		sc->sc_tbi_linkup = 0;
   11791 		/* If the timer expired, retry autonegotiation */
   11792 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11793 		    && (++sc->sc_tbi_serdes_ticks
   11794 			>= sc->sc_tbi_serdes_anegticks)) {
   11795 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11796 			sc->sc_tbi_serdes_ticks = 0;
   11797 			/* XXX */
   11798 			wm_serdes_mediachange(ifp);
   11799 		}
   11800 	}
   11801 
   11802 	wm_tbi_serdes_set_linkled(sc);
   11803 }
   11804 
   11805 /* SFP related */
   11806 
   11807 static int
   11808 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11809 {
   11810 	uint32_t i2ccmd;
   11811 	int i;
   11812 
   11813 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11814 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11815 
   11816 	/* Poll the ready bit */
   11817 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11818 		delay(50);
   11819 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11820 		if (i2ccmd & I2CCMD_READY)
   11821 			break;
   11822 	}
   11823 	if ((i2ccmd & I2CCMD_READY) == 0)
   11824 		return -1;
   11825 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11826 		return -1;
   11827 
   11828 	*data = i2ccmd & 0x00ff;
   11829 
   11830 	return 0;
   11831 }
   11832 
   11833 static uint32_t
   11834 wm_sfp_get_media_type(struct wm_softc *sc)
   11835 {
   11836 	uint32_t ctrl_ext;
   11837 	uint8_t val = 0;
   11838 	int timeout = 3;
   11839 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11840 	int rv = -1;
   11841 
   11842 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11843 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11844 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11845 	CSR_WRITE_FLUSH(sc);
   11846 
   11847 	/* Read SFP module data */
   11848 	while (timeout) {
   11849 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11850 		if (rv == 0)
   11851 			break;
   11852 		delay(100*1000); /* XXX too big */
   11853 		timeout--;
   11854 	}
   11855 	if (rv != 0)
   11856 		goto out;
   11857 	switch (val) {
   11858 	case SFF_SFP_ID_SFF:
   11859 		aprint_normal_dev(sc->sc_dev,
   11860 		    "Module/Connector soldered to board\n");
   11861 		break;
   11862 	case SFF_SFP_ID_SFP:
   11863 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11864 		break;
   11865 	case SFF_SFP_ID_UNKNOWN:
   11866 		goto out;
   11867 	default:
   11868 		break;
   11869 	}
   11870 
   11871 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11872 	if (rv != 0) {
   11873 		goto out;
   11874 	}
   11875 
   11876 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11877 		mediatype = WM_MEDIATYPE_SERDES;
   11878 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11879 		sc->sc_flags |= WM_F_SGMII;
   11880 		mediatype = WM_MEDIATYPE_COPPER;
   11881 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11882 		sc->sc_flags |= WM_F_SGMII;
   11883 		mediatype = WM_MEDIATYPE_SERDES;
   11884 	}
   11885 
   11886 out:
   11887 	/* Restore I2C interface setting */
   11888 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11889 
   11890 	return mediatype;
   11891 }
   11892 
   11893 /*
   11894  * NVM related.
   11895  * Microwire, SPI (w/wo EERD) and Flash.
   11896  */
   11897 
   11898 /* Both spi and uwire */
   11899 
   11900 /*
   11901  * wm_eeprom_sendbits:
   11902  *
   11903  *	Send a series of bits to the EEPROM.
   11904  */
   11905 static void
   11906 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11907 {
   11908 	uint32_t reg;
   11909 	int x;
   11910 
   11911 	reg = CSR_READ(sc, WMREG_EECD);
   11912 
   11913 	for (x = nbits; x > 0; x--) {
   11914 		if (bits & (1U << (x - 1)))
   11915 			reg |= EECD_DI;
   11916 		else
   11917 			reg &= ~EECD_DI;
   11918 		CSR_WRITE(sc, WMREG_EECD, reg);
   11919 		CSR_WRITE_FLUSH(sc);
   11920 		delay(2);
   11921 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11922 		CSR_WRITE_FLUSH(sc);
   11923 		delay(2);
   11924 		CSR_WRITE(sc, WMREG_EECD, reg);
   11925 		CSR_WRITE_FLUSH(sc);
   11926 		delay(2);
   11927 	}
   11928 }
   11929 
   11930 /*
   11931  * wm_eeprom_recvbits:
   11932  *
   11933  *	Receive a series of bits from the EEPROM.
   11934  */
   11935 static void
   11936 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11937 {
   11938 	uint32_t reg, val;
   11939 	int x;
   11940 
   11941 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11942 
   11943 	val = 0;
   11944 	for (x = nbits; x > 0; x--) {
   11945 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11946 		CSR_WRITE_FLUSH(sc);
   11947 		delay(2);
   11948 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11949 			val |= (1U << (x - 1));
   11950 		CSR_WRITE(sc, WMREG_EECD, reg);
   11951 		CSR_WRITE_FLUSH(sc);
   11952 		delay(2);
   11953 	}
   11954 	*valp = val;
   11955 }
   11956 
   11957 /* Microwire */
   11958 
   11959 /*
   11960  * wm_nvm_read_uwire:
   11961  *
   11962  *	Read a word from the EEPROM using the MicroWire protocol.
   11963  */
   11964 static int
   11965 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11966 {
   11967 	uint32_t reg, val;
   11968 	int i;
   11969 
   11970 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11971 		device_xname(sc->sc_dev), __func__));
   11972 
   11973 	if (sc->nvm.acquire(sc) != 0)
   11974 		return -1;
   11975 
   11976 	for (i = 0; i < wordcnt; i++) {
   11977 		/* Clear SK and DI. */
   11978 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11979 		CSR_WRITE(sc, WMREG_EECD, reg);
   11980 
   11981 		/*
   11982 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11983 		 * and Xen.
   11984 		 *
   11985 		 * We use this workaround only for 82540 because qemu's
    11986 		 * e1000 acts as an 82540.
   11987 		 */
   11988 		if (sc->sc_type == WM_T_82540) {
   11989 			reg |= EECD_SK;
   11990 			CSR_WRITE(sc, WMREG_EECD, reg);
   11991 			reg &= ~EECD_SK;
   11992 			CSR_WRITE(sc, WMREG_EECD, reg);
   11993 			CSR_WRITE_FLUSH(sc);
   11994 			delay(2);
   11995 		}
   11996 		/* XXX: end of workaround */
   11997 
   11998 		/* Set CHIP SELECT. */
   11999 		reg |= EECD_CS;
   12000 		CSR_WRITE(sc, WMREG_EECD, reg);
   12001 		CSR_WRITE_FLUSH(sc);
   12002 		delay(2);
   12003 
   12004 		/* Shift in the READ command. */
   12005 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   12006 
   12007 		/* Shift in address. */
   12008 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   12009 
   12010 		/* Shift out the data. */
   12011 		wm_eeprom_recvbits(sc, &val, 16);
   12012 		data[i] = val & 0xffff;
   12013 
   12014 		/* Clear CHIP SELECT. */
   12015 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   12016 		CSR_WRITE(sc, WMREG_EECD, reg);
   12017 		CSR_WRITE_FLUSH(sc);
   12018 		delay(2);
   12019 	}
   12020 
   12021 	sc->nvm.release(sc);
   12022 	return 0;
   12023 }
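          
          /*
           * Worked example (illustrative): with 6 NVM address bits, reading
           * word 0x10 clocks out, MSB first, the 3-bit READ opcode followed
           * by the 6-bit address 010000; 16 data bits are then clocked back
           * in.  A minimal caller sketch, assuming a Microwire part:
           */
          #if 0
          	uint16_t macwords[3];
          
          	/* Read three words starting at NVM word 0 (the MAC address). */
          	if (wm_nvm_read_uwire(sc, 0, 3, macwords) != 0)
          		printf("uwire read failed\n");
          #endif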
   12024 
   12025 /* SPI */
   12026 
   12027 /*
   12028  * Set SPI and FLASH related information from the EECD register.
   12029  * For 82541 and 82547, the word size is taken from EEPROM.
   12030  */
   12031 static int
   12032 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   12033 {
   12034 	int size;
   12035 	uint32_t reg;
   12036 	uint16_t data;
   12037 
   12038 	reg = CSR_READ(sc, WMREG_EECD);
   12039 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   12040 
   12041 	/* Read the size of NVM from EECD by default */
   12042 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12043 	switch (sc->sc_type) {
   12044 	case WM_T_82541:
   12045 	case WM_T_82541_2:
   12046 	case WM_T_82547:
   12047 	case WM_T_82547_2:
   12048 		/* Set dummy value to access EEPROM */
   12049 		sc->sc_nvm_wordsize = 64;
   12050 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
   12051 			aprint_error_dev(sc->sc_dev,
   12052 			    "%s: failed to read EEPROM size\n", __func__);
   12053 		}
   12054 		reg = data;
   12055 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   12056 		if (size == 0)
   12057 			size = 6; /* 64 word size */
   12058 		else
   12059 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   12060 		break;
   12061 	case WM_T_80003:
   12062 	case WM_T_82571:
   12063 	case WM_T_82572:
   12064 	case WM_T_82573: /* SPI case */
   12065 	case WM_T_82574: /* SPI case */
   12066 	case WM_T_82583: /* SPI case */
   12067 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12068 		if (size > 14)
   12069 			size = 14;
   12070 		break;
   12071 	case WM_T_82575:
   12072 	case WM_T_82576:
   12073 	case WM_T_82580:
   12074 	case WM_T_I350:
   12075 	case WM_T_I354:
   12076 	case WM_T_I210:
   12077 	case WM_T_I211:
   12078 		size += NVM_WORD_SIZE_BASE_SHIFT;
   12079 		if (size > 15)
   12080 			size = 15;
   12081 		break;
   12082 	default:
   12083 		aprint_error_dev(sc->sc_dev,
   12084 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    12085 		return -1;
   12087 	}
   12088 
   12089 	sc->sc_nvm_wordsize = 1 << size;
   12090 
   12091 	return 0;
   12092 }
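          
          /*
           * Worked example (illustrative): the 64-word default above implies
           * NVM_WORD_SIZE_BASE_SHIFT is 6 (1 << 6 == 64).  An EECD size
           * field of 2 on, say, an 82571 then gives size = 2 + 6 = 8, so
           * sc_nvm_wordsize = 1 << 8 = 256 words.  On 82541/82547 the field
           * is read from EEPROM word NVM_OFF_EEPROM_SIZE instead and gets an
           * extra "+ 1", so the same field value would yield 512 words.
           */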
   12093 
   12094 /*
   12095  * wm_nvm_ready_spi:
   12096  *
   12097  *	Wait for a SPI EEPROM to be ready for commands.
   12098  */
   12099 static int
   12100 wm_nvm_ready_spi(struct wm_softc *sc)
   12101 {
   12102 	uint32_t val;
   12103 	int usec;
   12104 
   12105 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12106 		device_xname(sc->sc_dev), __func__));
   12107 
   12108 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   12109 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   12110 		wm_eeprom_recvbits(sc, &val, 8);
   12111 		if ((val & SPI_SR_RDY) == 0)
   12112 			break;
   12113 	}
   12114 	if (usec >= SPI_MAX_RETRIES) {
   12115 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
   12116 		return -1;
   12117 	}
   12118 	return 0;
   12119 }
   12120 
   12121 /*
   12122  * wm_nvm_read_spi:
   12123  *
    12124  *	Read a word from the EEPROM using the SPI protocol.
   12125  */
   12126 static int
   12127 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12128 {
   12129 	uint32_t reg, val;
   12130 	int i;
   12131 	uint8_t opc;
   12132 	int rv = 0;
   12133 
   12134 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12135 		device_xname(sc->sc_dev), __func__));
   12136 
   12137 	if (sc->nvm.acquire(sc) != 0)
   12138 		return -1;
   12139 
   12140 	/* Clear SK and CS. */
   12141 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   12142 	CSR_WRITE(sc, WMREG_EECD, reg);
   12143 	CSR_WRITE_FLUSH(sc);
   12144 	delay(2);
   12145 
   12146 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   12147 		goto out;
   12148 
   12149 	/* Toggle CS to flush commands. */
   12150 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   12151 	CSR_WRITE_FLUSH(sc);
   12152 	delay(2);
   12153 	CSR_WRITE(sc, WMREG_EECD, reg);
   12154 	CSR_WRITE_FLUSH(sc);
   12155 	delay(2);
   12156 
   12157 	opc = SPI_OPC_READ;
   12158 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   12159 		opc |= SPI_OPC_A8;
   12160 
   12161 	wm_eeprom_sendbits(sc, opc, 8);
   12162 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   12163 
   12164 	for (i = 0; i < wordcnt; i++) {
   12165 		wm_eeprom_recvbits(sc, &val, 16);
   12166 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   12167 	}
   12168 
   12169 	/* Raise CS and clear SK. */
   12170 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   12171 	CSR_WRITE(sc, WMREG_EECD, reg);
   12172 	CSR_WRITE_FLUSH(sc);
   12173 	delay(2);
   12174 
   12175 out:
   12176 	sc->nvm.release(sc);
   12177 	return rv;
   12178 }
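          
          /*
           * Note on SPI_OPC_A8 above (worked example, illustrative):
           * "word << 1" converts the word offset to a byte address.  Parts
           * with 8 address bits but more than 256 bytes carry the ninth
           * address bit in the opcode, so word offsets >= 128 (byte offsets
           * >= 256) need the A8 bit.  For example, word 0x90 is byte address
           * 0x120: the A8 bit supplies the ninth address bit and the low
           * eight bits sent are 0x20.
           */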
   12179 
    12180 /* Reading with the EERD register */
   12181 
   12182 static int
   12183 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   12184 {
   12185 	uint32_t attempts = 100000;
   12186 	uint32_t i, reg = 0;
   12187 	int32_t done = -1;
   12188 
   12189 	for (i = 0; i < attempts; i++) {
   12190 		reg = CSR_READ(sc, rw);
   12191 
   12192 		if (reg & EERD_DONE) {
   12193 			done = 0;
   12194 			break;
   12195 		}
   12196 		delay(5);
   12197 	}
   12198 
   12199 	return done;
   12200 }
   12201 
   12202 static int
   12203 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
   12204 {
   12205 	int i, eerd = 0;
   12206 	int rv = 0;
   12207 
   12208 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12209 		device_xname(sc->sc_dev), __func__));
   12210 
   12211 	if (sc->nvm.acquire(sc) != 0)
   12212 		return -1;
   12213 
   12214 	for (i = 0; i < wordcnt; i++) {
   12215 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   12216 		CSR_WRITE(sc, WMREG_EERD, eerd);
   12217 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   12218 		if (rv != 0) {
   12219 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
    12220 			    "offset=%d, wordcnt=%d\n", offset, wordcnt);
   12221 			break;
   12222 		}
   12223 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   12224 	}
   12225 
   12226 	sc->nvm.release(sc);
   12227 	return rv;
   12228 }
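          
          /*
           * Illustrative sketch (hypothetical standalone fragment): a single
           * EERD transaction packs the word address into the address field
           * and sets START; the hardware sets DONE and returns the word in
           * the data field:
           */
          #if 0
          	uint16_t word;
          
          	/* Start a read of NVM word 0x10, then poll for completion. */
          	CSR_WRITE(sc, WMREG_EERD, (0x10 << EERD_ADDR_SHIFT) | EERD_START);
          	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
          		delay(5);
          	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
          #endif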
   12229 
   12230 /* Flash */
   12231 
   12232 static int
   12233 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   12234 {
   12235 	uint32_t eecd;
   12236 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   12237 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   12238 	uint32_t nvm_dword = 0;
   12239 	uint8_t sig_byte = 0;
   12240 	int rv;
   12241 
   12242 	switch (sc->sc_type) {
   12243 	case WM_T_PCH_SPT:
   12244 	case WM_T_PCH_CNP:
   12245 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
   12246 		act_offset = ICH_NVM_SIG_WORD * 2;
   12247 
   12248 		/* set bank to 0 in case flash read fails. */
   12249 		*bank = 0;
   12250 
   12251 		/* Check bank 0 */
   12252 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
   12253 		if (rv != 0)
   12254 			return rv;
   12255 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12256 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12257 			*bank = 0;
   12258 			return 0;
   12259 		}
   12260 
   12261 		/* Check bank 1 */
   12262 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
    12263 		    &nvm_dword);
          		if (rv != 0)
          			return rv;
   12264 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
   12265 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12266 			*bank = 1;
   12267 			return 0;
   12268 		}
   12269 		aprint_error_dev(sc->sc_dev,
   12270 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
   12271 		return -1;
   12272 	case WM_T_ICH8:
   12273 	case WM_T_ICH9:
   12274 		eecd = CSR_READ(sc, WMREG_EECD);
   12275 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   12276 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   12277 			return 0;
   12278 		}
   12279 		/* FALLTHROUGH */
   12280 	default:
   12281 		/* Default to 0 */
   12282 		*bank = 0;
   12283 
   12284 		/* Check bank 0 */
   12285 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   12286 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12287 			*bank = 0;
   12288 			return 0;
   12289 		}
   12290 
   12291 		/* Check bank 1 */
   12292 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   12293 		    &sig_byte);
   12294 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   12295 			*bank = 1;
   12296 			return 0;
   12297 		}
   12298 	}
   12299 
   12300 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   12301 		device_xname(sc->sc_dev)));
   12302 	return -1;
   12303 }
   12304 
   12305 /******************************************************************************
   12306  * This function does initial flash setup so that a new read/write/erase cycle
   12307  * can be started.
   12308  *
   12309  * sc - The pointer to the hw structure
   12310  ****************************************************************************/
   12311 static int32_t
   12312 wm_ich8_cycle_init(struct wm_softc *sc)
   12313 {
   12314 	uint16_t hsfsts;
   12315 	int32_t error = 1;
   12316 	int32_t i     = 0;
   12317 
   12318 	if (sc->sc_type >= WM_T_PCH_SPT)
   12319 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
   12320 	else
   12321 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12322 
    12323 	/* Check that the Flash Descriptor Valid bit is set in HW status */
   12324 	if ((hsfsts & HSFSTS_FLDVAL) == 0)
   12325 		return error;
   12326 
    12327 	/* Clear FCERR and DAEL in HW status by writing 1s */
   12329 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   12330 
   12331 	if (sc->sc_type >= WM_T_PCH_SPT)
   12332 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
   12333 	else
   12334 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12335 
    12336 	/*
    12337 	 * Either we should have a hardware SPI cycle-in-progress bit to
    12338 	 * check against before starting a new cycle, or the FDONE bit
    12339 	 * should be changed in the hardware so that it reads as 1 after
    12340 	 * a hardware reset, which could then indicate whether a cycle is
    12341 	 * in progress or has completed.  We should also have a software
    12342 	 * semaphore to guard FDONE or the cycle-in-progress bit so that
    12343 	 * accesses by two threads are serialized, or some way to keep
    12344 	 * two threads from starting a cycle at the same time.
    12345 	 */
   12346 
   12347 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12348 		/*
   12349 		 * There is no cycle running at present, so we can start a
   12350 		 * cycle
   12351 		 */
   12352 
   12353 		/* Begin by setting Flash Cycle Done. */
   12354 		hsfsts |= HSFSTS_DONE;
   12355 		if (sc->sc_type >= WM_T_PCH_SPT)
   12356 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12357 			    hsfsts & 0xffffUL);
   12358 		else
   12359 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   12360 		error = 0;
   12361 	} else {
    12362 		/*
    12363 		 * Otherwise, poll for some time so the current cycle has a
    12364 		 * chance to end before giving up.
    12365 		 */
   12366 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   12367 			if (sc->sc_type >= WM_T_PCH_SPT)
   12368 				hsfsts = ICH8_FLASH_READ32(sc,
   12369 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12370 			else
   12371 				hsfsts = ICH8_FLASH_READ16(sc,
   12372 				    ICH_FLASH_HSFSTS);
   12373 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   12374 				error = 0;
   12375 				break;
   12376 			}
   12377 			delay(1);
   12378 		}
   12379 		if (error == 0) {
    12380 			/*
    12381 			 * The previous cycle ended within the timeout, so
    12382 			 * now set Flash Cycle Done.
    12383 			 */
   12384 			hsfsts |= HSFSTS_DONE;
   12385 			if (sc->sc_type >= WM_T_PCH_SPT)
   12386 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12387 				    hsfsts & 0xffffUL);
   12388 			else
   12389 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
   12390 				    hsfsts);
   12391 		}
   12392 	}
   12393 	return error;
   12394 }
   12395 
   12396 /******************************************************************************
   12397  * This function starts a flash cycle and waits for its completion
   12398  *
   12399  * sc - The pointer to the hw structure
   12400  ****************************************************************************/
   12401 static int32_t
   12402 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   12403 {
   12404 	uint16_t hsflctl;
   12405 	uint16_t hsfsts;
   12406 	int32_t error = 1;
   12407 	uint32_t i = 0;
   12408 
   12409 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   12410 	if (sc->sc_type >= WM_T_PCH_SPT)
   12411 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
   12412 	else
   12413 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   12414 	hsflctl |= HSFCTL_GO;
   12415 	if (sc->sc_type >= WM_T_PCH_SPT)
   12416 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12417 		    (uint32_t)hsflctl << 16);
   12418 	else
   12419 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12420 
    12421 	/* Wait until the FDONE bit is set to 1 */
   12422 	do {
   12423 		if (sc->sc_type >= WM_T_PCH_SPT)
   12424 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12425 			    & 0xffffUL;
   12426 		else
   12427 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12428 		if (hsfsts & HSFSTS_DONE)
   12429 			break;
   12430 		delay(1);
   12431 		i++;
   12432 	} while (i < timeout);
    12433 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   12434 		error = 0;
   12435 
   12436 	return error;
   12437 }
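          
          /*
           * Layout note (illustrative): on PCH_SPT and newer, the flash
           * control registers live in LAN memory space and only 32-bit
           * accesses work, so HSFSTS and HSFCTL are accessed as a single
           * 32-bit register with HSFSTS in the low 16 bits and HSFCTL in
           * the high 16 bits.  That is why the code above reads ">> 16" to
           * get hsflctl and writes it back as "(uint32_t)hsflctl << 16".
           */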
   12438 
   12439 /******************************************************************************
   12440  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   12441  *
   12442  * sc - The pointer to the hw structure
   12443  * index - The index of the byte or word to read.
   12444  * size - Size of data to read, 1=byte 2=word, 4=dword
   12445  * data - Pointer to the word to store the value read.
   12446  *****************************************************************************/
   12447 static int32_t
   12448 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   12449     uint32_t size, uint32_t *data)
   12450 {
   12451 	uint16_t hsfsts;
   12452 	uint16_t hsflctl;
   12453 	uint32_t flash_linear_address;
   12454 	uint32_t flash_data = 0;
   12455 	int32_t error = 1;
   12456 	int32_t count = 0;
   12457 
    12458 	if (size < 1 || size > 4 || data == NULL ||
   12459 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   12460 		return error;
   12461 
   12462 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   12463 	    sc->sc_ich8_flash_base;
   12464 
   12465 	do {
   12466 		delay(1);
   12467 		/* Steps */
   12468 		error = wm_ich8_cycle_init(sc);
   12469 		if (error)
   12470 			break;
   12471 
   12472 		if (sc->sc_type >= WM_T_PCH_SPT)
   12473 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
   12474 			    >> 16;
   12475 		else
   12476 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    12477 		/* The BCOUNT field holds the transfer size minus one. */
   12478 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   12479 		    & HSFCTL_BCOUNT_MASK;
   12480 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   12481 		if (sc->sc_type >= WM_T_PCH_SPT) {
   12482 			/*
    12483 			 * On SPT, this register is in LAN memory space, not
    12484 			 * flash, so only 32-bit access is supported.
   12485 			 */
   12486 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
   12487 			    (uint32_t)hsflctl << 16);
   12488 		} else
   12489 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   12490 
   12491 		/*
   12492 		 * Write the last 24 bits of index into Flash Linear address
   12493 		 * field in Flash Address
   12494 		 */
   12495 		/* TODO: TBD maybe check the index against the size of flash */
   12496 
   12497 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   12498 
   12499 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   12500 
    12501 		/*
    12502 		 * If FCERR is set to 1, clear it and retry the whole
    12503 		 * sequence a few more times; otherwise read in the Flash
    12504 		 * Data0 register, which returns the data least-significant
    12505 		 * byte first.
    12506 		 */
   12507 		if (error == 0) {
   12508 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12509 			if (size == 1)
   12510 				*data = (uint8_t)(flash_data & 0x000000FF);
   12511 			else if (size == 2)
   12512 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12513 			else if (size == 4)
   12514 				*data = (uint32_t)flash_data;
   12515 			break;
   12516 		} else {
   12517 			/*
   12518 			 * If we've gotten here, then things are probably
   12519 			 * completely hosed, but if the error condition is
   12520 			 * detected, it won't hurt to give it another try...
   12521 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12522 			 */
   12523 			if (sc->sc_type >= WM_T_PCH_SPT)
   12524 				hsfsts = ICH8_FLASH_READ32(sc,
   12525 				    ICH_FLASH_HSFSTS) & 0xffffUL;
   12526 			else
   12527 				hsfsts = ICH8_FLASH_READ16(sc,
   12528 				    ICH_FLASH_HSFSTS);
   12529 
   12530 			if (hsfsts & HSFSTS_ERR) {
   12531 				/* Repeat for some time before giving up. */
   12532 				continue;
   12533 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12534 				break;
   12535 		}
   12536 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12537 
   12538 	return error;
   12539 }
   12540 
   12541 /******************************************************************************
   12542  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12543  *
    12544  * sc - pointer to the wm_softc structure
   12545  * index - The index of the byte to read.
   12546  * data - Pointer to a byte to store the value read.
   12547  *****************************************************************************/
   12548 static int32_t
   12549 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12550 {
   12551 	int32_t status;
   12552 	uint32_t word = 0;
   12553 
   12554 	status = wm_read_ich8_data(sc, index, 1, &word);
   12555 	if (status == 0)
   12556 		*data = (uint8_t)word;
   12557 	else
   12558 		*data = 0;
   12559 
   12560 	return status;
   12561 }
   12562 
   12563 /******************************************************************************
   12564  * Reads a word from the NVM using the ICH8 flash access registers.
   12565  *
    12566  * sc - pointer to the wm_softc structure
   12567  * index - The starting byte index of the word to read.
   12568  * data - Pointer to a word to store the value read.
   12569  *****************************************************************************/
   12570 static int32_t
   12571 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12572 {
   12573 	int32_t status;
   12574 	uint32_t word = 0;
   12575 
   12576 	status = wm_read_ich8_data(sc, index, 2, &word);
   12577 	if (status == 0)
   12578 		*data = (uint16_t)word;
   12579 	else
   12580 		*data = 0;
   12581 
   12582 	return status;
   12583 }
   12584 
   12585 /******************************************************************************
   12586  * Reads a dword from the NVM using the ICH8 flash access registers.
   12587  *
    12588  * sc - pointer to the wm_softc structure
    12589  * index - The starting byte index of the dword to read.
    12590  * data - Pointer to a dword to store the value read.
   12591  *****************************************************************************/
   12592 static int32_t
   12593 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12594 {
   12595 	int32_t status;
   12596 
   12597 	status = wm_read_ich8_data(sc, index, 4, data);
   12598 	return status;
   12599 }
   12600 
   12601 /******************************************************************************
   12602  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12603  * register.
   12604  *
   12605  * sc - Struct containing variables accessed by shared code
   12606  * offset - offset of word in the EEPROM to read
   12607  * data - word read from the EEPROM
   12608  * words - number of words to read
   12609  *****************************************************************************/
   12610 static int
   12611 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12612 {
   12613 	int32_t	 rv = 0;
   12614 	uint32_t flash_bank = 0;
   12615 	uint32_t act_offset = 0;
   12616 	uint32_t bank_offset = 0;
   12617 	uint16_t word = 0;
   12618 	uint16_t i = 0;
   12619 
   12620 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12621 		device_xname(sc->sc_dev), __func__));
   12622 
   12623 	if (sc->nvm.acquire(sc) != 0)
   12624 		return -1;
   12625 
   12626 	/*
   12627 	 * We need to know which is the valid flash bank.  In the event
   12628 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12629 	 * managing flash_bank. So it cannot be trusted and needs
   12630 	 * to be updated with each read.
   12631 	 */
   12632 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12633 	if (rv) {
   12634 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12635 			device_xname(sc->sc_dev)));
   12636 		flash_bank = 0;
   12637 	}
   12638 
    12639 	/*
    12640 	 * If we're on bank 1, adjust the byte offset by the bank size
    12641 	 * (the flash bank size is in words, hence the * 2 below).
    12642 	 */
   12643 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12644 
   12645 	for (i = 0; i < words; i++) {
   12646 		/* The NVM part needs a byte offset, hence * 2 */
   12647 		act_offset = bank_offset + ((offset + i) * 2);
   12648 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12649 		if (rv) {
   12650 			aprint_error_dev(sc->sc_dev,
   12651 			    "%s: failed to read NVM\n", __func__);
   12652 			break;
   12653 		}
   12654 		data[i] = word;
   12655 	}
   12656 
   12657 	sc->nvm.release(sc);
   12658 	return rv;
   12659 }
   12660 
   12661 /******************************************************************************
   12662  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12663  * register.
   12664  *
   12665  * sc - Struct containing variables accessed by shared code
   12666  * offset - offset of word in the EEPROM to read
   12667  * data - word read from the EEPROM
   12668  * words - number of words to read
   12669  *****************************************************************************/
   12670 static int
   12671 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12672 {
   12673 	int32_t	 rv = 0;
   12674 	uint32_t flash_bank = 0;
   12675 	uint32_t act_offset = 0;
   12676 	uint32_t bank_offset = 0;
   12677 	uint32_t dword = 0;
   12678 	uint16_t i = 0;
   12679 
   12680 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12681 		device_xname(sc->sc_dev), __func__));
   12682 
   12683 	if (sc->nvm.acquire(sc) != 0)
   12684 		return -1;
   12685 
   12686 	/*
   12687 	 * We need to know which is the valid flash bank.  In the event
   12688 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12689 	 * managing flash_bank. So it cannot be trusted and needs
   12690 	 * to be updated with each read.
   12691 	 */
   12692 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12693 	if (rv) {
   12694 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12695 			device_xname(sc->sc_dev)));
   12696 		flash_bank = 0;
   12697 	}
   12698 
    12699 	/*
    12700 	 * If we're on bank 1, adjust the byte offset by the bank size
    12701 	 * (the flash bank size is in words, hence the * 2 below).
    12702 	 */
   12703 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12704 
   12705 	for (i = 0; i < words; i++) {
   12706 		/* The NVM part needs a byte offset, hence * 2 */
   12707 		act_offset = bank_offset + ((offset + i) * 2);
   12708 		/* but we must read dword aligned, so mask ... */
   12709 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12710 		if (rv) {
   12711 			aprint_error_dev(sc->sc_dev,
   12712 			    "%s: failed to read NVM\n", __func__);
   12713 			break;
   12714 		}
   12715 		/* ... and pick out low or high word */
   12716 		if ((act_offset & 0x2) == 0)
   12717 			data[i] = (uint16_t)(dword & 0xFFFF);
   12718 		else
   12719 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12720 	}
   12721 
   12722 	sc->nvm.release(sc);
   12723 	return rv;
   12724 }
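          
          /*
           * Worked example (illustrative): reading NVM word 3 from bank 0
           * gives act_offset = 6.  The dword is read at byte offset 4
           * (6 & ~0x3), and since (6 & 0x2) != 0 the high 16 bits are
           * returned; word 2 would come from the low 16 bits of the same
           * dword.
           */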
   12725 
   12726 /* iNVM */
   12727 
   12728 static int
   12729 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12730 {
    12731 	int32_t	 rv = -1;	/* Return an error if the word is not found */
   12732 	uint32_t invm_dword;
   12733 	uint16_t i;
   12734 	uint8_t record_type, word_address;
   12735 
   12736 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12737 		device_xname(sc->sc_dev), __func__));
   12738 
   12739 	for (i = 0; i < INVM_SIZE; i++) {
   12740 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12741 		/* Get record type */
   12742 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12743 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12744 			break;
   12745 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12746 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12747 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12748 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12749 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12750 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12751 			if (word_address == address) {
   12752 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12753 				rv = 0;
   12754 				break;
   12755 			}
   12756 		}
   12757 	}
   12758 
   12759 	return rv;
   12760 }
   12761 
   12762 static int
   12763 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12764 {
   12765 	int rv = 0;
   12766 	int i;
   12767 
   12768 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12769 		device_xname(sc->sc_dev), __func__));
   12770 
   12771 	if (sc->nvm.acquire(sc) != 0)
   12772 		return -1;
   12773 
   12774 	for (i = 0; i < words; i++) {
   12775 		switch (offset + i) {
   12776 		case NVM_OFF_MACADDR:
   12777 		case NVM_OFF_MACADDR1:
   12778 		case NVM_OFF_MACADDR2:
   12779 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12780 			if (rv != 0) {
   12781 				data[i] = 0xffff;
   12782 				rv = -1;
   12783 			}
   12784 			break;
    12785 		case NVM_OFF_CFG2:
    12786 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12787 			if (rv != 0) {
    12788 				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
    12789 				rv = 0;
    12790 			}
    12791 			break;
    12792 		case NVM_OFF_CFG4:
    12793 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12794 			if (rv != 0) {
    12795 				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
    12796 				rv = 0;
    12797 			}
    12798 			break;
    12799 		case NVM_OFF_LED_1_CFG:
    12800 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12801 			if (rv != 0) {
    12802 				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
    12803 				rv = 0;
    12804 			}
    12805 			break;
    12806 		case NVM_OFF_LED_0_2_CFG:
    12807 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12808 			if (rv != 0) {
    12809 				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
    12810 				rv = 0;
    12811 			}
    12812 			break;
    12813 		case NVM_OFF_ID_LED_SETTINGS:
    12814 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
    12815 			if (rv != 0) {
    12816 				data[i] = ID_LED_RESERVED_FFFF;
    12817 				rv = 0;
    12818 			}
    12819 			break;
    12820 		default:
    12821 			DPRINTF(WM_DEBUG_NVM,
    12822 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    12823 			data[i] = NVM_RESERVED_WORD;
    12824 			break;
   12825 		}
   12826 	}
   12827 
   12828 	sc->nvm.release(sc);
   12829 	return rv;
   12830 }
   12831 
   12832 /* Lock, detecting NVM type, validate checksum, version and read */
   12833 
   12834 static int
   12835 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12836 {
   12837 	uint32_t eecd = 0;
   12838 
   12839 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12840 	    || sc->sc_type == WM_T_82583) {
   12841 		eecd = CSR_READ(sc, WMREG_EECD);
   12842 
   12843 		/* Isolate bits 15 & 16 */
   12844 		eecd = ((eecd >> 15) & 0x03);
   12845 
   12846 		/* If both bits are set, device is Flash type */
   12847 		if (eecd == 0x03)
   12848 			return 0;
   12849 	}
   12850 	return 1;
   12851 }
   12852 
   12853 static int
   12854 wm_nvm_flash_presence_i210(struct wm_softc *sc)
   12855 {
   12856 	uint32_t eec;
   12857 
   12858 	eec = CSR_READ(sc, WMREG_EEC);
   12859 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12860 		return 1;
   12861 
   12862 	return 0;
   12863 }
   12864 
   12865 /*
   12866  * wm_nvm_validate_checksum
   12867  *
   12868  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12869  */
   12870 static int
   12871 wm_nvm_validate_checksum(struct wm_softc *sc)
   12872 {
   12873 	uint16_t checksum;
   12874 	uint16_t eeprom_data;
   12875 #ifdef WM_DEBUG
   12876 	uint16_t csum_wordaddr, valid_checksum;
   12877 #endif
   12878 	int i;
   12879 
   12880 	checksum = 0;
   12881 
   12882 	/* Don't check for I211 */
   12883 	if (sc->sc_type == WM_T_I211)
   12884 		return 0;
   12885 
   12886 #ifdef WM_DEBUG
   12887 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
   12888 	    || (sc->sc_type == WM_T_PCH_CNP)) {
   12889 		csum_wordaddr = NVM_OFF_COMPAT;
   12890 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12891 	} else {
   12892 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12893 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12894 	}
   12895 
   12896 	/* Dump EEPROM image for debug */
   12897 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12898 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12899 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12900 		/* XXX PCH_SPT? */
   12901 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12902 		if ((eeprom_data & valid_checksum) == 0) {
   12903 			DPRINTF(WM_DEBUG_NVM,
    12904 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12905 				device_xname(sc->sc_dev), eeprom_data,
   12906 				    valid_checksum));
   12907 		}
   12908 	}
   12909 
   12910 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12911 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12912 		for (i = 0; i < NVM_SIZE; i++) {
   12913 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12914 				printf("XXXX ");
   12915 			else
   12916 				printf("%04hx ", eeprom_data);
   12917 			if (i % 8 == 7)
   12918 				printf("\n");
   12919 		}
   12920 	}
   12921 
   12922 #endif /* WM_DEBUG */
   12923 
   12924 	for (i = 0; i < NVM_SIZE; i++) {
   12925 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12926 			return 1;
   12927 		checksum += eeprom_data;
   12928 	}
   12929 
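          	/* A mismatch is only reported (under WM_DEBUG); it is never fatal. */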
   12930 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12931 #ifdef WM_DEBUG
   12932 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12933 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12934 #endif
   12935 	}
   12936 
   12937 	return 0;
   12938 }
   12939 
   12940 static void
   12941 wm_nvm_version_invm(struct wm_softc *sc)
   12942 {
   12943 	uint32_t dword;
   12944 
   12945 	/*
    12946 	 * Linux's code to decode the version is very strange, so we don't
    12947 	 * follow that algorithm and just use word 61 as documented.
    12948 	 * Perhaps it's not perfect, though...
   12949 	 *
   12950 	 * Example:
   12951 	 *
   12952 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12953 	 */
   12954 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12955 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12956 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12957 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12958 }
   12959 
   12960 static void
   12961 wm_nvm_version(struct wm_softc *sc)
   12962 {
   12963 	uint16_t major, minor, build, patch;
   12964 	uint16_t uid0, uid1;
   12965 	uint16_t nvm_data;
   12966 	uint16_t off;
   12967 	bool check_version = false;
   12968 	bool check_optionrom = false;
   12969 	bool have_build = false;
   12970 	bool have_uid = true;
   12971 
   12972 	/*
   12973 	 * Version format:
   12974 	 *
   12975 	 * XYYZ
   12976 	 * X0YZ
   12977 	 * X0YY
   12978 	 *
   12979 	 * Example:
   12980 	 *
   12981 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12982 	 *	82571	0x50a6	5.10.6?
   12983 	 *	82572	0x506a	5.6.10?
   12984 	 *	82572EI	0x5069	5.6.9?
   12985 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12986 	 *		0x2013	2.1.3?
    12987 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   12988 	 */
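
          	/*
          	 * Decode example for the XYYZ format (0x50a2): major = 0x5,
          	 * minor = 0x0a -> 10 after the BCD-style conversion below,
          	 * build = 0x2, i.e. version 5.10.2.
          	 */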
   12989 
   12990 	/*
   12991 	 * XXX
    12992 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
    12993 	 * I've never seen real 82574 hardware with such a small SPI ROM.
   12994 	 */
   12995 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
   12996 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
   12997 		have_uid = false;
   12998 
   12999 	switch (sc->sc_type) {
   13000 	case WM_T_82571:
   13001 	case WM_T_82572:
   13002 	case WM_T_82574:
   13003 	case WM_T_82583:
   13004 		check_version = true;
   13005 		check_optionrom = true;
   13006 		have_build = true;
   13007 		break;
   13008 	case WM_T_82575:
   13009 	case WM_T_82576:
   13010 	case WM_T_82580:
   13011 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   13012 			check_version = true;
   13013 		break;
   13014 	case WM_T_I211:
   13015 		wm_nvm_version_invm(sc);
   13016 		have_uid = false;
   13017 		goto printver;
   13018 	case WM_T_I210:
   13019 		if (!wm_nvm_flash_presence_i210(sc)) {
   13020 			wm_nvm_version_invm(sc);
   13021 			have_uid = false;
   13022 			goto printver;
   13023 		}
   13024 		/* FALLTHROUGH */
   13025 	case WM_T_I350:
   13026 	case WM_T_I354:
   13027 		check_version = true;
   13028 		check_optionrom = true;
   13029 		break;
   13030 	default:
   13031 		return;
   13032 	}
   13033 	if (check_version
   13034 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
   13035 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   13036 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   13037 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   13038 			build = nvm_data & NVM_BUILD_MASK;
   13039 			have_build = true;
   13040 		} else
   13041 			minor = nvm_data & 0x00ff;
   13042 
    13043 		/* Convert the BCD-style minor to decimal (e.g. 0x10 -> 10) */
   13044 		minor = (minor / 16) * 10 + (minor % 16);
   13045 		sc->sc_nvm_ver_major = major;
   13046 		sc->sc_nvm_ver_minor = minor;
   13047 
   13048 printver:
   13049 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   13050 		    sc->sc_nvm_ver_minor);
   13051 		if (have_build) {
   13052 			sc->sc_nvm_ver_build = build;
   13053 			aprint_verbose(".%d", build);
   13054 		}
   13055 	}
   13056 
    13057 	/* Assume the Option ROM area is above NVM_SIZE */
   13058 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
   13059 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
   13060 		/* Option ROM Version */
   13061 		if ((off != 0x0000) && (off != 0xffff)) {
   13062 			int rv;
   13063 
   13064 			off += NVM_COMBO_VER_OFF;
   13065 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
   13066 			rv |= wm_nvm_read(sc, off, 1, &uid0);
   13067 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
   13068 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   13069 				/* 16bits */
   13070 				major = uid0 >> 8;
   13071 				build = (uid0 << 8) | (uid1 >> 8);
   13072 				patch = uid1 & 0x00ff;
   13073 				aprint_verbose(", option ROM Version %d.%d.%d",
   13074 				    major, build, patch);
   13075 			}
   13076 		}
   13077 	}
   13078 
   13079 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
   13080 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   13081 }
   13082 
   13083 /*
   13084  * wm_nvm_read:
   13085  *
   13086  *	Read data from the serial EEPROM.
   13087  */
   13088 static int
   13089 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   13090 {
   13091 	int rv;
   13092 
   13093 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   13094 		device_xname(sc->sc_dev), __func__));
   13095 
   13096 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   13097 		return -1;
   13098 
   13099 	rv = sc->nvm.read(sc, word, wordcnt, data);
   13100 
   13101 	return rv;
   13102 }
   13103 
   13104 /*
   13105  * Hardware semaphores.
    13106  * Very complex...
   13107  */
   13108 
   13109 static int
   13110 wm_get_null(struct wm_softc *sc)
   13111 {
   13112 
   13113 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13114 		device_xname(sc->sc_dev), __func__));
   13115 	return 0;
   13116 }
   13117 
   13118 static void
   13119 wm_put_null(struct wm_softc *sc)
   13120 {
   13121 
   13122 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13123 		device_xname(sc->sc_dev), __func__));
   13124 	return;
   13125 }
   13126 
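          /*
           * wm_get_eecd:
           *
           *	Get direct access to the EEPROM interface: set EECD_EE_REQ and
           *	poll (up to 1000 * 5us) until the hardware grants access by
           *	setting EECD_EE_GNT.
           */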
   13127 static int
   13128 wm_get_eecd(struct wm_softc *sc)
   13129 {
   13130 	uint32_t reg;
   13131 	int x;
   13132 
   13133 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13134 		device_xname(sc->sc_dev), __func__));
   13135 
   13136 	reg = CSR_READ(sc, WMREG_EECD);
   13137 
   13138 	/* Request EEPROM access. */
   13139 	reg |= EECD_EE_REQ;
   13140 	CSR_WRITE(sc, WMREG_EECD, reg);
   13141 
    13142 	/* ...and wait for it to be granted. */
   13143 	for (x = 0; x < 1000; x++) {
   13144 		reg = CSR_READ(sc, WMREG_EECD);
   13145 		if (reg & EECD_EE_GNT)
   13146 			break;
   13147 		delay(5);
   13148 	}
   13149 	if ((reg & EECD_EE_GNT) == 0) {
   13150 		aprint_error_dev(sc->sc_dev,
   13151 		    "could not acquire EEPROM GNT\n");
   13152 		reg &= ~EECD_EE_REQ;
   13153 		CSR_WRITE(sc, WMREG_EECD, reg);
   13154 		return -1;
   13155 	}
   13156 
   13157 	return 0;
   13158 }
   13159 
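          /*
           * Bit-banged NVM clock helpers: toggle the EECD_SK line with a
           * register flush and a delay (1us for SPI, 50us for Microwire) so
           * the serial EEPROM can latch data on the clock edges.
           */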
   13160 static void
   13161 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   13162 {
   13163 
   13164 	*eecd |= EECD_SK;
   13165 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13166 	CSR_WRITE_FLUSH(sc);
   13167 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13168 		delay(1);
   13169 	else
   13170 		delay(50);
   13171 }
   13172 
   13173 static void
   13174 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   13175 {
   13176 
   13177 	*eecd &= ~EECD_SK;
   13178 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   13179 	CSR_WRITE_FLUSH(sc);
   13180 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   13181 		delay(1);
   13182 	else
   13183 		delay(50);
   13184 }
   13185 
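          /*
           * wm_put_eecd:
           *
           *	Terminate bit-banged NVM access: deassert chip select (CS is
           *	active-low on SPI, active-high on Microwire), toggle the clock
           *	once and drop the EECD_EE_REQ bit.
           */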
   13186 static void
   13187 wm_put_eecd(struct wm_softc *sc)
   13188 {
   13189 	uint32_t reg;
   13190 
   13191 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13192 		device_xname(sc->sc_dev), __func__));
   13193 
   13194 	/* Stop nvm */
   13195 	reg = CSR_READ(sc, WMREG_EECD);
   13196 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   13197 		/* Pull CS high */
   13198 		reg |= EECD_CS;
   13199 		wm_nvm_eec_clock_lower(sc, &reg);
   13200 	} else {
   13201 		/* CS on Microwire is active-high */
   13202 		reg &= ~(EECD_CS | EECD_DI);
   13203 		CSR_WRITE(sc, WMREG_EECD, reg);
   13204 		wm_nvm_eec_clock_raise(sc, &reg);
   13205 		wm_nvm_eec_clock_lower(sc, &reg);
   13206 	}
   13207 
   13208 	reg = CSR_READ(sc, WMREG_EECD);
   13209 	reg &= ~EECD_EE_REQ;
   13210 	CSR_WRITE(sc, WMREG_EECD, reg);
   13211 
   13212 	return;
   13213 }
   13214 
   13215 /*
   13216  * Get hardware semaphore.
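           * The semaphore is taken in two stages: the SMBI bit arbitrates
           * among software agents, then the SWESMBI bit is set and read back
           * to confirm that firmware does not hold the semaphore.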
   13217  * Same as e1000_get_hw_semaphore_generic()
   13218  */
   13219 static int
   13220 wm_get_swsm_semaphore(struct wm_softc *sc)
   13221 {
   13222 	int32_t timeout;
   13223 	uint32_t swsm;
   13224 
   13225 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13226 		device_xname(sc->sc_dev), __func__));
   13227 	KASSERT(sc->sc_nvm_wordsize > 0);
   13228 
   13229 retry:
   13230 	/* Get the SW semaphore. */
   13231 	timeout = sc->sc_nvm_wordsize + 1;
   13232 	while (timeout) {
   13233 		swsm = CSR_READ(sc, WMREG_SWSM);
   13234 
   13235 		if ((swsm & SWSM_SMBI) == 0)
   13236 			break;
   13237 
   13238 		delay(50);
   13239 		timeout--;
   13240 	}
   13241 
   13242 	if (timeout == 0) {
   13243 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
   13244 			/*
   13245 			 * In rare circumstances, the SW semaphore may already
   13246 			 * be held unintentionally. Clear the semaphore once
   13247 			 * before giving up.
   13248 			 */
   13249 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
   13250 			wm_put_swsm_semaphore(sc);
   13251 			goto retry;
   13252 		}
   13253 		aprint_error_dev(sc->sc_dev,
   13254 		    "could not acquire SWSM SMBI\n");
   13255 		return 1;
   13256 	}
   13257 
   13258 	/* Get the FW semaphore. */
   13259 	timeout = sc->sc_nvm_wordsize + 1;
   13260 	while (timeout) {
   13261 		swsm = CSR_READ(sc, WMREG_SWSM);
   13262 		swsm |= SWSM_SWESMBI;
   13263 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   13264 		/* If we managed to set the bit we got the semaphore. */
   13265 		swsm = CSR_READ(sc, WMREG_SWSM);
   13266 		if (swsm & SWSM_SWESMBI)
   13267 			break;
   13268 
   13269 		delay(50);
   13270 		timeout--;
   13271 	}
   13272 
   13273 	if (timeout == 0) {
   13274 		aprint_error_dev(sc->sc_dev,
   13275 		    "could not acquire SWSM SWESMBI\n");
   13276 		/* Release semaphores */
   13277 		wm_put_swsm_semaphore(sc);
   13278 		return 1;
   13279 	}
   13280 	return 0;
   13281 }
   13282 
   13283 /*
   13284  * Put hardware semaphore.
   13285  * Same as e1000_put_hw_semaphore_generic()
   13286  */
   13287 static void
   13288 wm_put_swsm_semaphore(struct wm_softc *sc)
   13289 {
   13290 	uint32_t swsm;
   13291 
   13292 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13293 		device_xname(sc->sc_dev), __func__));
   13294 
   13295 	swsm = CSR_READ(sc, WMREG_SWSM);
   13296 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   13297 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   13298 }
   13299 
   13300 /*
   13301  * Get SW/FW semaphore.
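           * Each shared resource has a software claim bit and a firmware claim
           * bit in SW_FW_SYNC; the SWSM semaphore guards the register, and our
           * software bit is set only if neither side claims the resource.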
   13302  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   13303  */
   13304 static int
   13305 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13306 {
   13307 	uint32_t swfw_sync;
   13308 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   13309 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   13310 	int timeout;
   13311 
   13312 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13313 		device_xname(sc->sc_dev), __func__));
   13314 
   13315 	if (sc->sc_type == WM_T_80003)
   13316 		timeout = 50;
   13317 	else
   13318 		timeout = 200;
   13319 
   13320 	while (timeout) {
   13321 		if (wm_get_swsm_semaphore(sc)) {
   13322 			aprint_error_dev(sc->sc_dev,
   13323 			    "%s: failed to get semaphore\n",
   13324 			    __func__);
   13325 			return 1;
   13326 		}
   13327 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13328 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   13329 			swfw_sync |= swmask;
   13330 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13331 			wm_put_swsm_semaphore(sc);
   13332 			return 0;
   13333 		}
   13334 		wm_put_swsm_semaphore(sc);
   13335 		delay(5000);
   13336 		timeout--;
   13337 	}
   13338 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   13339 	    device_xname(sc->sc_dev), mask, swfw_sync);
   13340 	return 1;
   13341 }
   13342 
   13343 static void
   13344 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   13345 {
   13346 	uint32_t swfw_sync;
   13347 
   13348 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13349 		device_xname(sc->sc_dev), __func__));
   13350 
   13351 	while (wm_get_swsm_semaphore(sc) != 0)
   13352 		continue;
   13353 
   13354 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   13355 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   13356 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   13357 
   13358 	wm_put_swsm_semaphore(sc);
   13359 }
   13360 
   13361 static int
   13362 wm_get_nvm_80003(struct wm_softc *sc)
   13363 {
   13364 	int rv;
   13365 
   13366 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   13367 		device_xname(sc->sc_dev), __func__));
   13368 
   13369 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   13370 		aprint_error_dev(sc->sc_dev,
   13371 		    "%s: failed to get semaphore(SWFW)\n",
   13372 		    __func__);
   13373 		return rv;
   13374 	}
   13375 
   13376 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13377 	    && (rv = wm_get_eecd(sc)) != 0) {
   13378 		aprint_error_dev(sc->sc_dev,
   13379 		    "%s: failed to get semaphore(EECD)\n",
   13380 		    __func__);
   13381 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13382 		return rv;
   13383 	}
   13384 
   13385 	return 0;
   13386 }
   13387 
   13388 static void
   13389 wm_put_nvm_80003(struct wm_softc *sc)
   13390 {
   13391 
   13392 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13393 		device_xname(sc->sc_dev), __func__));
   13394 
   13395 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13396 		wm_put_eecd(sc);
   13397 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   13398 }
   13399 
   13400 static int
   13401 wm_get_nvm_82571(struct wm_softc *sc)
   13402 {
   13403 	int rv;
   13404 
   13405 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13406 		device_xname(sc->sc_dev), __func__));
   13407 
   13408 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   13409 		return rv;
   13410 
   13411 	switch (sc->sc_type) {
   13412 	case WM_T_82573:
   13413 		break;
   13414 	default:
   13415 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13416 			rv = wm_get_eecd(sc);
   13417 		break;
   13418 	}
   13419 
   13420 	if (rv != 0) {
   13421 		aprint_error_dev(sc->sc_dev,
   13422 		    "%s: failed to get semaphore\n",
   13423 		    __func__);
   13424 		wm_put_swsm_semaphore(sc);
   13425 	}
   13426 
   13427 	return rv;
   13428 }
   13429 
   13430 static void
   13431 wm_put_nvm_82571(struct wm_softc *sc)
   13432 {
   13433 
   13434 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13435 		device_xname(sc->sc_dev), __func__));
   13436 
   13437 	switch (sc->sc_type) {
   13438 	case WM_T_82573:
   13439 		break;
   13440 	default:
   13441 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   13442 			wm_put_eecd(sc);
   13443 		break;
   13444 	}
   13445 
   13446 	wm_put_swsm_semaphore(sc);
   13447 }
   13448 
   13449 static int
   13450 wm_get_phy_82575(struct wm_softc *sc)
   13451 {
   13452 
   13453 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13454 		device_xname(sc->sc_dev), __func__));
   13455 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13456 }
   13457 
   13458 static void
   13459 wm_put_phy_82575(struct wm_softc *sc)
   13460 {
   13461 
   13462 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13463 		device_xname(sc->sc_dev), __func__));
   13464 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   13465 }
   13466 
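          /*
           * Get the software/firmware semaphore used on ICH/PCH based devices:
           * repeatedly set EXTCNFCTR_MDIO_SW_OWNERSHIP and read it back; the
           * bit only sticks once firmware has released ownership.  The PHY
           * mutex is held for both PHY and NVM access while it is taken.
           */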
   13467 static int
   13468 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   13469 {
   13470 	uint32_t ext_ctrl;
   13471 	int timeout = 200;
   13472 
   13473 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13474 		device_xname(sc->sc_dev), __func__));
   13475 
   13476 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13477 	for (timeout = 0; timeout < 200; timeout++) {
   13478 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13479 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13480 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13481 
   13482 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13483 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13484 			return 0;
   13485 		delay(5000);
   13486 	}
   13487 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   13488 	    device_xname(sc->sc_dev), ext_ctrl);
   13489 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13490 	return 1;
   13491 }
   13492 
   13493 static void
   13494 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   13495 {
   13496 	uint32_t ext_ctrl;
   13497 
   13498 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13499 		device_xname(sc->sc_dev), __func__));
   13500 
   13501 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13502 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13503 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13504 
   13505 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   13506 }
   13507 
   13508 static int
   13509 wm_get_swflag_ich8lan(struct wm_softc *sc)
   13510 {
   13511 	uint32_t ext_ctrl;
   13512 	int timeout;
   13513 
   13514 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13515 		device_xname(sc->sc_dev), __func__));
   13516 	mutex_enter(sc->sc_ich_phymtx);
   13517 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   13518 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13519 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   13520 			break;
   13521 		delay(1000);
   13522 	}
   13523 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   13524 		printf("%s: SW has already locked the resource\n",
   13525 		    device_xname(sc->sc_dev));
   13526 		goto out;
   13527 	}
   13528 
   13529 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13530 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13531 	for (timeout = 0; timeout < 1000; timeout++) {
   13532 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13533 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   13534 			break;
   13535 		delay(1000);
   13536 	}
   13537 	if (timeout >= 1000) {
   13538 		printf("%s: failed to acquire semaphore\n",
   13539 		    device_xname(sc->sc_dev));
   13540 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13541 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13542 		goto out;
   13543 	}
   13544 	return 0;
   13545 
   13546 out:
   13547 	mutex_exit(sc->sc_ich_phymtx);
   13548 	return 1;
   13549 }
   13550 
   13551 static void
   13552 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13553 {
   13554 	uint32_t ext_ctrl;
   13555 
   13556 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13557 		device_xname(sc->sc_dev), __func__));
   13558 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13559 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13560 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13561 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13562 	} else {
   13563 		printf("%s: Semaphore unexpectedly released\n",
   13564 		    device_xname(sc->sc_dev));
   13565 	}
   13566 
   13567 	mutex_exit(sc->sc_ich_phymtx);
   13568 }
   13569 
   13570 static int
   13571 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13572 {
   13573 
   13574 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13575 		device_xname(sc->sc_dev), __func__));
   13576 	mutex_enter(sc->sc_ich_nvmmtx);
   13577 
   13578 	return 0;
   13579 }
   13580 
   13581 static void
   13582 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13583 {
   13584 
   13585 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13586 		device_xname(sc->sc_dev), __func__));
   13587 	mutex_exit(sc->sc_ich_nvmmtx);
   13588 }
   13589 
   13590 static int
   13591 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13592 {
   13593 	int i = 0;
   13594 	uint32_t reg;
   13595 
   13596 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13597 		device_xname(sc->sc_dev), __func__));
   13598 
   13599 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13600 	do {
   13601 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13602 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13603 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13604 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13605 			break;
   13606 		delay(2*1000);
   13607 		i++;
   13608 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13609 
   13610 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13611 		wm_put_hw_semaphore_82573(sc);
   13612 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13613 		    device_xname(sc->sc_dev));
   13614 		return -1;
   13615 	}
   13616 
   13617 	return 0;
   13618 }
   13619 
   13620 static void
   13621 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13622 {
   13623 	uint32_t reg;
   13624 
   13625 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13626 		device_xname(sc->sc_dev), __func__));
   13627 
   13628 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13629 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13630 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13631 }
   13632 
   13633 /*
   13634  * Management mode and power management related subroutines.
   13635  * BMC, AMT, suspend/resume and EEE.
   13636  */
   13637 
   13638 #ifdef WM_WOL
   13639 static int
   13640 wm_check_mng_mode(struct wm_softc *sc)
   13641 {
   13642 	int rv;
   13643 
   13644 	switch (sc->sc_type) {
   13645 	case WM_T_ICH8:
   13646 	case WM_T_ICH9:
   13647 	case WM_T_ICH10:
   13648 	case WM_T_PCH:
   13649 	case WM_T_PCH2:
   13650 	case WM_T_PCH_LPT:
   13651 	case WM_T_PCH_SPT:
   13652 	case WM_T_PCH_CNP:
   13653 		rv = wm_check_mng_mode_ich8lan(sc);
   13654 		break;
   13655 	case WM_T_82574:
   13656 	case WM_T_82583:
   13657 		rv = wm_check_mng_mode_82574(sc);
   13658 		break;
   13659 	case WM_T_82571:
   13660 	case WM_T_82572:
   13661 	case WM_T_82573:
   13662 	case WM_T_80003:
   13663 		rv = wm_check_mng_mode_generic(sc);
   13664 		break;
   13665 	default:
    13666 		/* nothing to do */
   13667 		rv = 0;
   13668 		break;
   13669 	}
   13670 
   13671 	return rv;
   13672 }
   13673 
   13674 static int
   13675 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13676 {
   13677 	uint32_t fwsm;
   13678 
   13679 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13680 
   13681 	if (((fwsm & FWSM_FW_VALID) != 0)
   13682 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13683 		return 1;
   13684 
   13685 	return 0;
   13686 }
   13687 
   13688 static int
   13689 wm_check_mng_mode_82574(struct wm_softc *sc)
   13690 {
   13691 	uint16_t data;
   13692 
   13693 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13694 
   13695 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13696 		return 1;
   13697 
   13698 	return 0;
   13699 }
   13700 
   13701 static int
   13702 wm_check_mng_mode_generic(struct wm_softc *sc)
   13703 {
   13704 	uint32_t fwsm;
   13705 
   13706 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13707 
   13708 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13709 		return 1;
   13710 
   13711 	return 0;
   13712 }
   13713 #endif /* WM_WOL */
   13714 
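          /*
           * Check whether manageability pass-through should be enabled, i.e.
           * whether manageability firmware (a BMC or AMT) receives management
           * (TCO) packets through this interface.  Returns 1 if so; the caller
           * then sets WM_F_HAS_MANAGE.
           */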
   13715 static int
   13716 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13717 {
   13718 	uint32_t manc, fwsm, factps;
   13719 
   13720 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13721 		return 0;
   13722 
   13723 	manc = CSR_READ(sc, WMREG_MANC);
   13724 
   13725 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13726 		device_xname(sc->sc_dev), manc));
   13727 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13728 		return 0;
   13729 
   13730 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13731 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13732 		factps = CSR_READ(sc, WMREG_FACTPS);
   13733 		if (((factps & FACTPS_MNGCG) == 0)
   13734 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13735 			return 1;
   13736 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   13737 		uint16_t data;
   13738 
   13739 		factps = CSR_READ(sc, WMREG_FACTPS);
   13740 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13741 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13742 			device_xname(sc->sc_dev), factps, data));
   13743 		if (((factps & FACTPS_MNGCG) == 0)
   13744 		    && ((data & NVM_CFG2_MNGM_MASK)
   13745 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13746 			return 1;
   13747 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13748 	    && ((manc & MANC_ASF_EN) == 0))
   13749 		return 1;
   13750 
   13751 	return 0;
   13752 }
   13753 
   13754 static bool
   13755 wm_phy_resetisblocked(struct wm_softc *sc)
   13756 {
   13757 	bool blocked = false;
   13758 	uint32_t reg;
   13759 	int i = 0;
   13760 
   13761 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13762 		device_xname(sc->sc_dev), __func__));
   13763 
   13764 	switch (sc->sc_type) {
   13765 	case WM_T_ICH8:
   13766 	case WM_T_ICH9:
   13767 	case WM_T_ICH10:
   13768 	case WM_T_PCH:
   13769 	case WM_T_PCH2:
   13770 	case WM_T_PCH_LPT:
   13771 	case WM_T_PCH_SPT:
   13772 	case WM_T_PCH_CNP:
   13773 		do {
   13774 			reg = CSR_READ(sc, WMREG_FWSM);
   13775 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13776 				blocked = true;
   13777 				delay(10*1000);
   13778 				continue;
   13779 			}
   13780 			blocked = false;
   13781 		} while (blocked && (i++ < 30));
   13782 		return blocked;
   13784 	case WM_T_82571:
   13785 	case WM_T_82572:
   13786 	case WM_T_82573:
   13787 	case WM_T_82574:
   13788 	case WM_T_82583:
   13789 	case WM_T_80003:
   13790 		reg = CSR_READ(sc, WMREG_MANC);
   13791 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13792 			return true;
   13793 		else
   13794 			return false;
   13796 	default:
   13797 		/* no problem */
   13798 		break;
   13799 	}
   13800 
   13801 	return false;
   13802 }
   13803 
   13804 static void
   13805 wm_get_hw_control(struct wm_softc *sc)
   13806 {
   13807 	uint32_t reg;
   13808 
   13809 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13810 		device_xname(sc->sc_dev), __func__));
   13811 
   13812 	if (sc->sc_type == WM_T_82573) {
   13813 		reg = CSR_READ(sc, WMREG_SWSM);
   13814 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13815 	} else if (sc->sc_type >= WM_T_82571) {
   13816 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13817 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13818 	}
   13819 }
   13820 
   13821 static void
   13822 wm_release_hw_control(struct wm_softc *sc)
   13823 {
   13824 	uint32_t reg;
   13825 
   13826 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13827 		device_xname(sc->sc_dev), __func__));
   13828 
   13829 	if (sc->sc_type == WM_T_82573) {
   13830 		reg = CSR_READ(sc, WMREG_SWSM);
   13831 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13832 	} else if (sc->sc_type >= WM_T_82571) {
   13833 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13834 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13835 	}
   13836 }
   13837 
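          /*
           * Gate (or ungate) automatic PHY configuration by hardware via the
           * EXTCNFCTR_GATE_PHY_CFG bit.  Only meaningful on PCH2 and newer.
           */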
   13838 static void
   13839 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13840 {
   13841 	uint32_t reg;
   13842 
   13843 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13844 		device_xname(sc->sc_dev), __func__));
   13845 
   13846 	if (sc->sc_type < WM_T_PCH2)
   13847 		return;
   13848 
   13849 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13850 
   13851 	if (gate)
   13852 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13853 	else
   13854 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13855 
   13856 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13857 }
   13858 
   13859 static int
   13860 wm_init_phy_workarounds_pchlan(struct wm_softc *sc)
   13861 {
   13862 	uint32_t fwsm, reg;
   13863 	int rv = 0;
   13864 
   13865 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13866 		device_xname(sc->sc_dev), __func__));
   13867 
   13868 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13869 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13870 
   13871 	/* Disable ULP */
   13872 	wm_ulp_disable(sc);
   13873 
   13874 	/* Acquire PHY semaphore */
   13875 	rv = sc->phy.acquire(sc);
   13876 	if (rv != 0) {
   13877 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   13878 		device_xname(sc->sc_dev), __func__));
   13879 		return -1;
   13880 	}
   13881 
   13882 	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
   13883 	 * inaccessible and resetting the PHY is not blocked, toggle the
   13884 	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
   13885 	 */
   13886 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13887 	switch (sc->sc_type) {
   13888 	case WM_T_PCH_LPT:
   13889 	case WM_T_PCH_SPT:
   13890 	case WM_T_PCH_CNP:
   13891 		if (wm_phy_is_accessible_pchlan(sc))
   13892 			break;
   13893 
   13894 		/* Before toggling LANPHYPC, see if PHY is accessible by
   13895 		 * forcing MAC to SMBus mode first.
   13896 		 */
   13897 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13898 		reg |= CTRL_EXT_FORCE_SMBUS;
   13899 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13900 #if 0
   13901 		/* XXX Isn't this required??? */
   13902 		CSR_WRITE_FLUSH(sc);
   13903 #endif
   13904 		/* Wait 50 milliseconds for MAC to finish any retries
   13905 		 * that it might be trying to perform from previous
   13906 		 * attempts to acknowledge any phy read requests.
   13907 		 */
   13908 		delay(50 * 1000);
   13909 		/* FALLTHROUGH */
   13910 	case WM_T_PCH2:
   13911 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13912 			break;
   13913 		/* FALLTHROUGH */
   13914 	case WM_T_PCH:
   13915 		if (sc->sc_type == WM_T_PCH)
   13916 			if ((fwsm & FWSM_FW_VALID) != 0)
   13917 				break;
   13918 
   13919 		if (wm_phy_resetisblocked(sc) == true) {
   13920 			printf("XXX reset is blocked(3)\n");
   13921 			break;
   13922 		}
   13923 
   13924 		/* Toggle LANPHYPC Value bit */
   13925 		wm_toggle_lanphypc_pch_lpt(sc);
   13926 
   13927 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13928 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13929 				break;
   13930 
   13931 			/* Toggling LANPHYPC brings the PHY out of SMBus mode
   13932 			 * so ensure that the MAC is also out of SMBus mode
   13933 			 */
   13934 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13935 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13936 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13937 
   13938 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13939 				break;
   13940 			rv = -1;
   13941 		}
   13942 		break;
   13943 	default:
   13944 		break;
   13945 	}
   13946 
   13947 	/* Release semaphore */
   13948 	sc->phy.release(sc);
   13949 
   13950 	if (rv == 0) {
    13951 		/* Check whether we can reset the PHY; print an error if not. */
   13952 		if (wm_phy_resetisblocked(sc)) {
   13953 			printf("XXX reset is blocked(4)\n");
   13954 			goto out;
   13955 		}
   13956 
   13957 		/* Reset the PHY before any access to it.  Doing so, ensures
   13958 		 * that the PHY is in a known good state before we read/write
   13959 		 * PHY registers.  The generic reset is sufficient here,
   13960 		 * because we haven't determined the PHY type yet.
   13961 		 */
   13962 		if (wm_reset_phy(sc) != 0)
   13963 			goto out;
   13964 
   13965 		/* On a successful reset, possibly need to wait for the PHY
   13966 		 * to quiesce to an accessible state before returning control
   13967 		 * to the calling function.  If the PHY does not quiesce, then
   13968 		 * return E1000E_BLK_PHY_RESET, as this is the condition that
    13969 		 * the PHY is in.
   13970 		 */
   13971 		if (wm_phy_resetisblocked(sc))
   13972 			printf("XXX reset is blocked(4)\n");
   13973 	}
   13974 
   13975 out:
   13976 	/* Ungate automatic PHY configuration on non-managed 82579 */
   13977 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13978 		delay(10*1000);
   13979 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13980 	}
   13981 
    13982 	return rv;
   13983 }
   13984 
   13985 static void
   13986 wm_init_manageability(struct wm_softc *sc)
   13987 {
   13988 
   13989 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13990 		device_xname(sc->sc_dev), __func__));
   13991 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13992 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13993 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13994 
   13995 		/* Disable hardware interception of ARP */
   13996 		manc &= ~MANC_ARP_EN;
   13997 
   13998 		/* Enable receiving management packets to the host */
   13999 		if (sc->sc_type >= WM_T_82571) {
   14000 			manc |= MANC_EN_MNG2HOST;
   14001 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   14002 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   14003 		}
   14004 
   14005 		CSR_WRITE(sc, WMREG_MANC, manc);
   14006 	}
   14007 }
   14008 
   14009 static void
   14010 wm_release_manageability(struct wm_softc *sc)
   14011 {
   14012 
   14013 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   14014 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   14015 
   14016 		manc |= MANC_ARP_EN;
   14017 		if (sc->sc_type >= WM_T_82571)
   14018 			manc &= ~MANC_EN_MNG2HOST;
   14019 
   14020 		CSR_WRITE(sc, WMREG_MANC, manc);
   14021 	}
   14022 }
   14023 
   14024 static void
   14025 wm_get_wakeup(struct wm_softc *sc)
   14026 {
   14027 
   14028 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   14029 	switch (sc->sc_type) {
   14030 	case WM_T_82573:
   14031 	case WM_T_82583:
   14032 		sc->sc_flags |= WM_F_HAS_AMT;
   14033 		/* FALLTHROUGH */
   14034 	case WM_T_80003:
   14035 	case WM_T_82575:
   14036 	case WM_T_82576:
   14037 	case WM_T_82580:
   14038 	case WM_T_I350:
   14039 	case WM_T_I354:
   14040 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   14041 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   14042 		/* FALLTHROUGH */
   14043 	case WM_T_82541:
   14044 	case WM_T_82541_2:
   14045 	case WM_T_82547:
   14046 	case WM_T_82547_2:
   14047 	case WM_T_82571:
   14048 	case WM_T_82572:
   14049 	case WM_T_82574:
   14050 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14051 		break;
   14052 	case WM_T_ICH8:
   14053 	case WM_T_ICH9:
   14054 	case WM_T_ICH10:
   14055 	case WM_T_PCH:
   14056 	case WM_T_PCH2:
   14057 	case WM_T_PCH_LPT:
   14058 	case WM_T_PCH_SPT:
   14059 	case WM_T_PCH_CNP:
   14060 		sc->sc_flags |= WM_F_HAS_AMT;
   14061 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   14062 		break;
   14063 	default:
   14064 		break;
   14065 	}
   14066 
   14067 	/* 1: HAS_MANAGE */
   14068 	if (wm_enable_mng_pass_thru(sc) != 0)
   14069 		sc->sc_flags |= WM_F_HAS_MANAGE;
   14070 
   14071 	/*
    14072 	 * Note that the WOL flags are set after the EEPROM stuff is reset.
   14074 	 */
   14075 }
   14076 
   14077 /*
   14078  * Unconfigure Ultra Low Power mode.
   14079  * Only for I217 and newer (see below).
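           * If manageability firmware is active (FWSM_FW_VALID), ask it to
           * deconfigure ULP through the H2ME register; otherwise unforce SMBus
           * mode and clear the ULP configuration in the PHY by hand.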
   14080  */
   14081 static int
   14082 wm_ulp_disable(struct wm_softc *sc)
   14083 {
   14084 	uint32_t reg;
   14085 	uint16_t phyreg;
   14086 	int i = 0, rv = 0;
   14087 
   14088 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14089 		device_xname(sc->sc_dev), __func__));
   14090 	/* Exclude old devices */
   14091 	if ((sc->sc_type < WM_T_PCH_LPT)
   14092 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   14093 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   14094 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   14095 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   14096 		return 0;
   14097 
   14098 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   14099 		/* Request ME un-configure ULP mode in the PHY */
   14100 		reg = CSR_READ(sc, WMREG_H2ME);
   14101 		reg &= ~H2ME_ULP;
   14102 		reg |= H2ME_ENFORCE_SETTINGS;
   14103 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14104 
   14105 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   14106 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   14107 			if (i++ == 30) {
   14108 				printf("%s timed out\n", __func__);
   14109 				return -1;
   14110 			}
   14111 			delay(10 * 1000);
   14112 		}
   14113 		reg = CSR_READ(sc, WMREG_H2ME);
   14114 		reg &= ~H2ME_ENFORCE_SETTINGS;
   14115 		CSR_WRITE(sc, WMREG_H2ME, reg);
   14116 
   14117 		return 0;
   14118 	}
   14119 
   14120 	/* Acquire semaphore */
   14121 	rv = sc->phy.acquire(sc);
   14122 	if (rv != 0) {
   14123 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: failed\n",
   14124 		device_xname(sc->sc_dev), __func__));
   14125 		goto release;
   14126 	}
   14127 
   14128 	/* Toggle LANPHYPC */
   14129 	wm_toggle_lanphypc_pch_lpt(sc);
   14130 
   14131 	/* Unforce SMBus mode in PHY */
   14132 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL, &phyreg);
   14133 	if (rv != 0) {
   14134 		uint32_t reg2;
   14135 
   14136 		printf("%s: Force SMBus first.\n", __func__);
   14137 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   14138 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   14139 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   14140 		delay(50 * 1000);
   14141 
   14142 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL,
   14143 		    &phyreg);
   14144 		if (rv != 0)
   14145 			goto release;
   14146 	}
   14147 	phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14148 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, phyreg);
   14149 
   14150 	/* Unforce SMBus mode in MAC */
   14151 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14152 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   14153 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14154 
   14155 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL, &phyreg);
   14156 	if (rv != 0)
   14157 		goto release;
   14158 	phyreg |= HV_PM_CTRL_K1_ENA;
   14159 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, phyreg);
   14160 
   14161 	rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1,
   14162 		&phyreg);
   14163 	if (rv != 0)
   14164 		goto release;
   14165 	phyreg &= ~(I218_ULP_CONFIG1_IND
   14166 	    | I218_ULP_CONFIG1_STICKY_ULP
   14167 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   14168 	    | I218_ULP_CONFIG1_WOL_HOST
   14169 	    | I218_ULP_CONFIG1_INBAND_EXIT
   14170 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   14171 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   14172 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   14173 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14174 	phyreg |= I218_ULP_CONFIG1_START;
   14175 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, phyreg);
   14176 
   14177 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14178 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   14179 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14180 
   14181 release:
   14182 	/* Release semaphore */
   14183 	sc->phy.release(sc);
   14184 	wm_gmii_reset(sc);
   14185 	delay(50 * 1000);
   14186 
   14187 	return rv;
   14188 }
   14189 
   14190 /* WOL in the newer chipset interfaces (pchlan) */
   14191 static void
   14192 wm_enable_phy_wakeup(struct wm_softc *sc)
   14193 {
   14194 #if 0
   14195 	uint16_t preg;
   14196 
   14197 	/* Copy MAC RARs to PHY RARs */
   14198 
   14199 	/* Copy MAC MTA to PHY MTA */
   14200 
   14201 	/* Configure PHY Rx Control register */
   14202 
   14203 	/* Enable PHY wakeup in MAC register */
   14204 
   14205 	/* Configure and enable PHY wakeup in PHY registers */
   14206 
   14207 	/* Activate PHY wakeup */
   14208 
   14209 	/* XXX */
   14210 #endif
   14211 }
   14212 
   14213 /* Power down workaround on D3 */
   14214 static void
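          /*
           * Put the IGP3 voltage regulator into shutdown: write the mode, read
           * it back to verify, and if the write did not stick issue a PHY
           * reset and retry once.
           */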
   14215 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   14216 {
   14217 	uint32_t reg;
   14218 	int i;
   14219 
   14220 	for (i = 0; i < 2; i++) {
   14221 		/* Disable link */
   14222 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14223 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14224 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14225 
   14226 		/*
   14227 		 * Call gig speed drop workaround on Gig disable before
   14228 		 * accessing any PHY registers
   14229 		 */
   14230 		if (sc->sc_type == WM_T_ICH8)
   14231 			wm_gig_downshift_workaround_ich8lan(sc);
   14232 
   14233 		/* Write VR power-down enable */
   14234 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14235 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14236 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   14237 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   14238 
   14239 		/* Read it back and test */
   14240 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   14241 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   14242 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   14243 			break;
   14244 
   14245 		/* Issue PHY reset and repeat at most one more time */
   14246 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   14247 	}
   14248 }
   14249 
   14250 /*
   14251  *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
   14252  *  @sc: pointer to the HW structure
   14253  *
   14254  *  During S0 to Sx transition, it is possible the link remains at gig
   14255  *  instead of negotiating to a lower speed.  Before going to Sx, set
   14256  *  'Gig Disable' to force link speed negotiation to a lower speed based on
   14257  *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
   14258  *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
   14259  *  needs to be written.
    14260  *  Parts that support (and are linked to a partner which supports) EEE in
   14261  *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
   14262  *  than 10Mbps w/o EEE.
   14263  */
   14264 static void
   14265 wm_suspend_workarounds_ich8lan(struct wm_softc *sc)
   14266 {
   14267 	uint32_t phy_ctrl;
   14268 
   14269 	phy_ctrl = CSR_READ(sc, WMREG_PHY_CTRL);
   14270 	phy_ctrl |= PHY_CTRL_GBE_DIS;
   14271 
   14272 	if (sc->sc_phytype == WMPHY_I217) {
   14273 		uint16_t devid = sc->sc_pcidevid;
   14274 
   14275 		if ((devid == PCI_PRODUCT_INTEL_I218_LM) ||
   14276 		    (devid == PCI_PRODUCT_INTEL_I218_V) ||
   14277 		    (devid == PCI_PRODUCT_INTEL_I218_LM3) ||
   14278 		    (devid == PCI_PRODUCT_INTEL_I218_V3) ||
   14279 		    (sc->sc_type >= WM_T_PCH_SPT))
   14280 			CSR_WRITE(sc, WMREG_FEXTNVM6,
   14281 			    CSR_READ(sc, WMREG_FEXTNVM6)
   14282 			    & ~FEXTNVM6_REQ_PLL_CLK);
   14283 
   14284 #if 0 /* notyet */
   14285 		if (sc->phy.acquire(sc) != 0)
   14286 			goto out;
   14287 
   14288 		/* XXX Do workaround for EEE */
   14289 
   14290 		/*
   14291 		 * For i217 Intel Rapid Start Technology support,
   14292 		 * when the system is going into Sx and no manageability engine
   14293 		 * is present, the driver must configure proxy to reset only on
   14294 		 * power good.	LPI (Low Power Idle) state must also reset only
   14295 		 * on power good, as well as the MTA (Multicast table array).
   14296 		 * The SMBus release must also be disabled on LCD reset.
   14297 		 */
   14298 
   14299 		/*
   14300 		 * Enable MTA to reset for Intel Rapid Start Technology
   14301 		 * Support
   14302 		 */
   14303 
   14304 		sc->phy.release(sc);
   14305 #endif
   14306 	}
   14307 #if 0
   14308 out:
   14309 #endif
   14310 	CSR_WRITE(sc, WMREG_PHY_CTRL, phy_ctrl);
   14311 
   14312 	if (sc->sc_type == WM_T_ICH8)
   14313 		wm_gig_downshift_workaround_ich8lan(sc);
   14314 
   14315 	if (sc->sc_type >= WM_T_PCH) {
   14316 		wm_oem_bits_config_ich8lan(sc, false);
   14317 
   14318 		/* Reset PHY to activate OEM bits on 82577/8 */
   14319 		if (sc->sc_type == WM_T_PCH)
   14320 			wm_reset_phy(sc);
   14321 
   14322 		if (sc->phy.acquire(sc) != 0)
   14323 			return;
   14324 		wm_write_smbus_addr(sc);
   14325 		sc->phy.release(sc);
   14326 	}
   14327 }
   14328 
   14329 /*
   14330  *  wm_resume_workarounds_pchlan - workarounds needed during Sx->S0
   14331  *  @hw: pointer to the HW structure
   14332  *
   14333  *  During Sx to S0 transitions on non-managed devices or managed devices
   14334  *  on which PHY resets are not blocked, if the PHY registers cannot be
    14335  *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
   14336  *  the PHY.
   14337  *  On i217, setup Intel Rapid Start Technology.
   14338  */
   14339 static int
   14340 wm_resume_workarounds_pchlan(struct wm_softc *sc)
   14341 {
   14342 	device_t dev = sc->sc_dev;
   14343 	int rv;
   14344 
   14345 	if (sc->sc_type < WM_T_PCH2)
   14346 		return 0;
   14347 
   14348 	rv = wm_init_phy_workarounds_pchlan(sc);
   14349 	if (rv != 0)
   14350 		return -1;
   14351 
    14352 	/* For i217 Intel Rapid Start Technology support, when the system
    14353 	 * is transitioning from Sx and no manageability engine is present,
    14354 	 * configure SMBus to restore on reset, disable proxy, and enable
   14355 	 * the reset on MTA (Multicast table array).
   14356 	 */
   14357 	if (sc->sc_phytype == WMPHY_I217) {
   14358 		uint16_t phy_reg;
   14359 
   14360 		if (sc->phy.acquire(sc) != 0)
    14361 			return -1; /* don't release an unacquired semaphore */
   14362 
   14363 		/* Clear Auto Enable LPI after link up */
   14364 		sc->phy.readreg_locked(dev, 1, I217_LPI_GPIO_CTRL, &phy_reg);
   14365 		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
   14366 		sc->phy.writereg_locked(dev, 1, I217_LPI_GPIO_CTRL, phy_reg);
   14367 
   14368 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14369 			/* Restore clear on SMB if no manageability engine
   14370 			 * is present
   14371 			 */
    14372 			rv = sc->phy.readreg_locked(dev, 1, I217_MEMPWR,
           			    &phy_reg);
   14373 			if (rv != 0)
   14374 				goto release;
   14375 			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
   14376 			sc->phy.writereg_locked(dev, 1, I217_MEMPWR, phy_reg);
   14377 
   14378 			/* Disable Proxy */
   14379 			sc->phy.writereg_locked(dev, 1, I217_PROXY_CTRL, 0);
   14380 		}
   14381 		/* Enable reset on MTA */
    14382 		rv = sc->phy.readreg_locked(dev, 1, I217_CFGREG, &phy_reg);
   14383 		if (rv != 0)
   14384 			goto release;
   14385 		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
   14386 		sc->phy.writereg_locked(dev, 1, I217_CFGREG, phy_reg);
   14387 
   14388 release:
   14389 		sc->phy.release(sc);
   14390 		return rv;
   14391 	}
   14392 
   14393 	return 0;
   14394 }
   14395 
   14396 static void
   14397 wm_enable_wakeup(struct wm_softc *sc)
   14398 {
   14399 	uint32_t reg, pmreg;
   14400 	pcireg_t pmode;
   14401 
   14402 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14403 		device_xname(sc->sc_dev), __func__));
   14404 
   14405 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14406 		&pmreg, NULL) == 0)
   14407 		return;
   14408 
   14409 	/* Advertise the wakeup capability */
   14410 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   14411 	    | CTRL_SWDPIN(3));
   14412 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   14413 
   14414 	/* Keep the laser running on fiber adapters */
   14415 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   14416 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   14417 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14418 		reg |= CTRL_EXT_SWDPIN(3);
   14419 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14420 	}
   14421 
   14422 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9) ||
   14423 	    (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
   14424 		wm_suspend_workarounds_ich8lan(sc);
   14425 
   14426 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   14427 #if 0	/* for the multicast packet */
   14428 	reg |= WUFC_MC;
   14429 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   14430 #endif
   14431 
   14432 	if (sc->sc_type >= WM_T_PCH)
   14433 		wm_enable_phy_wakeup(sc);
   14434 	else {
   14435 		/* Enable wakeup by the MAC */
   14436 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   14437 		CSR_WRITE(sc, WMREG_WUFC, reg);
   14438 	}
   14439 
   14440 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   14441 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   14442 		|| (sc->sc_type == WM_T_PCH2))
   14443 	    && (sc->sc_phytype == WMPHY_IGP_3))
   14444 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   14445 
   14446 	/* Request PME */
   14447 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   14448 #if 0
   14449 	/* Disable WOL */
   14450 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   14451 #else
   14452 	/* For WOL */
   14453 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   14454 #endif
   14455 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   14456 }
   14457 
   14458 /* Disable ASPM L0s and/or L1 for workaround */
   14459 static void
   14460 wm_disable_aspm(struct wm_softc *sc)
   14461 {
   14462 	pcireg_t reg, mask = 0;
    14463 	const char *str = "";
   14464 
   14465 	/*
    14466 	 * Only for PCIe devices which have the PCIe capability in their
    14467 	 * PCI config space.
   14468 	 */
   14469 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
   14470 		return;
   14471 
   14472 	switch (sc->sc_type) {
   14473 	case WM_T_82571:
   14474 	case WM_T_82572:
   14475 		/*
   14476 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
   14477 		 * State Power management L1 State (ASPM L1).
   14478 		 */
   14479 		mask = PCIE_LCSR_ASPM_L1;
   14480 		str = "L1 is";
   14481 		break;
   14482 	case WM_T_82573:
   14483 	case WM_T_82574:
   14484 	case WM_T_82583:
   14485 		/*
   14486 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
   14487 		 *
    14488 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
    14489 		 * some chipsets.  The 82574 and 82583 documents say that
    14490 		 * disabling L0s on those specific chipsets is sufficient,
    14491 		 * but we follow what the Intel em driver does.
   14492 		 *
   14493 		 * References:
   14494 		 * Errata 8 of the Specification Update of i82573.
   14495 		 * Errata 20 of the Specification Update of i82574.
   14496 		 * Errata 9 of the Specification Update of i82583.
   14497 		 */
   14498 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
   14499 		str = "L0s and L1 are";
   14500 		break;
   14501 	default:
   14502 		return;
   14503 	}
   14504 
   14505 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14506 	    sc->sc_pcixe_capoff + PCIE_LCSR);
   14507 	reg &= ~mask;
   14508 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14509 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
   14510 
   14511 	/* Print only in wm_attach() */
   14512 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   14513 		aprint_verbose_dev(sc->sc_dev,
   14514 		    "ASPM %s disabled to workaround the errata.\n", str);
   14515 }
   14516 
   14517 /* LPLU */
   14518 
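          /*
           * Disable D0 Low Power Link Up.  The control bit lives in a
           * different register per family: the IGP PHY's power-management
           * register, PHPM, PHY_CTRL or the HV PHY's OEM bits.
           */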
   14519 static void
   14520 wm_lplu_d0_disable(struct wm_softc *sc)
   14521 {
   14522 	struct mii_data *mii = &sc->sc_mii;
   14523 	uint32_t reg;
   14524 
   14525 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14526 		device_xname(sc->sc_dev), __func__));
   14527 
   14528 	if (sc->sc_phytype == WMPHY_IFE)
   14529 		return;
   14530 
   14531 	switch (sc->sc_type) {
   14532 	case WM_T_82571:
   14533 	case WM_T_82572:
   14534 	case WM_T_82573:
   14535 	case WM_T_82575:
   14536 	case WM_T_82576:
   14537 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   14538 		reg &= ~PMR_D0_LPLU;
   14539 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   14540 		break;
   14541 	case WM_T_82580:
   14542 	case WM_T_I350:
   14543 	case WM_T_I210:
   14544 	case WM_T_I211:
   14545 		reg = CSR_READ(sc, WMREG_PHPM);
   14546 		reg &= ~PHPM_D0A_LPLU;
   14547 		CSR_WRITE(sc, WMREG_PHPM, reg);
   14548 		break;
   14549 	case WM_T_82574:
   14550 	case WM_T_82583:
   14551 	case WM_T_ICH8:
   14552 	case WM_T_ICH9:
   14553 	case WM_T_ICH10:
   14554 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14555 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   14556 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14557 		CSR_WRITE_FLUSH(sc);
   14558 		break;
   14559 	case WM_T_PCH:
   14560 	case WM_T_PCH2:
   14561 	case WM_T_PCH_LPT:
   14562 	case WM_T_PCH_SPT:
   14563 	case WM_T_PCH_CNP:
   14564 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   14565 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   14566 		if (wm_phy_resetisblocked(sc) == false)
   14567 			reg |= HV_OEM_BITS_ANEGNOW;
   14568 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   14569 		break;
   14570 	default:
   14571 		break;
   14572 	}
   14573 }
   14574 
   14575 /* EEE */
   14576 
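          /*
           * Enable or disable Energy Efficient Ethernet on the I350: advertise
           * EEE at 100M/1G in IPCNFG and turn TX/RX low power idle (LPI) and
           * LPI flow control on or off in EEER, depending on WM_F_EEE.
           */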
   14577 static void
   14578 wm_set_eee_i350(struct wm_softc *sc)
   14579 {
   14580 	uint32_t ipcnfg, eeer;
   14581 
   14582 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   14583 	eeer = CSR_READ(sc, WMREG_EEER);
   14584 
   14585 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   14586 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14587 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14588 		    | EEER_LPI_FC);
   14589 	} else {
   14590 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   14591 		ipcnfg &= ~IPCNFG_10BASE_TE;
   14592 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   14593 		    | EEER_LPI_FC);
   14594 	}
   14595 
   14596 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   14597 	CSR_WRITE(sc, WMREG_EEER, eeer);
   14598 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   14599 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   14600 }
   14601 
   14602 /*
   14603  * Workarounds (mainly PHY related).
   14604  * Basically, PHY's workarounds are in the PHY drivers.
   14605  */
   14606 
   14607 /* Work-around for 82566 Kumeran PCS lock loss */
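          /*
           * With the link up at 1Gb/s, poll IGP3_KMRN_DIAG up to 10 times for
           * PCS lock loss, resetting the PHY between attempts; if the lock is
           * never regained, disable gigabit negotiation entirely.
           */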
   14608 static void
   14609 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   14610 {
   14611 	struct mii_data *mii = &sc->sc_mii;
   14612 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14613 	int i;
   14614 	int reg;
   14615 
   14616 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14617 		device_xname(sc->sc_dev), __func__));
   14618 
   14619 	/* If the link is not up, do nothing */
   14620 	if ((status & STATUS_LU) == 0)
   14621 		return;
   14622 
   14623 	/* Nothing to do if the link is other than 1Gbps */
   14624 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   14625 		return;
   14626 
   14627 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14628 	for (i = 0; i < 10; i++) {
   14629 		/* read twice */
   14630 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14631 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   14632 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   14633 			goto out;	/* GOOD! */
   14634 
   14635 		/* Reset the PHY */
   14636 		wm_reset_phy(sc);
   14637 		delay(5*1000);
   14638 	}
   14639 
   14640 	/* Disable GigE link negotiation */
   14641 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   14642 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   14643 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   14644 
   14645 	/*
   14646 	 * Call gig speed drop workaround on Gig disable before accessing
   14647 	 * any PHY registers.
   14648 	 */
   14649 	wm_gig_downshift_workaround_ich8lan(sc);
   14650 
   14651 out:
   14652 	return;
   14653 }
   14654 
   14655 /*
   14656  *  wm_gig_downshift_workaround_ich8lan - WoL from S5 stops working
   14657  *  @sc: pointer to the HW structure
   14658  *
   14659  *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
   14660  *  LPLU, Gig disable, MDIC PHY reset):
   14661  *    1) Set Kumeran Near-end loopback
   14662  *    2) Clear Kumeran Near-end loopback
   14663  *  Should only be called for ICH8[m] devices with any 1G Phy.
   14664  */
   14665 static void
   14666 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   14667 {
   14668 	uint16_t kmreg;
   14669 
   14670 	/* Only for igp3 */
   14671 	if (sc->sc_phytype == WMPHY_IGP_3) {
   14672 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   14673 			return;
   14674 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   14675 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   14676 			return;
   14677 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   14678 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   14679 	}
   14680 }
   14681 
   14682 /*
   14683  * Workaround for pch's PHYs
   14684  * XXX should be moved to new PHY driver?
   14685  */
   14686 static void
   14687 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   14688 {
   14689 
   14690 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14691 		device_xname(sc->sc_dev), __func__));
   14692 	KASSERT(sc->sc_type == WM_T_PCH);
   14693 
   14694 	if (sc->sc_phytype == WMPHY_82577)
   14695 		wm_set_mdio_slow_mode_hv(sc);
   14696 
   14697 	/* XXX (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   14698 
   14699 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   14700 
   14701 	/* 82578 */
   14702 	if (sc->sc_phytype == WMPHY_82578) {
   14703 		struct mii_softc *child;
   14704 
   14705 		/*
   14706 		 * Return registers to default by doing a soft reset then
   14707 		 * writing 0x3140 to the control register
   14708 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   14709 		 */
   14710 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14711 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   14712 			PHY_RESET(child);
   14713 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   14714 			    0x3140);
   14715 		}
   14716 	}
   14717 
   14718 	/* Select page 0 */
   14719 	sc->phy.acquire(sc);
   14720 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   14721 	sc->phy.release(sc);
   14722 
   14723 	/*
   14724 	 * Configure the K1 Si workaround during phy reset assuming there is
   14725 	 * link so that it disables K1 if the link is at 1Gbps.
   14726 	 */
   14727 	wm_k1_gig_workaround_hv(sc, 1);
   14728 }
   14729 
   14730 /*
   14731  *  wm_lv_phy_workaround_ich8lan - A series of PHY workarounds to be
   14732  *  done after every PHY reset.
   14733  */
   14734 static void
   14735 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   14736 {
   14737 
   14738 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14739 		device_xname(sc->sc_dev), __func__));
   14740 	KASSERT(sc->sc_type == WM_T_PCH2);
   14741 
   14742 	/* Set MDIO slow mode before any other MDIO access */
   14743 	wm_set_mdio_slow_mode_hv(sc);
   14744 
   14745 	/* XXX set MSE higher to enable link to stay up when noise is high */
   14746 	/* XXX drop link after 5 times MSE threshold was reached */
   14747 }
   14748 
   14749 /**
   14750  *  wm_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
          *  @sc:   pointer to the HW structure
   14751  *  @link: link up bool flag
   14752  *
   14753  *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
   14754  *  preventing further DMA write requests.  Workaround the issue by disabling
   14755  *  the de-assertion of the clock request when in 1Gbps mode.
   14756  *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
   14757  *  speeds in order to avoid Tx hangs.
   14758  **/
   14759 static int
   14760 wm_k1_workaround_lpt_lp(struct wm_softc *sc, bool link)
   14761 {
   14762 	uint32_t fextnvm6 = CSR_READ(sc, WMREG_FEXTNVM6);
   14763 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   14764 	uint32_t speed = __SHIFTOUT(status, STATUS_SPEED);
   14765 	uint16_t phyreg;
   14766 
   14767 	if (link && (speed == STATUS_SPEED_1000)) {
   14768 		sc->phy.acquire(sc);
   14769 		int rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14770 		    &phyreg);
   14771 		if (rv != 0)
   14772 			goto release;
   14773 		rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14774 		    phyreg & ~KUMCTRLSTA_K1_ENABLE);
   14775 		if (rv != 0)
   14776 			goto release;
   14777 		delay(20);
   14778 		CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6 | FEXTNVM6_REQ_PLL_CLK);
   14779 
   14780 		rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG,
   14781 		    &phyreg);
   14782 release:
   14783 		sc->phy.release(sc);
   14784 		return rv;
   14785 	}
   14786 
   14787 	fextnvm6 &= ~FEXTNVM6_REQ_PLL_CLK;
   14788 
   14789 	struct mii_softc *child = LIST_FIRST(&sc->sc_mii.mii_phys);
   14790 	if (((child != NULL) && (child->mii_mpd_rev > 5))
   14791 	    || !link
   14792 	    || ((speed == STATUS_SPEED_100) && (status & STATUS_FD)))
   14793 		goto update_fextnvm6;
   14794 
   14795 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, I217_INBAND_CTRL);
   14796 
   14797 	/* Clear link status transmit timeout */
   14798 	phyreg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
   14799 	if (speed == STATUS_SPEED_100) {
   14800 		/* Set inband Tx timeout to 5x10us for 100Half */
   14801 		phyreg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14802 
   14803 		/* Do not extend the K1 entry latency for 100Half */
   14804 		fextnvm6 &= ~FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14805 	} else {
   14806 		/* Set inband Tx timeout to 50x10us for 10Full/Half */
   14807 		phyreg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
   14808 
   14809 		/* Extend the K1 entry latency for 10 Mbps */
   14810 		fextnvm6 |= FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
   14811 	}
   14812 
   14813 	wm_gmii_hv_writereg(sc->sc_dev, 2, I217_INBAND_CTRL, phyreg);
   14814 
   14815 update_fextnvm6:
   14816 	CSR_WRITE(sc, WMREG_FEXTNVM6, fextnvm6);
   14817 	return 0;
   14818 }
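         /*
          * Illustrative sketch (hypothetical helper, not in the driver): the
          * I217 inband link-status Tx timeout programmed above counts in 10us
          * units, so 5 -> 50us for 100Half and 50 -> 500us for 10Full/Half.
          * Assuming those units, converting from microseconds looks like this.
          */
         #ifdef notyet
         static uint16_t
         wm_i217_inband_timeout_example(uint32_t timeout_us)
         {
         	uint32_t ticks;
         
         	/* Round up to the next 10us tick, then place it in the field */
         	ticks = howmany(timeout_us, 10);
         	return (uint16_t)(ticks << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT)
         	    & I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
         }
         #endif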
   14819 
   14820 /*
   14821  *  wm_k1_gig_workaround_hv - K1 Si workaround
   14822  *  @sc:   pointer to the HW structure
   14823  *  @link: link up bool flag
   14824  *
   14825  *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
   14826  *  from a lower speed.  This workaround disables K1 whenever the link
   14827  *  is at 1Gig.  If the link is down, the function will restore the
   14828  *  default K1 setting located in the NVM.
   14829  */
   14830 static int
   14831 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   14832 {
   14833 	int k1_enable = sc->sc_nvm_k1_enabled;
   14834 
   14835 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14836 		device_xname(sc->sc_dev), __func__));
   14837 
   14838 	if (sc->phy.acquire(sc) != 0)
   14839 		return -1;
   14840 
   14841 	if (link) {
   14842 		k1_enable = 0;
   14843 
   14844 		/* Link stall fix for link up */
   14845 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14846 		    0x0100);
   14847 	} else {
   14848 		/* Link stall fix for link down */
   14849 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
   14850 		    0x4100);
   14851 	}
   14852 
   14853 	wm_configure_k1_ich8lan(sc, k1_enable);
   14854 	sc->phy.release(sc);
   14855 
   14856 	return 0;
   14857 }
   14858 
   14859 /*
   14860  *  wm_k1_workaround_lv - K1 Si workaround
   14861  *  @sc:   pointer to the HW structure
   14862  *
   14863  *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps mode.
   14864  *  Disable K1 for 1000 and 100 speeds.
   14865  */
   14866 static int
   14867 wm_k1_workaround_lv(struct wm_softc *sc)
   14868 {
   14869 	uint32_t reg;
   14870 	int phyreg;
   14871 
   14872 	if (sc->sc_type != WM_T_PCH2)
   14873 		return 0;
   14874 
   14875 	/* Set K1 beacon duration based on 10Mbps speed */
   14876 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, HV_M_STATUS);
   14877 
   14878 	if ((phyreg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
   14879 	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
   14880 		if (phyreg &
   14881 		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
   14882 			/* LV 1G/100 packet drop issue workaround */
   14883 			phyreg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_PM_CTRL);
   14884 			phyreg &= ~HV_PM_CTRL_K1_ENA;
   14885 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_PM_CTRL, phyreg);
   14886 		} else {
   14887 			/* For 10Mbps */
   14888 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   14889 			reg &= ~FEXTNVM4_BEACON_DURATION;
   14890 			reg |= FEXTNVM4_BEACON_DURATION_16US;
   14891 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   14892 		}
   14893 	}
   14894 
   14895 	return 0;
   14896 }
   14897 
   14898 /*
   14899  *  wm_link_stall_workaround_hv - Si workaround
   14900  *  @sc: pointer to the HW structure
   14901  *
   14902  *  This function works around a Si bug where the link partner can get
   14903  *  a link up indication before the PHY does. If small packets are sent
   14904  *  by the link partner they can be placed in the packet buffer without
   14905  *  being properly accounted for by the PHY and will stall preventing
   14906  *  further packets from being received.  The workaround is to clear the
   14907  *  packet buffer after the PHY detects link up.
   14908  */
   14909 static int
   14910 wm_link_stall_workaround_hv(struct wm_softc *sc)
   14911 {
   14912 	int phyreg;
   14913 
   14914 	if (sc->sc_phytype != WMPHY_82578)
   14915 		return 0;
   14916 
   14917 	/* Do not apply the workaround if the PHY is in loopback (BMCR bit 14 set) */
   14918 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_BMCR);
   14919 	if ((phyreg & BMCR_LOOP) != 0)
   14920 		return 0;
   14921 
   14922 	/* Check if the link is up and at 1Gbps */
   14923 	phyreg = wm_gmii_hv_readreg(sc->sc_dev, 2, BM_CS_STATUS);
   14924 	phyreg &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   14925 	    | BM_CS_STATUS_SPEED_MASK;
   14926 	if (phyreg != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED
   14927 		| BM_CS_STATUS_SPEED_1000))
   14928 		return 0;
   14929 
   14930 	delay(200 * 1000);	/* XXX too big */
   14931 
   14932 	/* Flush the packets in the FIFO buffer */
   14933 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   14934 	    HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED);
   14935 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_MUX_DATA_CTRL,
   14936 	    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   14937 
   14938 	return 0;
   14939 }
   14940 
   14941 static void
   14942 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   14943 {
   14944 	uint32_t reg;
   14945 
   14946 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   14947 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   14948 	    reg | HV_KMRN_MDIO_SLOW);
   14949 }
   14950 
   14951 /*
   14952  *  wm_configure_k1_ich8lan - Configure K1 power state
   14953  *  @sc: pointer to the HW structure
   14954  *  @enable: K1 state to configure
   14955  *
   14956  *  Configure the K1 power state based on the provided parameter.
   14957  *  Assumes semaphore already acquired.
   14958  */
   14959 static void
   14960 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   14961 {
   14962 	uint32_t ctrl, ctrl_ext, tmp;
   14963 	uint16_t kmreg;
   14964 	int rv;
   14965 
   14966 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   14967 
   14968 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14969 	if (rv != 0)
   14970 		return;
   14971 
   14972 	if (k1_enable)
   14973 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14974 	else
   14975 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14976 
   14977 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14978 	if (rv != 0)
   14979 		return;
   14980 
   14981 	delay(20);
   14982 
   14983 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14984 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14985 
   14986 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14987 	tmp |= CTRL_FRCSPD;
   14988 
   14989 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14990 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14991 	CSR_WRITE_FLUSH(sc);
   14992 	delay(20);
   14993 
   14994 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14995 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14996 	CSR_WRITE_FLUSH(sc);
   14997 	delay(20);
   15000 }
   15001 
   15002 /* Special case - the 82575 needs manual init ... */
   15003 static void
   15004 wm_reset_init_script_82575(struct wm_softc *sc)
   15005 {
   15006 	/*
   15007 	 * Remark: this is untested code - we have no board without EEPROM.
   15008 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   15009 	 */
   15010 
   15011 	/* SerDes configuration via SERDESCTRL */
   15012 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   15013 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   15014 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   15015 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   15016 
   15017 	/* CCM configuration via CCMCTL register */
   15018 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   15019 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   15020 
   15021 	/* PCIe lanes configuration */
   15022 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   15023 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   15024 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   15025 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   15026 
   15027 	/* PCIe PLL Configuration */
   15028 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   15029 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   15030 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   15031 }
   15032 
   15033 static void
   15034 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   15035 {
   15036 	uint32_t reg;
   15037 	uint16_t nvmword;
   15038 	int rv;
   15039 
   15040 	if (sc->sc_type != WM_T_82580)
   15041 		return;
   15042 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   15043 		return;
   15044 
   15045 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   15046 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   15047 	if (rv != 0) {
   15048 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   15049 		    __func__);
   15050 		return;
   15051 	}
   15052 
   15053 	reg = CSR_READ(sc, WMREG_MDICNFG);
   15054 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   15055 		reg |= MDICNFG_DEST;
   15056 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   15057 		reg |= MDICNFG_COM_MDIO;
   15058 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15059 }
   15060 
   15061 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
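         /*
          * Note (illustrative): a floating or stuck MDIO bus reads back as all
          * ones (0xffff) or all zeroes, so both ID patterns are treated as
          * "no usable PHY" by the macro above.
          */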
   15062 
   15063 static bool
   15064 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   15065 {
   15066 	uint32_t reg;
   15067 	uint16_t id1, id2;
   15068 	int i, rv;
   15069 
   15070 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15071 		device_xname(sc->sc_dev), __func__));
   15072 	KASSERT(CSR_READ(sc, WMREG_EXTCNFCTR) & EXTCNFCTR_MDIO_SW_OWNERSHIP);
   15073 
   15074 	id1 = id2 = 0xffff;
   15075 	for (i = 0; i < 2; i++) {
   15076 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1,
   15077 		    &id1);
   15078 		if ((rv != 0) || MII_INVALIDID(id1))
   15079 			continue;
   15080 		rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2,
   15081 		    &id2);
   15082 		if ((rv != 0) || MII_INVALIDID(id2))
   15083 			continue;
   15084 		break;
   15085 	}
   15086 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
   15087 		goto out;
   15088 
   15089 	/*
   15090 	 * In case the PHY needs to be in mdio slow mode,
   15091 	 * set slow mode and try to get the PHY id again.
   15092 	 */
   15093 	if (sc->sc_type < WM_T_PCH_LPT) {
   15094 		sc->phy.release(sc);
   15095 		wm_set_mdio_slow_mode_hv(sc);
   15096 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   15097 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   15098 		sc->phy.acquire(sc);
   15099 	}
   15100 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   15101 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: PHY is not accessible\n",
         			device_xname(sc->sc_dev), __func__));
   15102 		return false;
   15103 	}
   15104 out:
   15105 	if (sc->sc_type >= WM_T_PCH_LPT) {
   15106 		/* Only unforce SMBus if ME is not active */
   15107 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   15108 			uint16_t phyreg;
   15109 
   15110 			/* Unforce SMBus mode in PHY */
   15111 			rv = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   15112 			    CV_SMB_CTRL, &phyreg);
   15113 			phyreg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   15114 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   15115 			    CV_SMB_CTRL, phyreg);
   15116 
   15117 			/* Unforce SMBus mode in MAC */
   15118 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15119 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   15120 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15121 		}
   15122 	}
   15123 	return true;
   15124 }
   15125 
   15126 static void
   15127 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   15128 {
   15129 	uint32_t reg;
   15130 	int i;
   15131 
   15132 	/* Set PHY Config Counter to 50msec */
   15133 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   15134 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   15135 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   15136 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   15137 
   15138 	/* Toggle LANPHYPC */
   15139 	reg = CSR_READ(sc, WMREG_CTRL);
   15140 	reg |= CTRL_LANPHYPC_OVERRIDE;
   15141 	reg &= ~CTRL_LANPHYPC_VALUE;
   15142 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15143 	CSR_WRITE_FLUSH(sc);
   15144 	delay(1000);
   15145 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   15146 	CSR_WRITE(sc, WMREG_CTRL, reg);
   15147 	CSR_WRITE_FLUSH(sc);
   15148 
   15149 	if (sc->sc_type < WM_T_PCH_LPT)
   15150 		delay(50 * 1000);
   15151 	else {
   15152 		i = 20;
   15153 
   15154 		do {
   15155 			delay(5 * 1000);
   15156 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   15157 		    && i--);
   15158 
   15159 		delay(30 * 1000);
   15160 	}
   15161 }
   15162 
   15163 static int
   15164 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   15165 {
   15166 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   15167 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   15168 	uint32_t rxa;
   15169 	uint16_t scale = 0, lat_enc = 0;
   15170 	int32_t obff_hwm = 0;
   15171 	int64_t lat_ns, value;
   15172 
   15173 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15174 		device_xname(sc->sc_dev), __func__));
   15175 
   15176 	if (link) {
   15177 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   15178 		uint32_t status;
   15179 		uint16_t speed;
   15180 		pcireg_t preg;
   15181 
   15182 		status = CSR_READ(sc, WMREG_STATUS);
   15183 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   15184 		case STATUS_SPEED_10:
   15185 			speed = 10;
   15186 			break;
   15187 		case STATUS_SPEED_100:
   15188 			speed = 100;
   15189 			break;
   15190 		case STATUS_SPEED_1000:
   15191 			speed = 1000;
   15192 			break;
   15193 		default:
   15194 			device_printf(sc->sc_dev, "Unknown speed "
   15195 			    "(status = %08x)\n", status);
   15196 			return -1;
   15197 		}
   15198 
   15199 		/* Rx Packet Buffer Allocation size (KB) */
   15200 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   15201 
   15202 		/*
   15203 		 * Determine the maximum latency tolerated by the device.
   15204 		 *
   15205 		 * Per the PCIe spec, the tolerated latencies are encoded as
   15206 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   15207 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   15208 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   15209 		 * 1=2^5ns, 2=2^10ns, ..., 5=2^25ns.
   15210 		 */
   15211 		lat_ns = ((int64_t)rxa * 1024 -
   15212 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   15213 			+ ETHER_HDR_LEN))) * 8 * 1000;
   15214 		if (lat_ns < 0)
   15215 			lat_ns = 0;
   15216 		else
   15217 			lat_ns /= speed;
   15218 		value = lat_ns;
   15219 
   15220 		while (value > LTRV_VALUE) {
   15221 			scale++;
   15222 			value = howmany(value, __BIT(5));
   15223 		}
   15224 		if (scale > LTRV_SCALE_MAX) {
   15225 			device_printf(sc->sc_dev,
   15226 			    "Invalid LTR latency scale %d\n", scale);
   15227 			return -1;
   15228 		}
   15229 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   15230 
   15231 		/* Determine the maximum latency tolerated by the platform */
   15232 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15233 		    WM_PCI_LTR_CAP_LPT);
   15234 		max_snoop = preg & 0xffff;
   15235 		max_nosnoop = preg >> 16;
   15236 
   15237 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   15238 
   15239 		if (lat_enc > max_ltr_enc) {
   15240 			lat_enc = max_ltr_enc;
   15241 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   15242 			    * PCI_LTR_SCALETONS(
   15243 				    __SHIFTOUT(lat_enc,
   15244 					PCI_LTR_MAXSNOOPLAT_SCALE));
   15245 		}
   15246 
   15247 		if (lat_ns) {
   15248 			lat_ns *= speed * 1000;
   15249 			lat_ns /= 8;
   15250 			lat_ns /= 1000000000;
   15251 			obff_hwm = (int32_t)(rxa - lat_ns);
   15252 		}
   15253 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
   15254 			device_printf(sc->sc_dev, "Invalid high water mark %d"
   15255 			    " (rxa = %d, lat_ns = %d)\n",
   15256 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   15257 			return -1;
   15258 		}
   15259 	}
   15260 	/* Snoop and No-Snoop latencies are the same */
   15261 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   15262 	CSR_WRITE(sc, WMREG_LTRV, reg);
   15263 
   15264 	/* Set OBFF high water mark */
   15265 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   15266 	reg |= obff_hwm;
   15267 	CSR_WRITE(sc, WMREG_SVT, reg);
   15268 
   15269 	/* Enable OBFF */
   15270 	reg = CSR_READ(sc, WMREG_SVCR);
   15271 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   15272 	CSR_WRITE(sc, WMREG_SVCR, reg);
   15273 
   15274 	return 0;
   15275 }
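         /*
          * Illustrative sketch (not part of the driver): the encoding loop above
          * packs a latency in nanoseconds into the PCIe LTR "value * 2^(5*scale)"
          * format.  The hypothetical helper below repeats that computation so it
          * can be checked in isolation; e.g. 1000000 ns encodes as value=977,
          * scale=2, i.e. 977 * 2^10 ns ~= 1000448 ns (rounded up).
          */
         #ifdef notyet
         static uint16_t
         wm_ltr_encode_example(int64_t lat_ns)
         {
         	uint16_t scale = 0;
         	int64_t value = lat_ns;
         
         	/* Divide by 2^5 (rounding up) until the value fits in 10 bits */
         	while (value > LTRV_VALUE) {
         		scale++;
         		value = howmany(value, __BIT(5));
         	}
         	if (scale > LTRV_SCALE_MAX)
         		return 0;	/* Not representable */
         
         	return (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
         }
         #endif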
   15276 
   15277 /*
   15278  * I210 Errata 25 and I211 Errata 10
   15279  * Slow System Clock.
   15280  */
   15281 static void
   15282 wm_pll_workaround_i210(struct wm_softc *sc)
   15283 {
   15284 	uint32_t mdicnfg, wuc;
   15285 	uint32_t reg;
   15286 	pcireg_t pcireg;
   15287 	uint32_t pmreg;
   15288 	uint16_t nvmword, tmp_nvmword;
   15289 	int phyval;
   15290 	bool wa_done = false;
   15291 	int i;
   15292 
   15293 	/* Save WUC and MDICNFG registers */
   15294 	wuc = CSR_READ(sc, WMREG_WUC);
   15295 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   15296 
   15297 	reg = mdicnfg & ~MDICNFG_DEST;
   15298 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   15299 
   15300 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   15301 		nvmword = INVM_DEFAULT_AL;
   15302 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   15303 
   15304 	/* Get Power Management cap offset */
   15305 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   15306 		&pmreg, NULL) == 0)
   15307 		return;
   15308 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   15309 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   15310 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   15311 
   15312 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   15313 			break; /* OK */
   15314 		}
   15315 
   15316 		wa_done = true;
   15317 		/* Directly reset the internal PHY */
   15318 		reg = CSR_READ(sc, WMREG_CTRL);
   15319 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   15320 
   15321 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   15322 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   15323 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   15324 
   15325 		CSR_WRITE(sc, WMREG_WUC, 0);
   15326 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   15327 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15328 
   15329 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   15330 		    pmreg + PCI_PMCSR);
   15331 		pcireg |= PCI_PMCSR_STATE_D3;
   15332 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15333 		    pmreg + PCI_PMCSR, pcireg);
   15334 		delay(1000);
   15335 		pcireg &= ~PCI_PMCSR_STATE_D3;
   15336 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   15337 		    pmreg + PCI_PMCSR, pcireg);
   15338 
   15339 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   15340 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   15341 
   15342 		/* Restore WUC register */
   15343 		CSR_WRITE(sc, WMREG_WUC, wuc);
   15344 	}
   15345 
   15346 	/* Restore MDICNFG setting */
   15347 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   15348 	if (wa_done)
   15349 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   15350 }
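         /*
          * Illustrative sketch (hypothetical helper, not in the driver): the
          * D3hot -> D0 power-state bounce performed inside the retry loop above,
          * factored out for clarity.  pmreg is the power management capability
          * offset found with pci_get_capability().
          */
         #ifdef notyet
         static void
         wm_pmcsr_bounce_example(struct wm_softc *sc, uint32_t pmreg)
         {
         	pcireg_t pcireg;
         
         	pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
         
         	/* Enter D3hot, let the clock settle, then return to D0 */
         	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR,
         	    pcireg | PCI_PMCSR_STATE_D3);
         	delay(1000);
         	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR,
         	    pcireg & ~PCI_PMCSR_STATE_D3);
         }
         #endif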
   15351 
   15352 static void
   15353 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   15354 {
   15355 	uint32_t reg;
   15356 
   15357 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   15358 		device_xname(sc->sc_dev), __func__));
   15359 	KASSERT((sc->sc_type == WM_T_PCH_SPT)
   15360 	    || (sc->sc_type == WM_T_PCH_CNP));
   15361 
   15362 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   15363 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   15364 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   15365 
   15366 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   15367 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   15368 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   15369 }
   15370