/*	$NetBSD: if_wm.c,v 1.509 2017/06/12 03:03:22 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.509 2017/06/12 03:03:22 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
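
/*
 * Usage sketch (illustrative, not part of the driver): the second
 * DPRINTF() argument carries its own parentheses so the macro can
 * forward a complete printf argument list unchanged:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", device_xname(sc->sc_dev)));
 */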

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts that this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
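
/*
 * Illustrative sketch (not driver code): because WM_NTXDESC(txq) is a
 * power of two, WM_NEXTTX() advances a ring index with a cheap mask
 * instead of a modulo.  For example, with txq_ndesc == 4096:
 *
 *	int idx = 4095;
 *	idx = WM_NEXTTX(txq, idx);	-> (4095 + 1) & 4095 == 0, wrapped
 */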

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
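
/*
 * Worked example for the figure above (assuming a 9018-byte jumbo
 * frame; illustrative only): such a frame needs
 * howmany(9018, MCLBYTES) == howmany(9018, 2048) == 5 clusters, so
 * 256 descriptors / 5 clusters per frame ~= 51 frames in flight,
 * i.e. "room for 50 jumbo packets" with a little slack.
 */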

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
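
/*
 * Expansion sketch (illustrative only): WM_Q_EVCNT_DEFINE(txq, txdw)
 * declares, inside the enclosing struct,
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR) then
 * formats the counter name "txq00txdw" into that buffer and attaches
 * the counter under that name.
 */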

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of Tx and Rx queues */
	int wmq_intr_idx;		/* index into the MSI-X table */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
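
/*
 * Usage sketch (illustrative; assumes acquire() returns 0 on success):
 * PHY accessors bracket their register traffic with the chip-specific
 * semaphore ops installed here, e.g.
 *
 *	if (sc->phy.acquire(sc) != 0)
 *		return;
 *	... access PHY registers ...
 *	sc->phy.release(sc);
 */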

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;
	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix uses sc_ihs[0] to
					 *   sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only;
					 * msix uses sc_intrs[0] to
					 *   sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
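
/*
 * Sketch of the tail-pointer idiom above (illustrative, not driver code):
 * rxq_tailp always points at the m_next slot where the next mbuf will be
 * linked, so appending is O(1) with no list walk:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL, tailp == &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, tailp == &m2->m_next
 */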

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
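
/*
 * Usage sketch (illustrative): PCI writes are posted, and a read from
 * any device register forces them out to the chip.  CSR_WRITE_FLUSH()
 * reads STATUS for exactly that purpose:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE_FLUSH(sc);	force the posted write to reach the device
 */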

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
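
/*
 * Worked example (illustrative): descriptor ring addresses are handed to
 * the chip as two 32-bit halves.  With a hypothetical txq_desc_dma of
 * 0x123456000:
 *
 *	WM_CDTXADDR_LO(txq, 0) == 0x23456000
 *	WM_CDTXADDR_HI(txq, 0) == 0x1
 *
 * The sizeof test keeps the shift legal when bus_addr_t is only 32 bits
 * wide; in that case the high half is simply 0.
 */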

/*
 * Register read/write functions, other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions access Kumeran-specific registers, not MII registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with/without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, the PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
   1487 /*
   1488  * Register read/write functions.
   1489  * Other than CSR_{READ|WRITE}().
   1490  */
   1491 
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
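/*
 * The I/O BAR provides a two-register indirect window: the target
 * register's offset is written at BAR offset 0 and the data is then
 * read or written at BAR offset 4, as wm_io_read() above shows.
 */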
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
   1509 
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
   1532 
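/*
 * Split a bus address into the two little-endian 32-bit halves of a
 * descriptor address field; on platforms with a 32-bit bus_addr_t
 * the high word is simply zeroed.
 */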
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
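	/*
	 * Example: in a 256-descriptor ring, start = 250 and num = 10
	 * syncs descriptors 250-255 here, then wraps and syncs 0-3 in
	 * the call below.
	 */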
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
   1564 
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
   1584 	 * to a 4-byte boundary.
    1585 	 *
   1586 	 * XXX BRAINDAMAGE ALERT!
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
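	/*
	 * Example: with a 2048-byte cluster and sc_align_tweak == 2,
	 * the 14-byte Ethernet header starts at offset 2, so the IP
	 * header that follows begins at offset 16, a 4-byte-aligned
	 * address.
	 */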
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
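	/*
	 * Advancing the receive descriptor tail (RDT) register hands
	 * this freshly initialized descriptor back to the hardware.
	 */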
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
   1630 /* Lookup supported device table */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1678 	pcireg_t preg, memtype;
   1679 	uint16_t eeprom_data, apme_mask;
   1680 	bool force_clear_smbi;
   1681 	uint32_t link_mode;
   1682 	uint32_t reg;
   1683 
   1684 	sc->sc_dev = self;
   1685 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1686 	sc->sc_core_stopping = false;
   1687 
   1688 	wmp = wm_lookup(pa);
   1689 #ifdef DIAGNOSTIC
   1690 	if (wmp == NULL) {
   1691 		printf("\n");
   1692 		panic("wm_attach: impossible");
   1693 	}
   1694 #endif
   1695 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1696 
   1697 	sc->sc_pc = pa->pa_pc;
   1698 	sc->sc_pcitag = pa->pa_tag;
   1699 
   1700 	if (pci_dma64_available(pa))
   1701 		sc->sc_dmat = pa->pa_dmat64;
   1702 	else
   1703 		sc->sc_dmat = pa->pa_dmat;
   1704 
   1705 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1706 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1707 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1708 
   1709 	sc->sc_type = wmp->wmp_type;
   1710 
   1711 	/* Set default function pointers */
   1712 	sc->phy.acquire = wm_get_null;
   1713 	sc->phy.release = wm_put_null;
   1714 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1715 
   1716 	if (sc->sc_type < WM_T_82543) {
   1717 		if (sc->sc_rev < 2) {
   1718 			aprint_error_dev(sc->sc_dev,
   1719 			    "i82542 must be at least rev. 2\n");
   1720 			return;
   1721 		}
   1722 		if (sc->sc_rev < 3)
   1723 			sc->sc_type = WM_T_82542_2_0;
   1724 	}
   1725 
   1726 	/*
   1727 	 * Disable MSI for Errata:
   1728 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1729 	 *
   1730 	 *  82544: Errata 25
   1731 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1732 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1733 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1734 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1735 	 *
   1736 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1737 	 *
   1738 	 *  82571 & 82572: Errata 63
   1739 	 */
   1740 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1741 	    || (sc->sc_type == WM_T_82572))
   1742 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1743 
   1744 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1745 	    || (sc->sc_type == WM_T_82580)
   1746 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1747 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1748 		sc->sc_flags |= WM_F_NEWQUEUE;
   1749 
   1750 	/* Set device properties (mactype) */
   1751 	dict = device_properties(sc->sc_dev);
   1752 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1753 
   1754 	/*
    1755 	 * Map the device.  All devices support memory-mapped access,
   1756 	 * and it is really required for normal operation.
   1757 	 */
   1758 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1759 	switch (memtype) {
   1760 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1762 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1763 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1764 		break;
   1765 	default:
   1766 		memh_valid = 0;
   1767 		break;
   1768 	}
   1769 
   1770 	if (memh_valid) {
   1771 		sc->sc_st = memt;
   1772 		sc->sc_sh = memh;
   1773 		sc->sc_ss = memsize;
   1774 	} else {
   1775 		aprint_error_dev(sc->sc_dev,
   1776 		    "unable to map device registers\n");
   1777 		return;
   1778 	}
   1779 
   1780 	/*
   1781 	 * In addition, i82544 and later support I/O mapped indirect
   1782 	 * register access.  It is not desirable (nor supported in
   1783 	 * this driver) to use it for normal operation, though it is
   1784 	 * required to work around bugs in some chip versions.
   1785 	 */
   1786 	if (sc->sc_type >= WM_T_82544) {
   1787 		/* First we have to find the I/O BAR. */
   1788 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1789 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1790 			if (memtype == PCI_MAPREG_TYPE_IO)
   1791 				break;
   1792 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1793 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1794 				i += 4;	/* skip high bits, too */
   1795 		}
   1796 		if (i < PCI_MAPREG_END) {
   1797 			/*
    1798 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1799 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1800 			 * That's not a problem, because the newer chips
    1801 			 * don't have this bug.
    1802 			 *
    1803 			 * The i8254x apparently doesn't respond when the
    1804 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1805 			 * been configured.
   1806 			 */
   1807 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1808 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1809 				aprint_error_dev(sc->sc_dev,
   1810 				    "WARNING: I/O BAR at zero.\n");
   1811 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1812 					0, &sc->sc_iot, &sc->sc_ioh,
   1813 					NULL, &sc->sc_ios) == 0) {
   1814 				sc->sc_flags |= WM_F_IOH_VALID;
   1815 			} else {
   1816 				aprint_error_dev(sc->sc_dev,
   1817 				    "WARNING: unable to map I/O space\n");
   1818 			}
   1819 		}
   1820 
   1821 	}
   1822 
   1823 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1824 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1825 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1826 	if (sc->sc_type < WM_T_82542_2_1)
   1827 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1828 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1829 
   1830 	/* power up chip */
   1831 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1832 	    NULL)) && error != EOPNOTSUPP) {
   1833 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1834 		return;
   1835 	}
   1836 
   1837 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1838 
   1839 	/* Allocation settings */
   1840 	max_type = PCI_INTR_TYPE_MSIX;
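	/* One MSI-X vector per queue, plus one for link status changes. */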
   1841 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1842 	counts[PCI_INTR_TYPE_MSI] = 1;
   1843 	counts[PCI_INTR_TYPE_INTX] = 1;
   1844 	/* overridden by disable flags */
   1845 	if (wm_disable_msi != 0) {
   1846 		counts[PCI_INTR_TYPE_MSI] = 0;
   1847 		if (wm_disable_msix != 0) {
   1848 			max_type = PCI_INTR_TYPE_INTX;
   1849 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1850 		}
   1851 	} else if (wm_disable_msix != 0) {
   1852 		max_type = PCI_INTR_TYPE_MSI;
   1853 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1854 	}
   1855 
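	/*
	 * Fall back in the order MSI-X -> MSI -> INTx: if setting up
	 * the allocated interrupt type fails, release the vectors and
	 * retry with the next weaker type; INTx is the last resort.
	 */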
   1856 alloc_retry:
   1857 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1858 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1859 		return;
   1860 	}
   1861 
   1862 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1863 		error = wm_setup_msix(sc);
   1864 		if (error) {
   1865 			pci_intr_release(pc, sc->sc_intrs,
   1866 			    counts[PCI_INTR_TYPE_MSIX]);
   1867 
   1868 			/* Setup for MSI: Disable MSI-X */
   1869 			max_type = PCI_INTR_TYPE_MSI;
   1870 			counts[PCI_INTR_TYPE_MSI] = 1;
   1871 			counts[PCI_INTR_TYPE_INTX] = 1;
   1872 			goto alloc_retry;
   1873 		}
    1874 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1875 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1876 		error = wm_setup_legacy(sc);
   1877 		if (error) {
   1878 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1879 			    counts[PCI_INTR_TYPE_MSI]);
   1880 
   1881 			/* The next try is for INTx: Disable MSI */
   1882 			max_type = PCI_INTR_TYPE_INTX;
   1883 			counts[PCI_INTR_TYPE_INTX] = 1;
   1884 			goto alloc_retry;
   1885 		}
   1886 	} else {
   1887 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1888 		error = wm_setup_legacy(sc);
   1889 		if (error) {
   1890 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1891 			    counts[PCI_INTR_TYPE_INTX]);
   1892 			return;
   1893 		}
   1894 	}
   1895 
   1896 	/*
   1897 	 * Check the function ID (unit number of the chip).
   1898 	 */
   1899 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1900 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1901 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1902 	    || (sc->sc_type == WM_T_82580)
   1903 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1904 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1905 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1906 	else
   1907 		sc->sc_funcid = 0;
   1908 
   1909 	/*
   1910 	 * Determine a few things about the bus we're connected to.
   1911 	 */
   1912 	if (sc->sc_type < WM_T_82543) {
   1913 		/* We don't really know the bus characteristics here. */
   1914 		sc->sc_bus_speed = 33;
   1915 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1916 		/*
    1917 		 * CSA (Communication Streaming Architecture) is about as
    1918 		 * fast as a 32-bit 66MHz PCI bus.
   1919 		 */
   1920 		sc->sc_flags |= WM_F_CSA;
   1921 		sc->sc_bus_speed = 66;
   1922 		aprint_verbose_dev(sc->sc_dev,
   1923 		    "Communication Streaming Architecture\n");
   1924 		if (sc->sc_type == WM_T_82547) {
   1925 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1926 			callout_setfunc(&sc->sc_txfifo_ch,
   1927 					wm_82547_txfifo_stall, sc);
   1928 			aprint_verbose_dev(sc->sc_dev,
   1929 			    "using 82547 Tx FIFO stall work-around\n");
   1930 		}
   1931 	} else if (sc->sc_type >= WM_T_82571) {
   1932 		sc->sc_flags |= WM_F_PCIE;
   1933 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1934 		    && (sc->sc_type != WM_T_ICH10)
   1935 		    && (sc->sc_type != WM_T_PCH)
   1936 		    && (sc->sc_type != WM_T_PCH2)
   1937 		    && (sc->sc_type != WM_T_PCH_LPT)
   1938 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1939 			/* ICH* and PCH* have no PCIe capability registers */
   1940 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1941 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1942 				NULL) == 0)
   1943 				aprint_error_dev(sc->sc_dev,
   1944 				    "unable to find PCIe capability\n");
   1945 		}
   1946 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1947 	} else {
   1948 		reg = CSR_READ(sc, WMREG_STATUS);
   1949 		if (reg & STATUS_BUS64)
   1950 			sc->sc_flags |= WM_F_BUS64;
   1951 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1952 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1953 
   1954 			sc->sc_flags |= WM_F_PCIX;
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1957 				aprint_error_dev(sc->sc_dev,
   1958 				    "unable to find PCIX capability\n");
   1959 			else if (sc->sc_type != WM_T_82545_3 &&
   1960 				 sc->sc_type != WM_T_82546_3) {
   1961 				/*
   1962 				 * Work around a problem caused by the BIOS
   1963 				 * setting the max memory read byte count
   1964 				 * incorrectly.
   1965 				 */
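				/*
				 * MMRBC is encoded as a power of two
				 * (512 << n bytes), so the clamp below
				 * lowers the command register's count
				 * to the maximum the status register
				 * reports the device can handle.
				 */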
   1966 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1967 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1968 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1969 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1970 
   1971 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1972 				    PCIX_CMD_BYTECNT_SHIFT;
   1973 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1974 				    PCIX_STATUS_MAXB_SHIFT;
   1975 				if (bytecnt > maxb) {
   1976 					aprint_verbose_dev(sc->sc_dev,
   1977 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1978 					    512 << bytecnt, 512 << maxb);
   1979 					pcix_cmd = (pcix_cmd &
   1980 					    ~PCIX_CMD_BYTECNT_MASK) |
   1981 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1982 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1983 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1984 					    pcix_cmd);
   1985 				}
   1986 			}
   1987 		}
   1988 		/*
   1989 		 * The quad port adapter is special; it has a PCIX-PCIX
   1990 		 * bridge on the board, and can run the secondary bus at
   1991 		 * a higher speed.
   1992 		 */
   1993 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1994 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1995 								      : 66;
   1996 		} else if (sc->sc_flags & WM_F_PCIX) {
   1997 			switch (reg & STATUS_PCIXSPD_MASK) {
   1998 			case STATUS_PCIXSPD_50_66:
   1999 				sc->sc_bus_speed = 66;
   2000 				break;
   2001 			case STATUS_PCIXSPD_66_100:
   2002 				sc->sc_bus_speed = 100;
   2003 				break;
   2004 			case STATUS_PCIXSPD_100_133:
   2005 				sc->sc_bus_speed = 133;
   2006 				break;
   2007 			default:
   2008 				aprint_error_dev(sc->sc_dev,
   2009 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2010 				    reg & STATUS_PCIXSPD_MASK);
   2011 				sc->sc_bus_speed = 66;
   2012 				break;
   2013 			}
   2014 		} else
   2015 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2016 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2017 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2018 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2019 	}
   2020 
   2021 	/* clear interesting stat counters */
   2022 	CSR_READ(sc, WMREG_COLC);
   2023 	CSR_READ(sc, WMREG_RXERRC);
   2024 
   2025 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2026 	    || (sc->sc_type >= WM_T_ICH8))
   2027 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2028 	if (sc->sc_type >= WM_T_ICH8)
   2029 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2030 
   2031 	/* Set PHY, NVM mutex related stuff */
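	/*
	 * Each generation stores its NVM differently: early chips use
	 * a Microwire EEPROM, later ones use SPI (EECD says which),
	 * ICH/PCH parts keep the NVM in the chipset flash, and the
	 * I210/I211 use either an external flash or the internal iNVM.
	 */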
   2032 	switch (sc->sc_type) {
   2033 	case WM_T_82542_2_0:
   2034 	case WM_T_82542_2_1:
   2035 	case WM_T_82543:
   2036 	case WM_T_82544:
   2037 		/* Microwire */
   2038 		sc->sc_nvm_wordsize = 64;
   2039 		sc->sc_nvm_addrbits = 6;
   2040 		break;
   2041 	case WM_T_82540:
   2042 	case WM_T_82545:
   2043 	case WM_T_82545_3:
   2044 	case WM_T_82546:
   2045 	case WM_T_82546_3:
   2046 		/* Microwire */
   2047 		reg = CSR_READ(sc, WMREG_EECD);
   2048 		if (reg & EECD_EE_SIZE) {
   2049 			sc->sc_nvm_wordsize = 256;
   2050 			sc->sc_nvm_addrbits = 8;
   2051 		} else {
   2052 			sc->sc_nvm_wordsize = 64;
   2053 			sc->sc_nvm_addrbits = 6;
   2054 		}
   2055 		sc->sc_flags |= WM_F_LOCK_EECD;
   2056 		break;
   2057 	case WM_T_82541:
   2058 	case WM_T_82541_2:
   2059 	case WM_T_82547:
   2060 	case WM_T_82547_2:
   2061 		sc->sc_flags |= WM_F_LOCK_EECD;
   2062 		reg = CSR_READ(sc, WMREG_EECD);
   2063 		if (reg & EECD_EE_TYPE) {
   2064 			/* SPI */
   2065 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2066 			wm_nvm_set_addrbits_size_eecd(sc);
   2067 		} else {
   2068 			/* Microwire */
   2069 			if ((reg & EECD_EE_ABITS) != 0) {
   2070 				sc->sc_nvm_wordsize = 256;
   2071 				sc->sc_nvm_addrbits = 8;
   2072 			} else {
   2073 				sc->sc_nvm_wordsize = 64;
   2074 				sc->sc_nvm_addrbits = 6;
   2075 			}
   2076 		}
   2077 		break;
   2078 	case WM_T_82571:
   2079 	case WM_T_82572:
   2080 		/* SPI */
   2081 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2082 		wm_nvm_set_addrbits_size_eecd(sc);
   2083 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2084 		sc->phy.acquire = wm_get_swsm_semaphore;
   2085 		sc->phy.release = wm_put_swsm_semaphore;
   2086 		break;
   2087 	case WM_T_82573:
   2088 	case WM_T_82574:
   2089 	case WM_T_82583:
   2090 		if (sc->sc_type == WM_T_82573) {
   2091 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2092 			sc->phy.acquire = wm_get_swsm_semaphore;
   2093 			sc->phy.release = wm_put_swsm_semaphore;
   2094 		} else {
   2095 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2096 			/* Both PHY and NVM use the same semaphore. */
   2097 			sc->phy.acquire
   2098 			    = wm_get_swfwhw_semaphore;
   2099 			sc->phy.release
   2100 			    = wm_put_swfwhw_semaphore;
   2101 		}
   2102 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2103 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2104 			sc->sc_nvm_wordsize = 2048;
   2105 		} else {
   2106 			/* SPI */
   2107 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2108 			wm_nvm_set_addrbits_size_eecd(sc);
   2109 		}
   2110 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2111 		break;
   2112 	case WM_T_82575:
   2113 	case WM_T_82576:
   2114 	case WM_T_82580:
   2115 	case WM_T_I350:
   2116 	case WM_T_I354:
   2117 	case WM_T_80003:
   2118 		/* SPI */
   2119 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2120 		wm_nvm_set_addrbits_size_eecd(sc);
   2121 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2122 		    | WM_F_LOCK_SWSM;
   2123 		sc->phy.acquire = wm_get_phy_82575;
   2124 		sc->phy.release = wm_put_phy_82575;
   2125 		break;
   2126 	case WM_T_ICH8:
   2127 	case WM_T_ICH9:
   2128 	case WM_T_ICH10:
   2129 	case WM_T_PCH:
   2130 	case WM_T_PCH2:
   2131 	case WM_T_PCH_LPT:
   2132 		/* FLASH */
   2133 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2134 		sc->sc_nvm_wordsize = 2048;
   2135 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2136 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2137 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2138 			aprint_error_dev(sc->sc_dev,
   2139 			    "can't map FLASH registers\n");
   2140 			goto out;
   2141 		}
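		/*
		 * GFPREG holds the flash region's base and limit in
		 * sector units.  The bank size computed below is
		 * (limit + 1 - base) sectors converted to bytes, halved
		 * for the two flash banks and then expressed in 16-bit
		 * words.
		 */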
   2142 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2143 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2144 		    ICH_FLASH_SECTOR_SIZE;
   2145 		sc->sc_ich8_flash_bank_size =
   2146 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2147 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2148 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2149 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2150 		sc->sc_flashreg_offset = 0;
   2151 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2152 		sc->phy.release = wm_put_swflag_ich8lan;
   2153 		break;
   2154 	case WM_T_PCH_SPT:
   2155 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2156 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2157 		sc->sc_flasht = sc->sc_st;
   2158 		sc->sc_flashh = sc->sc_sh;
   2159 		sc->sc_ich8_flash_base = 0;
   2160 		sc->sc_nvm_wordsize =
   2161 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2162 			* NVM_SIZE_MULTIPLIER;
    2163 		/* It is the size in bytes; we want it in words */
   2164 		sc->sc_nvm_wordsize /= 2;
   2165 		/* assume 2 banks */
   2166 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2167 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2168 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2169 		sc->phy.release = wm_put_swflag_ich8lan;
   2170 		break;
   2171 	case WM_T_I210:
   2172 	case WM_T_I211:
   2173 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2174 			wm_nvm_set_addrbits_size_eecd(sc);
   2175 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2176 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2177 		} else {
   2178 			sc->sc_nvm_wordsize = INVM_SIZE;
   2179 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2180 		}
   2181 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2182 		sc->phy.acquire = wm_get_phy_82575;
   2183 		sc->phy.release = wm_put_phy_82575;
   2184 		break;
   2185 	default:
   2186 		break;
   2187 	}
   2188 
   2189 	/* Reset the chip to a known state. */
   2190 	wm_reset(sc);
   2191 
   2192 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2193 	switch (sc->sc_type) {
   2194 	case WM_T_82571:
   2195 	case WM_T_82572:
   2196 		reg = CSR_READ(sc, WMREG_SWSM2);
   2197 		if ((reg & SWSM2_LOCK) == 0) {
   2198 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2199 			force_clear_smbi = true;
   2200 		} else
   2201 			force_clear_smbi = false;
   2202 		break;
   2203 	case WM_T_82573:
   2204 	case WM_T_82574:
   2205 	case WM_T_82583:
   2206 		force_clear_smbi = true;
   2207 		break;
   2208 	default:
   2209 		force_clear_smbi = false;
   2210 		break;
   2211 	}
   2212 	if (force_clear_smbi) {
   2213 		reg = CSR_READ(sc, WMREG_SWSM);
   2214 		if ((reg & SWSM_SMBI) != 0)
   2215 			aprint_error_dev(sc->sc_dev,
   2216 			    "Please update the Bootagent\n");
   2217 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2218 	}
   2219 
   2220 	/*
    2221 	 * Defer printing the EEPROM type until after verifying the checksum.
   2222 	 * This allows the EEPROM type to be printed correctly in the case
   2223 	 * that no EEPROM is attached.
   2224 	 */
   2225 	/*
   2226 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2227 	 * this for later, so we can fail future reads from the EEPROM.
   2228 	 */
   2229 	if (wm_nvm_validate_checksum(sc)) {
   2230 		/*
    2231 		 * Check the checksum again, because some PCI-e parts fail
    2232 		 * the first check due to the link being in a sleep state.
   2233 		 */
   2234 		if (wm_nvm_validate_checksum(sc))
   2235 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2236 	}
   2237 
   2238 	/* Set device properties (macflags) */
   2239 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2240 
   2241 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2242 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2243 	else {
   2244 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2245 		    sc->sc_nvm_wordsize);
   2246 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2247 			aprint_verbose("iNVM");
   2248 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2249 			aprint_verbose("FLASH(HW)");
   2250 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2251 			aprint_verbose("FLASH");
   2252 		else {
   2253 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2254 				eetype = "SPI";
   2255 			else
   2256 				eetype = "MicroWire";
   2257 			aprint_verbose("(%d address bits) %s EEPROM",
   2258 			    sc->sc_nvm_addrbits, eetype);
   2259 		}
   2260 	}
   2261 	wm_nvm_version(sc);
   2262 	aprint_verbose("\n");
   2263 
   2264 	/* Check for I21[01] PLL workaround */
   2265 	if (sc->sc_type == WM_T_I210)
   2266 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2267 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2268 		/* NVM image release 3.25 has a workaround */
   2269 		if ((sc->sc_nvm_ver_major < 3)
   2270 		    || ((sc->sc_nvm_ver_major == 3)
   2271 			&& (sc->sc_nvm_ver_minor < 25))) {
   2272 			aprint_verbose_dev(sc->sc_dev,
   2273 			    "ROM image version %d.%d is older than 3.25\n",
   2274 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2275 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2276 		}
   2277 	}
   2278 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2279 		wm_pll_workaround_i210(sc);
   2280 
   2281 	wm_get_wakeup(sc);
   2282 
   2283 	/* Non-AMT based hardware can now take control from firmware */
   2284 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2285 		wm_get_hw_control(sc);
   2286 
   2287 	/*
   2288 	 * Read the Ethernet address from the EEPROM, if not first found
   2289 	 * in device properties.
   2290 	 */
   2291 	ea = prop_dictionary_get(dict, "mac-address");
   2292 	if (ea != NULL) {
   2293 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2294 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2295 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2296 	} else {
   2297 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2298 			aprint_error_dev(sc->sc_dev,
   2299 			    "unable to read Ethernet address\n");
   2300 			goto out;
   2301 		}
   2302 	}
   2303 
   2304 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2305 	    ether_sprintf(enaddr));
   2306 
   2307 	/*
   2308 	 * Read the config info from the EEPROM, and set up various
   2309 	 * bits in the control registers based on their contents.
   2310 	 */
   2311 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2312 	if (pn != NULL) {
   2313 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2314 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2315 	} else {
   2316 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2317 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2318 			goto out;
   2319 		}
   2320 	}
   2321 
   2322 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2323 	if (pn != NULL) {
   2324 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2325 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2326 	} else {
   2327 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2328 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2329 			goto out;
   2330 		}
   2331 	}
   2332 
   2333 	/* check for WM_F_WOL */
   2334 	switch (sc->sc_type) {
   2335 	case WM_T_82542_2_0:
   2336 	case WM_T_82542_2_1:
   2337 	case WM_T_82543:
   2338 		/* dummy? */
   2339 		eeprom_data = 0;
   2340 		apme_mask = NVM_CFG3_APME;
   2341 		break;
   2342 	case WM_T_82544:
   2343 		apme_mask = NVM_CFG2_82544_APM_EN;
   2344 		eeprom_data = cfg2;
   2345 		break;
   2346 	case WM_T_82546:
   2347 	case WM_T_82546_3:
   2348 	case WM_T_82571:
   2349 	case WM_T_82572:
   2350 	case WM_T_82573:
   2351 	case WM_T_82574:
   2352 	case WM_T_82583:
   2353 	case WM_T_80003:
   2354 	default:
   2355 		apme_mask = NVM_CFG3_APME;
   2356 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2357 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2358 		break;
   2359 	case WM_T_82575:
   2360 	case WM_T_82576:
   2361 	case WM_T_82580:
   2362 	case WM_T_I350:
   2363 	case WM_T_I354: /* XXX ok? */
   2364 	case WM_T_ICH8:
   2365 	case WM_T_ICH9:
   2366 	case WM_T_ICH10:
   2367 	case WM_T_PCH:
   2368 	case WM_T_PCH2:
   2369 	case WM_T_PCH_LPT:
   2370 	case WM_T_PCH_SPT:
   2371 		/* XXX The funcid should be checked on some devices */
   2372 		apme_mask = WUC_APME;
   2373 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2374 		break;
   2375 	}
   2376 
   2377 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2378 	if ((eeprom_data & apme_mask) != 0)
   2379 		sc->sc_flags |= WM_F_WOL;
   2380 #ifdef WM_DEBUG
   2381 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2382 		printf("WOL\n");
   2383 #endif
   2384 
   2385 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2386 		/* Check NVM for autonegotiation */
   2387 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2388 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2389 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2390 		}
   2391 	}
   2392 
   2393 	/*
    2394 	 * XXX need special handling for some multiple-port cards
    2395 	 * to disable a particular port.
   2396 	 */
   2397 
   2398 	if (sc->sc_type >= WM_T_82544) {
   2399 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2400 		if (pn != NULL) {
   2401 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2402 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2403 		} else {
   2404 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2405 				aprint_error_dev(sc->sc_dev,
   2406 				    "unable to read SWDPIN\n");
   2407 				goto out;
   2408 			}
   2409 		}
   2410 	}
   2411 
   2412 	if (cfg1 & NVM_CFG1_ILOS)
   2413 		sc->sc_ctrl |= CTRL_ILOS;
   2414 
   2415 	/*
   2416 	 * XXX
    2417 	 * This code isn't correct, because pins 2 and 3 are located
    2418 	 * at different positions on newer chips. Check all the datasheets.
    2419 	 *
    2420 	 * Until this problem is resolved, only do this for chips <= 82580.
   2421 	 */
   2422 	if (sc->sc_type <= WM_T_82580) {
   2423 		if (sc->sc_type >= WM_T_82544) {
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPIO_SHIFT;
   2427 			sc->sc_ctrl |=
   2428 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2429 			    CTRL_SWDPINS_SHIFT;
   2430 		} else {
   2431 			sc->sc_ctrl |=
   2432 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2433 			    CTRL_SWDPIO_SHIFT;
   2434 		}
   2435 	}
   2436 
   2437 	/* XXX For other than 82580? */
   2438 	if (sc->sc_type == WM_T_82580) {
   2439 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2440 		if (nvmword & __BIT(13))
   2441 			sc->sc_ctrl |= CTRL_ILOS;
   2442 	}
   2443 
   2444 #if 0
   2445 	if (sc->sc_type >= WM_T_82544) {
   2446 		if (cfg1 & NVM_CFG1_IPS0)
   2447 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2448 		if (cfg1 & NVM_CFG1_IPS1)
   2449 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPIO_SHIFT;
   2453 		sc->sc_ctrl_ext |=
   2454 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2455 		    CTRL_EXT_SWDPINS_SHIFT;
   2456 	} else {
   2457 		sc->sc_ctrl_ext |=
   2458 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2459 		    CTRL_EXT_SWDPIO_SHIFT;
   2460 	}
   2461 #endif
   2462 
   2463 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2464 #if 0
   2465 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2466 #endif
   2467 
   2468 	if (sc->sc_type == WM_T_PCH) {
   2469 		uint16_t val;
   2470 
   2471 		/* Save the NVM K1 bit setting */
   2472 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2473 
   2474 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2475 			sc->sc_nvm_k1_enabled = 1;
   2476 		else
   2477 			sc->sc_nvm_k1_enabled = 0;
   2478 	}
   2479 
   2480 	/*
    2481 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2482 	 * media structures accordingly.
   2483 	 */
   2484 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2485 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2486 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2487 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2488 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2489 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2490 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2491 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2492 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2493 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2494 	    || (sc->sc_type == WM_T_I211)) {
   2495 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2496 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2497 		switch (link_mode) {
   2498 		case CTRL_EXT_LINK_MODE_1000KX:
   2499 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2500 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2501 			break;
   2502 		case CTRL_EXT_LINK_MODE_SGMII:
   2503 			if (wm_sgmii_uses_mdio(sc)) {
   2504 				aprint_verbose_dev(sc->sc_dev,
   2505 				    "SGMII(MDIO)\n");
   2506 				sc->sc_flags |= WM_F_SGMII;
   2507 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2508 				break;
   2509 			}
   2510 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2511 			/*FALLTHROUGH*/
   2512 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
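			/*
			 * Probe the attached SFP module for the medium
			 * it provides; if that fails, fall back to the
			 * link mode we already read from CTRL_EXT.
			 */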
   2513 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2514 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2515 				if (link_mode
   2516 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2518 					sc->sc_flags |= WM_F_SGMII;
   2519 				} else {
   2520 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2521 					aprint_verbose_dev(sc->sc_dev,
   2522 					    "SERDES\n");
   2523 				}
   2524 				break;
   2525 			}
   2526 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2527 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2528 
   2529 			/* Change current link mode setting */
   2530 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2531 			switch (sc->sc_mediatype) {
   2532 			case WM_MEDIATYPE_COPPER:
   2533 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2534 				break;
   2535 			case WM_MEDIATYPE_SERDES:
   2536 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2537 				break;
   2538 			default:
   2539 				break;
   2540 			}
   2541 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2542 			break;
   2543 		case CTRL_EXT_LINK_MODE_GMII:
   2544 		default:
   2545 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2546 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2547 			break;
   2548 		}
   2549 
    2550 		/* Set or clear CTRL_EXT_I2C_ENA to match the SGMII flag. */
    2551 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2552 			reg |= CTRL_EXT_I2C_ENA;
    2553 		else
    2554 			reg &= ~CTRL_EXT_I2C_ENA;
   2555 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2556 
   2557 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2558 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2559 		else
   2560 			wm_tbi_mediainit(sc);
   2561 	} else if (sc->sc_type < WM_T_82543 ||
   2562 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2563 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2564 			aprint_error_dev(sc->sc_dev,
   2565 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2566 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2567 		}
   2568 		wm_tbi_mediainit(sc);
   2569 	} else {
   2570 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2571 			aprint_error_dev(sc->sc_dev,
   2572 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2573 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2574 		}
   2575 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2576 	}
   2577 
   2578 	ifp = &sc->sc_ethercom.ec_if;
   2579 	xname = device_xname(sc->sc_dev);
   2580 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2581 	ifp->if_softc = sc;
   2582 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2583 #ifdef WM_MPSAFE
   2584 	ifp->if_extflags = IFEF_START_MPSAFE;
   2585 #endif
   2586 	ifp->if_ioctl = wm_ioctl;
   2587 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2588 		ifp->if_start = wm_nq_start;
   2589 		/*
    2590 		 * When the number of CPUs is one and the controller can use
    2591 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2592 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2593 		 * the other for link status changes.
    2594 		 * In this situation, wm_nq_transmit() is disadvantageous
    2595 		 * because of the wm_select_txqueue() and pcq(9) overhead.
   2596 		 */
   2597 		if (wm_is_using_multiqueue(sc))
   2598 			ifp->if_transmit = wm_nq_transmit;
   2599 	} else {
   2600 		ifp->if_start = wm_start;
   2601 		/*
    2602 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2603 		 */
   2604 		if (wm_is_using_multiqueue(sc))
   2605 			ifp->if_transmit = wm_transmit;
   2606 	}
   2607 	ifp->if_watchdog = wm_watchdog;
   2608 	ifp->if_init = wm_init;
   2609 	ifp->if_stop = wm_stop;
   2610 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2611 	IFQ_SET_READY(&ifp->if_snd);
   2612 
   2613 	/* Check for jumbo frame */
   2614 	switch (sc->sc_type) {
   2615 	case WM_T_82573:
   2616 		/* XXX limited to 9234 if ASPM is disabled */
   2617 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2618 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2619 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2620 		break;
   2621 	case WM_T_82571:
   2622 	case WM_T_82572:
   2623 	case WM_T_82574:
   2624 	case WM_T_82575:
   2625 	case WM_T_82576:
   2626 	case WM_T_82580:
   2627 	case WM_T_I350:
    2628 	case WM_T_I354: /* XXX ok? */
   2629 	case WM_T_I210:
   2630 	case WM_T_I211:
   2631 	case WM_T_80003:
   2632 	case WM_T_ICH9:
   2633 	case WM_T_ICH10:
   2634 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2635 	case WM_T_PCH_LPT:
   2636 	case WM_T_PCH_SPT:
   2637 		/* XXX limited to 9234 */
   2638 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2639 		break;
   2640 	case WM_T_PCH:
   2641 		/* XXX limited to 4096 */
   2642 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2643 		break;
   2644 	case WM_T_82542_2_0:
   2645 	case WM_T_82542_2_1:
   2646 	case WM_T_82583:
   2647 	case WM_T_ICH8:
   2648 		/* No support for jumbo frame */
   2649 		break;
   2650 	default:
   2651 		/* ETHER_MAX_LEN_JUMBO */
   2652 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2653 		break;
   2654 	}
   2655 
    2656 	/* If we're an i82543 or greater, we can support VLANs. */
   2657 	if (sc->sc_type >= WM_T_82543)
   2658 		sc->sc_ethercom.ec_capabilities |=
   2659 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2660 
   2661 	/*
    2662 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2663 	 * on i82543 and later.
   2664 	 */
   2665 	if (sc->sc_type >= WM_T_82543) {
   2666 		ifp->if_capabilities |=
   2667 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2668 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2669 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2670 		    IFCAP_CSUM_TCPv6_Tx |
   2671 		    IFCAP_CSUM_UDPv6_Tx;
   2672 	}
   2673 
   2674 	/*
   2675 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2676 	 *
   2677 	 *	82541GI (8086:1076) ... no
   2678 	 *	82572EI (8086:10b9) ... yes
   2679 	 */
   2680 	if (sc->sc_type >= WM_T_82571) {
   2681 		ifp->if_capabilities |=
   2682 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2683 	}
   2684 
   2685 	/*
    2686 	 * If we're an i82544 or greater (except i82547), we can do
   2687 	 * TCP segmentation offload.
   2688 	 */
   2689 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2690 		ifp->if_capabilities |= IFCAP_TSOv4;
   2691 	}
   2692 
   2693 	if (sc->sc_type >= WM_T_82571) {
   2694 		ifp->if_capabilities |= IFCAP_TSOv6;
   2695 	}
   2696 
   2697 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2698 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2699 
   2700 #ifdef WM_MPSAFE
   2701 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2702 #else
   2703 	sc->sc_core_lock = NULL;
   2704 #endif
   2705 
   2706 	/* Attach the interface. */
   2707 	if_initialize(ifp);
   2708 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2709 	ether_ifattach(ifp, enaddr);
   2710 	if_register(ifp);
   2711 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2712 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2713 			  RND_FLAG_DEFAULT);
   2714 
   2715 #ifdef WM_EVENT_COUNTERS
   2716 	/* Attach event counters. */
   2717 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2718 	    NULL, xname, "linkintr");
   2719 
   2720 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2721 	    NULL, xname, "tx_xoff");
   2722 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2723 	    NULL, xname, "tx_xon");
   2724 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2725 	    NULL, xname, "rx_xoff");
   2726 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2727 	    NULL, xname, "rx_xon");
   2728 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2729 	    NULL, xname, "rx_macctl");
   2730 #endif /* WM_EVENT_COUNTERS */
   2731 
   2732 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2733 		pmf_class_network_register(self, ifp);
   2734 	else
   2735 		aprint_error_dev(self, "couldn't establish power handler\n");
   2736 
   2737 	sc->sc_flags |= WM_F_ATTACHED;
   2738  out:
   2739 	return;
   2740 }
   2741 
   2742 /* The detach function (ca_detach) */
   2743 static int
   2744 wm_detach(device_t self, int flags __unused)
   2745 {
   2746 	struct wm_softc *sc = device_private(self);
   2747 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2748 	int i;
   2749 
   2750 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2751 		return 0;
   2752 
   2753 	/* Stop the interface. Callouts are stopped in it. */
   2754 	wm_stop(ifp, 1);
   2755 
   2756 	pmf_device_deregister(self);
   2757 
   2758 #ifdef WM_EVENT_COUNTERS
   2759 	evcnt_detach(&sc->sc_ev_linkintr);
   2760 
   2761 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2762 	evcnt_detach(&sc->sc_ev_tx_xon);
   2763 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2764 	evcnt_detach(&sc->sc_ev_rx_xon);
   2765 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2766 #endif /* WM_EVENT_COUNTERS */
   2767 
   2768 	/* Tell the firmware about the release */
   2769 	WM_CORE_LOCK(sc);
   2770 	wm_release_manageability(sc);
   2771 	wm_release_hw_control(sc);
   2772 	wm_enable_wakeup(sc);
   2773 	WM_CORE_UNLOCK(sc);
   2774 
   2775 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2776 
   2777 	/* Delete all remaining media. */
   2778 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2779 
   2780 	ether_ifdetach(ifp);
   2781 	if_detach(ifp);
   2782 	if_percpuq_destroy(sc->sc_ipq);
   2783 
   2784 	/* Unload RX dmamaps and free mbufs */
   2785 	for (i = 0; i < sc->sc_nqueues; i++) {
   2786 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2787 		mutex_enter(rxq->rxq_lock);
   2788 		wm_rxdrain(rxq);
   2789 		mutex_exit(rxq->rxq_lock);
   2790 	}
   2791 	/* Must unlock here */
   2792 
   2793 	/* Disestablish the interrupt handler */
   2794 	for (i = 0; i < sc->sc_nintrs; i++) {
   2795 		if (sc->sc_ihs[i] != NULL) {
   2796 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2797 			sc->sc_ihs[i] = NULL;
   2798 		}
   2799 	}
   2800 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2801 
   2802 	wm_free_txrx_queues(sc);
   2803 
   2804 	/* Unmap the registers */
   2805 	if (sc->sc_ss) {
   2806 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2807 		sc->sc_ss = 0;
   2808 	}
   2809 	if (sc->sc_ios) {
   2810 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2811 		sc->sc_ios = 0;
   2812 	}
   2813 	if (sc->sc_flashs) {
   2814 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2815 		sc->sc_flashs = 0;
   2816 	}
   2817 
   2818 	if (sc->sc_core_lock)
   2819 		mutex_obj_free(sc->sc_core_lock);
   2820 	if (sc->sc_ich_phymtx)
   2821 		mutex_obj_free(sc->sc_ich_phymtx);
   2822 	if (sc->sc_ich_nvmmtx)
   2823 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2824 
   2825 	return 0;
   2826 }
   2827 
   2828 static bool
   2829 wm_suspend(device_t self, const pmf_qual_t *qual)
   2830 {
   2831 	struct wm_softc *sc = device_private(self);
   2832 
   2833 	wm_release_manageability(sc);
   2834 	wm_release_hw_control(sc);
   2835 	wm_enable_wakeup(sc);
   2836 
   2837 	return true;
   2838 }
   2839 
   2840 static bool
   2841 wm_resume(device_t self, const pmf_qual_t *qual)
   2842 {
   2843 	struct wm_softc *sc = device_private(self);
   2844 
   2845 	wm_init_manageability(sc);
   2846 
   2847 	return true;
   2848 }
   2849 
   2850 /*
   2851  * wm_watchdog:		[ifnet interface function]
   2852  *
   2853  *	Watchdog timer handler.
   2854  */
   2855 static void
   2856 wm_watchdog(struct ifnet *ifp)
   2857 {
   2858 	int qid;
   2859 	struct wm_softc *sc = ifp->if_softc;
   2860 
   2861 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2862 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2863 
   2864 		wm_watchdog_txq(ifp, txq);
   2865 	}
   2866 
   2867 	/* Reset the interface. */
   2868 	(void) wm_init(ifp);
   2869 
   2870 	/*
    2871 	 * There is still some upper layer processing which calls
    2872 	 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   2873 	 */
   2874 	/* Try to get more packets going. */
   2875 	ifp->if_start(ifp);
   2876 }
   2877 
   2878 static void
   2879 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2880 {
   2881 	struct wm_softc *sc = ifp->if_softc;
   2882 
   2883 	/*
   2884 	 * Since we're using delayed interrupts, sweep up
   2885 	 * before we report an error.
   2886 	 */
   2887 	mutex_enter(txq->txq_lock);
   2888 	wm_txeof(sc, txq);
   2889 	mutex_exit(txq->txq_lock);
   2890 
   2891 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2892 #ifdef WM_DEBUG
   2893 		int i, j;
   2894 		struct wm_txsoft *txs;
   2895 #endif
   2896 		log(LOG_ERR,
   2897 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2898 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2899 		    txq->txq_next);
   2900 		ifp->if_oerrors++;
   2901 #ifdef WM_DEBUG
    2902 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2903 		    i = WM_NEXTTXS(txq, i)) {
    2904 			txs = &txq->txq_soft[i];
    2905 			printf("txs %d tx %d -> %d\n",
    2906 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2907 			for (j = txs->txs_firstdesc; ;
    2908 			    j = WM_NEXTTX(txq, j)) {
    2909 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2910 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2911 				printf("\t %#08x%08x\n",
    2912 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2913 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2914 				if (j == txs->txs_lastdesc)
    2915 					break;
    2916 			}
    2917 		}
   2918 #endif
   2919 	}
   2920 }
   2921 
   2922 /*
   2923  * wm_tick:
   2924  *
   2925  *	One second timer, used to check link status, sweep up
   2926  *	completed transmit jobs, etc.
   2927  */
   2928 static void
   2929 wm_tick(void *arg)
   2930 {
   2931 	struct wm_softc *sc = arg;
   2932 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2933 #ifndef WM_MPSAFE
   2934 	int s = splnet();
   2935 #endif
   2936 
   2937 	WM_CORE_LOCK(sc);
   2938 
   2939 	if (sc->sc_core_stopping)
   2940 		goto out;
   2941 
   2942 	if (sc->sc_type >= WM_T_82542_2_1) {
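		/*
		 * Added note (not in the original source): these flow-control
		 * statistics registers are clear-on-read, so each CSR_READ
		 * below yields the count accumulated since the previous tick.
		 */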
   2943 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2947 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2948 	}
   2949 
   2950 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2951 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2952 	    + CSR_READ(sc, WMREG_CRCERRS)
   2953 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2954 	    + CSR_READ(sc, WMREG_SYMERRC)
   2955 	    + CSR_READ(sc, WMREG_RXERRC)
   2956 	    + CSR_READ(sc, WMREG_SEC)
   2957 	    + CSR_READ(sc, WMREG_CEXTERR)
   2958 	    + CSR_READ(sc, WMREG_RLEC);
   2959 	/*
    2960 	 * WMREG_RNBC is incremented when there are no available buffers in
    2961 	 * host memory. It does not count dropped packets, because the
    2962 	 * ethernet controller can still receive packets in that case if
    2963 	 * there is space in the PHY's FIFO.
    2964 	 *
    2965 	 * If you want to track the WMREG_RNBC count, you should use your
    2966 	 * own EVCNT instead of if_iqdrops.
   2967 	 */
   2968 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2969 
   2970 	if (sc->sc_flags & WM_F_HAS_MII)
   2971 		mii_tick(&sc->sc_mii);
   2972 	else if ((sc->sc_type >= WM_T_82575)
   2973 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2974 		wm_serdes_tick(sc);
   2975 	else
   2976 		wm_tbi_tick(sc);
   2977 
   2978 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2979 out:
   2980 	WM_CORE_UNLOCK(sc);
   2981 #ifndef WM_MPSAFE
   2982 	splx(s);
   2983 #endif
   2984 }
   2985 
   2986 static int
   2987 wm_ifflags_cb(struct ethercom *ec)
   2988 {
   2989 	struct ifnet *ifp = &ec->ec_if;
   2990 	struct wm_softc *sc = ifp->if_softc;
   2991 	int rc = 0;
   2992 
   2993 	WM_CORE_LOCK(sc);
   2994 
   2995 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2996 	sc->sc_if_flags = ifp->if_flags;
   2997 
   2998 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2999 		rc = ENETRESET;
   3000 		goto out;
   3001 	}
   3002 
   3003 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3004 		wm_set_filter(sc);
   3005 
   3006 	wm_set_vlan(sc);
   3007 
   3008 out:
   3009 	WM_CORE_UNLOCK(sc);
   3010 
   3011 	return rc;
   3012 }
   3013 
   3014 /*
   3015  * wm_ioctl:		[ifnet interface function]
   3016  *
   3017  *	Handle control requests from the operator.
   3018  */
   3019 static int
   3020 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3021 {
   3022 	struct wm_softc *sc = ifp->if_softc;
   3023 	struct ifreq *ifr = (struct ifreq *) data;
   3024 	struct ifaddr *ifa = (struct ifaddr *)data;
   3025 	struct sockaddr_dl *sdl;
   3026 	int s, error;
   3027 
   3028 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3029 		device_xname(sc->sc_dev), __func__));
   3030 
   3031 #ifndef WM_MPSAFE
   3032 	s = splnet();
   3033 #endif
   3034 	switch (cmd) {
   3035 	case SIOCSIFMEDIA:
   3036 	case SIOCGIFMEDIA:
   3037 		WM_CORE_LOCK(sc);
   3038 		/* Flow control requires full-duplex mode. */
   3039 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3040 		    (ifr->ifr_media & IFM_FDX) == 0)
   3041 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3042 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3043 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3044 				/* We can do both TXPAUSE and RXPAUSE. */
   3045 				ifr->ifr_media |=
   3046 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3047 			}
   3048 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3049 		}
   3050 		WM_CORE_UNLOCK(sc);
   3051 #ifdef WM_MPSAFE
   3052 		s = splnet();
   3053 #endif
   3054 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3055 #ifdef WM_MPSAFE
   3056 		splx(s);
   3057 #endif
   3058 		break;
   3059 	case SIOCINITIFADDR:
   3060 		WM_CORE_LOCK(sc);
   3061 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3062 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3063 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3064 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3065 			/* unicast address is first multicast entry */
   3066 			wm_set_filter(sc);
   3067 			error = 0;
   3068 			WM_CORE_UNLOCK(sc);
   3069 			break;
   3070 		}
   3071 		WM_CORE_UNLOCK(sc);
   3072 		/*FALLTHROUGH*/
   3073 	default:
   3074 #ifdef WM_MPSAFE
   3075 		s = splnet();
   3076 #endif
   3077 		/* It may call wm_start, so unlock here */
   3078 		error = ether_ioctl(ifp, cmd, data);
   3079 #ifdef WM_MPSAFE
   3080 		splx(s);
   3081 #endif
   3082 		if (error != ENETRESET)
   3083 			break;
   3084 
   3085 		error = 0;
   3086 
   3087 		if (cmd == SIOCSIFCAP) {
   3088 			error = (*ifp->if_init)(ifp);
   3089 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3090 			;
   3091 		else if (ifp->if_flags & IFF_RUNNING) {
   3092 			/*
   3093 			 * Multicast list has changed; set the hardware filter
   3094 			 * accordingly.
   3095 			 */
   3096 			WM_CORE_LOCK(sc);
   3097 			wm_set_filter(sc);
   3098 			WM_CORE_UNLOCK(sc);
   3099 		}
   3100 		break;
   3101 	}
   3102 
   3103 #ifndef WM_MPSAFE
   3104 	splx(s);
   3105 #endif
   3106 	return error;
   3107 }
   3108 
   3109 /* MAC address related */
   3110 
   3111 /*
    3112  * Get the offset of the MAC address and return it.
    3113  * If an error occurred, use offset 0.
   3114  */
   3115 static uint16_t
   3116 wm_check_alt_mac_addr(struct wm_softc *sc)
   3117 {
   3118 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3119 	uint16_t offset = NVM_OFF_MACADDR;
   3120 
   3121 	/* Try to read alternative MAC address pointer */
   3122 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3123 		return 0;
   3124 
    3125 	/* Check whether the pointer is valid. */
   3126 	if ((offset == 0x0000) || (offset == 0xffff))
   3127 		return 0;
   3128 
   3129 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3130 	/*
    3131 	 * Check whether the alternative MAC address is valid or not.
    3132 	 * Some cards have a non-0xffff pointer but don't actually use
    3133 	 * an alternative MAC address.
   3134 	 *
   3135 	 * Check whether the broadcast bit is set or not.
   3136 	 */
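	/*
	 * Added note (not in the original source): myea[0] holds the first
	 * two MAC octets LSB-first, so (myea[0] & 0xff) is octet 0, whose
	 * 0x01 bit is the multicast/broadcast bit; a valid station address
	 * must have it clear.
	 */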
   3137 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3138 		if (((myea[0] & 0xff) & 0x01) == 0)
   3139 			return offset; /* Found */
   3140 
   3141 	/* Not found */
   3142 	return 0;
   3143 }
   3144 
   3145 static int
   3146 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3147 {
   3148 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3149 	uint16_t offset = NVM_OFF_MACADDR;
   3150 	int do_invert = 0;
   3151 
   3152 	switch (sc->sc_type) {
   3153 	case WM_T_82580:
   3154 	case WM_T_I350:
   3155 	case WM_T_I354:
   3156 		/* EEPROM Top Level Partitioning */
   3157 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3158 		break;
   3159 	case WM_T_82571:
   3160 	case WM_T_82575:
   3161 	case WM_T_82576:
   3162 	case WM_T_80003:
   3163 	case WM_T_I210:
   3164 	case WM_T_I211:
   3165 		offset = wm_check_alt_mac_addr(sc);
   3166 		if (offset == 0)
   3167 			if ((sc->sc_funcid & 0x01) == 1)
   3168 				do_invert = 1;
   3169 		break;
   3170 	default:
   3171 		if ((sc->sc_funcid & 0x01) == 1)
   3172 			do_invert = 1;
   3173 		break;
   3174 	}
   3175 
   3176 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3177 		goto bad;
   3178 
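	/*
	 * Each 16-bit NVM word stores two MAC octets LSB-first; e.g. a
	 * (hypothetical) myea[0] of 0x2211 yields enaddr[0] = 0x11 and
	 * enaddr[1] = 0x22.
	 */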
   3179 	enaddr[0] = myea[0] & 0xff;
   3180 	enaddr[1] = myea[0] >> 8;
   3181 	enaddr[2] = myea[1] & 0xff;
   3182 	enaddr[3] = myea[1] >> 8;
   3183 	enaddr[4] = myea[2] & 0xff;
   3184 	enaddr[5] = myea[2] >> 8;
   3185 
   3186 	/*
   3187 	 * Toggle the LSB of the MAC address on the second port
   3188 	 * of some dual port cards.
   3189 	 */
   3190 	if (do_invert != 0)
   3191 		enaddr[5] ^= 1;
   3192 
   3193 	return 0;
   3194 
   3195  bad:
   3196 	return -1;
   3197 }
   3198 
   3199 /*
   3200  * wm_set_ral:
   3201  *
    3202  *	Set an entry in the receive address list.
   3203  */
   3204 static void
   3205 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3206 {
   3207 	uint32_t ral_lo, ral_hi;
   3208 
   3209 	if (enaddr != NULL) {
   3210 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3211 		    (enaddr[3] << 24);
   3212 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3213 		ral_hi |= RAL_AV;
   3214 	} else {
   3215 		ral_lo = 0;
   3216 		ral_hi = 0;
   3217 	}
   3218 
   3219 	if (sc->sc_type >= WM_T_82544) {
   3220 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3221 		    ral_lo);
   3222 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3223 		    ral_hi);
   3224 	} else {
   3225 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3226 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3227 	}
   3228 }
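/*
 * Worked example (not in the original source): for the station address
 * 00:11:22:33:44:55, wm_set_ral() packs ral_lo = 0x33221100 and
 * ral_hi = 0x5544 | RAL_AV, i.e. the octets are stored LSB-first with
 * the Address Valid bit set in the high register.
 */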
   3229 
   3230 /*
   3231  * wm_mchash:
   3232  *
   3233  *	Compute the hash of the multicast address for the 4096-bit
   3234  *	multicast filter.
   3235  */
   3236 static uint32_t
   3237 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3238 {
   3239 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3240 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3241 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3242 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3243 	uint32_t hash;
   3244 
   3245 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3246 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3247 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3248 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3249 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3250 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3251 		return (hash & 0x3ff);
   3252 	}
   3253 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3254 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3255 
   3256 	return (hash & 0xfff);
   3257 }
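/*
 * Worked example (not in the original source): for the multicast address
 * 01:00:5e:00:00:01 with sc_mchash_type 0 on a non-ICH/PCH chip,
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter() sets
 * bit 16 (0x010 & 0x1f) in MTA word 0 (0x010 >> 5).
 */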
   3258 
   3259 /*
   3260  * wm_set_filter:
   3261  *
   3262  *	Set up the receive filter.
   3263  */
   3264 static void
   3265 wm_set_filter(struct wm_softc *sc)
   3266 {
   3267 	struct ethercom *ec = &sc->sc_ethercom;
   3268 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3269 	struct ether_multi *enm;
   3270 	struct ether_multistep step;
   3271 	bus_addr_t mta_reg;
   3272 	uint32_t hash, reg, bit;
   3273 	int i, size, ralmax;
   3274 
   3275 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3276 		device_xname(sc->sc_dev), __func__));
   3277 
   3278 	if (sc->sc_type >= WM_T_82544)
   3279 		mta_reg = WMREG_CORDOVA_MTA;
   3280 	else
   3281 		mta_reg = WMREG_MTA;
   3282 
   3283 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3284 
   3285 	if (ifp->if_flags & IFF_BROADCAST)
   3286 		sc->sc_rctl |= RCTL_BAM;
   3287 	if (ifp->if_flags & IFF_PROMISC) {
   3288 		sc->sc_rctl |= RCTL_UPE;
   3289 		goto allmulti;
   3290 	}
   3291 
   3292 	/*
   3293 	 * Set the station address in the first RAL slot, and
   3294 	 * clear the remaining slots.
   3295 	 */
   3296 	if (sc->sc_type == WM_T_ICH8)
    3297 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3298 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3299 	    || (sc->sc_type == WM_T_PCH))
   3300 		size = WM_RAL_TABSIZE_ICH8;
   3301 	else if (sc->sc_type == WM_T_PCH2)
   3302 		size = WM_RAL_TABSIZE_PCH2;
    3303 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3304 		size = WM_RAL_TABSIZE_PCH_LPT;
   3305 	else if (sc->sc_type == WM_T_82575)
   3306 		size = WM_RAL_TABSIZE_82575;
   3307 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3308 		size = WM_RAL_TABSIZE_82576;
   3309 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3310 		size = WM_RAL_TABSIZE_I350;
   3311 	else
   3312 		size = WM_RAL_TABSIZE;
   3313 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3314 
   3315 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3316 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3317 		switch (i) {
   3318 		case 0:
   3319 			/* We can use all entries */
   3320 			ralmax = size;
   3321 			break;
   3322 		case 1:
   3323 			/* Only RAR[0] */
   3324 			ralmax = 1;
   3325 			break;
   3326 		default:
   3327 			/* available SHRA + RAR[0] */
   3328 			ralmax = i + 1;
   3329 		}
   3330 	} else
   3331 		ralmax = size;
   3332 	for (i = 1; i < size; i++) {
   3333 		if (i < ralmax)
   3334 			wm_set_ral(sc, NULL, i);
   3335 	}
   3336 
   3337 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3338 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3339 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3340 	    || (sc->sc_type == WM_T_PCH_SPT))
   3341 		size = WM_ICH8_MC_TABSIZE;
   3342 	else
   3343 		size = WM_MC_TABSIZE;
   3344 	/* Clear out the multicast table. */
   3345 	for (i = 0; i < size; i++)
   3346 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3347 
   3348 	ETHER_LOCK(ec);
   3349 	ETHER_FIRST_MULTI(step, ec, enm);
   3350 	while (enm != NULL) {
   3351 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3352 			ETHER_UNLOCK(ec);
   3353 			/*
   3354 			 * We must listen to a range of multicast addresses.
   3355 			 * For now, just accept all multicasts, rather than
   3356 			 * trying to set only those filter bits needed to match
   3357 			 * the range.  (At this time, the only use of address
   3358 			 * ranges is for IP multicast routing, for which the
   3359 			 * range is big enough to require all bits set.)
   3360 			 */
   3361 			goto allmulti;
   3362 		}
   3363 
   3364 		hash = wm_mchash(sc, enm->enm_addrlo);
   3365 
   3366 		reg = (hash >> 5);
   3367 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3368 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3369 		    || (sc->sc_type == WM_T_PCH2)
   3370 		    || (sc->sc_type == WM_T_PCH_LPT)
   3371 		    || (sc->sc_type == WM_T_PCH_SPT))
   3372 			reg &= 0x1f;
   3373 		else
   3374 			reg &= 0x7f;
   3375 		bit = hash & 0x1f;
   3376 
   3377 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3378 		hash |= 1U << bit;
   3379 
   3380 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3381 			/*
    3382 			 * 82544 Errata 9: Certain registers cannot be written
   3383 			 * with particular alignments in PCI-X bus operation
   3384 			 * (FCAH, MTA and VFTA).
   3385 			 */
   3386 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3387 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3388 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3389 		} else
   3390 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3391 
   3392 		ETHER_NEXT_MULTI(step, enm);
   3393 	}
   3394 	ETHER_UNLOCK(ec);
   3395 
   3396 	ifp->if_flags &= ~IFF_ALLMULTI;
   3397 	goto setit;
   3398 
   3399  allmulti:
   3400 	ifp->if_flags |= IFF_ALLMULTI;
   3401 	sc->sc_rctl |= RCTL_MPE;
   3402 
   3403  setit:
   3404 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3405 }
   3406 
   3407 /* Reset and init related */
   3408 
   3409 static void
   3410 wm_set_vlan(struct wm_softc *sc)
   3411 {
   3412 
   3413 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3414 		device_xname(sc->sc_dev), __func__));
   3415 
   3416 	/* Deal with VLAN enables. */
   3417 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3418 		sc->sc_ctrl |= CTRL_VME;
   3419 	else
   3420 		sc->sc_ctrl &= ~CTRL_VME;
   3421 
   3422 	/* Write the control registers. */
   3423 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3424 }
   3425 
   3426 static void
   3427 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3428 {
   3429 	uint32_t gcr;
   3430 	pcireg_t ctrl2;
   3431 
   3432 	gcr = CSR_READ(sc, WMREG_GCR);
   3433 
   3434 	/* Only take action if timeout value is defaulted to 0 */
   3435 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3436 		goto out;
   3437 
   3438 	if ((gcr & GCR_CAP_VER2) == 0) {
   3439 		gcr |= GCR_CMPL_TMOUT_10MS;
   3440 		goto out;
   3441 	}
   3442 
   3443 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3444 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3445 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3446 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3447 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3448 
   3449 out:
   3450 	/* Disable completion timeout resend */
   3451 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3452 
   3453 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3454 }
   3455 
   3456 void
   3457 wm_get_auto_rd_done(struct wm_softc *sc)
   3458 {
   3459 	int i;
   3460 
   3461 	/* wait for eeprom to reload */
   3462 	switch (sc->sc_type) {
   3463 	case WM_T_82571:
   3464 	case WM_T_82572:
   3465 	case WM_T_82573:
   3466 	case WM_T_82574:
   3467 	case WM_T_82583:
   3468 	case WM_T_82575:
   3469 	case WM_T_82576:
   3470 	case WM_T_82580:
   3471 	case WM_T_I350:
   3472 	case WM_T_I354:
   3473 	case WM_T_I210:
   3474 	case WM_T_I211:
   3475 	case WM_T_80003:
   3476 	case WM_T_ICH8:
   3477 	case WM_T_ICH9:
   3478 		for (i = 0; i < 10; i++) {
   3479 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3480 				break;
   3481 			delay(1000);
   3482 		}
   3483 		if (i == 10) {
   3484 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3485 			    "complete\n", device_xname(sc->sc_dev));
   3486 		}
   3487 		break;
   3488 	default:
   3489 		break;
   3490 	}
   3491 }
   3492 
   3493 void
   3494 wm_lan_init_done(struct wm_softc *sc)
   3495 {
   3496 	uint32_t reg = 0;
   3497 	int i;
   3498 
   3499 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3500 		device_xname(sc->sc_dev), __func__));
   3501 
   3502 	/* Wait for eeprom to reload */
   3503 	switch (sc->sc_type) {
   3504 	case WM_T_ICH10:
   3505 	case WM_T_PCH:
   3506 	case WM_T_PCH2:
   3507 	case WM_T_PCH_LPT:
   3508 	case WM_T_PCH_SPT:
   3509 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3510 			reg = CSR_READ(sc, WMREG_STATUS);
   3511 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3512 				break;
   3513 			delay(100);
   3514 		}
   3515 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3516 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3517 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3518 		}
   3519 		break;
   3520 	default:
   3521 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3522 		    __func__);
   3523 		break;
   3524 	}
   3525 
   3526 	reg &= ~STATUS_LAN_INIT_DONE;
   3527 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3528 }
   3529 
   3530 void
   3531 wm_get_cfg_done(struct wm_softc *sc)
   3532 {
   3533 	int mask;
   3534 	uint32_t reg;
   3535 	int i;
   3536 
   3537 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3538 		device_xname(sc->sc_dev), __func__));
   3539 
   3540 	/* Wait for eeprom to reload */
   3541 	switch (sc->sc_type) {
   3542 	case WM_T_82542_2_0:
   3543 	case WM_T_82542_2_1:
   3544 		/* null */
   3545 		break;
   3546 	case WM_T_82543:
   3547 	case WM_T_82544:
   3548 	case WM_T_82540:
   3549 	case WM_T_82545:
   3550 	case WM_T_82545_3:
   3551 	case WM_T_82546:
   3552 	case WM_T_82546_3:
   3553 	case WM_T_82541:
   3554 	case WM_T_82541_2:
   3555 	case WM_T_82547:
   3556 	case WM_T_82547_2:
   3557 	case WM_T_82573:
   3558 	case WM_T_82574:
   3559 	case WM_T_82583:
   3560 		/* generic */
   3561 		delay(10*1000);
   3562 		break;
   3563 	case WM_T_80003:
   3564 	case WM_T_82571:
   3565 	case WM_T_82572:
   3566 	case WM_T_82575:
   3567 	case WM_T_82576:
   3568 	case WM_T_82580:
   3569 	case WM_T_I350:
   3570 	case WM_T_I354:
   3571 	case WM_T_I210:
   3572 	case WM_T_I211:
   3573 		if (sc->sc_type == WM_T_82571) {
   3574 			/* Only 82571 shares port 0 */
   3575 			mask = EEMNGCTL_CFGDONE_0;
   3576 		} else
   3577 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3578 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3579 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3580 				break;
   3581 			delay(1000);
   3582 		}
   3583 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3584 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3585 				device_xname(sc->sc_dev), __func__));
   3586 		}
   3587 		break;
   3588 	case WM_T_ICH8:
   3589 	case WM_T_ICH9:
   3590 	case WM_T_ICH10:
   3591 	case WM_T_PCH:
   3592 	case WM_T_PCH2:
   3593 	case WM_T_PCH_LPT:
   3594 	case WM_T_PCH_SPT:
   3595 		delay(10*1000);
   3596 		if (sc->sc_type >= WM_T_ICH10)
   3597 			wm_lan_init_done(sc);
   3598 		else
   3599 			wm_get_auto_rd_done(sc);
   3600 
   3601 		reg = CSR_READ(sc, WMREG_STATUS);
   3602 		if ((reg & STATUS_PHYRA) != 0)
   3603 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3604 		break;
   3605 	default:
   3606 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3607 		    __func__);
   3608 		break;
   3609 	}
   3610 }
   3611 
   3612 /* Init hardware bits */
   3613 void
   3614 wm_initialize_hardware_bits(struct wm_softc *sc)
   3615 {
   3616 	uint32_t tarc0, tarc1, reg;
   3617 
   3618 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3619 		device_xname(sc->sc_dev), __func__));
   3620 
   3621 	/* For 82571 variant, 80003 and ICHs */
   3622 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3623 	    || (sc->sc_type >= WM_T_80003)) {
   3624 
   3625 		/* Transmit Descriptor Control 0 */
   3626 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3627 		reg |= TXDCTL_COUNT_DESC;
   3628 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3629 
   3630 		/* Transmit Descriptor Control 1 */
   3631 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3632 		reg |= TXDCTL_COUNT_DESC;
   3633 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3634 
   3635 		/* TARC0 */
   3636 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3637 		switch (sc->sc_type) {
   3638 		case WM_T_82571:
   3639 		case WM_T_82572:
   3640 		case WM_T_82573:
   3641 		case WM_T_82574:
   3642 		case WM_T_82583:
   3643 		case WM_T_80003:
   3644 			/* Clear bits 30..27 */
   3645 			tarc0 &= ~__BITS(30, 27);
   3646 			break;
   3647 		default:
   3648 			break;
   3649 		}
   3650 
   3651 		switch (sc->sc_type) {
   3652 		case WM_T_82571:
   3653 		case WM_T_82572:
   3654 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3655 
   3656 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3657 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3658 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3659 			/* 8257[12] Errata No.7 */
    3660 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3661 
   3662 			/* TARC1 bit 28 */
   3663 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3664 				tarc1 &= ~__BIT(28);
   3665 			else
   3666 				tarc1 |= __BIT(28);
   3667 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3668 
   3669 			/*
   3670 			 * 8257[12] Errata No.13
    3671 			 * Disable Dynamic Clock Gating.
   3672 			 */
   3673 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3674 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3675 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3676 			break;
   3677 		case WM_T_82573:
   3678 		case WM_T_82574:
   3679 		case WM_T_82583:
   3680 			if ((sc->sc_type == WM_T_82574)
   3681 			    || (sc->sc_type == WM_T_82583))
   3682 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3683 
   3684 			/* Extended Device Control */
   3685 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3686 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3687 			reg |= __BIT(22);	/* Set bit 22 */
   3688 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3689 
   3690 			/* Device Control */
   3691 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3692 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3693 
   3694 			/* PCIe Control Register */
   3695 			/*
   3696 			 * 82573 Errata (unknown).
   3697 			 *
   3698 			 * 82574 Errata 25 and 82583 Errata 12
   3699 			 * "Dropped Rx Packets":
    3700 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
   3701 			 */
   3702 			reg = CSR_READ(sc, WMREG_GCR);
   3703 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3704 			CSR_WRITE(sc, WMREG_GCR, reg);
   3705 
   3706 			if ((sc->sc_type == WM_T_82574)
   3707 			    || (sc->sc_type == WM_T_82583)) {
   3708 				/*
    3709 				 * The documentation says this bit must be
    3710 				 * set for proper operation.
   3711 				 */
   3712 				reg = CSR_READ(sc, WMREG_GCR);
   3713 				reg |= __BIT(22);
   3714 				CSR_WRITE(sc, WMREG_GCR, reg);
   3715 
   3716 				/*
    3717 				 * Apply the workaround for the hardware
    3718 				 * errata documented in the errata docs. This
    3719 				 * fixes an issue where some error-prone or
    3720 				 * unreliable PCIe completions occur,
    3721 				 * particularly with ASPM enabled. Without the
    3722 				 * fix, the issue can cause Tx timeouts.
   3723 				 */
   3724 				reg = CSR_READ(sc, WMREG_GCR2);
   3725 				reg |= __BIT(0);
   3726 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3727 			}
   3728 			break;
   3729 		case WM_T_80003:
   3730 			/* TARC0 */
   3731 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3732 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3733 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3734 
   3735 			/* TARC1 bit 28 */
   3736 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3737 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3738 				tarc1 &= ~__BIT(28);
   3739 			else
   3740 				tarc1 |= __BIT(28);
   3741 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3742 			break;
   3743 		case WM_T_ICH8:
   3744 		case WM_T_ICH9:
   3745 		case WM_T_ICH10:
   3746 		case WM_T_PCH:
   3747 		case WM_T_PCH2:
   3748 		case WM_T_PCH_LPT:
   3749 		case WM_T_PCH_SPT:
   3750 			/* TARC0 */
   3751 			if ((sc->sc_type == WM_T_ICH8)
   3752 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3753 				/* Set TARC0 bits 29 and 28 */
   3754 				tarc0 |= __BITS(29, 28);
   3755 			}
   3756 			/* Set TARC0 bits 23,24,26,27 */
   3757 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3758 
   3759 			/* CTRL_EXT */
   3760 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3761 			reg |= __BIT(22);	/* Set bit 22 */
   3762 			/*
   3763 			 * Enable PHY low-power state when MAC is at D3
   3764 			 * w/o WoL
   3765 			 */
   3766 			if (sc->sc_type >= WM_T_PCH)
   3767 				reg |= CTRL_EXT_PHYPDEN;
   3768 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3769 
   3770 			/* TARC1 */
   3771 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3772 			/* bit 28 */
   3773 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3774 				tarc1 &= ~__BIT(28);
   3775 			else
   3776 				tarc1 |= __BIT(28);
   3777 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3778 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3779 
   3780 			/* Device Status */
   3781 			if (sc->sc_type == WM_T_ICH8) {
   3782 				reg = CSR_READ(sc, WMREG_STATUS);
   3783 				reg &= ~__BIT(31);
   3784 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3785 
   3786 			}
   3787 
   3788 			/* IOSFPC */
   3789 			if (sc->sc_type == WM_T_PCH_SPT) {
   3790 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3791 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3792 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3793 			}
   3794 			/*
    3795 			 * To work around a descriptor data corruption issue
    3796 			 * during NFS v2 UDP traffic, just disable the NFS
    3797 			 * filtering capability.
   3798 			 */
   3799 			reg = CSR_READ(sc, WMREG_RFCTL);
   3800 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3801 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3802 			break;
   3803 		default:
   3804 			break;
   3805 		}
   3806 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3807 
   3808 		switch (sc->sc_type) {
   3809 		/*
   3810 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3811 		 * Avoid RSS Hash Value bug.
   3812 		 */
   3813 		case WM_T_82571:
   3814 		case WM_T_82572:
   3815 		case WM_T_82573:
   3816 		case WM_T_80003:
   3817 		case WM_T_ICH8:
   3818 			reg = CSR_READ(sc, WMREG_RFCTL);
   3819 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3820 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3821 			break;
   3822 		case WM_T_82574:
    3823 			/* Use extended Rx descriptors. */
   3824 			reg = CSR_READ(sc, WMREG_RFCTL);
   3825 			reg |= WMREG_RFCTL_EXSTEN;
   3826 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3827 			break;
   3828 		default:
   3829 			break;
   3830 		}
   3831 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3832 		/*
   3833 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3834 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3835 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3836 		 * Correctly by the Device"
   3837 		 *
   3838 		 * I354(C2000) Errata AVR53:
   3839 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3840 		 * Hang"
   3841 		 */
   3842 		reg = CSR_READ(sc, WMREG_RFCTL);
   3843 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3844 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3845 	}
   3846 }
   3847 
   3848 static uint32_t
   3849 wm_rxpbs_adjust_82580(uint32_t val)
   3850 {
   3851 	uint32_t rv = 0;
   3852 
   3853 	if (val < __arraycount(wm_82580_rxpbs_table))
   3854 		rv = wm_82580_rxpbs_table[val];
   3855 
   3856 	return rv;
   3857 }
   3858 
   3859 /*
   3860  * wm_reset_phy:
   3861  *
   3862  *	generic PHY reset function.
   3863  *	Same as e1000_phy_hw_reset_generic()
   3864  */
   3865 static void
   3866 wm_reset_phy(struct wm_softc *sc)
   3867 {
   3868 	uint32_t reg;
   3869 
   3870 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3871 		device_xname(sc->sc_dev), __func__));
   3872 	if (wm_phy_resetisblocked(sc))
   3873 		return;
   3874 
   3875 	sc->phy.acquire(sc);
   3876 
   3877 	reg = CSR_READ(sc, WMREG_CTRL);
   3878 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3879 	CSR_WRITE_FLUSH(sc);
   3880 
   3881 	delay(sc->phy.reset_delay_us);
   3882 
   3883 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3884 	CSR_WRITE_FLUSH(sc);
   3885 
   3886 	delay(150);
   3887 
   3888 	sc->phy.release(sc);
   3889 
   3890 	wm_get_cfg_done(sc);
   3891 }
   3892 
   3893 static void
   3894 wm_flush_desc_rings(struct wm_softc *sc)
   3895 {
   3896 	pcireg_t preg;
   3897 	uint32_t reg;
   3898 	int nexttx;
   3899 
   3900 	/* First, disable MULR fix in FEXTNVM11 */
   3901 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3902 	reg |= FEXTNVM11_DIS_MULRFIX;
   3903 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3904 
   3905 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3906 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3907 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3908 		struct wm_txqueue *txq;
   3909 		wiseman_txdesc_t *txd;
   3910 
   3911 		/* TX */
   3912 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3913 		    device_xname(sc->sc_dev), preg, reg);
   3914 		reg = CSR_READ(sc, WMREG_TCTL);
   3915 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3916 
   3917 		txq = &sc->sc_queue[0].wmq_txq;
   3918 		nexttx = txq->txq_next;
   3919 		txd = &txq->txq_descs[nexttx];
   3920 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3921 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3922 		txd->wtx_fields.wtxu_status = 0;
   3923 		txd->wtx_fields.wtxu_options = 0;
   3924 		txd->wtx_fields.wtxu_vlan = 0;
   3925 
   3926 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3927 			BUS_SPACE_BARRIER_WRITE);
   3928 
   3929 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3930 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3931 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3932 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3933 		delay(250);
   3934 	}
   3935 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3936 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3937 		uint32_t rctl;
   3938 
   3939 		/* RX */
   3940 		printf("%s: Need RX flush (reg = %08x)\n",
   3941 		    device_xname(sc->sc_dev), preg);
   3942 		rctl = CSR_READ(sc, WMREG_RCTL);
   3943 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3944 		CSR_WRITE_FLUSH(sc);
   3945 		delay(150);
   3946 
   3947 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3948 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3949 		reg &= 0xffffc000;
   3950 		/*
   3951 		 * update thresholds: prefetch threshold to 31, host threshold
   3952 		 * to 1 and make sure the granularity is "descriptors" and not
   3953 		 * "cache lines"
   3954 		 */
   3955 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3956 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3957 
   3958 		/*
   3959 		 * momentarily enable the RX ring for the changes to take
   3960 		 * effect
   3961 		 */
   3962 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3963 		CSR_WRITE_FLUSH(sc);
   3964 		delay(150);
   3965 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3966 	}
   3967 }
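/*
 * Background note (not in the original source): on PCH_SPT (I219)
 * hardware a reset while DESCRING_STATUS_FLUSH_REQ is still pending can
 * hang the device, so the function above queues one dummy IFCS transmit
 * descriptor and briefly toggles RCTL_EN to drain pending descriptor
 * DMA before the reset proceeds.
 */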
   3968 
   3969 /*
   3970  * wm_reset:
   3971  *
   3972  *	Reset the i82542 chip.
   3973  */
   3974 static void
   3975 wm_reset(struct wm_softc *sc)
   3976 {
   3977 	int phy_reset = 0;
   3978 	int i, error = 0;
   3979 	uint32_t reg;
   3980 
   3981 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3982 		device_xname(sc->sc_dev), __func__));
   3983 	KASSERT(sc->sc_type != 0);
   3984 
   3985 	/*
   3986 	 * Allocate on-chip memory according to the MTU size.
   3987 	 * The Packet Buffer Allocation register must be written
   3988 	 * before the chip is reset.
   3989 	 */
   3990 	switch (sc->sc_type) {
   3991 	case WM_T_82547:
   3992 	case WM_T_82547_2:
   3993 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3994 		    PBA_22K : PBA_30K;
   3995 		for (i = 0; i < sc->sc_nqueues; i++) {
   3996 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3997 			txq->txq_fifo_head = 0;
   3998 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3999 			txq->txq_fifo_size =
   4000 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4001 			txq->txq_fifo_stall = 0;
   4002 		}
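		/*
		 * Illustrative note (not in the original source): with the
		 * PBA_30K setting, the on-chip TX FIFO gets the remaining
		 * (PBA_40K - PBA_30K) = 10KB of the 40KB packet buffer.
		 */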
   4003 		break;
   4004 	case WM_T_82571:
   4005 	case WM_T_82572:
    4006 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4007 	case WM_T_80003:
   4008 		sc->sc_pba = PBA_32K;
   4009 		break;
   4010 	case WM_T_82573:
   4011 		sc->sc_pba = PBA_12K;
   4012 		break;
   4013 	case WM_T_82574:
   4014 	case WM_T_82583:
   4015 		sc->sc_pba = PBA_20K;
   4016 		break;
   4017 	case WM_T_82576:
   4018 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4019 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4020 		break;
   4021 	case WM_T_82580:
   4022 	case WM_T_I350:
   4023 	case WM_T_I354:
   4024 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4025 		break;
   4026 	case WM_T_I210:
   4027 	case WM_T_I211:
   4028 		sc->sc_pba = PBA_34K;
   4029 		break;
   4030 	case WM_T_ICH8:
   4031 		/* Workaround for a bit corruption issue in FIFO memory */
   4032 		sc->sc_pba = PBA_8K;
   4033 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4034 		break;
   4035 	case WM_T_ICH9:
   4036 	case WM_T_ICH10:
   4037 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4038 		    PBA_14K : PBA_10K;
   4039 		break;
   4040 	case WM_T_PCH:
   4041 	case WM_T_PCH2:
   4042 	case WM_T_PCH_LPT:
   4043 	case WM_T_PCH_SPT:
   4044 		sc->sc_pba = PBA_26K;
   4045 		break;
   4046 	default:
   4047 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4048 		    PBA_40K : PBA_48K;
   4049 		break;
   4050 	}
   4051 	/*
   4052 	 * Only old or non-multiqueue devices have the PBA register
   4053 	 * XXX Need special handling for 82575.
   4054 	 */
   4055 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4056 	    || (sc->sc_type == WM_T_82575))
   4057 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4058 
   4059 	/* Prevent the PCI-E bus from sticking */
   4060 	if (sc->sc_flags & WM_F_PCIE) {
   4061 		int timeout = 800;
   4062 
   4063 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4064 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4065 
   4066 		while (timeout--) {
   4067 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4068 			    == 0)
   4069 				break;
   4070 			delay(100);
   4071 		}
   4072 	}
   4073 
   4074 	/* Set the completion timeout for interface */
   4075 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4076 	    || (sc->sc_type == WM_T_82580)
   4077 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4078 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4079 		wm_set_pcie_completion_timeout(sc);
   4080 
   4081 	/* Clear interrupt */
   4082 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4083 	if (wm_is_using_msix(sc)) {
   4084 		if (sc->sc_type != WM_T_82574) {
   4085 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4086 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4087 		} else {
   4088 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4089 		}
   4090 	}
   4091 
   4092 	/* Stop the transmit and receive processes. */
   4093 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4094 	sc->sc_rctl &= ~RCTL_EN;
   4095 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4096 	CSR_WRITE_FLUSH(sc);
   4097 
   4098 	/* XXX set_tbi_sbp_82543() */
   4099 
   4100 	delay(10*1000);
   4101 
   4102 	/* Must acquire the MDIO ownership before MAC reset */
   4103 	switch (sc->sc_type) {
   4104 	case WM_T_82573:
   4105 	case WM_T_82574:
   4106 	case WM_T_82583:
   4107 		error = wm_get_hw_semaphore_82573(sc);
   4108 		break;
   4109 	default:
   4110 		break;
   4111 	}
   4112 
   4113 	/*
   4114 	 * 82541 Errata 29? & 82547 Errata 28?
   4115 	 * See also the description about PHY_RST bit in CTRL register
   4116 	 * in 8254x_GBe_SDM.pdf.
   4117 	 */
   4118 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4119 		CSR_WRITE(sc, WMREG_CTRL,
   4120 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4121 		CSR_WRITE_FLUSH(sc);
   4122 		delay(5000);
   4123 	}
   4124 
   4125 	switch (sc->sc_type) {
   4126 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4127 	case WM_T_82541:
   4128 	case WM_T_82541_2:
   4129 	case WM_T_82547:
   4130 	case WM_T_82547_2:
   4131 		/*
   4132 		 * On some chipsets, a reset through a memory-mapped write
   4133 		 * cycle can cause the chip to reset before completing the
   4134 		 * write cycle.  This causes major headache that can be
   4135 		 * avoided by issuing the reset via indirect register writes
   4136 		 * through I/O space.
   4137 		 *
   4138 		 * So, if we successfully mapped the I/O BAR at attach time,
   4139 		 * use that.  Otherwise, try our luck with a memory-mapped
   4140 		 * reset.
   4141 		 */
   4142 		if (sc->sc_flags & WM_F_IOH_VALID)
   4143 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4144 		else
   4145 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4146 		break;
   4147 	case WM_T_82545_3:
   4148 	case WM_T_82546_3:
   4149 		/* Use the shadow control register on these chips. */
   4150 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4151 		break;
   4152 	case WM_T_80003:
   4153 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4154 		sc->phy.acquire(sc);
   4155 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4156 		sc->phy.release(sc);
   4157 		break;
   4158 	case WM_T_ICH8:
   4159 	case WM_T_ICH9:
   4160 	case WM_T_ICH10:
   4161 	case WM_T_PCH:
   4162 	case WM_T_PCH2:
   4163 	case WM_T_PCH_LPT:
   4164 	case WM_T_PCH_SPT:
   4165 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4166 		if (wm_phy_resetisblocked(sc) == false) {
   4167 			/*
   4168 			 * Gate automatic PHY configuration by hardware on
   4169 			 * non-managed 82579
   4170 			 */
   4171 			if ((sc->sc_type == WM_T_PCH2)
   4172 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4173 				== 0))
   4174 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4175 
   4176 			reg |= CTRL_PHY_RESET;
   4177 			phy_reset = 1;
   4178 		} else
   4179 			printf("XXX reset is blocked!!!\n");
   4180 		sc->phy.acquire(sc);
   4181 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4182 		/* Don't insert a completion barrier during reset */
   4183 		delay(20*1000);
   4184 		mutex_exit(sc->sc_ich_phymtx);
   4185 		break;
   4186 	case WM_T_82580:
   4187 	case WM_T_I350:
   4188 	case WM_T_I354:
   4189 	case WM_T_I210:
   4190 	case WM_T_I211:
   4191 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4192 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4193 			CSR_WRITE_FLUSH(sc);
   4194 		delay(5000);
   4195 		break;
   4196 	case WM_T_82542_2_0:
   4197 	case WM_T_82542_2_1:
   4198 	case WM_T_82543:
   4199 	case WM_T_82540:
   4200 	case WM_T_82545:
   4201 	case WM_T_82546:
   4202 	case WM_T_82571:
   4203 	case WM_T_82572:
   4204 	case WM_T_82573:
   4205 	case WM_T_82574:
   4206 	case WM_T_82575:
   4207 	case WM_T_82576:
   4208 	case WM_T_82583:
   4209 	default:
   4210 		/* Everything else can safely use the documented method. */
   4211 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4212 		break;
   4213 	}
   4214 
   4215 	/* Must release the MDIO ownership after MAC reset */
   4216 	switch (sc->sc_type) {
   4217 	case WM_T_82573:
   4218 	case WM_T_82574:
   4219 	case WM_T_82583:
   4220 		if (error == 0)
   4221 			wm_put_hw_semaphore_82573(sc);
   4222 		break;
   4223 	default:
   4224 		break;
   4225 	}
   4226 
   4227 	if (phy_reset != 0)
   4228 		wm_get_cfg_done(sc);
   4229 
   4230 	/* reload EEPROM */
   4231 	switch (sc->sc_type) {
   4232 	case WM_T_82542_2_0:
   4233 	case WM_T_82542_2_1:
   4234 	case WM_T_82543:
   4235 	case WM_T_82544:
   4236 		delay(10);
   4237 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4238 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4239 		CSR_WRITE_FLUSH(sc);
   4240 		delay(2000);
   4241 		break;
   4242 	case WM_T_82540:
   4243 	case WM_T_82545:
   4244 	case WM_T_82545_3:
   4245 	case WM_T_82546:
   4246 	case WM_T_82546_3:
   4247 		delay(5*1000);
   4248 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4249 		break;
   4250 	case WM_T_82541:
   4251 	case WM_T_82541_2:
   4252 	case WM_T_82547:
   4253 	case WM_T_82547_2:
   4254 		delay(20000);
   4255 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4256 		break;
   4257 	case WM_T_82571:
   4258 	case WM_T_82572:
   4259 	case WM_T_82573:
   4260 	case WM_T_82574:
   4261 	case WM_T_82583:
   4262 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4263 			delay(10);
   4264 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4265 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4266 			CSR_WRITE_FLUSH(sc);
   4267 		}
   4268 		/* check EECD_EE_AUTORD */
   4269 		wm_get_auto_rd_done(sc);
   4270 		/*
    4271 		 * PHY configuration from NVM starts just after EECD_AUTO_RD
   4272 		 * is set.
   4273 		 */
   4274 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4275 		    || (sc->sc_type == WM_T_82583))
   4276 			delay(25*1000);
   4277 		break;
   4278 	case WM_T_82575:
   4279 	case WM_T_82576:
   4280 	case WM_T_82580:
   4281 	case WM_T_I350:
   4282 	case WM_T_I354:
   4283 	case WM_T_I210:
   4284 	case WM_T_I211:
   4285 	case WM_T_80003:
   4286 		/* check EECD_EE_AUTORD */
   4287 		wm_get_auto_rd_done(sc);
   4288 		break;
   4289 	case WM_T_ICH8:
   4290 	case WM_T_ICH9:
   4291 	case WM_T_ICH10:
   4292 	case WM_T_PCH:
   4293 	case WM_T_PCH2:
   4294 	case WM_T_PCH_LPT:
   4295 	case WM_T_PCH_SPT:
   4296 		break;
   4297 	default:
   4298 		panic("%s: unknown type\n", __func__);
   4299 	}
   4300 
   4301 	/* Check whether EEPROM is present or not */
   4302 	switch (sc->sc_type) {
   4303 	case WM_T_82575:
   4304 	case WM_T_82576:
   4305 	case WM_T_82580:
   4306 	case WM_T_I350:
   4307 	case WM_T_I354:
   4308 	case WM_T_ICH8:
   4309 	case WM_T_ICH9:
   4310 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4311 			/* Not found */
   4312 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4313 			if (sc->sc_type == WM_T_82575)
   4314 				wm_reset_init_script_82575(sc);
   4315 		}
   4316 		break;
   4317 	default:
   4318 		break;
   4319 	}
   4320 
   4321 	if ((sc->sc_type == WM_T_82580)
   4322 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4323 		/* clear global device reset status bit */
   4324 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4325 	}
   4326 
   4327 	/* Clear any pending interrupt events. */
   4328 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4329 	reg = CSR_READ(sc, WMREG_ICR);
   4330 	if (wm_is_using_msix(sc)) {
   4331 		if (sc->sc_type != WM_T_82574) {
   4332 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4333 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4334 		} else
   4335 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4336 	}
   4337 
   4338 	/* reload sc_ctrl */
   4339 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4340 
   4341 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4342 		wm_set_eee_i350(sc);
   4343 
    4344 	/* Clear the host wakeup bit after LCD reset */
   4345 	if (sc->sc_type >= WM_T_PCH) {
   4346 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4347 		    BM_PORT_GEN_CFG);
   4348 		reg &= ~BM_WUC_HOST_WU_BIT;
   4349 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4350 		    BM_PORT_GEN_CFG, reg);
   4351 	}
   4352 
   4353 	/*
   4354 	 * For PCH, this write will make sure that any noise will be detected
   4355 	 * as a CRC error and be dropped rather than show up as a bad packet
   4356 	 * to the DMA engine
   4357 	 */
   4358 	if (sc->sc_type == WM_T_PCH)
   4359 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4360 
   4361 	if (sc->sc_type >= WM_T_82544)
   4362 		CSR_WRITE(sc, WMREG_WUC, 0);
   4363 
   4364 	wm_reset_mdicnfg_82580(sc);
   4365 
   4366 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4367 		wm_pll_workaround_i210(sc);
   4368 }
   4369 
   4370 /*
   4371  * wm_add_rxbuf:
   4372  *
    4373  *	Add a receive buffer to the indicated descriptor.
   4374  */
   4375 static int
   4376 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4377 {
   4378 	struct wm_softc *sc = rxq->rxq_sc;
   4379 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4380 	struct mbuf *m;
   4381 	int error;
   4382 
   4383 	KASSERT(mutex_owned(rxq->rxq_lock));
   4384 
   4385 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4386 	if (m == NULL)
   4387 		return ENOBUFS;
   4388 
   4389 	MCLGET(m, M_DONTWAIT);
   4390 	if ((m->m_flags & M_EXT) == 0) {
   4391 		m_freem(m);
   4392 		return ENOBUFS;
   4393 	}
   4394 
   4395 	if (rxs->rxs_mbuf != NULL)
   4396 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4397 
   4398 	rxs->rxs_mbuf = m;
   4399 
   4400 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4401 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4402 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4403 	if (error) {
   4404 		/* XXX XXX XXX */
   4405 		aprint_error_dev(sc->sc_dev,
   4406 		    "unable to load rx DMA map %d, error = %d\n",
   4407 		    idx, error);
   4408 		panic("wm_add_rxbuf");
   4409 	}
   4410 
   4411 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4412 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4413 
   4414 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4415 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4416 			wm_init_rxdesc(rxq, idx);
   4417 	} else
   4418 		wm_init_rxdesc(rxq, idx);
   4419 
   4420 	return 0;
   4421 }
   4422 
   4423 /*
   4424  * wm_rxdrain:
   4425  *
   4426  *	Drain the receive queue.
   4427  */
   4428 static void
   4429 wm_rxdrain(struct wm_rxqueue *rxq)
   4430 {
   4431 	struct wm_softc *sc = rxq->rxq_sc;
   4432 	struct wm_rxsoft *rxs;
   4433 	int i;
   4434 
   4435 	KASSERT(mutex_owned(rxq->rxq_lock));
   4436 
   4437 	for (i = 0; i < WM_NRXDESC; i++) {
   4438 		rxs = &rxq->rxq_soft[i];
   4439 		if (rxs->rxs_mbuf != NULL) {
   4440 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4441 			m_freem(rxs->rxs_mbuf);
   4442 			rxs->rxs_mbuf = NULL;
   4443 		}
   4444 	}
   4445 }
   4446 
   4447 
   4448 /*
   4449  * XXX copy from FreeBSD's sys/net/rss_config.c
   4450  */
   4451 /*
   4452  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4453  * effectiveness may be limited by algorithm choice and available entropy
   4454  * during the boot.
   4455  *
   4456  * XXXRW: And that we don't randomize it yet!
   4457  *
   4458  * This is the default Microsoft RSS specification key which is also
   4459  * the Chelsio T5 firmware default key.
   4460  */
   4461 #define RSS_KEYSIZE 40
   4462 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4463 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4464 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4465 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4466 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4467 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4468 };
   4469 
   4470 /*
   4471  * Caller must pass an array of size sizeof(rss_key).
   4472  *
   4473  * XXX
    4474  * As if_ixgbe may use this function, it should not be an
    4475  * if_wm-specific function.
   4476  */
   4477 static void
   4478 wm_rss_getkey(uint8_t *key)
   4479 {
   4480 
   4481 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4482 }
   4483 
   4484 /*
   4485  * Setup registers for RSS.
   4486  *
    4487  * XXX no VMDq support yet
   4488  */
   4489 static void
   4490 wm_init_rss(struct wm_softc *sc)
   4491 {
   4492 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4493 	int i;
   4494 
   4495 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4496 
   4497 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4498 		int qid, reta_ent;
   4499 
    4500 		qid = i % sc->sc_nqueues;
    4501 		switch (sc->sc_type) {
   4502 		case WM_T_82574:
   4503 			reta_ent = __SHIFTIN(qid,
   4504 			    RETA_ENT_QINDEX_MASK_82574);
   4505 			break;
   4506 		case WM_T_82575:
   4507 			reta_ent = __SHIFTIN(qid,
   4508 			    RETA_ENT_QINDEX1_MASK_82575);
   4509 			break;
   4510 		default:
   4511 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4512 			break;
   4513 		}
   4514 
   4515 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4516 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4517 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4518 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4519 	}
   4520 
   4521 	wm_rss_getkey((uint8_t *)rss_key);
   4522 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4523 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4524 
   4525 	if (sc->sc_type == WM_T_82574)
   4526 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4527 	else
   4528 		mrqc = MRQC_ENABLE_RSS_MQ;
   4529 
   4530 	/*
   4531 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4532 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4533 	 */
   4534 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4535 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4536 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4537 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4538 
   4539 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4540 }
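/*
 * Illustrative note (not in the original source): with sc_nqueues == 2,
 * the loop above programs the RETA entries with the repeating queue
 * pattern 0, 1, 0, 1, ..., so hashed flows are spread evenly across
 * both queues.
 */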
   4541 
   4542 /*
    4543  * Adjust the TX and RX queue numbers which the system actually uses.
    4544  *
    4545  * The numbers are affected by the parameters below:
    4546  *     - The number of hardware queues
   4547  *     - The number of MSI-X vectors (= "nvectors" argument)
   4548  *     - ncpu
   4549  */
   4550 static void
   4551 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4552 {
   4553 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4554 
   4555 	if (nvectors < 2) {
   4556 		sc->sc_nqueues = 1;
   4557 		return;
   4558 	}
   4559 
    4560 	switch (sc->sc_type) {
   4561 	case WM_T_82572:
   4562 		hw_ntxqueues = 2;
   4563 		hw_nrxqueues = 2;
   4564 		break;
   4565 	case WM_T_82574:
   4566 		hw_ntxqueues = 2;
   4567 		hw_nrxqueues = 2;
   4568 		break;
   4569 	case WM_T_82575:
   4570 		hw_ntxqueues = 4;
   4571 		hw_nrxqueues = 4;
   4572 		break;
   4573 	case WM_T_82576:
   4574 		hw_ntxqueues = 16;
   4575 		hw_nrxqueues = 16;
   4576 		break;
   4577 	case WM_T_82580:
   4578 	case WM_T_I350:
   4579 	case WM_T_I354:
   4580 		hw_ntxqueues = 8;
   4581 		hw_nrxqueues = 8;
   4582 		break;
   4583 	case WM_T_I210:
   4584 		hw_ntxqueues = 4;
   4585 		hw_nrxqueues = 4;
   4586 		break;
   4587 	case WM_T_I211:
   4588 		hw_ntxqueues = 2;
   4589 		hw_nrxqueues = 2;
   4590 		break;
		/*
		 * Since the following Ethernet controllers do not support
		 * MSI-X, this driver does not use multiqueue on them:
		 *     - WM_T_80003
		 *     - WM_T_ICH8
		 *     - WM_T_ICH9
		 *     - WM_T_ICH10
		 *     - WM_T_PCH
		 *     - WM_T_PCH2
		 *     - WM_T_PCH_LPT
		 */
   4602 	default:
   4603 		hw_ntxqueues = 1;
   4604 		hw_nrxqueues = 1;
   4605 		break;
   4606 	}
   4607 
   4608 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4609 
	/*
	 * Since more queues than MSI-X vectors cannot improve scaling, we
	 * limit the number of queues actually used.
	 */
   4614 	if (nvectors < hw_nqueues + 1) {
   4615 		sc->sc_nqueues = nvectors - 1;
   4616 	} else {
   4617 		sc->sc_nqueues = hw_nqueues;
   4618 	}
   4619 
	/*
	 * Since more queues than CPUs cannot improve scaling, we limit
	 * the number of queues actually used.
	 */
   4624 	if (ncpu < sc->sc_nqueues)
   4625 		sc->sc_nqueues = ncpu;
   4626 }
   4627 
   4628 static inline bool
   4629 wm_is_using_msix(struct wm_softc *sc)
   4630 {
   4631 
   4632 	return (sc->sc_nintrs > 1);
   4633 }
   4634 
   4635 static inline bool
   4636 wm_is_using_multiqueue(struct wm_softc *sc)
   4637 {
   4638 
   4639 	return (sc->sc_nqueues > 1);
   4640 }
   4641 
   4642 static int
   4643 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4644 {
   4645 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4646 	wmq->wmq_id = qidx;
   4647 	wmq->wmq_intr_idx = intr_idx;
   4648 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4649 #ifdef WM_MPSAFE
   4650 	    | SOFTINT_MPSAFE
   4651 #endif
   4652 	    , wm_handle_queue, wmq);
   4653 	if (wmq->wmq_si != NULL)
   4654 		return 0;
   4655 
   4656 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4657 	    wmq->wmq_id);
   4658 
   4659 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4660 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4661 	return ENOMEM;
   4662 }
   4663 
/*
 * Both single-interrupt MSI and INTx can use this function.
 */
   4667 static int
   4668 wm_setup_legacy(struct wm_softc *sc)
   4669 {
   4670 	pci_chipset_tag_t pc = sc->sc_pc;
   4671 	const char *intrstr = NULL;
   4672 	char intrbuf[PCI_INTRSTR_LEN];
   4673 	int error;
   4674 
   4675 	error = wm_alloc_txrx_queues(sc);
   4676 	if (error) {
   4677 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4678 		    error);
   4679 		return ENOMEM;
   4680 	}
   4681 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4682 	    sizeof(intrbuf));
   4683 #ifdef WM_MPSAFE
   4684 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4685 #endif
   4686 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4687 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4688 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4690 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4691 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4692 		return ENOMEM;
   4693 	}
   4694 
   4695 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4696 	sc->sc_nintrs = 1;
   4697 
   4698 	return wm_softint_establish(sc, 0, 0);
   4699 }
   4700 
   4701 static int
   4702 wm_setup_msix(struct wm_softc *sc)
   4703 {
   4704 	void *vih;
   4705 	kcpuset_t *affinity;
   4706 	int qidx, error, intr_idx, txrx_established;
   4707 	pci_chipset_tag_t pc = sc->sc_pc;
   4708 	const char *intrstr = NULL;
   4709 	char intrbuf[PCI_INTRSTR_LEN];
   4710 	char intr_xname[INTRDEVNAMEBUF];
   4711 
   4712 	if (sc->sc_nqueues < ncpu) {
		/*
		 * To avoid other devices' interrupts, the affinity of Tx/Rx
		 * interrupts starts from CPU#1.
		 */
   4717 		sc->sc_affinity_offset = 1;
   4718 	} else {
		/*
		 * In this case, this device uses all CPUs, so we align the
		 * affinity cpu_index with the MSI-X vector number for
		 * readability.
		 */
   4723 		sc->sc_affinity_offset = 0;
   4724 	}
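	/*
	 * Sketch of the resulting layout (illustrative numbers): with
	 * 4 queues on an 8-CPU system, the TXRX vectors below are bound
	 * round-robin to CPUs 1..4, and the LINK vector keeps its
	 * default affinity.
	 */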
   4725 
   4726 	error = wm_alloc_txrx_queues(sc);
   4727 	if (error) {
   4728 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4729 		    error);
   4730 		return ENOMEM;
   4731 	}
   4732 
   4733 	kcpuset_create(&affinity, false);
   4734 	intr_idx = 0;
   4735 
   4736 	/*
   4737 	 * TX and RX
   4738 	 */
   4739 	txrx_established = 0;
   4740 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4741 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4742 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4743 
   4744 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4745 		    sizeof(intrbuf));
   4746 #ifdef WM_MPSAFE
   4747 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4748 		    PCI_INTR_MPSAFE, true);
   4749 #endif
   4750 		memset(intr_xname, 0, sizeof(intr_xname));
   4751 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4752 		    device_xname(sc->sc_dev), qidx);
   4753 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4754 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4755 		if (vih == NULL) {
   4756 			aprint_error_dev(sc->sc_dev,
   4757 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4758 			    intrstr ? " at " : "",
   4759 			    intrstr ? intrstr : "");
   4760 
   4761 			goto fail;
   4762 		}
   4763 		kcpuset_zero(affinity);
   4764 		/* Round-robin affinity */
   4765 		kcpuset_set(affinity, affinity_to);
   4766 		error = interrupt_distribute(vih, affinity, NULL);
   4767 		if (error == 0) {
   4768 			aprint_normal_dev(sc->sc_dev,
   4769 			    "for TX and RX interrupting at %s affinity to %u\n",
   4770 			    intrstr, affinity_to);
   4771 		} else {
   4772 			aprint_normal_dev(sc->sc_dev,
   4773 			    "for TX and RX interrupting at %s\n", intrstr);
   4774 		}
   4775 		sc->sc_ihs[intr_idx] = vih;
   4776 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4777 			goto fail;
   4778 		txrx_established++;
   4779 		intr_idx++;
   4780 	}
   4781 
   4782 	/*
   4783 	 * LINK
   4784 	 */
   4785 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4786 	    sizeof(intrbuf));
   4787 #ifdef WM_MPSAFE
   4788 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4789 #endif
   4790 	memset(intr_xname, 0, sizeof(intr_xname));
   4791 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4792 	    device_xname(sc->sc_dev));
   4793 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4794 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4795 	if (vih == NULL) {
   4796 		aprint_error_dev(sc->sc_dev,
   4797 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4798 		    intrstr ? " at " : "",
   4799 		    intrstr ? intrstr : "");
   4800 
   4801 		goto fail;
   4802 	}
	/* Keep the default affinity for the LINK interrupt. */
   4804 	aprint_normal_dev(sc->sc_dev,
   4805 	    "for LINK interrupting at %s\n", intrstr);
   4806 	sc->sc_ihs[intr_idx] = vih;
   4807 	sc->sc_link_intr_idx = intr_idx;
   4808 
   4809 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4810 	kcpuset_destroy(affinity);
   4811 	return 0;
   4812 
   4813  fail:
   4814 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4815 		struct wm_queue *wmq = &sc->sc_queue[qidx];
		pci_intr_disestablish(sc->sc_pc,
		    sc->sc_ihs[wmq->wmq_intr_idx]);
   4817 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4818 	}
   4819 
   4820 	kcpuset_destroy(affinity);
   4821 	return ENOMEM;
   4822 }
   4823 
   4824 static void
   4825 wm_turnon(struct wm_softc *sc)
   4826 {
   4827 	int i;
   4828 
   4829 	KASSERT(WM_CORE_LOCKED(sc));
   4830 
	/*
	 * Must unset the stopping flags in ascending order.
	 */
	for (i = 0; i < sc->sc_nqueues; i++) {
   4835 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4836 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4837 
   4838 		mutex_enter(txq->txq_lock);
   4839 		txq->txq_stopping = false;
   4840 		mutex_exit(txq->txq_lock);
   4841 
   4842 		mutex_enter(rxq->rxq_lock);
   4843 		rxq->rxq_stopping = false;
   4844 		mutex_exit(rxq->rxq_lock);
   4845 	}
   4846 
   4847 	sc->sc_core_stopping = false;
   4848 }
   4849 
   4850 static void
   4851 wm_turnoff(struct wm_softc *sc)
   4852 {
   4853 	int i;
   4854 
   4855 	KASSERT(WM_CORE_LOCKED(sc));
   4856 
   4857 	sc->sc_core_stopping = true;
   4858 
	/*
	 * Must set the stopping flags in ascending order.
	 */
	for (i = 0; i < sc->sc_nqueues; i++) {
   4863 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4864 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4865 
   4866 		mutex_enter(rxq->rxq_lock);
   4867 		rxq->rxq_stopping = true;
   4868 		mutex_exit(rxq->rxq_lock);
   4869 
   4870 		mutex_enter(txq->txq_lock);
   4871 		txq->txq_stopping = true;
   4872 		mutex_exit(txq->txq_lock);
   4873 	}
   4874 }
   4875 
/*
 * Write the interrupt interval value to the ITR or EITR register.
 */
   4879 static void
   4880 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4881 {
   4882 
   4883 	if (!wmq->wmq_set_itr)
   4884 		return;
   4885 
   4886 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4887 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4888 
		/*
		 * The 82575 doesn't have the CNT_INGR field,
		 * so overwrite the counter field in software.
		 */
   4893 		if (sc->sc_type == WM_T_82575)
   4894 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4895 		else
   4896 			eitr |= EITR_CNT_INGR;
   4897 
   4898 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4899 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
		/*
		 * The 82574 has both ITR and EITR. Set EITR when we use
		 * the multiqueue function with MSI-X.
		 */
   4904 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4905 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4906 	} else {
   4907 		KASSERT(wmq->wmq_id == 0);
   4908 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4909 	}
   4910 
   4911 	wmq->wmq_set_itr = false;
   4912 }
   4913 
/*
 * TODO
 * The dynamic ITR calculation below is almost the same as Linux igb's;
 * however, it does not fit wm(4) well. So AIM stays disabled until we
 * find an appropriate ITR calculation.
 */
/*
 * Calculate the interrupt interval value that wm_itrs_writereg() will
 * write to the register. This function does not write the ITR/EITR
 * register itself.
 */
   4924 static void
   4925 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4926 {
   4927 #ifdef NOTYET
   4928 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4929 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4930 	uint32_t avg_size = 0;
   4931 	uint32_t new_itr;
   4932 
   4933 	if (rxq->rxq_packets)
   4934 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
   4935 	if (txq->txq_packets)
   4936 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4937 
   4938 	if (avg_size == 0) {
   4939 		new_itr = 450; /* restore default value */
   4940 		goto out;
   4941 	}
   4942 
   4943 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
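	/* (Preamble 8 + CRC 4 + inter-frame gap 12 bytes = 24.) */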
   4944 	avg_size += 24;
   4945 
   4946 	/* Don't starve jumbo frames */
   4947 	avg_size = min(avg_size, 3000);
   4948 
   4949 	/* Give a little boost to mid-size frames */
   4950 	if ((avg_size > 300) && (avg_size < 1200))
   4951 		new_itr = avg_size / 3;
   4952 	else
   4953 		new_itr = avg_size / 2;
   4954 
   4955 out:
	/*
	 * The usage of the 82574 and 82575 EITR is different from other
	 * NEWQUEUE controllers. See the sc->sc_itr_init setting in
	 * wm_init_locked().
	 */
   4960 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4961 		new_itr *= 4;
   4962 
   4963 	if (new_itr != wmq->wmq_itr) {
   4964 		wmq->wmq_itr = new_itr;
   4965 		wmq->wmq_set_itr = true;
   4966 	} else
   4967 		wmq->wmq_set_itr = false;
   4968 
   4969 	rxq->rxq_packets = 0;
   4970 	rxq->rxq_bytes = 0;
   4971 	txq->txq_packets = 0;
   4972 	txq->txq_bytes = 0;
   4973 #endif
   4974 }
   4975 
   4976 /*
   4977  * wm_init:		[ifnet interface function]
   4978  *
   4979  *	Initialize the interface.
   4980  */
   4981 static int
   4982 wm_init(struct ifnet *ifp)
   4983 {
   4984 	struct wm_softc *sc = ifp->if_softc;
   4985 	int ret;
   4986 
   4987 	WM_CORE_LOCK(sc);
   4988 	ret = wm_init_locked(ifp);
   4989 	WM_CORE_UNLOCK(sc);
   4990 
   4991 	return ret;
   4992 }
   4993 
   4994 static int
   4995 wm_init_locked(struct ifnet *ifp)
   4996 {
   4997 	struct wm_softc *sc = ifp->if_softc;
   4998 	int i, j, trynum, error = 0;
   4999 	uint32_t reg;
   5000 
   5001 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5002 		device_xname(sc->sc_dev), __func__));
   5003 	KASSERT(WM_CORE_LOCKED(sc));
   5004 
	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
   5016 #ifdef __NO_STRICT_ALIGNMENT
   5017 	sc->sc_align_tweak = 0;
   5018 #else
   5019 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5020 		sc->sc_align_tweak = 0;
   5021 	else
   5022 		sc->sc_align_tweak = 2;
   5023 #endif /* __NO_STRICT_ALIGNMENT */
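	/*
	 * Worked example (a sketch): with the standard MTU, the frame is
	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 bytes, which
	 * fits in MCLBYTES - 2, so align_tweak is 2; shifting the 14-byte
	 * Ethernet header by 2 leaves the IP header 4-byte aligned.
	 */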
   5024 
   5025 	/* Cancel any pending I/O. */
   5026 	wm_stop_locked(ifp, 0);
   5027 
   5028 	/* update statistics before reset */
   5029 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5030 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5031 
   5032 	/* PCH_SPT hardware workaround */
   5033 	if (sc->sc_type == WM_T_PCH_SPT)
   5034 		wm_flush_desc_rings(sc);
   5035 
   5036 	/* Reset the chip to a known state. */
   5037 	wm_reset(sc);
   5038 
   5039 	/* AMT based hardware can now take control from firmware */
   5040 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5041 		wm_get_hw_control(sc);
   5042 
   5043 	/* Init hardware bits */
   5044 	wm_initialize_hardware_bits(sc);
   5045 
   5046 	/* Reset the PHY. */
   5047 	if (sc->sc_flags & WM_F_HAS_MII)
   5048 		wm_gmii_reset(sc);
   5049 
   5050 	/* Calculate (E)ITR value */
   5051 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
		/*
		 * For NEWQUEUE devices' EITR (except for the 82575).
		 * The 82575's EITR should be set to the same throttling value
		 * as the other old controllers' ITR because the interrupts/sec
		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
		 *
		 * The 82574's EITR should be set to the same throttling value
		 * as ITR.
		 *
		 * For N interrupts/sec, set this value to:
		 * 1,000,000 / N, in contrast to the ITR throttling value.
		 */
   5063 		sc->sc_itr_init = 450;
   5064 	} else if (sc->sc_type >= WM_T_82543) {
   5065 		/*
   5066 		 * Set up the interrupt throttling register (units of 256ns)
   5067 		 * Note that a footnote in Intel's documentation says this
   5068 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5069 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5070 		 * that that is also true for the 1024ns units of the other
   5071 		 * interrupt-related timer registers -- so, really, we ought
   5072 		 * to divide this value by 4 when the link speed is low.
   5073 		 *
   5074 		 * XXX implement this division at link speed change!
   5075 		 */
   5076 
   5077 		/*
   5078 		 * For N interrupts/sec, set this value to:
   5079 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5080 		 * absolute and packet timer values to this value
   5081 		 * divided by 4 to get "simple timer" behavior.
   5082 		 */
   5083 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5084 	}
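	/*
	 * Worked arithmetic for the defaults above (illustrative): ITR 1500
	 * gives 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec, and
	 * EITR 450 with the 1,000,000 / N formula gives roughly 2222
	 * interrupts/sec.
	 */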
   5085 
   5086 	error = wm_init_txrx_queues(sc);
   5087 	if (error)
   5088 		goto out;
   5089 
   5090 	/*
   5091 	 * Clear out the VLAN table -- we don't use it (yet).
   5092 	 */
   5093 	CSR_WRITE(sc, WMREG_VET, 0);
   5094 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5095 		trynum = 10; /* Due to hw errata */
   5096 	else
   5097 		trynum = 1;
   5098 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5099 		for (j = 0; j < trynum; j++)
   5100 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5101 
   5102 	/*
   5103 	 * Set up flow-control parameters.
   5104 	 *
   5105 	 * XXX Values could probably stand some tuning.
   5106 	 */
   5107 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5108 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5109 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5110 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5111 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5112 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5113 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5114 	}
   5115 
   5116 	sc->sc_fcrtl = FCRTL_DFLT;
   5117 	if (sc->sc_type < WM_T_82543) {
   5118 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5119 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5120 	} else {
   5121 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5122 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5123 	}
   5124 
   5125 	if (sc->sc_type == WM_T_80003)
   5126 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5127 	else
   5128 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5129 
   5130 	/* Writes the control register. */
   5131 	wm_set_vlan(sc);
   5132 
   5133 	if (sc->sc_flags & WM_F_HAS_MII) {
   5134 		int val;
   5135 
   5136 		switch (sc->sc_type) {
   5137 		case WM_T_80003:
   5138 		case WM_T_ICH8:
   5139 		case WM_T_ICH9:
   5140 		case WM_T_ICH10:
   5141 		case WM_T_PCH:
   5142 		case WM_T_PCH2:
   5143 		case WM_T_PCH_LPT:
   5144 		case WM_T_PCH_SPT:
   5145 			/*
   5146 			 * Set the mac to wait the maximum time between each
   5147 			 * iteration and increase the max iterations when
   5148 			 * polling the phy; this fixes erroneous timeouts at
   5149 			 * 10Mbps.
   5150 			 */
   5151 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5152 			    0xFFFF);
   5153 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5154 			val |= 0x3F;
   5155 			wm_kmrn_writereg(sc,
   5156 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5157 			break;
   5158 		default:
   5159 			break;
   5160 		}
   5161 
   5162 		if (sc->sc_type == WM_T_80003) {
   5163 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5164 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5165 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5166 
   5167 			/* Bypass RX and TX FIFO's */
   5168 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5169 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5170 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5171 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5172 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5173 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5174 		}
   5175 	}
   5176 #if 0
   5177 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5178 #endif
   5179 
   5180 	/* Set up checksum offload parameters. */
   5181 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5182 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5183 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5184 		reg |= RXCSUM_IPOFL;
   5185 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5186 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5187 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5188 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5189 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5190 
   5191 	/* Set registers about MSI-X */
   5192 	if (wm_is_using_msix(sc)) {
   5193 		uint32_t ivar;
   5194 		struct wm_queue *wmq;
   5195 		int qid, qintr_idx;
   5196 
   5197 		if (sc->sc_type == WM_T_82575) {
   5198 			/* Interrupt control */
   5199 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5200 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5201 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5202 
   5203 			/* TX and RX */
   5204 			for (i = 0; i < sc->sc_nqueues; i++) {
   5205 				wmq = &sc->sc_queue[i];
   5206 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5207 				    EITR_TX_QUEUE(wmq->wmq_id)
   5208 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5209 			}
   5210 			/* Link status */
   5211 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5212 			    EITR_OTHER);
   5213 		} else if (sc->sc_type == WM_T_82574) {
   5214 			/* Interrupt control */
   5215 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5216 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5217 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5218 
			/*
			 * Work around an issue with spurious interrupts
			 * in MSI-X mode.
			 * At wm_initialize_hardware_bits(), sc_nintrs has not
			 * been initialized yet, so re-initialize WMREG_RFCTL
			 * here.
			 */
   5225 			reg = CSR_READ(sc, WMREG_RFCTL);
   5226 			reg |= WMREG_RFCTL_ACKDIS;
   5227 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5228 
   5229 			ivar = 0;
   5230 			/* TX and RX */
   5231 			for (i = 0; i < sc->sc_nqueues; i++) {
   5232 				wmq = &sc->sc_queue[i];
   5233 				qid = wmq->wmq_id;
   5234 				qintr_idx = wmq->wmq_intr_idx;
   5235 
   5236 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5237 				    IVAR_TX_MASK_Q_82574(qid));
   5238 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5239 				    IVAR_RX_MASK_Q_82574(qid));
   5240 			}
   5241 			/* Link status */
   5242 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5243 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5244 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5245 		} else {
   5246 			/* Interrupt control */
   5247 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5248 			    | GPIE_EIAME | GPIE_PBA);
   5249 
   5250 			switch (sc->sc_type) {
   5251 			case WM_T_82580:
   5252 			case WM_T_I350:
   5253 			case WM_T_I354:
   5254 			case WM_T_I210:
   5255 			case WM_T_I211:
   5256 				/* TX and RX */
   5257 				for (i = 0; i < sc->sc_nqueues; i++) {
   5258 					wmq = &sc->sc_queue[i];
   5259 					qid = wmq->wmq_id;
   5260 					qintr_idx = wmq->wmq_intr_idx;
   5261 
   5262 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5263 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5264 					ivar |= __SHIFTIN((qintr_idx
   5265 						| IVAR_VALID),
   5266 					    IVAR_TX_MASK_Q(qid));
   5267 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5268 					ivar |= __SHIFTIN((qintr_idx
   5269 						| IVAR_VALID),
   5270 					    IVAR_RX_MASK_Q(qid));
   5271 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5272 				}
   5273 				break;
   5274 			case WM_T_82576:
   5275 				/* TX and RX */
   5276 				for (i = 0; i < sc->sc_nqueues; i++) {
   5277 					wmq = &sc->sc_queue[i];
   5278 					qid = wmq->wmq_id;
   5279 					qintr_idx = wmq->wmq_intr_idx;
   5280 
   5281 					ivar = CSR_READ(sc,
   5282 					    WMREG_IVAR_Q_82576(qid));
   5283 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5284 					ivar |= __SHIFTIN((qintr_idx
   5285 						| IVAR_VALID),
   5286 					    IVAR_TX_MASK_Q_82576(qid));
   5287 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5288 					ivar |= __SHIFTIN((qintr_idx
   5289 						| IVAR_VALID),
   5290 					    IVAR_RX_MASK_Q_82576(qid));
   5291 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5292 					    ivar);
   5293 				}
   5294 				break;
   5295 			default:
   5296 				break;
   5297 			}
   5298 
   5299 			/* Link status */
   5300 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5301 			    IVAR_MISC_OTHER);
   5302 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5303 		}
   5304 
   5305 		if (wm_is_using_multiqueue(sc)) {
   5306 			wm_init_rss(sc);
   5307 
			/*
			 * NOTE: Receive Full-Packet Checksum Offload
			 * is mutually exclusive with multiqueue. However,
			 * this is not the same as the TCP/IP checksums,
			 * which still work.
			 */
   5314 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5315 			reg |= RXCSUM_PCSD;
   5316 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5317 		}
   5318 	}
   5319 
   5320 	/* Set up the interrupt registers. */
   5321 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5322 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5323 	    ICR_RXO | ICR_RXT0;
   5324 	if (wm_is_using_msix(sc)) {
   5325 		uint32_t mask;
   5326 		struct wm_queue *wmq;
   5327 
   5328 		switch (sc->sc_type) {
   5329 		case WM_T_82574:
   5330 			mask = 0;
   5331 			for (i = 0; i < sc->sc_nqueues; i++) {
   5332 				wmq = &sc->sc_queue[i];
   5333 				mask |= ICR_TXQ(wmq->wmq_id);
   5334 				mask |= ICR_RXQ(wmq->wmq_id);
   5335 			}
   5336 			mask |= ICR_OTHER;
   5337 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5338 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5339 			break;
   5340 		default:
   5341 			if (sc->sc_type == WM_T_82575) {
   5342 				mask = 0;
   5343 				for (i = 0; i < sc->sc_nqueues; i++) {
   5344 					wmq = &sc->sc_queue[i];
   5345 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5346 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5347 				}
   5348 				mask |= EITR_OTHER;
   5349 			} else {
   5350 				mask = 0;
   5351 				for (i = 0; i < sc->sc_nqueues; i++) {
   5352 					wmq = &sc->sc_queue[i];
   5353 					mask |= 1 << wmq->wmq_intr_idx;
   5354 				}
   5355 				mask |= 1 << sc->sc_link_intr_idx;
   5356 			}
   5357 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5358 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5359 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5360 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5361 			break;
   5362 		}
   5363 	} else
   5364 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5365 
   5366 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   5367 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   5368 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   5369 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   5370 		reg = CSR_READ(sc, WMREG_KABGTXD);
   5371 		reg |= KABGTXD_BGSQLBIAS;
   5372 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   5373 	}
   5374 
   5375 	/* Set up the inter-packet gap. */
   5376 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5377 
   5378 	if (sc->sc_type >= WM_T_82543) {
   5379 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5380 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5381 			wm_itrs_writereg(sc, wmq);
   5382 		}
		/*
		 * Link interrupts occur much less frequently than TX
		 * and RX interrupts, so we don't tune the
		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
		 * if_igb does.
		 */
   5389 	}
   5390 
   5391 	/* Set the VLAN ethernetype. */
   5392 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5393 
	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
   5399 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5400 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5401 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5402 	if (sc->sc_type >= WM_T_82571)
   5403 		sc->sc_tctl |= TCTL_MULR;
   5404 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5405 
   5406 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5408 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5409 	}
   5410 
   5411 	if (sc->sc_type == WM_T_80003) {
   5412 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5413 		reg &= ~TCTL_EXT_GCEX_MASK;
   5414 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5415 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5416 	}
   5417 
   5418 	/* Set the media. */
   5419 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5420 		goto out;
   5421 
   5422 	/* Configure for OS presence */
   5423 	wm_init_manageability(sc);
   5424 
   5425 	/*
   5426 	 * Set up the receive control register; we actually program
   5427 	 * the register when we set the receive filter.  Use multicast
   5428 	 * address offset type 0.
   5429 	 *
   5430 	 * Only the i82544 has the ability to strip the incoming
   5431 	 * CRC, so we don't enable that feature.
   5432 	 */
   5433 	sc->sc_mchash_type = 0;
   5434 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5435 	    | RCTL_MO(sc->sc_mchash_type);
   5436 
	/*
	 * The 82574 uses the one-buffer extended Rx descriptor format.
	 */
   5440 	if (sc->sc_type == WM_T_82574)
   5441 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5442 
	/*
	 * The I350 has a bug where it always strips the CRC whether
	 * asked to or not. So ask for stripped CRC here and cope in rxeof.
	 */
   5447 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5448 	    || (sc->sc_type == WM_T_I210))
   5449 		sc->sc_rctl |= RCTL_SECRC;
   5450 
   5451 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5452 	    && (ifp->if_mtu > ETHERMTU)) {
   5453 		sc->sc_rctl |= RCTL_LPE;
   5454 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5455 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5456 	}
   5457 
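	/*
	 * MCLBYTES is the mbuf cluster size; it is 2048 on most NetBSD
	 * ports (an assumption about the build, not the hardware), so the
	 * RCTL_2k case below is the common one.  Other cluster sizes use
	 * the RCTL_BSEX buffer-size-extension encodings.
	 */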
   5458 	if (MCLBYTES == 2048) {
   5459 		sc->sc_rctl |= RCTL_2k;
   5460 	} else {
   5461 		if (sc->sc_type >= WM_T_82543) {
   5462 			switch (MCLBYTES) {
   5463 			case 4096:
   5464 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5465 				break;
   5466 			case 8192:
   5467 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5468 				break;
   5469 			case 16384:
   5470 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5471 				break;
   5472 			default:
   5473 				panic("wm_init: MCLBYTES %d unsupported",
   5474 				    MCLBYTES);
   5475 				break;
   5476 			}
   5477 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5478 	}
   5479 
   5480 	/* Set the receive filter. */
   5481 	wm_set_filter(sc);
   5482 
   5483 	/* Enable ECC */
   5484 	switch (sc->sc_type) {
   5485 	case WM_T_82571:
   5486 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5487 		reg |= PBA_ECC_CORR_EN;
   5488 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5489 		break;
   5490 	case WM_T_PCH_LPT:
   5491 	case WM_T_PCH_SPT:
   5492 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5493 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5494 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5495 
   5496 		sc->sc_ctrl |= CTRL_MEHE;
   5497 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5498 		break;
   5499 	default:
   5500 		break;
   5501 	}
   5502 
	/* On the 82575 and later, set RDT only if RX is enabled */
   5504 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5505 		int qidx;
   5506 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5507 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5508 			for (i = 0; i < WM_NRXDESC; i++) {
   5509 				mutex_enter(rxq->rxq_lock);
   5510 				wm_init_rxdesc(rxq, i);
   5511 				mutex_exit(rxq->rxq_lock);
   5512 
   5513 			}
   5514 		}
   5515 	}
   5516 
   5517 	wm_turnon(sc);
   5518 
   5519 	/* Start the one second link check clock. */
   5520 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5521 
   5522 	/* ...all done! */
   5523 	ifp->if_flags |= IFF_RUNNING;
   5524 	ifp->if_flags &= ~IFF_OACTIVE;
   5525 
   5526  out:
   5527 	sc->sc_if_flags = ifp->if_flags;
   5528 	if (error)
   5529 		log(LOG_ERR, "%s: interface not running\n",
   5530 		    device_xname(sc->sc_dev));
   5531 	return error;
   5532 }
   5533 
   5534 /*
   5535  * wm_stop:		[ifnet interface function]
   5536  *
   5537  *	Stop transmission on the interface.
   5538  */
   5539 static void
   5540 wm_stop(struct ifnet *ifp, int disable)
   5541 {
   5542 	struct wm_softc *sc = ifp->if_softc;
   5543 
   5544 	WM_CORE_LOCK(sc);
   5545 	wm_stop_locked(ifp, disable);
   5546 	WM_CORE_UNLOCK(sc);
   5547 }
   5548 
   5549 static void
   5550 wm_stop_locked(struct ifnet *ifp, int disable)
   5551 {
   5552 	struct wm_softc *sc = ifp->if_softc;
   5553 	struct wm_txsoft *txs;
   5554 	int i, qidx;
   5555 
   5556 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5557 		device_xname(sc->sc_dev), __func__));
   5558 	KASSERT(WM_CORE_LOCKED(sc));
   5559 
   5560 	wm_turnoff(sc);
   5561 
   5562 	/* Stop the one second clock. */
   5563 	callout_stop(&sc->sc_tick_ch);
   5564 
   5565 	/* Stop the 82547 Tx FIFO stall check timer. */
   5566 	if (sc->sc_type == WM_T_82547)
   5567 		callout_stop(&sc->sc_txfifo_ch);
   5568 
   5569 	if (sc->sc_flags & WM_F_HAS_MII) {
   5570 		/* Down the MII. */
   5571 		mii_down(&sc->sc_mii);
   5572 	} else {
   5573 #if 0
   5574 		/* Should we clear PHY's status properly? */
   5575 		wm_reset(sc);
   5576 #endif
   5577 	}
   5578 
   5579 	/* Stop the transmit and receive processes. */
   5580 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5581 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5582 	sc->sc_rctl &= ~RCTL_EN;
   5583 
   5584 	/*
   5585 	 * Clear the interrupt mask to ensure the device cannot assert its
   5586 	 * interrupt line.
   5587 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5588 	 * service any currently pending or shared interrupt.
   5589 	 */
   5590 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5591 	sc->sc_icr = 0;
   5592 	if (wm_is_using_msix(sc)) {
   5593 		if (sc->sc_type != WM_T_82574) {
   5594 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5595 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5596 		} else
   5597 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5598 	}
   5599 
   5600 	/* Release any queued transmit buffers. */
   5601 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5602 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5603 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5604 		mutex_enter(txq->txq_lock);
   5605 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5606 			txs = &txq->txq_soft[i];
   5607 			if (txs->txs_mbuf != NULL) {
				bus_dmamap_unload(sc->sc_dmat,
				    txs->txs_dmamap);
   5609 				m_freem(txs->txs_mbuf);
   5610 				txs->txs_mbuf = NULL;
   5611 			}
   5612 		}
   5613 		mutex_exit(txq->txq_lock);
   5614 	}
   5615 
   5616 	/* Mark the interface as down and cancel the watchdog timer. */
   5617 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5618 	ifp->if_timer = 0;
   5619 
   5620 	if (disable) {
   5621 		for (i = 0; i < sc->sc_nqueues; i++) {
   5622 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5623 			mutex_enter(rxq->rxq_lock);
   5624 			wm_rxdrain(rxq);
   5625 			mutex_exit(rxq->rxq_lock);
   5626 		}
   5627 	}
   5628 
   5629 #if 0 /* notyet */
   5630 	if (sc->sc_type >= WM_T_82544)
   5631 		CSR_WRITE(sc, WMREG_WUC, 0);
   5632 #endif
   5633 }
   5634 
   5635 static void
   5636 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5637 {
   5638 	struct mbuf *m;
   5639 	int i;
   5640 
   5641 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5642 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5643 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5644 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5645 		    m->m_data, m->m_len, m->m_flags);
   5646 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5647 	    i, i == 1 ? "" : "s");
   5648 }
   5649 
   5650 /*
   5651  * wm_82547_txfifo_stall:
   5652  *
   5653  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5654  *	reset the FIFO pointers, and restart packet transmission.
   5655  */
   5656 static void
   5657 wm_82547_txfifo_stall(void *arg)
   5658 {
   5659 	struct wm_softc *sc = arg;
   5660 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5661 
   5662 	mutex_enter(txq->txq_lock);
   5663 
   5664 	if (txq->txq_stopping)
   5665 		goto out;
   5666 
   5667 	if (txq->txq_fifo_stall) {
   5668 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5669 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5670 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5671 			/*
   5672 			 * Packets have drained.  Stop transmitter, reset
   5673 			 * FIFO pointers, restart transmitter, and kick
   5674 			 * the packet queue.
   5675 			 */
   5676 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5677 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5678 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5679 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5680 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5681 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5682 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5683 			CSR_WRITE_FLUSH(sc);
   5684 
   5685 			txq->txq_fifo_head = 0;
   5686 			txq->txq_fifo_stall = 0;
   5687 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5688 		} else {
   5689 			/*
   5690 			 * Still waiting for packets to drain; try again in
   5691 			 * another tick.
   5692 			 */
   5693 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5694 		}
   5695 	}
   5696 
   5697 out:
   5698 	mutex_exit(txq->txq_lock);
   5699 }
   5700 
   5701 /*
   5702  * wm_82547_txfifo_bugchk:
   5703  *
   5704  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5705  *	prevent enqueueing a packet that would wrap around the end
   5706  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5708  *	We do this by checking the amount of space before the end
   5709  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5710  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5711  *	the internal FIFO pointers to the beginning, and restart
   5712  *	transmission on the interface.
   5713  */
   5714 #define	WM_FIFO_HDR		0x10
   5715 #define	WM_82547_PAD_LEN	0x3e0
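/*
 * Worked example (illustrative): a 1514-byte frame plus WM_FIFO_HDR
 * rounds up to 1536 bytes; with WM_82547_PAD_LEN == 0x3e0 (992), the
 * check below stalls the FIFO once only 1536 - 992 = 544 bytes or
 * fewer remain before the end of the FIFO.
 */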
   5716 static int
   5717 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5718 {
   5719 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5720 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5721 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5722 
   5723 	/* Just return if already stalled. */
   5724 	if (txq->txq_fifo_stall)
   5725 		return 1;
   5726 
   5727 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5728 		/* Stall only occurs in half-duplex mode. */
   5729 		goto send_packet;
   5730 	}
   5731 
   5732 	if (len >= WM_82547_PAD_LEN + space) {
   5733 		txq->txq_fifo_stall = 1;
   5734 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5735 		return 1;
   5736 	}
   5737 
   5738  send_packet:
   5739 	txq->txq_fifo_head += len;
   5740 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5741 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5742 
   5743 	return 0;
   5744 }
   5745 
   5746 static int
   5747 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5748 {
   5749 	int error;
   5750 
   5751 	/*
   5752 	 * Allocate the control data structures, and create and load the
   5753 	 * DMA map for it.
   5754 	 *
   5755 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5756 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5757 	 * both sets within the same 4G segment.
   5758 	 */
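	/*
	 * How the 4G constraint is met (a sketch): the 0x100000000ULL
	 * "boundary" argument passed to bus_dmamem_alloc() below requests
	 * memory that does not cross a 4GB boundary, keeping the whole
	 * descriptor ring within one 4G segment.
	 */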
   5759 	if (sc->sc_type < WM_T_82544)
   5760 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5761 	else
   5762 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5763 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5764 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5765 	else
   5766 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5767 
   5768 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5769 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5770 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5771 		aprint_error_dev(sc->sc_dev,
   5772 		    "unable to allocate TX control data, error = %d\n",
   5773 		    error);
   5774 		goto fail_0;
   5775 	}
   5776 
   5777 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5778 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5779 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5780 		aprint_error_dev(sc->sc_dev,
   5781 		    "unable to map TX control data, error = %d\n", error);
   5782 		goto fail_1;
   5783 	}
   5784 
   5785 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5786 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5787 		aprint_error_dev(sc->sc_dev,
   5788 		    "unable to create TX control data DMA map, error = %d\n",
   5789 		    error);
   5790 		goto fail_2;
   5791 	}
   5792 
   5793 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5794 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5795 		aprint_error_dev(sc->sc_dev,
   5796 		    "unable to load TX control data DMA map, error = %d\n",
   5797 		    error);
   5798 		goto fail_3;
   5799 	}
   5800 
   5801 	return 0;
   5802 
   5803  fail_3:
   5804 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5805  fail_2:
   5806 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5807 	    WM_TXDESCS_SIZE(txq));
   5808  fail_1:
   5809 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5810  fail_0:
   5811 	return error;
   5812 }
   5813 
   5814 static void
   5815 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5816 {
   5817 
   5818 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5819 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5820 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5821 	    WM_TXDESCS_SIZE(txq));
   5822 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5823 }
   5824 
   5825 static int
   5826 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5827 {
   5828 	int error;
   5829 	size_t rxq_descs_size;
   5830 
   5831 	/*
   5832 	 * Allocate the control data structures, and create and load the
   5833 	 * DMA map for it.
   5834 	 *
   5835 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5836 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5837 	 * both sets within the same 4G segment.
   5838 	 */
   5839 	rxq->rxq_ndesc = WM_NRXDESC;
   5840 	if (sc->sc_type == WM_T_82574)
   5841 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5842 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5843 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5844 	else
   5845 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5846 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5847 
   5848 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5849 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5850 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5851 		aprint_error_dev(sc->sc_dev,
   5852 		    "unable to allocate RX control data, error = %d\n",
   5853 		    error);
   5854 		goto fail_0;
   5855 	}
   5856 
   5857 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5858 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5859 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5860 		aprint_error_dev(sc->sc_dev,
   5861 		    "unable to map RX control data, error = %d\n", error);
   5862 		goto fail_1;
   5863 	}
   5864 
   5865 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5866 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5867 		aprint_error_dev(sc->sc_dev,
   5868 		    "unable to create RX control data DMA map, error = %d\n",
   5869 		    error);
   5870 		goto fail_2;
   5871 	}
   5872 
   5873 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5874 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5875 		aprint_error_dev(sc->sc_dev,
   5876 		    "unable to load RX control data DMA map, error = %d\n",
   5877 		    error);
   5878 		goto fail_3;
   5879 	}
   5880 
   5881 	return 0;
   5882 
   5883  fail_3:
   5884 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5885  fail_2:
   5886 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5887 	    rxq_descs_size);
   5888  fail_1:
   5889 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5890  fail_0:
   5891 	return error;
   5892 }
   5893 
   5894 static void
   5895 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5896 {
   5897 
   5898 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5899 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5900 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5901 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5902 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5903 }
   5904 
   5905 
   5906 static int
   5907 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5908 {
   5909 	int i, error;
   5910 
   5911 	/* Create the transmit buffer DMA maps. */
   5912 	WM_TXQUEUELEN(txq) =
   5913 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5914 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5915 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5916 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5917 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5918 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5919 			aprint_error_dev(sc->sc_dev,
   5920 			    "unable to create Tx DMA map %d, error = %d\n",
   5921 			    i, error);
   5922 			goto fail;
   5923 		}
   5924 	}
   5925 
   5926 	return 0;
   5927 
   5928  fail:
   5929 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5930 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5931 			bus_dmamap_destroy(sc->sc_dmat,
   5932 			    txq->txq_soft[i].txs_dmamap);
   5933 	}
   5934 	return error;
   5935 }
   5936 
   5937 static void
   5938 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5939 {
   5940 	int i;
   5941 
   5942 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5943 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5944 			bus_dmamap_destroy(sc->sc_dmat,
   5945 			    txq->txq_soft[i].txs_dmamap);
   5946 	}
   5947 }
   5948 
   5949 static int
   5950 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5951 {
   5952 	int i, error;
   5953 
   5954 	/* Create the receive buffer DMA maps. */
   5955 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5956 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5957 			    MCLBYTES, 0, 0,
   5958 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5959 			aprint_error_dev(sc->sc_dev,
   5960 			    "unable to create Rx DMA map %d error = %d\n",
   5961 			    i, error);
   5962 			goto fail;
   5963 		}
   5964 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5965 	}
   5966 
   5967 	return 0;
   5968 
   5969  fail:
   5970 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5971 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5972 			bus_dmamap_destroy(sc->sc_dmat,
   5973 			    rxq->rxq_soft[i].rxs_dmamap);
   5974 	}
   5975 	return error;
   5976 }
   5977 
   5978 static void
   5979 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5980 {
   5981 	int i;
   5982 
   5983 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5984 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5985 			bus_dmamap_destroy(sc->sc_dmat,
   5986 			    rxq->rxq_soft[i].rxs_dmamap);
   5987 	}
   5988 }
   5989 
/*
 * wm_alloc_txrx_queues:
 *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
 */
   5994 static int
   5995 wm_alloc_txrx_queues(struct wm_softc *sc)
   5996 {
   5997 	int i, error, tx_done, rx_done;
   5998 
   5999 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6000 	    KM_SLEEP);
   6001 	if (sc->sc_queue == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6003 		error = ENOMEM;
   6004 		goto fail_0;
   6005 	}
   6006 
   6007 	/*
   6008 	 * For transmission
   6009 	 */
   6010 	error = 0;
   6011 	tx_done = 0;
   6012 	for (i = 0; i < sc->sc_nqueues; i++) {
   6013 #ifdef WM_EVENT_COUNTERS
   6014 		int j;
   6015 		const char *xname;
   6016 #endif
   6017 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6018 		txq->txq_sc = sc;
   6019 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6020 
   6021 		error = wm_alloc_tx_descs(sc, txq);
   6022 		if (error)
   6023 			break;
   6024 		error = wm_alloc_tx_buffer(sc, txq);
   6025 		if (error) {
   6026 			wm_free_tx_descs(sc, txq);
   6027 			break;
   6028 		}
   6029 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6030 		if (txq->txq_interq == NULL) {
   6031 			wm_free_tx_descs(sc, txq);
   6032 			wm_free_tx_buffer(sc, txq);
   6033 			error = ENOMEM;
   6034 			break;
   6035 		}
   6036 
   6037 #ifdef WM_EVENT_COUNTERS
   6038 		xname = device_xname(sc->sc_dev);
   6039 
   6040 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6041 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6042 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6043 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6044 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6045 
   6046 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6047 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6048 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6049 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6050 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6051 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6052 
   6053 		for (j = 0; j < WM_NTXSEGS; j++) {
			snprintf(txq->txq_txseg_evcnt_names[j],
			    sizeof(txq->txq_txseg_evcnt_names[j]),
			    "txq%02dtxseg%d", i, j);
			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
			    EVCNT_TYPE_MISC, NULL, xname,
			    txq->txq_txseg_evcnt_names[j]);
   6058 		}
   6059 
   6060 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6061 
   6062 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6063 #endif /* WM_EVENT_COUNTERS */
   6064 
   6065 		tx_done++;
   6066 	}
   6067 	if (error)
   6068 		goto fail_1;
   6069 
	/*
	 * For receive
	 */
   6073 	error = 0;
   6074 	rx_done = 0;
   6075 	for (i = 0; i < sc->sc_nqueues; i++) {
   6076 #ifdef WM_EVENT_COUNTERS
   6077 		const char *xname;
   6078 #endif
   6079 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6080 		rxq->rxq_sc = sc;
   6081 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6082 
   6083 		error = wm_alloc_rx_descs(sc, rxq);
   6084 		if (error)
   6085 			break;
   6086 
   6087 		error = wm_alloc_rx_buffer(sc, rxq);
   6088 		if (error) {
   6089 			wm_free_rx_descs(sc, rxq);
   6090 			break;
   6091 		}
   6092 
   6093 #ifdef WM_EVENT_COUNTERS
   6094 		xname = device_xname(sc->sc_dev);
   6095 
   6096 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6097 
   6098 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6099 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6100 #endif /* WM_EVENT_COUNTERS */
   6101 
   6102 		rx_done++;
   6103 	}
   6104 	if (error)
   6105 		goto fail_2;
   6106 
   6107 	return 0;
   6108 
   6109  fail_2:
   6110 	for (i = 0; i < rx_done; i++) {
   6111 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6112 		wm_free_rx_buffer(sc, rxq);
   6113 		wm_free_rx_descs(sc, rxq);
   6114 		if (rxq->rxq_lock)
   6115 			mutex_obj_free(rxq->rxq_lock);
   6116 	}
   6117  fail_1:
   6118 	for (i = 0; i < tx_done; i++) {
   6119 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6120 		pcq_destroy(txq->txq_interq);
   6121 		wm_free_tx_buffer(sc, txq);
   6122 		wm_free_tx_descs(sc, txq);
   6123 		if (txq->txq_lock)
   6124 			mutex_obj_free(txq->txq_lock);
   6125 	}
   6126 
   6127 	kmem_free(sc->sc_queue,
   6128 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6129  fail_0:
   6130 	return error;
   6131 }
   6132 
/*
 * wm_free_txrx_queues:
 *	Free {tx,rx} descriptors and {tx,rx} buffers.
 */
   6137 static void
   6138 wm_free_txrx_queues(struct wm_softc *sc)
   6139 {
   6140 	int i;
   6141 
   6142 	for (i = 0; i < sc->sc_nqueues; i++) {
   6143 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6144 
   6145 #ifdef WM_EVENT_COUNTERS
   6146 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6147 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6148 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6149 #endif /* WM_EVENT_COUNTERS */
   6150 
   6151 		wm_free_rx_buffer(sc, rxq);
   6152 		wm_free_rx_descs(sc, rxq);
   6153 		if (rxq->rxq_lock)
   6154 			mutex_obj_free(rxq->rxq_lock);
   6155 	}
   6156 
   6157 	for (i = 0; i < sc->sc_nqueues; i++) {
   6158 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6159 		struct mbuf *m;
   6160 #ifdef WM_EVENT_COUNTERS
   6161 		int j;
   6162 
   6163 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6164 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6165 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6166 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6167 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6168 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6169 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6170 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6171 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6172 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6173 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6174 
   6175 		for (j = 0; j < WM_NTXSEGS; j++)
   6176 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6177 
   6178 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6179 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6180 #endif /* WM_EVENT_COUNTERS */
   6181 
   6182 		/* drain txq_interq */
   6183 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6184 			m_freem(m);
   6185 		pcq_destroy(txq->txq_interq);
   6186 
   6187 		wm_free_tx_buffer(sc, txq);
   6188 		wm_free_tx_descs(sc, txq);
   6189 		if (txq->txq_lock)
   6190 			mutex_obj_free(txq->txq_lock);
   6191 	}
   6192 
   6193 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6194 }
   6195 
   6196 static void
   6197 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6198 {
   6199 
   6200 	KASSERT(mutex_owned(txq->txq_lock));
   6201 
   6202 	/* Initialize the transmit descriptor ring. */
   6203 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6204 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6205 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6206 	txq->txq_free = WM_NTXDESC(txq);
   6207 	txq->txq_next = 0;
   6208 }
   6209 
   6210 static void
   6211 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6212     struct wm_txqueue *txq)
   6213 {
   6214 
   6215 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6216 		device_xname(sc->sc_dev), __func__));
   6217 	KASSERT(mutex_owned(txq->txq_lock));
   6218 
   6219 	if (sc->sc_type < WM_T_82543) {
   6220 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6221 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6222 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6223 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6224 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6225 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6226 	} else {
   6227 		int qid = wmq->wmq_id;
   6228 
   6229 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6230 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6231 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6232 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6233 
   6234 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6235 			/*
   6236 			 * Don't write TDT before TCTL.EN is set.
			 * See the documentation.
   6238 			 */
   6239 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6240 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6241 			    | TXDCTL_WTHRESH(0));
   6242 		else {
   6243 			/* XXX should update with AIM? */
   6244 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6245 			if (sc->sc_type >= WM_T_82540) {
   6246 				/* should be same */
   6247 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6248 			}
   6249 
   6250 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6251 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6252 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6253 		}
   6254 	}
   6255 }
   6256 
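        /*
         * wm_init_tx_buffer:
         *
         *	Reset the software state of the transmit job descriptors.
         */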
   6257 static void
   6258 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6259 {
   6260 	int i;
   6261 
   6262 	KASSERT(mutex_owned(txq->txq_lock));
   6263 
   6264 	/* Initialize the transmit job descriptors. */
   6265 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6266 		txq->txq_soft[i].txs_mbuf = NULL;
   6267 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6268 	txq->txq_snext = 0;
   6269 	txq->txq_sdirty = 0;
   6270 }
   6271 
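        /*
         * wm_init_tx_queue:
         *
         *	Initialize the descriptor ring, device registers and software
         *	state for one transmit queue.
         */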
   6272 static void
   6273 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6274     struct wm_txqueue *txq)
   6275 {
   6276 
   6277 	KASSERT(mutex_owned(txq->txq_lock));
   6278 
   6279 	/*
   6280 	 * Set up some register offsets that are different between
   6281 	 * the i82542 and the i82543 and later chips.
   6282 	 */
   6283 	if (sc->sc_type < WM_T_82543)
   6284 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6285 	else
   6286 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6287 
   6288 	wm_init_tx_descs(sc, txq);
   6289 	wm_init_tx_regs(sc, wmq, txq);
   6290 	wm_init_tx_buffer(sc, txq);
   6291 }
   6292 
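        /*
         * wm_init_rx_regs:
         *
         *	Program the receive descriptor base, length, head, tail and
         *	threshold registers for one queue.
         */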
   6293 static void
   6294 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6295     struct wm_rxqueue *rxq)
   6296 {
   6297 
   6298 	KASSERT(mutex_owned(rxq->rxq_lock));
   6299 
   6300 	/*
   6301 	 * Initialize the receive descriptor and receive job
   6302 	 * descriptor rings.
   6303 	 */
   6304 	if (sc->sc_type < WM_T_82543) {
   6305 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6306 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6307 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6308 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6309 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6310 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6311 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6312 
   6313 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6314 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6315 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6316 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6317 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6318 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6319 	} else {
   6320 		int qid = wmq->wmq_id;
   6321 
   6322 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6323 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6324 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6325 
   6326 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6327 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6328 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
   6329 
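        			/*
        			 * SRRCTL expresses the receive buffer size in
        			 * (1 << SRRCTL_BSIZEPKT_SHIFT) byte units; the
        			 * check above rejects MCLBYTES values that are
        			 * not a multiple of that unit.
        			 */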
   6330 			/* Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is supported. */
   6331 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6332 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6333 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6334 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6335 			    | RXDCTL_WTHRESH(1));
   6336 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6337 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6338 		} else {
   6339 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6340 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6341 			/* XXX should update with AIM? */
   6342 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6343 			/* MUST be same */
   6344 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6345 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6346 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6347 		}
   6348 	}
   6349 }
   6350 
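        /*
         * wm_init_rx_buffer:
         *
         *	Populate the receive ring with mbuf clusters, allocating any
         *	that are missing.
         */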
   6351 static int
   6352 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6353 {
   6354 	struct wm_rxsoft *rxs;
   6355 	int error, i;
   6356 
   6357 	KASSERT(mutex_owned(rxq->rxq_lock));
   6358 
   6359 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6360 		rxs = &rxq->rxq_soft[i];
   6361 		if (rxs->rxs_mbuf == NULL) {
   6362 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6363 				log(LOG_ERR, "%s: unable to allocate or map "
   6364 				    "rx buffer %d, error = %d\n",
   6365 				    device_xname(sc->sc_dev), i, error);
   6366 				/*
   6367 				 * XXX Should attempt to run with fewer receive
   6368 				 * XXX buffers instead of just failing.
   6369 				 */
   6370 				wm_rxdrain(rxq);
   6371 				return ENOMEM;
   6372 			}
   6373 		} else {
   6374 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6375 				wm_init_rxdesc(rxq, i);
   6376 			/*
   6377 			 * For 82575 and newer devices, the RX descriptors
   6378 			 * must be initialized after RCTL.EN is set in
   6379 			 * wm_set_filter().
   6380 			 */
   6381 		}
   6382 	}
   6383 	rxq->rxq_ptr = 0;
   6384 	rxq->rxq_discard = 0;
   6385 	WM_RXCHAIN_RESET(rxq);
   6386 
   6387 	return 0;
   6388 }
   6389 
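        /*
         * wm_init_rx_queue:
         *
         *	Initialize the device registers and software state for one
         *	receive queue and fill its buffers.
         */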
   6390 static int
   6391 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6392     struct wm_rxqueue *rxq)
   6393 {
   6394 
   6395 	KASSERT(mutex_owned(rxq->rxq_lock));
   6396 
   6397 	/*
   6398 	 * Set up some register offsets that are different between
   6399 	 * the i82542 and the i82543 and later chips.
   6400 	 */
   6401 	if (sc->sc_type < WM_T_82543)
   6402 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6403 	else
   6404 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6405 
   6406 	wm_init_rx_regs(sc, wmq, rxq);
   6407 	return wm_init_rx_buffer(sc, rxq);
   6408 }
   6409 
   6410 /*
   6411  * wm_init_txrx_queues:
   6412  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6413  */
   6414 static int
   6415 wm_init_txrx_queues(struct wm_softc *sc)
   6416 {
   6417 	int i, error = 0;
   6418 
   6419 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6420 		device_xname(sc->sc_dev), __func__));
   6421 
   6422 	for (i = 0; i < sc->sc_nqueues; i++) {
   6423 		struct wm_queue *wmq = &sc->sc_queue[i];
   6424 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6425 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6426 
   6427 		/*
   6428 		 * TODO
   6429 		 * Currently, a constant value is used instead of AIM.
   6430 		 * Furthermore, the interrupt interval for multiqueue
   6431 		 * (which uses polling mode) is less than the default value.
   6432 		 * More tuning and AIM are required.
   6433 		 */
   6434 		if (wm_is_using_multiqueue(sc))
   6435 			wmq->wmq_itr = 50;
   6436 		else
   6437 			wmq->wmq_itr = sc->sc_itr_init;
   6438 		wmq->wmq_set_itr = true;
   6439 
   6440 		mutex_enter(txq->txq_lock);
   6441 		wm_init_tx_queue(sc, wmq, txq);
   6442 		mutex_exit(txq->txq_lock);
   6443 
   6444 		mutex_enter(rxq->rxq_lock);
   6445 		error = wm_init_rx_queue(sc, wmq, rxq);
   6446 		mutex_exit(rxq->rxq_lock);
   6447 		if (error)
   6448 			break;
   6449 	}
   6450 
   6451 	return error;
   6452 }
   6453 
   6454 /*
   6455  * wm_tx_offload:
   6456  *
   6457  *	Set up TCP/IP checksumming parameters for the
   6458  *	specified packet.
   6459  */
   6460 static int
   6461 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6462     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6463 {
   6464 	struct mbuf *m0 = txs->txs_mbuf;
   6465 	struct livengood_tcpip_ctxdesc *t;
   6466 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6467 	uint32_t ipcse;
   6468 	struct ether_header *eh;
   6469 	int offset, iphl;
   6470 	uint8_t fields;
   6471 
   6472 	/*
   6473 	 * XXX It would be nice if the mbuf pkthdr had offset
   6474 	 * fields for the protocol headers.
   6475 	 */
   6476 
   6477 	eh = mtod(m0, struct ether_header *);
   6478 	switch (htons(eh->ether_type)) {
   6479 	case ETHERTYPE_IP:
   6480 	case ETHERTYPE_IPV6:
   6481 		offset = ETHER_HDR_LEN;
   6482 		break;
   6483 
   6484 	case ETHERTYPE_VLAN:
   6485 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6486 		break;
   6487 
   6488 	default:
   6489 		/*
   6490 		 * Don't support this protocol or encapsulation.
   6491 		 */
   6492 		*fieldsp = 0;
   6493 		*cmdp = 0;
   6494 		return 0;
   6495 	}
   6496 
   6497 	if ((m0->m_pkthdr.csum_flags &
   6498 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6499 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6500 	} else {
   6501 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6502 	}
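        	/*
        	 * IPCSE is the inclusive offset of the last byte of the IP
        	 * checksum region; e.g. a 14-byte Ethernet header and a
        	 * 20-byte IP header give a region of bytes 14..33.
        	 */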
   6503 	ipcse = offset + iphl - 1;
   6504 
   6505 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6506 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6507 	seg = 0;
   6508 	fields = 0;
   6509 
   6510 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6511 		int hlen = offset + iphl;
   6512 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6513 
   6514 		if (__predict_false(m0->m_len <
   6515 				    (hlen + sizeof(struct tcphdr)))) {
   6516 			/*
   6517 			 * TCP/IP headers are not in the first mbuf; we need
   6518 			 * to do this the slow and painful way.  Let's just
   6519 			 * hope this doesn't happen very often.
   6520 			 */
   6521 			struct tcphdr th;
   6522 
   6523 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6524 
   6525 			m_copydata(m0, hlen, sizeof(th), &th);
   6526 			if (v4) {
   6527 				struct ip ip;
   6528 
   6529 				m_copydata(m0, offset, sizeof(ip), &ip);
   6530 				ip.ip_len = 0;
   6531 				m_copyback(m0,
   6532 				    offset + offsetof(struct ip, ip_len),
   6533 				    sizeof(ip.ip_len), &ip.ip_len);
   6534 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6535 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6536 			} else {
   6537 				struct ip6_hdr ip6;
   6538 
   6539 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6540 				ip6.ip6_plen = 0;
   6541 				m_copyback(m0,
   6542 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6543 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6544 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6545 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6546 			}
   6547 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6548 			    sizeof(th.th_sum), &th.th_sum);
   6549 
   6550 			hlen += th.th_off << 2;
   6551 		} else {
   6552 			/*
   6553 			 * TCP/IP headers are in the first mbuf; we can do
   6554 			 * this the easy way.
   6555 			 */
   6556 			struct tcphdr *th;
   6557 
   6558 			if (v4) {
   6559 				struct ip *ip =
   6560 				    (void *)(mtod(m0, char *) + offset);
   6561 				th = (void *)(mtod(m0, char *) + hlen);
   6562 
   6563 				ip->ip_len = 0;
   6564 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6565 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6566 			} else {
   6567 				struct ip6_hdr *ip6 =
   6568 				    (void *)(mtod(m0, char *) + offset);
   6569 				th = (void *)(mtod(m0, char *) + hlen);
   6570 
   6571 				ip6->ip6_plen = 0;
   6572 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6573 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6574 			}
   6575 			hlen += th->th_off << 2;
   6576 		}
   6577 
   6578 		if (v4) {
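        		/*
        		 * In both cases above the TCP length is deliberately
        		 * left out of the pseudo-header sum seeded into th_sum:
        		 * the controller inserts the per-segment length when it
        		 * segments the packet, which is also why ip_len or
        		 * ip6_plen is cleared here.
        		 */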
   6579 			WM_Q_EVCNT_INCR(txq, txtso);
   6580 			cmdlen |= WTX_TCPIP_CMD_IP;
   6581 		} else {
   6582 			WM_Q_EVCNT_INCR(txq, txtso6);
   6583 			ipcse = 0;
   6584 		}
   6585 		cmd |= WTX_TCPIP_CMD_TSE;
   6586 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6587 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6588 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6589 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6590 	}
   6591 
   6592 	/*
   6593 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6594 	 * offload feature, if we load the context descriptor, we
   6595 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6596 	 */
   6597 
   6598 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6599 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6600 	    WTX_TCPIP_IPCSE(ipcse);
   6601 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6602 		WM_Q_EVCNT_INCR(txq, txipsum);
   6603 		fields |= WTX_IXSM;
   6604 	}
   6605 
   6606 	offset += iphl;
   6607 
   6608 	if (m0->m_pkthdr.csum_flags &
   6609 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6610 		WM_Q_EVCNT_INCR(txq, txtusum);
   6611 		fields |= WTX_TXSM;
   6612 		tucs = WTX_TCPIP_TUCSS(offset) |
   6613 		    WTX_TCPIP_TUCSO(offset +
   6614 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6615 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6616 	} else if ((m0->m_pkthdr.csum_flags &
   6617 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6618 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6619 		fields |= WTX_TXSM;
   6620 		tucs = WTX_TCPIP_TUCSS(offset) |
   6621 		    WTX_TCPIP_TUCSO(offset +
   6622 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6623 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6624 	} else {
   6625 		/* Just initialize it to a valid TCP context. */
   6626 		tucs = WTX_TCPIP_TUCSS(offset) |
   6627 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6628 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6629 	}
   6630 
   6631 	/*
   6632 	 * We don't have to write a context descriptor for every packet,
   6633 	 * except on the 82574. On the 82574, we must write a context
   6634 	 * descriptor for every packet when we use two descriptor queues.
   6635 	 * Writing a context descriptor for every packet adds overhead,
   6636 	 * but it does not cause problems.
   6637 	 */
   6638 	/* Fill in the context descriptor. */
   6639 	t = (struct livengood_tcpip_ctxdesc *)
   6640 	    &txq->txq_descs[txq->txq_next];
   6641 	t->tcpip_ipcs = htole32(ipcs);
   6642 	t->tcpip_tucs = htole32(tucs);
   6643 	t->tcpip_cmdlen = htole32(cmdlen);
   6644 	t->tcpip_seg = htole32(seg);
   6645 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6646 
   6647 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6648 	txs->txs_ndesc++;
   6649 
   6650 	*cmdp = cmd;
   6651 	*fieldsp = fields;
   6652 
   6653 	return 0;
   6654 }
   6655 
   6656 static inline int
   6657 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6658 {
   6659 	struct wm_softc *sc = ifp->if_softc;
   6660 	u_int cpuid = cpu_index(curcpu());
   6661 
   6662 	/*
   6663 	 * Currently, a simple distribution strategy.
   6664 	 * TODO:
   6665 	 * Distribute by flowid (RSS hash value).
   6666 	 */
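        	/*
        	 * Example: with ncpu = 4, sc_nqueues = 2 and
        	 * sc_affinity_offset = 1, CPUs 1 and 3 map to queue 0 and
        	 * CPUs 0 and 2 map to queue 1.
        	 */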
   6667 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6668 }
   6669 
   6670 /*
   6671  * wm_start:		[ifnet interface function]
   6672  *
   6673  *	Start packet transmission on the interface.
   6674  */
   6675 static void
   6676 wm_start(struct ifnet *ifp)
   6677 {
   6678 	struct wm_softc *sc = ifp->if_softc;
   6679 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6680 
   6681 #ifdef WM_MPSAFE
   6682 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6683 #endif
   6684 	/*
   6685 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6686 	 */
   6687 
   6688 	mutex_enter(txq->txq_lock);
   6689 	if (!txq->txq_stopping)
   6690 		wm_start_locked(ifp);
   6691 	mutex_exit(txq->txq_lock);
   6692 }
   6693 
   6694 static void
   6695 wm_start_locked(struct ifnet *ifp)
   6696 {
   6697 	struct wm_softc *sc = ifp->if_softc;
   6698 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6699 
   6700 	wm_send_common_locked(ifp, txq, false);
   6701 }
   6702 
   6703 static int
   6704 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6705 {
   6706 	int qid;
   6707 	struct wm_softc *sc = ifp->if_softc;
   6708 	struct wm_txqueue *txq;
   6709 
   6710 	qid = wm_select_txqueue(ifp, m);
   6711 	txq = &sc->sc_queue[qid].wmq_txq;
   6712 
   6713 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6714 		m_freem(m);
   6715 		WM_Q_EVCNT_INCR(txq, txdrop);
   6716 		return ENOBUFS;
   6717 	}
   6718 
   6719 	/*
   6720 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6721 	 */
   6722 	ifp->if_obytes += m->m_pkthdr.len;
   6723 	if (m->m_flags & M_MCAST)
   6724 		ifp->if_omcasts++;
   6725 
   6726 	if (mutex_tryenter(txq->txq_lock)) {
   6727 		if (!txq->txq_stopping)
   6728 			wm_transmit_locked(ifp, txq);
   6729 		mutex_exit(txq->txq_lock);
   6730 	}
   6731 
   6732 	return 0;
   6733 }
   6734 
   6735 static void
   6736 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6737 {
   6738 
   6739 	wm_send_common_locked(ifp, txq, true);
   6740 }
   6741 
   6742 static void
   6743 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6744     bool is_transmit)
   6745 {
   6746 	struct wm_softc *sc = ifp->if_softc;
   6747 	struct mbuf *m0;
   6748 	struct m_tag *mtag;
   6749 	struct wm_txsoft *txs;
   6750 	bus_dmamap_t dmamap;
   6751 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6752 	bus_addr_t curaddr;
   6753 	bus_size_t seglen, curlen;
   6754 	uint32_t cksumcmd;
   6755 	uint8_t cksumfields;
   6756 
   6757 	KASSERT(mutex_owned(txq->txq_lock));
   6758 
   6759 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6760 		return;
   6761 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6762 		return;
   6763 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6764 		return;
   6765 
   6766 	/* Remember the previous number of free descriptors. */
   6767 	ofree = txq->txq_free;
   6768 
   6769 	/*
   6770 	 * Loop through the send queue, setting up transmit descriptors
   6771 	 * until we drain the queue, or use up all available transmit
   6772 	 * descriptors.
   6773 	 */
   6774 	for (;;) {
   6775 		m0 = NULL;
   6776 
   6777 		/* Get a work queue entry. */
   6778 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6779 			wm_txeof(sc, txq);
   6780 			if (txq->txq_sfree == 0) {
   6781 				DPRINTF(WM_DEBUG_TX,
   6782 				    ("%s: TX: no free job descriptors\n",
   6783 					device_xname(sc->sc_dev)));
   6784 				WM_Q_EVCNT_INCR(txq, txsstall);
   6785 				break;
   6786 			}
   6787 		}
   6788 
   6789 		/* Grab a packet off the queue. */
   6790 		if (is_transmit)
   6791 			m0 = pcq_get(txq->txq_interq);
   6792 		else
   6793 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6794 		if (m0 == NULL)
   6795 			break;
   6796 
   6797 		DPRINTF(WM_DEBUG_TX,
   6798 		    ("%s: TX: have packet to transmit: %p\n",
   6799 		    device_xname(sc->sc_dev), m0));
   6800 
   6801 		txs = &txq->txq_soft[txq->txq_snext];
   6802 		dmamap = txs->txs_dmamap;
   6803 
   6804 		use_tso = (m0->m_pkthdr.csum_flags &
   6805 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6806 
   6807 		/*
   6808 		 * So says the Linux driver:
   6809 		 * The controller does a simple calculation to make sure
   6810 		 * there is enough room in the FIFO before initiating the
   6811 		 * DMA for each buffer.  The calc is:
   6812 		 *	4 = ceil(buffer len / MSS)
   6813 		 * To make sure we don't overrun the FIFO, adjust the max
   6814 		 * buffer len if the MSS drops.
   6815 		 */
   6816 		dmamap->dm_maxsegsz =
   6817 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6818 		    ? m0->m_pkthdr.segsz << 2
   6819 		    : WTX_MAX_LEN;
   6820 
   6821 		/*
   6822 		 * Load the DMA map.  If this fails, the packet either
   6823 		 * didn't fit in the allotted number of segments, or we
   6824 		 * were short on resources.  For the too-many-segments
   6825 		 * case, we simply report an error and drop the packet,
   6826 		 * since we can't sanely copy a jumbo packet to a single
   6827 		 * buffer.
   6828 		 */
   6829 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6830 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6831 		if (error) {
   6832 			if (error == EFBIG) {
   6833 				WM_Q_EVCNT_INCR(txq, txdrop);
   6834 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6835 				    "DMA segments, dropping...\n",
   6836 				    device_xname(sc->sc_dev));
   6837 				wm_dump_mbuf_chain(sc, m0);
   6838 				m_freem(m0);
   6839 				continue;
   6840 			}
   6841 			/*  Short on resources, just stop for now. */
   6842 			DPRINTF(WM_DEBUG_TX,
   6843 			    ("%s: TX: dmamap load failed: %d\n",
   6844 			    device_xname(sc->sc_dev), error));
   6845 			break;
   6846 		}
   6847 
   6848 		segs_needed = dmamap->dm_nsegs;
   6849 		if (use_tso) {
   6850 			/* For sentinel descriptor; see below. */
   6851 			segs_needed++;
   6852 		}
   6853 
   6854 		/*
   6855 		 * Ensure we have enough descriptors free to describe
   6856 		 * the packet.  Note, we always reserve one descriptor
   6857 		 * at the end of the ring due to the semantics of the
   6858 		 * TDT register, plus one more in the event we need
   6859 		 * to load offload context.
   6860 		 */
   6861 		if (segs_needed > txq->txq_free - 2) {
   6862 			/*
   6863 			 * Not enough free descriptors to transmit this
   6864 			 * packet.  We haven't committed anything yet,
   6865 			 * so just unload the DMA map, put the packet
   6866 			 * back on the queue, and punt.  Notify the upper
   6867 			 * layer that there are no more slots left.
   6868 			 */
   6869 			DPRINTF(WM_DEBUG_TX,
   6870 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6871 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6872 			    segs_needed, txq->txq_free - 1));
   6873 			if (!is_transmit)
   6874 				ifp->if_flags |= IFF_OACTIVE;
   6875 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6876 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6877 			WM_Q_EVCNT_INCR(txq, txdstall);
   6878 			break;
   6879 		}
   6880 
   6881 		/*
   6882 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6883 		 * once we know we can transmit the packet, since we
   6884 		 * do some internal FIFO space accounting here.
   6885 		 */
   6886 		if (sc->sc_type == WM_T_82547 &&
   6887 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6888 			DPRINTF(WM_DEBUG_TX,
   6889 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6890 			    device_xname(sc->sc_dev)));
   6891 			if (!is_transmit)
   6892 				ifp->if_flags |= IFF_OACTIVE;
   6893 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6894 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6895 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6896 			break;
   6897 		}
   6898 
   6899 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6900 
   6901 		DPRINTF(WM_DEBUG_TX,
   6902 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6903 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6904 
   6905 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6906 
   6907 		/*
   6908 		 * Store a pointer to the packet so that we can free it
   6909 		 * later.
   6910 		 *
   6911 		 * Initially, we consider the number of descriptors the
   6912 		 * packet uses the number of DMA segments.  This may be
   6913 		 * incremented by 1 if we do checksum offload (a descriptor
   6914 		 * is used to set the checksum context).
   6915 		 */
   6916 		txs->txs_mbuf = m0;
   6917 		txs->txs_firstdesc = txq->txq_next;
   6918 		txs->txs_ndesc = segs_needed;
   6919 
   6920 		/* Set up offload parameters for this packet. */
   6921 		if (m0->m_pkthdr.csum_flags &
   6922 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6923 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6924 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6925 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6926 					  &cksumfields) != 0) {
   6927 				/* Error message already displayed. */
   6928 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6929 				continue;
   6930 			}
   6931 		} else {
   6932 			cksumcmd = 0;
   6933 			cksumfields = 0;
   6934 		}
   6935 
   6936 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6937 
   6938 		/* Sync the DMA map. */
   6939 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6940 		    BUS_DMASYNC_PREWRITE);
   6941 
   6942 		/* Initialize the transmit descriptor. */
   6943 		for (nexttx = txq->txq_next, seg = 0;
   6944 		     seg < dmamap->dm_nsegs; seg++) {
   6945 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6946 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6947 			     seglen != 0;
   6948 			     curaddr += curlen, seglen -= curlen,
   6949 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6950 				curlen = seglen;
   6951 
   6952 				/*
   6953 				 * So says the Linux driver:
   6954 				 * Work around for premature descriptor
   6955 				 * write-backs in TSO mode.  Append a
   6956 				 * 4-byte sentinel descriptor.
   6957 				 */
   6958 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6959 				    curlen > 8)
   6960 					curlen -= 4;
   6961 
   6962 				wm_set_dma_addr(
   6963 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6964 				txq->txq_descs[nexttx].wtx_cmdlen
   6965 				    = htole32(cksumcmd | curlen);
   6966 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6967 				    = 0;
   6968 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6969 				    = cksumfields;
   6970 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   6971 				lasttx = nexttx;
   6972 
   6973 				DPRINTF(WM_DEBUG_TX,
   6974 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6975 				     "len %#04zx\n",
   6976 				    device_xname(sc->sc_dev), nexttx,
   6977 				    (uint64_t)curaddr, curlen));
   6978 			}
   6979 		}
   6980 
   6981 		KASSERT(lasttx != -1);
   6982 
   6983 		/*
   6984 		 * Set up the command byte on the last descriptor of
   6985 		 * the packet.  If we're in the interrupt delay window,
   6986 		 * delay the interrupt.
   6987 		 */
   6988 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6989 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
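        		/*
        		 * WTX_CMD_RS requests a descriptor status write-back;
        		 * wm_txeof() polls the resulting WTX_ST_DD bit to find
        		 * out when the job can be reclaimed.
        		 */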
   6990 
   6991 		/*
   6992 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6993 		 * up the descriptor to encapsulate the packet for us.
   6994 		 *
   6995 		 * This is only valid on the last descriptor of the packet.
   6996 		 */
   6997 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6998 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6999 			    htole32(WTX_CMD_VLE);
   7000 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7001 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7002 		}
   7003 
   7004 		txs->txs_lastdesc = lasttx;
   7005 
   7006 		DPRINTF(WM_DEBUG_TX,
   7007 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7008 		    device_xname(sc->sc_dev),
   7009 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7010 
   7011 		/* Sync the descriptors we're using. */
   7012 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7013 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7014 
   7015 		/* Give the packet to the chip. */
   7016 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7017 
   7018 		DPRINTF(WM_DEBUG_TX,
   7019 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7020 
   7021 		DPRINTF(WM_DEBUG_TX,
   7022 		    ("%s: TX: finished transmitting packet, job %d\n",
   7023 		    device_xname(sc->sc_dev), txq->txq_snext));
   7024 
   7025 		/* Advance the tx pointer. */
   7026 		txq->txq_free -= txs->txs_ndesc;
   7027 		txq->txq_next = nexttx;
   7028 
   7029 		txq->txq_sfree--;
   7030 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7031 
   7032 		/* Pass the packet to any BPF listeners. */
   7033 		bpf_mtap(ifp, m0);
   7034 	}
   7035 
   7036 	if (m0 != NULL) {
   7037 		if (!is_transmit)
   7038 			ifp->if_flags |= IFF_OACTIVE;
   7039 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7040 		WM_Q_EVCNT_INCR(txq, txdrop);
   7041 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7042 			__func__));
   7043 		m_freem(m0);
   7044 	}
   7045 
   7046 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7047 		/* No more slots; notify upper layer. */
   7048 		if (!is_transmit)
   7049 			ifp->if_flags |= IFF_OACTIVE;
   7050 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7051 	}
   7052 
   7053 	if (txq->txq_free != ofree) {
   7054 		/* Set a watchdog timer in case the chip flakes out. */
   7055 		ifp->if_timer = 5;
   7056 	}
   7057 }
   7058 
   7059 /*
   7060  * wm_nq_tx_offload:
   7061  *
   7062  *	Set up TCP/IP checksumming parameters for the
   7063  *	specified packet, for NEWQUEUE devices
   7064  */
   7065 static int
   7066 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7067     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7068 {
   7069 	struct mbuf *m0 = txs->txs_mbuf;
   7070 	struct m_tag *mtag;
   7071 	uint32_t vl_len, mssidx, cmdc;
   7072 	struct ether_header *eh;
   7073 	int offset, iphl;
   7074 
   7075 	/*
   7076 	 * XXX It would be nice if the mbuf pkthdr had offset
   7077 	 * fields for the protocol headers.
   7078 	 */
   7079 	*cmdlenp = 0;
   7080 	*fieldsp = 0;
   7081 
   7082 	eh = mtod(m0, struct ether_header *);
   7083 	switch (htons(eh->ether_type)) {
   7084 	case ETHERTYPE_IP:
   7085 	case ETHERTYPE_IPV6:
   7086 		offset = ETHER_HDR_LEN;
   7087 		break;
   7088 
   7089 	case ETHERTYPE_VLAN:
   7090 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7091 		break;
   7092 
   7093 	default:
   7094 		/* Don't support this protocol or encapsulation. */
   7095 		*do_csum = false;
   7096 		return 0;
   7097 	}
   7098 	*do_csum = true;
   7099 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7100 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7101 
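        	/*
        	 * vl_len packs the VLAN tag, MAC header length and IP header
        	 * length into one 32-bit word of the context descriptor; e.g.
        	 * a plain Ethernet + IPv4 packet gives MACLEN = 14 and
        	 * IPLEN = 20.
        	 */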
   7102 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7103 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7104 
   7105 	if ((m0->m_pkthdr.csum_flags &
   7106 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7107 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7108 	} else {
   7109 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7110 	}
   7111 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7112 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7113 
   7114 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7115 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7116 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7117 		*cmdlenp |= NQTX_CMD_VLE;
   7118 	}
   7119 
   7120 	mssidx = 0;
   7121 
   7122 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7123 		int hlen = offset + iphl;
   7124 		int tcp_hlen;
   7125 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7126 
   7127 		if (__predict_false(m0->m_len <
   7128 				    (hlen + sizeof(struct tcphdr)))) {
   7129 			/*
   7130 			 * TCP/IP headers are not in the first mbuf; we need
   7131 			 * to do this the slow and painful way.  Let's just
   7132 			 * hope this doesn't happen very often.
   7133 			 */
   7134 			struct tcphdr th;
   7135 
   7136 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7137 
   7138 			m_copydata(m0, hlen, sizeof(th), &th);
   7139 			if (v4) {
   7140 				struct ip ip;
   7141 
   7142 				m_copydata(m0, offset, sizeof(ip), &ip);
   7143 				ip.ip_len = 0;
   7144 				m_copyback(m0,
   7145 				    offset + offsetof(struct ip, ip_len),
   7146 				    sizeof(ip.ip_len), &ip.ip_len);
   7147 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7148 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7149 			} else {
   7150 				struct ip6_hdr ip6;
   7151 
   7152 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7153 				ip6.ip6_plen = 0;
   7154 				m_copyback(m0,
   7155 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7156 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7157 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7158 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7159 			}
   7160 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7161 			    sizeof(th.th_sum), &th.th_sum);
   7162 
   7163 			tcp_hlen = th.th_off << 2;
   7164 		} else {
   7165 			/*
   7166 			 * TCP/IP headers are in the first mbuf; we can do
   7167 			 * this the easy way.
   7168 			 */
   7169 			struct tcphdr *th;
   7170 
   7171 			if (v4) {
   7172 				struct ip *ip =
   7173 				    (void *)(mtod(m0, char *) + offset);
   7174 				th = (void *)(mtod(m0, char *) + hlen);
   7175 
   7176 				ip->ip_len = 0;
   7177 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7178 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7179 			} else {
   7180 				struct ip6_hdr *ip6 =
   7181 				    (void *)(mtod(m0, char *) + offset);
   7182 				th = (void *)(mtod(m0, char *) + hlen);
   7183 
   7184 				ip6->ip6_plen = 0;
   7185 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7186 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7187 			}
   7188 			tcp_hlen = th->th_off << 2;
   7189 		}
   7190 		hlen += tcp_hlen;
   7191 		*cmdlenp |= NQTX_CMD_TSE;
   7192 
   7193 		if (v4) {
   7194 			WM_Q_EVCNT_INCR(txq, txtso);
   7195 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7196 		} else {
   7197 			WM_Q_EVCNT_INCR(txq, txtso6);
   7198 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7199 		}
   7200 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7201 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7202 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7203 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7204 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7205 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7206 	} else {
   7207 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7208 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7209 	}
   7210 
   7211 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7212 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7213 		cmdc |= NQTXC_CMD_IP4;
   7214 	}
   7215 
   7216 	if (m0->m_pkthdr.csum_flags &
   7217 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7218 		WM_Q_EVCNT_INCR(txq, txtusum);
   7219 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7220 			cmdc |= NQTXC_CMD_TCP;
   7221 		} else {
   7222 			cmdc |= NQTXC_CMD_UDP;
   7223 		}
   7224 		cmdc |= NQTXC_CMD_IP4;
   7225 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7226 	}
   7227 	if (m0->m_pkthdr.csum_flags &
   7228 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7229 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7230 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7231 			cmdc |= NQTXC_CMD_TCP;
   7232 		} else {
   7233 			cmdc |= NQTXC_CMD_UDP;
   7234 		}
   7235 		cmdc |= NQTXC_CMD_IP6;
   7236 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7237 	}
   7238 
   7239 	/*
   7240 	 * We don't have to write a context descriptor for every packet to
   7241 	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
   7242 	 * I354, I210 and I211. Writing one context descriptor per Tx queue
   7243 	 * is enough for these controllers.
   7244 	 * Writing a context descriptor for every packet adds overhead,
   7245 	 * but it does not cause problems.
   7246 	 */
   7247 	/* Fill in the context descriptor. */
   7248 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7249 	    htole32(vl_len);
   7250 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7251 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7252 	    htole32(cmdc);
   7253 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7254 	    htole32(mssidx);
   7255 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7256 	DPRINTF(WM_DEBUG_TX,
   7257 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7258 	    txq->txq_next, 0, vl_len));
   7259 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7260 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7261 	txs->txs_ndesc++;
   7262 	return 0;
   7263 }
   7264 
   7265 /*
   7266  * wm_nq_start:		[ifnet interface function]
   7267  *
   7268  *	Start packet transmission on the interface for NEWQUEUE devices
   7269  */
   7270 static void
   7271 wm_nq_start(struct ifnet *ifp)
   7272 {
   7273 	struct wm_softc *sc = ifp->if_softc;
   7274 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7275 
   7276 #ifdef WM_MPSAFE
   7277 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7278 #endif
   7279 	/*
   7280 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7281 	 */
   7282 
   7283 	mutex_enter(txq->txq_lock);
   7284 	if (!txq->txq_stopping)
   7285 		wm_nq_start_locked(ifp);
   7286 	mutex_exit(txq->txq_lock);
   7287 }
   7288 
   7289 static void
   7290 wm_nq_start_locked(struct ifnet *ifp)
   7291 {
   7292 	struct wm_softc *sc = ifp->if_softc;
   7293 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7294 
   7295 	wm_nq_send_common_locked(ifp, txq, false);
   7296 }
   7297 
   7298 static int
   7299 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7300 {
   7301 	int qid;
   7302 	struct wm_softc *sc = ifp->if_softc;
   7303 	struct wm_txqueue *txq;
   7304 
   7305 	qid = wm_select_txqueue(ifp, m);
   7306 	txq = &sc->sc_queue[qid].wmq_txq;
   7307 
   7308 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7309 		m_freem(m);
   7310 		WM_Q_EVCNT_INCR(txq, txdrop);
   7311 		return ENOBUFS;
   7312 	}
   7313 
   7314 	/*
   7315 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7316 	 */
   7317 	ifp->if_obytes += m->m_pkthdr.len;
   7318 	if (m->m_flags & M_MCAST)
   7319 		ifp->if_omcasts++;
   7320 
   7321 	/*
   7322 	 * There are two situations in which this mutex_tryenter() can
   7323 	 * fail at run time:
   7324 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   7325 	 *     (2) contention with the deferred if_start softint
   7326 	 *         (wm_handle_queue())
   7327 	 * In either case, the last packet enqueued to txq->txq_interq is
   7328 	 * dequeued by wm_deferred_start_locked(), so it does not get
   7329 	 * stuck.
   7330 	 */
   7331 	if (mutex_tryenter(txq->txq_lock)) {
   7332 		if (!txq->txq_stopping)
   7333 			wm_nq_transmit_locked(ifp, txq);
   7334 		mutex_exit(txq->txq_lock);
   7335 	}
   7336 
   7337 	return 0;
   7338 }
   7339 
   7340 static void
   7341 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7342 {
   7343 
   7344 	wm_nq_send_common_locked(ifp, txq, true);
   7345 }
   7346 
   7347 static void
   7348 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7349     bool is_transmit)
   7350 {
   7351 	struct wm_softc *sc = ifp->if_softc;
   7352 	struct mbuf *m0;
   7353 	struct m_tag *mtag;
   7354 	struct wm_txsoft *txs;
   7355 	bus_dmamap_t dmamap;
   7356 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7357 	bool do_csum, sent;
   7358 
   7359 	KASSERT(mutex_owned(txq->txq_lock));
   7360 
   7361 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7362 		return;
   7363 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7364 		return;
   7365 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7366 		return;
   7367 
   7368 	sent = false;
   7369 
   7370 	/*
   7371 	 * Loop through the send queue, setting up transmit descriptors
   7372 	 * until we drain the queue, or use up all available transmit
   7373 	 * descriptors.
   7374 	 */
   7375 	for (;;) {
   7376 		m0 = NULL;
   7377 
   7378 		/* Get a work queue entry. */
   7379 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7380 			wm_txeof(sc, txq);
   7381 			if (txq->txq_sfree == 0) {
   7382 				DPRINTF(WM_DEBUG_TX,
   7383 				    ("%s: TX: no free job descriptors\n",
   7384 					device_xname(sc->sc_dev)));
   7385 				WM_Q_EVCNT_INCR(txq, txsstall);
   7386 				break;
   7387 			}
   7388 		}
   7389 
   7390 		/* Grab a packet off the queue. */
   7391 		if (is_transmit)
   7392 			m0 = pcq_get(txq->txq_interq);
   7393 		else
   7394 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7395 		if (m0 == NULL)
   7396 			break;
   7397 
   7398 		DPRINTF(WM_DEBUG_TX,
   7399 		    ("%s: TX: have packet to transmit: %p\n",
   7400 		    device_xname(sc->sc_dev), m0));
   7401 
   7402 		txs = &txq->txq_soft[txq->txq_snext];
   7403 		dmamap = txs->txs_dmamap;
   7404 
   7405 		/*
   7406 		 * Load the DMA map.  If this fails, the packet either
   7407 		 * didn't fit in the allotted number of segments, or we
   7408 		 * were short on resources.  For the too-many-segments
   7409 		 * case, we simply report an error and drop the packet,
   7410 		 * since we can't sanely copy a jumbo packet to a single
   7411 		 * buffer.
   7412 		 */
   7413 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7414 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7415 		if (error) {
   7416 			if (error == EFBIG) {
   7417 				WM_Q_EVCNT_INCR(txq, txdrop);
   7418 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7419 				    "DMA segments, dropping...\n",
   7420 				    device_xname(sc->sc_dev));
   7421 				wm_dump_mbuf_chain(sc, m0);
   7422 				m_freem(m0);
   7423 				continue;
   7424 			}
   7425 			/* Short on resources, just stop for now. */
   7426 			DPRINTF(WM_DEBUG_TX,
   7427 			    ("%s: TX: dmamap load failed: %d\n",
   7428 			    device_xname(sc->sc_dev), error));
   7429 			break;
   7430 		}
   7431 
   7432 		segs_needed = dmamap->dm_nsegs;
   7433 
   7434 		/*
   7435 		 * Ensure we have enough descriptors free to describe
   7436 		 * the packet.  Note, we always reserve one descriptor
   7437 		 * at the end of the ring due to the semantics of the
   7438 		 * TDT register, plus one more in the event we need
   7439 		 * to load offload context.
   7440 		 */
   7441 		if (segs_needed > txq->txq_free - 2) {
   7442 			/*
   7443 			 * Not enough free descriptors to transmit this
   7444 			 * packet.  We haven't committed anything yet,
   7445 			 * so just unload the DMA map, put the packet
   7446 			 * back on the queue, and punt.  Notify the upper
   7447 			 * layer that there are no more slots left.
   7448 			 */
   7449 			DPRINTF(WM_DEBUG_TX,
   7450 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7451 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7452 			    segs_needed, txq->txq_free - 1));
   7453 			if (!is_transmit)
   7454 				ifp->if_flags |= IFF_OACTIVE;
   7455 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7456 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7457 			WM_Q_EVCNT_INCR(txq, txdstall);
   7458 			break;
   7459 		}
   7460 
   7461 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7462 
   7463 		DPRINTF(WM_DEBUG_TX,
   7464 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7465 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7466 
   7467 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7468 
   7469 		/*
   7470 		 * Store a pointer to the packet so that we can free it
   7471 		 * later.
   7472 		 *
   7473 		 * Initially, we consider the number of descriptors the
   7474 		 * packet uses the number of DMA segments.  This may be
   7475 		 * incremented by 1 if we do checksum offload (a descriptor
   7476 		 * is used to set the checksum context).
   7477 		 */
   7478 		txs->txs_mbuf = m0;
   7479 		txs->txs_firstdesc = txq->txq_next;
   7480 		txs->txs_ndesc = segs_needed;
   7481 
   7482 		/* Set up offload parameters for this packet. */
   7483 		uint32_t cmdlen, fields, dcmdlen;
   7484 		if (m0->m_pkthdr.csum_flags &
   7485 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7486 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7487 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7488 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7489 			    &do_csum) != 0) {
   7490 				/* Error message already displayed. */
   7491 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7492 				continue;
   7493 			}
   7494 		} else {
   7495 			do_csum = false;
   7496 			cmdlen = 0;
   7497 			fields = 0;
   7498 		}
   7499 
   7500 		/* Sync the DMA map. */
   7501 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7502 		    BUS_DMASYNC_PREWRITE);
   7503 
   7504 		/* Initialize the first transmit descriptor. */
   7505 		nexttx = txq->txq_next;
   7506 		if (!do_csum) {
   7507 			/* Set up a legacy descriptor */
   7508 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7509 			    dmamap->dm_segs[0].ds_addr);
   7510 			txq->txq_descs[nexttx].wtx_cmdlen =
   7511 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7512 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7513 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7514 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7515 			    NULL) {
   7516 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7517 				    htole32(WTX_CMD_VLE);
   7518 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7519 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7520 			} else {
   7521 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
   7522 			}
   7523 			dcmdlen = 0;
   7524 		} else {
   7525 			/* Set up an advanced data descriptor */
   7526 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7527 			    htole64(dmamap->dm_segs[0].ds_addr);
   7528 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7529 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7530 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7531 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7532 			    htole32(fields);
   7533 			DPRINTF(WM_DEBUG_TX,
   7534 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7535 			    device_xname(sc->sc_dev), nexttx,
   7536 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7537 			DPRINTF(WM_DEBUG_TX,
   7538 			    ("\t 0x%08x%08x\n", fields,
   7539 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7540 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7541 		}
   7542 
   7543 		lasttx = nexttx;
   7544 		nexttx = WM_NEXTTX(txq, nexttx);
   7545 		/*
   7546 		 * Fill in the next descriptors. Legacy or advanced format
   7547 		 * is the same here.
   7548 		 */
   7549 		for (seg = 1; seg < dmamap->dm_nsegs;
   7550 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7551 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7552 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7553 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7554 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7555 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7556 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7557 			lasttx = nexttx;
   7558 
   7559 			DPRINTF(WM_DEBUG_TX,
   7560 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7561 			     "len %#04zx\n",
   7562 			    device_xname(sc->sc_dev), nexttx,
   7563 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7564 			    dmamap->dm_segs[seg].ds_len));
   7565 		}
   7566 
   7567 		KASSERT(lasttx != -1);
   7568 
   7569 		/*
   7570 		 * Set up the command byte on the last descriptor of
   7571 		 * the packet.  If we're in the interrupt delay window,
   7572 		 * delay the interrupt.
   7573 		 */
   7574 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7575 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7576 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7577 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7578 
   7579 		txs->txs_lastdesc = lasttx;
   7580 
   7581 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7582 		    device_xname(sc->sc_dev),
   7583 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7584 
   7585 		/* Sync the descriptors we're using. */
   7586 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7587 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7588 
   7589 		/* Give the packet to the chip. */
   7590 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7591 		sent = true;
   7592 
   7593 		DPRINTF(WM_DEBUG_TX,
   7594 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7595 
   7596 		DPRINTF(WM_DEBUG_TX,
   7597 		    ("%s: TX: finished transmitting packet, job %d\n",
   7598 		    device_xname(sc->sc_dev), txq->txq_snext));
   7599 
   7600 		/* Advance the tx pointer. */
   7601 		txq->txq_free -= txs->txs_ndesc;
   7602 		txq->txq_next = nexttx;
   7603 
   7604 		txq->txq_sfree--;
   7605 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7606 
   7607 		/* Pass the packet to any BPF listeners. */
   7608 		bpf_mtap(ifp, m0);
   7609 	}
   7610 
   7611 	if (m0 != NULL) {
   7612 		if (!is_transmit)
   7613 			ifp->if_flags |= IFF_OACTIVE;
   7614 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7615 		WM_Q_EVCNT_INCR(txq, txdrop);
   7616 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7617 			__func__));
   7618 		m_freem(m0);
   7619 	}
   7620 
   7621 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7622 		/* No more slots; notify upper layer. */
   7623 		if (!is_transmit)
   7624 			ifp->if_flags |= IFF_OACTIVE;
   7625 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7626 	}
   7627 
   7628 	if (sent) {
   7629 		/* Set a watchdog timer in case the chip flakes out. */
   7630 		ifp->if_timer = 5;
   7631 	}
   7632 }
   7633 
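        /*
         * wm_deferred_start_locked:
         *
         *	Restart transmission on a queue; called with the queue lock
         *	held from the Tx interrupt/softint path.
         */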
   7634 static void
   7635 wm_deferred_start_locked(struct wm_txqueue *txq)
   7636 {
   7637 	struct wm_softc *sc = txq->txq_sc;
   7638 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7639 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7640 	int qid = wmq->wmq_id;
   7641 
   7642 	KASSERT(mutex_owned(txq->txq_lock));
   7643 
   7644 	if (txq->txq_stopping) {
   7645 		mutex_exit(txq->txq_lock);
   7646 		return;
   7647 	}
   7648 
   7649 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   7650 		/* XXX needed for ALTQ or single-CPU systems */
   7651 		if (qid == 0)
   7652 			wm_nq_start_locked(ifp);
   7653 		wm_nq_transmit_locked(ifp, txq);
   7654 	} else {
   7655 		/* XXX needed for ALTQ or single-CPU systems */
   7656 		if (qid == 0)
   7657 			wm_start_locked(ifp);
   7658 		wm_transmit_locked(ifp, txq);
   7659 	}
   7660 }
   7661 
   7662 /* Interrupt */
   7663 
   7664 /*
   7665  * wm_txeof:
   7666  *
   7667  *	Helper; handle transmit interrupts.
   7668  */
   7669 static int
   7670 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7671 {
   7672 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7673 	struct wm_txsoft *txs;
   7674 	bool processed = false;
   7675 	int count = 0;
   7676 	int i;
   7677 	uint8_t status;
   7678 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7679 
   7680 	KASSERT(mutex_owned(txq->txq_lock));
   7681 
   7682 	if (txq->txq_stopping)
   7683 		return 0;
   7684 
   7685 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7686 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
   7687 	if (wmq->wmq_id == 0)
   7688 		ifp->if_flags &= ~IFF_OACTIVE;
   7689 
   7690 	/*
   7691 	 * Go through the Tx list and free mbufs for those
   7692 	 * frames which have been transmitted.
   7693 	 */
   7694 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7695 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7696 		txs = &txq->txq_soft[i];
   7697 
   7698 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7699 			device_xname(sc->sc_dev), i));
   7700 
   7701 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7702 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7703 
   7704 		status =
   7705 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7706 		if ((status & WTX_ST_DD) == 0) {
   7707 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7708 			    BUS_DMASYNC_PREREAD);
   7709 			break;
   7710 		}
   7711 
   7712 		processed = true;
   7713 		count++;
   7714 		DPRINTF(WM_DEBUG_TX,
   7715 		    ("%s: TX: job %d done: descs %d..%d\n",
   7716 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7717 		    txs->txs_lastdesc));
   7718 
   7719 		/*
   7720 		 * XXX We should probably be using the statistics
   7721 		 * XXX registers, but I don't know if they exist
   7722 		 * XXX on chips before the i82544.
   7723 		 */
   7724 
   7725 #ifdef WM_EVENT_COUNTERS
   7726 		if (status & WTX_ST_TU)
   7727 			WM_Q_EVCNT_INCR(txq, tu);
   7728 #endif /* WM_EVENT_COUNTERS */
   7729 
   7730 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7731 			ifp->if_oerrors++;
   7732 			if (status & WTX_ST_LC)
   7733 				log(LOG_WARNING, "%s: late collision\n",
   7734 				    device_xname(sc->sc_dev));
   7735 			else if (status & WTX_ST_EC) {
   7736 				ifp->if_collisions += 16;
   7737 				log(LOG_WARNING, "%s: excessive collisions\n",
   7738 				    device_xname(sc->sc_dev));
   7739 			}
   7740 		} else
   7741 			ifp->if_opackets++;
   7742 
   7743 		txq->txq_packets++;
   7744 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7745 
   7746 		txq->txq_free += txs->txs_ndesc;
   7747 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7748 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7749 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7750 		m_freem(txs->txs_mbuf);
   7751 		txs->txs_mbuf = NULL;
   7752 	}
   7753 
   7754 	/* Update the dirty transmit buffer pointer. */
   7755 	txq->txq_sdirty = i;
   7756 	DPRINTF(WM_DEBUG_TX,
   7757 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7758 
   7759 	if (count != 0)
   7760 		rnd_add_uint32(&sc->rnd_source, count);
   7761 
   7762 	/*
   7763 	 * If there are no more pending transmissions, cancel the watchdog
   7764 	 * timer.
   7765 	 */
   7766 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7767 		ifp->if_timer = 0;
   7768 
   7769 	return processed;
   7770 }
   7771 
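        /*
         * Receive descriptor accessors.
         *
         * Three descriptor layouts are in use: the legacy format, the 82574
         * extended format and the NEWQUEUE (82575 and newer) advanced format.
         * The inline helpers below hide the differences from the receive path.
         */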
   7772 static inline uint32_t
   7773 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7774 {
   7775 	struct wm_softc *sc = rxq->rxq_sc;
   7776 
   7777 	if (sc->sc_type == WM_T_82574)
   7778 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7779 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7780 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7781 	else
   7782 		return rxq->rxq_descs[idx].wrx_status;
   7783 }
   7784 
   7785 static inline uint32_t
   7786 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7787 {
   7788 	struct wm_softc *sc = rxq->rxq_sc;
   7789 
   7790 	if (sc->sc_type == WM_T_82574)
   7791 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7792 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7793 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7794 	else
   7795 		return rxq->rxq_descs[idx].wrx_errors;
   7796 }
   7797 
   7798 static inline uint16_t
   7799 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7800 {
   7801 	struct wm_softc *sc = rxq->rxq_sc;
   7802 
   7803 	if (sc->sc_type == WM_T_82574)
   7804 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7805 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7806 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7807 	else
   7808 		return rxq->rxq_descs[idx].wrx_special;
   7809 }
   7810 
   7811 static inline int
   7812 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7813 {
   7814 	struct wm_softc *sc = rxq->rxq_sc;
   7815 
   7816 	if (sc->sc_type == WM_T_82574)
   7817 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7818 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7819 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7820 	else
   7821 		return rxq->rxq_descs[idx].wrx_len;
   7822 }
   7823 
   7824 #ifdef WM_DEBUG
   7825 static inline uint32_t
   7826 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7827 {
   7828 	struct wm_softc *sc = rxq->rxq_sc;
   7829 
   7830 	if (sc->sc_type == WM_T_82574)
   7831 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7832 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7833 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7834 	else
   7835 		return 0;
   7836 }
   7837 
   7838 static inline uint8_t
   7839 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7840 {
   7841 	struct wm_softc *sc = rxq->rxq_sc;
   7842 
   7843 	if (sc->sc_type == WM_T_82574)
   7844 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7845 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7846 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7847 	else
   7848 		return 0;
   7849 }
   7850 #endif /* WM_DEBUG */
   7851 
   7852 static inline bool
   7853 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7854     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7855 {
   7856 
   7857 	if (sc->sc_type == WM_T_82574)
   7858 		return (status & ext_bit) != 0;
   7859 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7860 		return (status & nq_bit) != 0;
   7861 	else
   7862 		return (status & legacy_bit) != 0;
   7863 }
   7864 
   7865 static inline bool
   7866 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7867     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7868 {
   7869 
   7870 	if (sc->sc_type == WM_T_82574)
   7871 		return (error & ext_bit) != 0;
   7872 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7873 		return (error & nq_bit) != 0;
   7874 	else
   7875 		return (error & legacy_bit) != 0;
   7876 }
   7877 
   7878 static inline bool
   7879 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7880 {
   7881 
    7882 	return wm_rxdesc_is_set_status(rxq->rxq_sc, status,
    7883 	    WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP);
   7887 }
   7888 
   7889 static inline bool
   7890 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7891 {
   7892 	struct wm_softc *sc = rxq->rxq_sc;
   7893 
   7894 	/* XXXX missing error bit for newqueue? */
   7895 	if (wm_rxdesc_is_set_error(sc, errors,
   7896 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
    7897 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ |
         		EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
   7898 		NQRXC_ERROR_RXE)) {
   7899 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7900 			log(LOG_WARNING, "%s: symbol error\n",
   7901 			    device_xname(sc->sc_dev));
   7902 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7903 			log(LOG_WARNING, "%s: receive sequence error\n",
   7904 			    device_xname(sc->sc_dev));
   7905 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7906 			log(LOG_WARNING, "%s: CRC error\n",
   7907 			    device_xname(sc->sc_dev));
   7908 		return true;
   7909 	}
   7910 
   7911 	return false;
   7912 }
   7913 
   7914 static inline bool
   7915 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7916 {
   7917 	struct wm_softc *sc = rxq->rxq_sc;
   7918 
   7919 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7920 		NQRXC_STATUS_DD)) {
   7921 		/* We have processed all of the receive descriptors. */
   7922 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7923 		return false;
   7924 	}
   7925 
   7926 	return true;
   7927 }
   7928 
   7929 static inline bool
   7930 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7931     struct mbuf *m)
   7932 {
   7933 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7934 
   7935 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7936 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7937 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7938 	}
   7939 
   7940 	return true;
   7941 }
   7942 
   7943 static inline void
   7944 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7945     uint32_t errors, struct mbuf *m)
   7946 {
   7947 	struct wm_softc *sc = rxq->rxq_sc;
   7948 
   7949 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7950 		if (wm_rxdesc_is_set_status(sc, status,
   7951 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7952 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7953 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7954 			if (wm_rxdesc_is_set_error(sc, errors,
   7955 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7956 				m->m_pkthdr.csum_flags |=
   7957 					M_CSUM_IPv4_BAD;
   7958 		}
   7959 		if (wm_rxdesc_is_set_status(sc, status,
   7960 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7961 			/*
   7962 			 * Note: we don't know if this was TCP or UDP,
   7963 			 * so we just set both bits, and expect the
   7964 			 * upper layers to deal.
   7965 			 */
   7966 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7967 			m->m_pkthdr.csum_flags |=
   7968 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7969 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7970 			if (wm_rxdesc_is_set_error(sc, errors,
   7971 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7972 				m->m_pkthdr.csum_flags |=
   7973 					M_CSUM_TCP_UDP_BAD;
   7974 		}
   7975 	}
   7976 }
   7977 
   7978 /*
   7979  * wm_rxeof:
   7980  *
   7981  *	Helper; handle receive interrupts.
   7982  */
   7983 static void
   7984 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7985 {
   7986 	struct wm_softc *sc = rxq->rxq_sc;
   7987 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7988 	struct wm_rxsoft *rxs;
   7989 	struct mbuf *m;
   7990 	int i, len;
   7991 	int count = 0;
   7992 	uint32_t status, errors;
   7993 	uint16_t vlantag;
   7994 
   7995 	KASSERT(mutex_owned(rxq->rxq_lock));
   7996 
   7997 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7998 		if (limit-- == 0) {
   7999 			rxq->rxq_ptr = i;
   8000 			break;
   8001 		}
   8002 
   8003 		rxs = &rxq->rxq_soft[i];
   8004 
   8005 		DPRINTF(WM_DEBUG_RX,
   8006 		    ("%s: RX: checking descriptor %d\n",
   8007 		    device_xname(sc->sc_dev), i));
    8008 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8009 
   8010 		status = wm_rxdesc_get_status(rxq, i);
   8011 		errors = wm_rxdesc_get_errors(rxq, i);
   8012 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8013 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8014 #ifdef WM_DEBUG
   8015 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8016 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8017 #endif
   8018 
   8019 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8020 			/*
    8021 			 * Update the receive pointer while still holding
    8022 			 * rxq_lock, keeping it consistent with the counters.
   8023 			 */
   8024 			rxq->rxq_ptr = i;
   8025 			break;
   8026 		}
   8027 
   8028 		count++;
   8029 		if (__predict_false(rxq->rxq_discard)) {
   8030 			DPRINTF(WM_DEBUG_RX,
   8031 			    ("%s: RX: discarding contents of descriptor %d\n",
   8032 			    device_xname(sc->sc_dev), i));
   8033 			wm_init_rxdesc(rxq, i);
   8034 			if (wm_rxdesc_is_eop(rxq, status)) {
   8035 				/* Reset our state. */
   8036 				DPRINTF(WM_DEBUG_RX,
   8037 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8038 				    device_xname(sc->sc_dev)));
   8039 				rxq->rxq_discard = 0;
   8040 			}
   8041 			continue;
   8042 		}
   8043 
   8044 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8045 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8046 
   8047 		m = rxs->rxs_mbuf;
   8048 
   8049 		/*
   8050 		 * Add a new receive buffer to the ring, unless of
   8051 		 * course the length is zero. Treat the latter as a
   8052 		 * failed mapping.
   8053 		 */
   8054 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8055 			/*
   8056 			 * Failed, throw away what we've done so
   8057 			 * far, and discard the rest of the packet.
   8058 			 */
   8059 			ifp->if_ierrors++;
   8060 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8061 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8062 			wm_init_rxdesc(rxq, i);
   8063 			if (!wm_rxdesc_is_eop(rxq, status))
   8064 				rxq->rxq_discard = 1;
   8065 			if (rxq->rxq_head != NULL)
   8066 				m_freem(rxq->rxq_head);
   8067 			WM_RXCHAIN_RESET(rxq);
   8068 			DPRINTF(WM_DEBUG_RX,
   8069 			    ("%s: RX: Rx buffer allocation failed, "
   8070 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8071 			    rxq->rxq_discard ? " (discard)" : ""));
   8072 			continue;
   8073 		}
   8074 
   8075 		m->m_len = len;
   8076 		rxq->rxq_len += len;
   8077 		DPRINTF(WM_DEBUG_RX,
   8078 		    ("%s: RX: buffer at %p len %d\n",
   8079 		    device_xname(sc->sc_dev), m->m_data, len));
   8080 
   8081 		/* If this is not the end of the packet, keep looking. */
   8082 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8083 			WM_RXCHAIN_LINK(rxq, m);
   8084 			DPRINTF(WM_DEBUG_RX,
   8085 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8086 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8087 			continue;
   8088 		}
   8089 
    8090 		/*
    8091 		 * Okay, we have the entire packet now.  The chip
    8092 		 * includes the FCS except on I350, I354 and I21[01]
    8093 		 * (not all chips can be configured to strip it), so
    8094 		 * we need to trim it.  We may also need to shorten
    8095 		 * the previous mbuf in the chain if the current one
    8096 		 * is too short.  Due to an erratum, the RCTL_SECRC
    8097 		 * bit in the RCTL register is always set on I350,
    8098 		 * so the FCS is never trimmed there.
    8099 		 */
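         		/*
         		 * Worked example: with a frame split so that only
         		 * 2 bytes of the 4 byte FCS land in the final mbuf,
         		 * we zero that mbuf's length and shorten the
         		 * previous one by the remaining 2 bytes.
         		 */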
   8100 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8101 		    && (sc->sc_type != WM_T_I210)
   8102 		    && (sc->sc_type != WM_T_I211)) {
   8103 			if (m->m_len < ETHER_CRC_LEN) {
   8104 				rxq->rxq_tail->m_len
   8105 				    -= (ETHER_CRC_LEN - m->m_len);
   8106 				m->m_len = 0;
   8107 			} else
   8108 				m->m_len -= ETHER_CRC_LEN;
   8109 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8110 		} else
   8111 			len = rxq->rxq_len;
   8112 
   8113 		WM_RXCHAIN_LINK(rxq, m);
   8114 
   8115 		*rxq->rxq_tailp = NULL;
   8116 		m = rxq->rxq_head;
   8117 
   8118 		WM_RXCHAIN_RESET(rxq);
   8119 
   8120 		DPRINTF(WM_DEBUG_RX,
   8121 		    ("%s: RX: have entire packet, len -> %d\n",
   8122 		    device_xname(sc->sc_dev), len));
   8123 
   8124 		/* If an error occurred, update stats and drop the packet. */
   8125 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8126 			m_freem(m);
   8127 			continue;
   8128 		}
   8129 
   8130 		/* No errors.  Receive the packet. */
   8131 		m_set_rcvif(m, ifp);
   8132 		m->m_pkthdr.len = len;
   8133 		/*
   8134 		 * TODO
    8135 		 * We should save the rsshash and rsstype in this mbuf.
   8136 		 */
   8137 		DPRINTF(WM_DEBUG_RX,
   8138 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8139 			device_xname(sc->sc_dev), rsstype, rsshash));
   8140 
   8141 		/*
   8142 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8143 		 * for us.  Associate the tag with the packet.
   8144 		 */
   8145 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8146 			continue;
   8147 
   8148 		/* Set up checksum info for this packet. */
   8149 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8150 		/*
    8151 		 * Update the receive pointer while still holding rxq_lock,
    8152 		 * keeping it consistent with the counters.
   8153 		 */
   8154 		rxq->rxq_ptr = i;
   8155 		rxq->rxq_packets++;
   8156 		rxq->rxq_bytes += len;
   8157 		mutex_exit(rxq->rxq_lock);
   8158 
   8159 		/* Pass it on. */
   8160 		if_percpuq_enqueue(sc->sc_ipq, m);
   8161 
   8162 		mutex_enter(rxq->rxq_lock);
   8163 
   8164 		if (rxq->rxq_stopping)
   8165 			break;
   8166 	}
   8167 
   8168 	if (count != 0)
   8169 		rnd_add_uint32(&sc->rnd_source, count);
   8170 
   8171 	DPRINTF(WM_DEBUG_RX,
   8172 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8173 }
   8174 
   8175 /*
   8176  * wm_linkintr_gmii:
   8177  *
   8178  *	Helper; handle link interrupts for GMII.
   8179  */
   8180 static void
   8181 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8182 {
   8183 
   8184 	KASSERT(WM_CORE_LOCKED(sc));
   8185 
   8186 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8187 		__func__));
   8188 
   8189 	if (icr & ICR_LSC) {
   8190 		uint32_t reg;
   8191 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8192 
   8193 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8194 			wm_gig_downshift_workaround_ich8lan(sc);
   8195 
   8196 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8197 			device_xname(sc->sc_dev)));
   8198 		mii_pollstat(&sc->sc_mii);
   8199 		if (sc->sc_type == WM_T_82543) {
   8200 			int miistatus, active;
   8201 
   8202 			/*
   8203 			 * With 82543, we need to force speed and
   8204 			 * duplex on the MAC equal to what the PHY
   8205 			 * speed and duplex configuration is.
   8206 			 */
   8207 			miistatus = sc->sc_mii.mii_media_status;
   8208 
   8209 			if (miistatus & IFM_ACTIVE) {
   8210 				active = sc->sc_mii.mii_media_active;
   8211 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8212 				switch (IFM_SUBTYPE(active)) {
   8213 				case IFM_10_T:
   8214 					sc->sc_ctrl |= CTRL_SPEED_10;
   8215 					break;
   8216 				case IFM_100_TX:
   8217 					sc->sc_ctrl |= CTRL_SPEED_100;
   8218 					break;
   8219 				case IFM_1000_T:
   8220 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8221 					break;
   8222 				default:
   8223 					/*
   8224 					 * fiber?
    8225 					 * Fiber?
    8226 					 * Should not enter here.
   8227 					printf("unknown media (%x)\n", active);
   8228 					break;
   8229 				}
   8230 				if (active & IFM_FDX)
   8231 					sc->sc_ctrl |= CTRL_FD;
   8232 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8233 			}
   8234 		} else if ((sc->sc_type == WM_T_ICH8)
   8235 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8236 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8237 		} else if (sc->sc_type == WM_T_PCH) {
   8238 			wm_k1_gig_workaround_hv(sc,
   8239 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8240 		}
   8241 
   8242 		if ((sc->sc_phytype == WMPHY_82578)
   8243 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8244 			== IFM_1000_T)) {
   8245 
   8246 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8247 				delay(200*1000); /* XXX too big */
   8248 
   8249 				/* Link stall fix for link up */
   8250 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8251 				    HV_MUX_DATA_CTRL,
   8252 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8253 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8254 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8255 				    HV_MUX_DATA_CTRL,
   8256 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8257 			}
   8258 		}
   8259 		/*
   8260 		 * I217 Packet Loss issue:
   8261 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8262 		 * on power up.
   8263 		 * Set the Beacon Duration for I217 to 8 usec
   8264 		 */
   8265 		if ((sc->sc_type == WM_T_PCH_LPT)
   8266 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8267 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8268 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8269 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8270 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8271 		}
   8272 
   8273 		/* XXX Work-around I218 hang issue */
   8274 		/* e1000_k1_workaround_lpt_lp() */
   8275 
   8276 		if ((sc->sc_type == WM_T_PCH_LPT)
   8277 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8278 			/*
   8279 			 * Set platform power management values for Latency
   8280 			 * Tolerance Reporting (LTR)
   8281 			 */
   8282 			wm_platform_pm_pch_lpt(sc,
   8283 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8284 				    != 0));
   8285 		}
   8286 
   8287 		/* FEXTNVM6 K1-off workaround */
   8288 		if (sc->sc_type == WM_T_PCH_SPT) {
   8289 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8290 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8291 			    & FEXTNVM6_K1_OFF_ENABLE)
   8292 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8293 			else
   8294 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8295 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8296 		}
   8297 	} else if (icr & ICR_RXSEQ) {
    8298 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8299 			device_xname(sc->sc_dev)));
   8300 	}
   8301 }
   8302 
   8303 /*
   8304  * wm_linkintr_tbi:
   8305  *
   8306  *	Helper; handle link interrupts for TBI mode.
   8307  */
   8308 static void
   8309 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8310 {
   8311 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8312 	uint32_t status;
   8313 
   8314 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8315 		__func__));
   8316 
   8317 	status = CSR_READ(sc, WMREG_STATUS);
   8318 	if (icr & ICR_LSC) {
   8319 		if (status & STATUS_LU) {
   8320 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8321 			    device_xname(sc->sc_dev),
   8322 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8323 			/*
   8324 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8325 			 * so we should update sc->sc_ctrl
   8326 			 */
   8327 
   8328 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8329 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8330 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8331 			if (status & STATUS_FD)
   8332 				sc->sc_tctl |=
   8333 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8334 			else
   8335 				sc->sc_tctl |=
   8336 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8337 			if (sc->sc_ctrl & CTRL_TFCE)
   8338 				sc->sc_fcrtl |= FCRTL_XONE;
   8339 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8340 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8341 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8342 				      sc->sc_fcrtl);
   8343 			sc->sc_tbi_linkup = 1;
   8344 			if_link_state_change(ifp, LINK_STATE_UP);
   8345 		} else {
   8346 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8347 			    device_xname(sc->sc_dev)));
   8348 			sc->sc_tbi_linkup = 0;
   8349 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8350 		}
   8351 		/* Update LED */
   8352 		wm_tbi_serdes_set_linkled(sc);
   8353 	} else if (icr & ICR_RXSEQ) {
   8354 		DPRINTF(WM_DEBUG_LINK,
   8355 		    ("%s: LINK: Receive sequence error\n",
   8356 		    device_xname(sc->sc_dev)));
   8357 	}
   8358 }
   8359 
   8360 /*
   8361  * wm_linkintr_serdes:
   8362  *
    8363  *	Helper; handle link interrupts for SERDES mode.
   8364  */
   8365 static void
   8366 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8367 {
   8368 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8369 	struct mii_data *mii = &sc->sc_mii;
   8370 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8371 	uint32_t pcs_adv, pcs_lpab, reg;
   8372 
   8373 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8374 		__func__));
   8375 
   8376 	if (icr & ICR_LSC) {
   8377 		/* Check PCS */
   8378 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8379 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8380 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8381 				device_xname(sc->sc_dev)));
   8382 			mii->mii_media_status |= IFM_ACTIVE;
   8383 			sc->sc_tbi_linkup = 1;
   8384 			if_link_state_change(ifp, LINK_STATE_UP);
   8385 		} else {
   8386 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8387 				device_xname(sc->sc_dev)));
    8388 			mii->mii_media_active |= IFM_NONE;
   8389 			sc->sc_tbi_linkup = 0;
   8390 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8391 			wm_tbi_serdes_set_linkled(sc);
   8392 			return;
   8393 		}
   8394 		mii->mii_media_active |= IFM_1000_SX;
   8395 		if ((reg & PCS_LSTS_FDX) != 0)
   8396 			mii->mii_media_active |= IFM_FDX;
   8397 		else
   8398 			mii->mii_media_active |= IFM_HDX;
   8399 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8400 			/* Check flow */
   8401 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8402 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8403 				DPRINTF(WM_DEBUG_LINK,
   8404 				    ("XXX LINKOK but not ACOMP\n"));
   8405 				return;
   8406 			}
   8407 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8408 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8409 			DPRINTF(WM_DEBUG_LINK,
   8410 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
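         			/*
         			 * Resolve flow control from the advertised and
         			 * link partner PAUSE bits, per the usual 802.3
         			 * symmetric/asymmetric pause resolution: both
         			 * sides symmetric -> Tx and Rx pause; we
         			 * asymmetric-only with partner sym+asym -> Tx
         			 * pause; we sym+asym with partner
         			 * asymmetric-only -> Rx pause.
         			 */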
   8411 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8412 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8413 				mii->mii_media_active |= IFM_FLOW
   8414 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8415 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8416 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8417 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8418 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8419 				mii->mii_media_active |= IFM_FLOW
   8420 				    | IFM_ETH_TXPAUSE;
   8421 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8422 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8423 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8424 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8425 				mii->mii_media_active |= IFM_FLOW
   8426 				    | IFM_ETH_RXPAUSE;
   8427 		}
   8428 		/* Update LED */
   8429 		wm_tbi_serdes_set_linkled(sc);
   8430 	} else {
   8431 		DPRINTF(WM_DEBUG_LINK,
   8432 		    ("%s: LINK: Receive sequence error\n",
   8433 		    device_xname(sc->sc_dev)));
   8434 	}
   8435 }
   8436 
   8437 /*
   8438  * wm_linkintr:
   8439  *
   8440  *	Helper; handle link interrupts.
   8441  */
   8442 static void
   8443 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8444 {
   8445 
   8446 	KASSERT(WM_CORE_LOCKED(sc));
   8447 
   8448 	if (sc->sc_flags & WM_F_HAS_MII)
   8449 		wm_linkintr_gmii(sc, icr);
   8450 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8451 	    && (sc->sc_type >= WM_T_82575))
   8452 		wm_linkintr_serdes(sc, icr);
   8453 	else
   8454 		wm_linkintr_tbi(sc, icr);
   8455 }
   8456 
   8457 /*
   8458  * wm_intr_legacy:
   8459  *
   8460  *	Interrupt service routine for INTx and MSI.
   8461  */
   8462 static int
   8463 wm_intr_legacy(void *arg)
   8464 {
   8465 	struct wm_softc *sc = arg;
   8466 	struct wm_queue *wmq = &sc->sc_queue[0];
   8467 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8468 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8469 	uint32_t icr, rndval = 0;
   8470 	int handled = 0;
   8471 
   8472 	DPRINTF(WM_DEBUG_TX,
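         	/*
         	 * Note: ICR is clear-on-read, so each pass of the loop below
         	 * both fetches and acknowledges the pending interrupt causes.
         	 */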
   8473 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8474 	while (1 /* CONSTCOND */) {
   8475 		icr = CSR_READ(sc, WMREG_ICR);
   8476 		if ((icr & sc->sc_icr) == 0)
   8477 			break;
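         		/* Keep the first ICR value for entropy harvesting below. */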
   8478 		if (rndval == 0)
   8479 			rndval = icr;
   8480 
   8481 		mutex_enter(rxq->rxq_lock);
   8482 
   8483 		if (rxq->rxq_stopping) {
   8484 			mutex_exit(rxq->rxq_lock);
   8485 			break;
   8486 		}
   8487 
   8488 		handled = 1;
   8489 
   8490 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8491 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8492 			DPRINTF(WM_DEBUG_RX,
   8493 			    ("%s: RX: got Rx intr 0x%08x\n",
   8494 			    device_xname(sc->sc_dev),
   8495 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8496 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8497 		}
   8498 #endif
   8499 		wm_rxeof(rxq, UINT_MAX);
   8500 
   8501 		mutex_exit(rxq->rxq_lock);
   8502 		mutex_enter(txq->txq_lock);
   8503 
   8504 		if (txq->txq_stopping) {
   8505 			mutex_exit(txq->txq_lock);
   8506 			break;
   8507 		}
   8508 
   8509 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8510 		if (icr & ICR_TXDW) {
   8511 			DPRINTF(WM_DEBUG_TX,
   8512 			    ("%s: TX: got TXDW interrupt\n",
   8513 			    device_xname(sc->sc_dev)));
   8514 			WM_Q_EVCNT_INCR(txq, txdw);
   8515 		}
   8516 #endif
   8517 		wm_txeof(sc, txq);
   8518 
   8519 		mutex_exit(txq->txq_lock);
   8520 		WM_CORE_LOCK(sc);
   8521 
   8522 		if (sc->sc_core_stopping) {
   8523 			WM_CORE_UNLOCK(sc);
   8524 			break;
   8525 		}
   8526 
   8527 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8528 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8529 			wm_linkintr(sc, icr);
   8530 		}
   8531 
   8532 		WM_CORE_UNLOCK(sc);
   8533 
   8534 		if (icr & ICR_RXO) {
   8535 #if defined(WM_DEBUG)
   8536 			log(LOG_WARNING, "%s: Receive overrun\n",
   8537 			    device_xname(sc->sc_dev));
   8538 #endif /* defined(WM_DEBUG) */
   8539 		}
   8540 	}
   8541 
   8542 	rnd_add_uint32(&sc->rnd_source, rndval);
   8543 
   8544 	if (handled) {
   8545 		/* Try to get more packets going. */
   8546 		softint_schedule(wmq->wmq_si);
   8547 	}
   8548 
   8549 	return handled;
   8550 }
   8551 
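         /*
          * Per-queue interrupt mask helpers: the 82574 masks its Tx/Rx queue
          * causes through IMC/IMS, the 82575 uses per-queue EIMC/EIMS bits,
          * and later multiqueue chips use one EIMC/EIMS bit per MSI-X vector.
          */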
   8552 static inline void
   8553 wm_txrxintr_disable(struct wm_queue *wmq)
   8554 {
   8555 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8556 
    8557 	if (sc->sc_type == WM_T_82574)
    8558 		CSR_WRITE(sc, WMREG_IMC,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    8559 	else if (sc->sc_type == WM_T_82575)
    8560 		CSR_WRITE(sc, WMREG_EIMC,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    8561 	else
    8562 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8563 }
   8564 
   8565 static inline void
   8566 wm_txrxintr_enable(struct wm_queue *wmq)
   8567 {
   8568 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8569 
   8570 	wm_itrs_calculate(sc, wmq);
   8571 
    8572 	if (sc->sc_type == WM_T_82574)
    8573 		CSR_WRITE(sc, WMREG_IMS,
         		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
    8574 	else if (sc->sc_type == WM_T_82575)
    8575 		CSR_WRITE(sc, WMREG_EIMS,
         		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
    8576 	else
    8577 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8578 }
   8579 
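         /*
          * wm_txrxintr_msix:
          *
          *	Interrupt service routine for the Tx/Rx MSI-X vector of a
          *	queue pair.
          */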
   8580 static int
   8581 wm_txrxintr_msix(void *arg)
   8582 {
   8583 	struct wm_queue *wmq = arg;
   8584 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8585 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8586 	struct wm_softc *sc = txq->txq_sc;
   8587 	u_int limit = sc->sc_rx_intr_process_limit;
   8588 
   8589 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8590 
   8591 	DPRINTF(WM_DEBUG_TX,
   8592 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8593 
   8594 	wm_txrxintr_disable(wmq);
   8595 
   8596 	mutex_enter(txq->txq_lock);
   8597 
   8598 	if (txq->txq_stopping) {
   8599 		mutex_exit(txq->txq_lock);
   8600 		return 0;
   8601 	}
   8602 
   8603 	WM_Q_EVCNT_INCR(txq, txdw);
   8604 	wm_txeof(sc, txq);
   8605 	/* wm_deferred start() is done in wm_handle_queue(). */
   8606 	mutex_exit(txq->txq_lock);
   8607 
   8608 	DPRINTF(WM_DEBUG_RX,
   8609 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8610 	mutex_enter(rxq->rxq_lock);
   8611 
   8612 	if (rxq->rxq_stopping) {
   8613 		mutex_exit(rxq->rxq_lock);
   8614 		return 0;
   8615 	}
   8616 
   8617 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8618 	wm_rxeof(rxq, limit);
   8619 	mutex_exit(rxq->rxq_lock);
   8620 
   8621 	wm_itrs_writereg(sc, wmq);
   8622 
   8623 	softint_schedule(wmq->wmq_si);
   8624 
   8625 	return 1;
   8626 }
   8627 
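         /*
          * wm_handle_queue:
          *
          *	Softint handler for a queue pair: complete Tx, restart any
          *	deferred transmission, process Rx with sc_rx_process_limit
          *	and then re-enable the queue interrupt.
          */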
   8628 static void
   8629 wm_handle_queue(void *arg)
   8630 {
   8631 	struct wm_queue *wmq = arg;
   8632 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8633 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8634 	struct wm_softc *sc = txq->txq_sc;
   8635 	u_int limit = sc->sc_rx_process_limit;
   8636 
   8637 	mutex_enter(txq->txq_lock);
   8638 	if (txq->txq_stopping) {
   8639 		mutex_exit(txq->txq_lock);
   8640 		return;
   8641 	}
   8642 	wm_txeof(sc, txq);
   8643 	wm_deferred_start_locked(txq);
   8644 	mutex_exit(txq->txq_lock);
   8645 
   8646 	mutex_enter(rxq->rxq_lock);
   8647 	if (rxq->rxq_stopping) {
   8648 		mutex_exit(rxq->rxq_lock);
   8649 		return;
   8650 	}
   8651 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8652 	wm_rxeof(rxq, limit);
   8653 	mutex_exit(rxq->rxq_lock);
   8654 
   8655 	wm_txrxintr_enable(wmq);
   8656 }
   8657 
   8658 /*
   8659  * wm_linkintr_msix:
   8660  *
   8661  *	Interrupt service routine for link status change for MSI-X.
   8662  */
   8663 static int
   8664 wm_linkintr_msix(void *arg)
   8665 {
   8666 	struct wm_softc *sc = arg;
   8667 	uint32_t reg;
   8668 
   8669 	DPRINTF(WM_DEBUG_LINK,
   8670 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8671 
   8672 	reg = CSR_READ(sc, WMREG_ICR);
   8673 	WM_CORE_LOCK(sc);
   8674 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8675 		goto out;
   8676 
   8677 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8678 	wm_linkintr(sc, ICR_LSC);
   8679 
   8680 out:
   8681 	WM_CORE_UNLOCK(sc);
   8682 
   8683 	if (sc->sc_type == WM_T_82574)
   8684 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8685 	else if (sc->sc_type == WM_T_82575)
   8686 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8687 	else
   8688 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8689 
   8690 	return 1;
   8691 }
   8692 
   8693 /*
   8694  * Media related.
   8695  * GMII, SGMII, TBI (and SERDES)
   8696  */
   8697 
   8698 /* Common */
   8699 
   8700 /*
   8701  * wm_tbi_serdes_set_linkled:
   8702  *
   8703  *	Update the link LED on TBI and SERDES devices.
   8704  */
   8705 static void
   8706 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8707 {
   8708 
   8709 	if (sc->sc_tbi_linkup)
   8710 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8711 	else
   8712 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8713 
   8714 	/* 82540 or newer devices are active low */
   8715 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8716 
   8717 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8718 }
   8719 
   8720 /* GMII related */
   8721 
   8722 /*
   8723  * wm_gmii_reset:
   8724  *
   8725  *	Reset the PHY.
   8726  */
   8727 static void
   8728 wm_gmii_reset(struct wm_softc *sc)
   8729 {
   8730 	uint32_t reg;
   8731 	int rv;
   8732 
   8733 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8734 		device_xname(sc->sc_dev), __func__));
   8735 
   8736 	rv = sc->phy.acquire(sc);
   8737 	if (rv != 0) {
   8738 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8739 		    __func__);
   8740 		return;
   8741 	}
   8742 
   8743 	switch (sc->sc_type) {
   8744 	case WM_T_82542_2_0:
   8745 	case WM_T_82542_2_1:
   8746 		/* null */
   8747 		break;
   8748 	case WM_T_82543:
   8749 		/*
   8750 		 * With 82543, we need to force speed and duplex on the MAC
   8751 		 * equal to what the PHY speed and duplex configuration is.
   8752 		 * In addition, we need to perform a hardware reset on the PHY
   8753 		 * to take it out of reset.
   8754 		 */
   8755 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8756 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8757 
   8758 		/* The PHY reset pin is active-low. */
   8759 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8760 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8761 		    CTRL_EXT_SWDPIN(4));
   8762 		reg |= CTRL_EXT_SWDPIO(4);
   8763 
   8764 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8765 		CSR_WRITE_FLUSH(sc);
   8766 		delay(10*1000);
   8767 
   8768 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8769 		CSR_WRITE_FLUSH(sc);
   8770 		delay(150);
   8771 #if 0
   8772 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8773 #endif
   8774 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8775 		break;
   8776 	case WM_T_82544:	/* reset 10000us */
   8777 	case WM_T_82540:
   8778 	case WM_T_82545:
   8779 	case WM_T_82545_3:
   8780 	case WM_T_82546:
   8781 	case WM_T_82546_3:
   8782 	case WM_T_82541:
   8783 	case WM_T_82541_2:
   8784 	case WM_T_82547:
   8785 	case WM_T_82547_2:
   8786 	case WM_T_82571:	/* reset 100us */
   8787 	case WM_T_82572:
   8788 	case WM_T_82573:
   8789 	case WM_T_82574:
   8790 	case WM_T_82575:
   8791 	case WM_T_82576:
   8792 	case WM_T_82580:
   8793 	case WM_T_I350:
   8794 	case WM_T_I354:
   8795 	case WM_T_I210:
   8796 	case WM_T_I211:
   8797 	case WM_T_82583:
   8798 	case WM_T_80003:
   8799 		/* generic reset */
   8800 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8801 		CSR_WRITE_FLUSH(sc);
   8802 		delay(20000);
   8803 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8804 		CSR_WRITE_FLUSH(sc);
   8805 		delay(20000);
   8806 
   8807 		if ((sc->sc_type == WM_T_82541)
   8808 		    || (sc->sc_type == WM_T_82541_2)
   8809 		    || (sc->sc_type == WM_T_82547)
   8810 		    || (sc->sc_type == WM_T_82547_2)) {
    8811 			/* Workarounds for IGP are done in igp_reset() */
   8812 			/* XXX add code to set LED after phy reset */
   8813 		}
   8814 		break;
   8815 	case WM_T_ICH8:
   8816 	case WM_T_ICH9:
   8817 	case WM_T_ICH10:
   8818 	case WM_T_PCH:
   8819 	case WM_T_PCH2:
   8820 	case WM_T_PCH_LPT:
   8821 	case WM_T_PCH_SPT:
   8822 		/* generic reset */
   8823 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8824 		CSR_WRITE_FLUSH(sc);
   8825 		delay(100);
   8826 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8827 		CSR_WRITE_FLUSH(sc);
   8828 		delay(150);
   8829 		break;
   8830 	default:
   8831 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8832 		    __func__);
   8833 		break;
   8834 	}
   8835 
   8836 	sc->phy.release(sc);
   8837 
   8838 	/* get_cfg_done */
   8839 	wm_get_cfg_done(sc);
   8840 
   8841 	/* extra setup */
   8842 	switch (sc->sc_type) {
   8843 	case WM_T_82542_2_0:
   8844 	case WM_T_82542_2_1:
   8845 	case WM_T_82543:
   8846 	case WM_T_82544:
   8847 	case WM_T_82540:
   8848 	case WM_T_82545:
   8849 	case WM_T_82545_3:
   8850 	case WM_T_82546:
   8851 	case WM_T_82546_3:
   8852 	case WM_T_82541_2:
   8853 	case WM_T_82547_2:
   8854 	case WM_T_82571:
   8855 	case WM_T_82572:
   8856 	case WM_T_82573:
   8857 	case WM_T_82575:
   8858 	case WM_T_82576:
   8859 	case WM_T_82580:
   8860 	case WM_T_I350:
   8861 	case WM_T_I354:
   8862 	case WM_T_I210:
   8863 	case WM_T_I211:
   8864 	case WM_T_80003:
   8865 		/* null */
   8866 		break;
   8867 	case WM_T_82574:
   8868 	case WM_T_82583:
   8869 		wm_lplu_d0_disable(sc);
   8870 		break;
   8871 	case WM_T_82541:
   8872 	case WM_T_82547:
   8873 		/* XXX Configure actively LED after PHY reset */
   8874 		break;
   8875 	case WM_T_ICH8:
   8876 	case WM_T_ICH9:
   8877 	case WM_T_ICH10:
   8878 	case WM_T_PCH:
   8879 	case WM_T_PCH2:
   8880 	case WM_T_PCH_LPT:
   8881 	case WM_T_PCH_SPT:
    8882 		/* Allow time for h/w to get to a quiescent state after reset */
   8883 		delay(10*1000);
   8884 
   8885 		if (sc->sc_type == WM_T_PCH)
   8886 			wm_hv_phy_workaround_ich8lan(sc);
   8887 
   8888 		if (sc->sc_type == WM_T_PCH2)
   8889 			wm_lv_phy_workaround_ich8lan(sc);
   8890 
   8891 		/* Clear the host wakeup bit after lcd reset */
   8892 		if (sc->sc_type >= WM_T_PCH) {
   8893 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8894 			    BM_PORT_GEN_CFG);
   8895 			reg &= ~BM_WUC_HOST_WU_BIT;
   8896 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8897 			    BM_PORT_GEN_CFG, reg);
   8898 		}
   8899 
   8900 		/*
    8901 		 * XXX Configure the LCD with the extended configuration region
   8902 		 * in NVM
   8903 		 */
   8904 
   8905 		/* Disable D0 LPLU. */
   8906 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8907 			wm_lplu_d0_disable_pch(sc);
   8908 		else
   8909 			wm_lplu_d0_disable(sc);	/* ICH* */
   8910 		break;
   8911 	default:
   8912 		panic("%s: unknown type\n", __func__);
   8913 		break;
   8914 	}
   8915 }
   8916 
    8917 /*
    8918  * Set up sc_phytype and mii_{read|write}reg.
    8919  *
    8920  *  To identify the PHY type, the correct read/write functions must be
    8921  * selected, and to select them the PCI ID or MAC type must be known
    8922  * without accessing any PHY registers.
    8923  *
    8924  *  On the first call of this function, the PHY ID is not known yet, so
    8925  * the PCI ID or MAC type is checked. The list of PCI IDs may not be
    8926  * complete, so the result might be incorrect.
    8927  *
    8928  *  On the second call, the PHY OUI and model are used to identify the
    8929  * PHY type. This might still not be perfect because of missing table
    8930  * entries, but it is better than the first call.
    8931  *
    8932  *  If the newly detected result differs from the previous assumption,
    8933  * a diagnostic message is printed.
    8934  */
   8935 static void
   8936 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8937     uint16_t phy_model)
   8938 {
   8939 	device_t dev = sc->sc_dev;
   8940 	struct mii_data *mii = &sc->sc_mii;
   8941 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8942 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8943 	mii_readreg_t new_readreg;
   8944 	mii_writereg_t new_writereg;
   8945 
   8946 	if (mii->mii_readreg == NULL) {
   8947 		/*
   8948 		 *  This is the first call of this function. For ICH and PCH
   8949 		 * variants, it's difficult to determine the PHY access method
   8950 		 * by sc_type, so use the PCI product ID for some devices.
   8951 		 */
   8952 
   8953 		switch (sc->sc_pcidevid) {
   8954 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8955 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8956 			/* 82577 */
   8957 			new_phytype = WMPHY_82577;
   8958 			break;
   8959 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8960 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8961 			/* 82578 */
   8962 			new_phytype = WMPHY_82578;
   8963 			break;
   8964 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8965 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8966 			/* 82579 */
   8967 			new_phytype = WMPHY_82579;
   8968 			break;
   8969 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8970 		case PCI_PRODUCT_INTEL_82801I_BM:
   8971 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8972 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8973 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8974 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8975 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8976 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8977 			/* ICH8, 9, 10 with 82567 */
   8978 			new_phytype = WMPHY_BM;
   8979 			break;
   8980 		default:
   8981 			break;
   8982 		}
   8983 	} else {
   8984 		/* It's not the first call. Use PHY OUI and model */
   8985 		switch (phy_oui) {
   8986 		case MII_OUI_ATHEROS: /* XXX ??? */
   8987 			switch (phy_model) {
   8988 			case 0x0004: /* XXX */
   8989 				new_phytype = WMPHY_82578;
   8990 				break;
   8991 			default:
   8992 				break;
   8993 			}
   8994 			break;
   8995 		case MII_OUI_xxMARVELL:
   8996 			switch (phy_model) {
   8997 			case MII_MODEL_xxMARVELL_I210:
   8998 				new_phytype = WMPHY_I210;
   8999 				break;
   9000 			case MII_MODEL_xxMARVELL_E1011:
   9001 			case MII_MODEL_xxMARVELL_E1000_3:
   9002 			case MII_MODEL_xxMARVELL_E1000_5:
   9003 			case MII_MODEL_xxMARVELL_E1112:
   9004 				new_phytype = WMPHY_M88;
   9005 				break;
   9006 			case MII_MODEL_xxMARVELL_E1149:
   9007 				new_phytype = WMPHY_BM;
   9008 				break;
   9009 			case MII_MODEL_xxMARVELL_E1111:
   9010 			case MII_MODEL_xxMARVELL_I347:
   9011 			case MII_MODEL_xxMARVELL_E1512:
   9012 			case MII_MODEL_xxMARVELL_E1340M:
   9013 			case MII_MODEL_xxMARVELL_E1543:
   9014 				new_phytype = WMPHY_M88;
   9015 				break;
   9016 			case MII_MODEL_xxMARVELL_I82563:
   9017 				new_phytype = WMPHY_GG82563;
   9018 				break;
   9019 			default:
   9020 				break;
   9021 			}
   9022 			break;
   9023 		case MII_OUI_INTEL:
   9024 			switch (phy_model) {
   9025 			case MII_MODEL_INTEL_I82577:
   9026 				new_phytype = WMPHY_82577;
   9027 				break;
   9028 			case MII_MODEL_INTEL_I82579:
   9029 				new_phytype = WMPHY_82579;
   9030 				break;
   9031 			case MII_MODEL_INTEL_I217:
   9032 				new_phytype = WMPHY_I217;
   9033 				break;
   9034 			case MII_MODEL_INTEL_I82580:
   9035 			case MII_MODEL_INTEL_I350:
   9036 				new_phytype = WMPHY_82580;
   9037 				break;
   9038 			default:
   9039 				break;
   9040 			}
   9041 			break;
   9042 		case MII_OUI_yyINTEL:
   9043 			switch (phy_model) {
   9044 			case MII_MODEL_yyINTEL_I82562G:
   9045 			case MII_MODEL_yyINTEL_I82562EM:
   9046 			case MII_MODEL_yyINTEL_I82562ET:
   9047 				new_phytype = WMPHY_IFE;
   9048 				break;
   9049 			case MII_MODEL_yyINTEL_IGP01E1000:
   9050 				new_phytype = WMPHY_IGP;
   9051 				break;
   9052 			case MII_MODEL_yyINTEL_I82566:
   9053 				new_phytype = WMPHY_IGP_3;
   9054 				break;
   9055 			default:
   9056 				break;
   9057 			}
   9058 			break;
   9059 		default:
   9060 			break;
   9061 		}
   9062 		if (new_phytype == WMPHY_UNKNOWN)
   9063 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9064 			    __func__);
   9065 
   9066 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9067 		    && (sc->sc_phytype != new_phytype)) {
    9068 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9069 			    "was incorrect. PHY type from PHY ID = %u\n",
   9070 			    sc->sc_phytype, new_phytype);
   9071 		}
   9072 	}
   9073 
   9074 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9075 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9076 		/* SGMII */
   9077 		new_readreg = wm_sgmii_readreg;
   9078 		new_writereg = wm_sgmii_writereg;
   9079 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   9080 		/* BM2 (phyaddr == 1) */
   9081 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9082 		    && (new_phytype != WMPHY_BM)
   9083 		    && (new_phytype != WMPHY_UNKNOWN))
   9084 			doubt_phytype = new_phytype;
   9085 		new_phytype = WMPHY_BM;
   9086 		new_readreg = wm_gmii_bm_readreg;
   9087 		new_writereg = wm_gmii_bm_writereg;
   9088 	} else if (sc->sc_type >= WM_T_PCH) {
   9089 		/* All PCH* use _hv_ */
   9090 		new_readreg = wm_gmii_hv_readreg;
   9091 		new_writereg = wm_gmii_hv_writereg;
   9092 	} else if (sc->sc_type >= WM_T_ICH8) {
   9093 		/* non-82567 ICH8, 9 and 10 */
   9094 		new_readreg = wm_gmii_i82544_readreg;
   9095 		new_writereg = wm_gmii_i82544_writereg;
   9096 	} else if (sc->sc_type >= WM_T_80003) {
   9097 		/* 80003 */
   9098 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9099 		    && (new_phytype != WMPHY_GG82563)
   9100 		    && (new_phytype != WMPHY_UNKNOWN))
   9101 			doubt_phytype = new_phytype;
   9102 		new_phytype = WMPHY_GG82563;
   9103 		new_readreg = wm_gmii_i80003_readreg;
   9104 		new_writereg = wm_gmii_i80003_writereg;
   9105 	} else if (sc->sc_type >= WM_T_I210) {
   9106 		/* I210 and I211 */
   9107 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9108 		    && (new_phytype != WMPHY_I210)
   9109 		    && (new_phytype != WMPHY_UNKNOWN))
   9110 			doubt_phytype = new_phytype;
   9111 		new_phytype = WMPHY_I210;
   9112 		new_readreg = wm_gmii_gs40g_readreg;
   9113 		new_writereg = wm_gmii_gs40g_writereg;
   9114 	} else if (sc->sc_type >= WM_T_82580) {
   9115 		/* 82580, I350 and I354 */
   9116 		new_readreg = wm_gmii_82580_readreg;
   9117 		new_writereg = wm_gmii_82580_writereg;
   9118 	} else if (sc->sc_type >= WM_T_82544) {
    9119 		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9120 		new_readreg = wm_gmii_i82544_readreg;
   9121 		new_writereg = wm_gmii_i82544_writereg;
   9122 	} else {
   9123 		new_readreg = wm_gmii_i82543_readreg;
   9124 		new_writereg = wm_gmii_i82543_writereg;
   9125 	}
   9126 
   9127 	if (new_phytype == WMPHY_BM) {
   9128 		/* All BM use _bm_ */
   9129 		new_readreg = wm_gmii_bm_readreg;
   9130 		new_writereg = wm_gmii_bm_writereg;
   9131 	}
   9132 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9133 		/* All PCH* use _hv_ */
   9134 		new_readreg = wm_gmii_hv_readreg;
   9135 		new_writereg = wm_gmii_hv_writereg;
   9136 	}
   9137 
   9138 	/* Diag output */
   9139 	if (doubt_phytype != WMPHY_UNKNOWN)
   9140 		aprint_error_dev(dev, "Assumed new PHY type was "
   9141 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9142 		    new_phytype);
   9143 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9144 	    && (sc->sc_phytype != new_phytype))
    9145 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
   9146 		    "was incorrect. New PHY type = %u\n",
   9147 		    sc->sc_phytype, new_phytype);
   9148 
   9149 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9150 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9151 
   9152 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9153 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9154 		    "function was incorrect.\n");
   9155 
   9156 	/* Update now */
   9157 	sc->sc_phytype = new_phytype;
   9158 	mii->mii_readreg = new_readreg;
   9159 	mii->mii_writereg = new_writereg;
   9160 }
   9161 
   9162 /*
   9163  * wm_get_phy_id_82575:
   9164  *
   9165  * Return PHY ID. Return -1 if it failed.
   9166  */
   9167 static int
   9168 wm_get_phy_id_82575(struct wm_softc *sc)
   9169 {
   9170 	uint32_t reg;
   9171 	int phyid = -1;
   9172 
   9173 	/* XXX */
   9174 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9175 		return -1;
   9176 
   9177 	if (wm_sgmii_uses_mdio(sc)) {
   9178 		switch (sc->sc_type) {
   9179 		case WM_T_82575:
   9180 		case WM_T_82576:
   9181 			reg = CSR_READ(sc, WMREG_MDIC);
   9182 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9183 			break;
   9184 		case WM_T_82580:
   9185 		case WM_T_I350:
   9186 		case WM_T_I354:
   9187 		case WM_T_I210:
   9188 		case WM_T_I211:
   9189 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9190 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9191 			break;
   9192 		default:
   9193 			return -1;
   9194 		}
   9195 	}
   9196 
   9197 	return phyid;
   9198 }
   9199 
   9200 
   9201 /*
   9202  * wm_gmii_mediainit:
   9203  *
   9204  *	Initialize media for use on 1000BASE-T devices.
   9205  */
   9206 static void
   9207 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9208 {
   9209 	device_t dev = sc->sc_dev;
   9210 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9211 	struct mii_data *mii = &sc->sc_mii;
   9212 	uint32_t reg;
   9213 
   9214 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9215 		device_xname(sc->sc_dev), __func__));
   9216 
   9217 	/* We have GMII. */
   9218 	sc->sc_flags |= WM_F_HAS_MII;
   9219 
   9220 	if (sc->sc_type == WM_T_80003)
    9221 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9222 	else
   9223 		sc->sc_tipg = TIPG_1000T_DFLT;
   9224 
   9225 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9226 	if ((sc->sc_type == WM_T_82580)
   9227 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9228 	    || (sc->sc_type == WM_T_I211)) {
   9229 		reg = CSR_READ(sc, WMREG_PHPM);
   9230 		reg &= ~PHPM_GO_LINK_D;
   9231 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9232 	}
   9233 
   9234 	/*
   9235 	 * Let the chip set speed/duplex on its own based on
   9236 	 * signals from the PHY.
   9237 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9238 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9239 	 */
   9240 	sc->sc_ctrl |= CTRL_SLU;
   9241 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9242 
   9243 	/* Initialize our media structures and probe the GMII. */
   9244 	mii->mii_ifp = ifp;
   9245 
   9246 	/*
    9247 	 * The first call of wm_gmii_setup_phytype. The result might be
   9248 	 * incorrect.
   9249 	 */
   9250 	wm_gmii_setup_phytype(sc, 0, 0);
   9251 
   9252 	mii->mii_statchg = wm_gmii_statchg;
   9253 
   9254 	/* get PHY control from SMBus to PCIe */
   9255 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9256 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9257 		wm_smbustopci(sc);
   9258 
   9259 	wm_gmii_reset(sc);
   9260 
   9261 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9262 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9263 	    wm_gmii_mediastatus);
   9264 
   9265 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9266 	    || (sc->sc_type == WM_T_82580)
   9267 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9268 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9269 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9270 			/* Attach only one port */
   9271 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9272 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9273 		} else {
   9274 			int i, id;
   9275 			uint32_t ctrl_ext;
   9276 
   9277 			id = wm_get_phy_id_82575(sc);
   9278 			if (id != -1) {
   9279 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9280 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9281 			}
   9282 			if ((id == -1)
   9283 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9284 				/* Power on sgmii phy if it is disabled */
   9285 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9286 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9287 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9288 				CSR_WRITE_FLUSH(sc);
   9289 				delay(300*1000); /* XXX too long */
   9290 
   9291 				/* from 1 to 8 */
   9292 				for (i = 1; i < 8; i++)
   9293 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9294 					    0xffffffff, i, MII_OFFSET_ANY,
   9295 					    MIIF_DOPAUSE);
   9296 
   9297 				/* restore previous sfp cage power state */
   9298 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9299 			}
   9300 		}
   9301 	} else {
   9302 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9303 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9304 	}
   9305 
   9306 	/*
   9307 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9308 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9309 	 */
   9310 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9311 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9312 		wm_set_mdio_slow_mode_hv(sc);
   9313 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9314 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9315 	}
   9316 
   9317 	/*
   9318 	 * (For ICH8 variants)
   9319 	 * If PHY detection failed, use BM's r/w function and retry.
   9320 	 */
   9321 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9322 		/* if failed, retry with *_bm_* */
   9323 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9324 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9325 		    sc->sc_phytype);
   9326 		sc->sc_phytype = WMPHY_BM;
   9327 		mii->mii_readreg = wm_gmii_bm_readreg;
   9328 		mii->mii_writereg = wm_gmii_bm_writereg;
   9329 
   9330 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9331 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9332 	}
   9333 
   9334 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9335 		/* No PHY was found */
   9336 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9337 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9338 		sc->sc_phytype = WMPHY_NONE;
   9339 	} else {
   9340 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9341 
   9342 		/*
   9343 		 * PHY Found! Check PHY type again by the second call of
    9344 		 * wm_gmii_setup_phytype.
   9345 		 */
   9346 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9347 		    child->mii_mpd_model);
   9348 
   9349 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9350 	}
   9351 }
   9352 
   9353 /*
   9354  * wm_gmii_mediachange:	[ifmedia interface function]
   9355  *
   9356  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9357  */
   9358 static int
   9359 wm_gmii_mediachange(struct ifnet *ifp)
   9360 {
   9361 	struct wm_softc *sc = ifp->if_softc;
   9362 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9363 	int rc;
   9364 
   9365 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9366 		device_xname(sc->sc_dev), __func__));
   9367 	if ((ifp->if_flags & IFF_UP) == 0)
   9368 		return 0;
   9369 
   9370 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9371 	sc->sc_ctrl |= CTRL_SLU;
   9372 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9373 	    || (sc->sc_type > WM_T_82543)) {
   9374 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9375 	} else {
   9376 		sc->sc_ctrl &= ~CTRL_ASDE;
   9377 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9378 		if (ife->ifm_media & IFM_FDX)
   9379 			sc->sc_ctrl |= CTRL_FD;
   9380 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9381 		case IFM_10_T:
   9382 			sc->sc_ctrl |= CTRL_SPEED_10;
   9383 			break;
   9384 		case IFM_100_TX:
   9385 			sc->sc_ctrl |= CTRL_SPEED_100;
   9386 			break;
   9387 		case IFM_1000_T:
   9388 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9389 			break;
   9390 		default:
   9391 			panic("wm_gmii_mediachange: bad media 0x%x",
   9392 			    ife->ifm_media);
   9393 		}
   9394 	}
   9395 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9396 	if (sc->sc_type <= WM_T_82543)
   9397 		wm_gmii_reset(sc);
   9398 
   9399 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9400 		return 0;
   9401 	return rc;
   9402 }
   9403 
   9404 /*
   9405  * wm_gmii_mediastatus:	[ifmedia interface function]
   9406  *
   9407  *	Get the current interface media status on a 1000BASE-T device.
   9408  */
   9409 static void
   9410 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9411 {
   9412 	struct wm_softc *sc = ifp->if_softc;
   9413 
   9414 	ether_mediastatus(ifp, ifmr);
   9415 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9416 	    | sc->sc_flowflags;
   9417 }
   9418 
   9419 #define	MDI_IO		CTRL_SWDPIN(2)
   9420 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9421 #define	MDI_CLK		CTRL_SWDPIN(3)
   9422 
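         /*
          * i82543 MII access is bit-banged through software-definable pins:
          * MDI_IO carries the data bit, MDI_CLK is pulsed around each bit and
          * MDI_DIR sets the host -> PHY direction.  The frames follow the
          * IEEE 802.3 clause 22 management format: a 32-bit preamble of ones,
          * then start, opcode, PHY address and register address bits, then
          * 16 data bits.
          */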
   9423 static void
   9424 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9425 {
   9426 	uint32_t i, v;
   9427 
   9428 	v = CSR_READ(sc, WMREG_CTRL);
   9429 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9430 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9431 
   9432 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9433 		if (data & i)
   9434 			v |= MDI_IO;
   9435 		else
   9436 			v &= ~MDI_IO;
   9437 		CSR_WRITE(sc, WMREG_CTRL, v);
   9438 		CSR_WRITE_FLUSH(sc);
   9439 		delay(10);
   9440 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9441 		CSR_WRITE_FLUSH(sc);
   9442 		delay(10);
   9443 		CSR_WRITE(sc, WMREG_CTRL, v);
   9444 		CSR_WRITE_FLUSH(sc);
   9445 		delay(10);
   9446 	}
   9447 }
   9448 
   9449 static uint32_t
   9450 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9451 {
   9452 	uint32_t v, i, data = 0;
   9453 
   9454 	v = CSR_READ(sc, WMREG_CTRL);
   9455 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9456 	v |= CTRL_SWDPIO(3);
   9457 
   9458 	CSR_WRITE(sc, WMREG_CTRL, v);
   9459 	CSR_WRITE_FLUSH(sc);
   9460 	delay(10);
   9461 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9462 	CSR_WRITE_FLUSH(sc);
   9463 	delay(10);
   9464 	CSR_WRITE(sc, WMREG_CTRL, v);
   9465 	CSR_WRITE_FLUSH(sc);
   9466 	delay(10);
   9467 
   9468 	for (i = 0; i < 16; i++) {
   9469 		data <<= 1;
   9470 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9471 		CSR_WRITE_FLUSH(sc);
   9472 		delay(10);
   9473 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9474 			data |= 1;
   9475 		CSR_WRITE(sc, WMREG_CTRL, v);
   9476 		CSR_WRITE_FLUSH(sc);
   9477 		delay(10);
   9478 	}
   9479 
   9480 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9481 	CSR_WRITE_FLUSH(sc);
   9482 	delay(10);
   9483 	CSR_WRITE(sc, WMREG_CTRL, v);
   9484 	CSR_WRITE_FLUSH(sc);
   9485 	delay(10);
   9486 
   9487 	return data;
   9488 }
   9489 
   9490 #undef MDI_IO
   9491 #undef MDI_DIR
   9492 #undef MDI_CLK
   9493 
   9494 /*
   9495  * wm_gmii_i82543_readreg:	[mii interface function]
   9496  *
   9497  *	Read a PHY register on the GMII (i82543 version).
   9498  */
   9499 static int
   9500 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9501 {
   9502 	struct wm_softc *sc = device_private(self);
   9503 	int rv;
   9504 
   9505 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9506 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9507 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9508 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9509 
   9510 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9511 	    device_xname(sc->sc_dev), phy, reg, rv));
   9512 
   9513 	return rv;
   9514 }
   9515 
   9516 /*
   9517  * wm_gmii_i82543_writereg:	[mii interface function]
   9518  *
   9519  *	Write a PHY register on the GMII (i82543 version).
   9520  */
   9521 static void
   9522 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9523 {
   9524 	struct wm_softc *sc = device_private(self);
   9525 
   9526 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9527 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9528 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9529 	    (MII_COMMAND_START << 30), 32);
   9530 }
   9531 
   9532 /*
   9533  * wm_gmii_mdic_readreg:	[mii interface function]
   9534  *
   9535  *	Read a PHY register on the GMII.
   9536  */
   9537 static int
   9538 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9539 {
   9540 	struct wm_softc *sc = device_private(self);
   9541 	uint32_t mdic = 0;
   9542 	int i, rv;
   9543 
   9544 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9545 	    MDIC_REGADD(reg));
   9546 
   9547 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9548 		mdic = CSR_READ(sc, WMREG_MDIC);
   9549 		if (mdic & MDIC_READY)
   9550 			break;
   9551 		delay(50);
   9552 	}
   9553 
   9554 	if ((mdic & MDIC_READY) == 0) {
   9555 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9556 		    device_xname(sc->sc_dev), phy, reg);
   9557 		rv = 0;
   9558 	} else if (mdic & MDIC_E) {
   9559 #if 0 /* This is normal if no PHY is present. */
   9560 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9561 		    device_xname(sc->sc_dev), phy, reg);
   9562 #endif
   9563 		rv = 0;
   9564 	} else {
   9565 		rv = MDIC_DATA(mdic);
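         		/*
         		 * An all-ones read usually means no PHY responded
         		 * on the bus, so treat it as no data.
         		 */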
   9566 		if (rv == 0xffff)
   9567 			rv = 0;
   9568 	}
   9569 
   9570 	return rv;
   9571 }
   9572 
   9573 /*
   9574  * wm_gmii_mdic_writereg:	[mii interface function]
   9575  *
   9576  *	Write a PHY register on the GMII.
   9577  */
   9578 static void
   9579 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9580 {
   9581 	struct wm_softc *sc = device_private(self);
   9582 	uint32_t mdic = 0;
   9583 	int i;
   9584 
   9585 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9586 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9587 
   9588 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9589 		mdic = CSR_READ(sc, WMREG_MDIC);
   9590 		if (mdic & MDIC_READY)
   9591 			break;
   9592 		delay(50);
   9593 	}
   9594 
   9595 	if ((mdic & MDIC_READY) == 0)
   9596 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9597 		    device_xname(sc->sc_dev), phy, reg);
   9598 	else if (mdic & MDIC_E)
   9599 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9600 		    device_xname(sc->sc_dev), phy, reg);
   9601 }
   9602 
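#if 0
/*
 * Sketch only (not compiled): the MDIC_READY poll above appears in both
 * the read and write paths and could be factored into a helper like the
 * one below.  wm_gmii_mdic_wait() is a hypothetical name, not an
 * existing function in this driver.
 */
static uint32_t
wm_gmii_mdic_wait(struct wm_softc *sc)
{
	uint32_t mdic = 0;
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(50);
	}

	return mdic;	/* Caller checks MDIC_READY and MDIC_E. */
}
#endif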
   9603 /*
   9604  * wm_gmii_i82544_readreg:	[mii interface function]
   9605  *
   9606  *	Read a PHY register on the GMII.
   9607  */
   9608 static int
   9609 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9610 {
   9611 	struct wm_softc *sc = device_private(self);
   9612 	int rv;
   9613 
   9614 	if (sc->phy.acquire(sc)) {
   9615 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9616 		    __func__);
   9617 		return 0;
   9618 	}
   9619 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9620 	sc->phy.release(sc);
   9621 
   9622 	return rv;
   9623 }
   9624 
   9625 /*
   9626  * wm_gmii_i82544_writereg:	[mii interface function]
   9627  *
   9628  *	Write a PHY register on the GMII.
   9629  */
   9630 static void
   9631 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9632 {
   9633 	struct wm_softc *sc = device_private(self);
   9634 
    9635 	if (sc->phy.acquire(sc)) {
    9636 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9637 		    __func__);
		return;
    9638 	}
   9639 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9640 	sc->phy.release(sc);
   9641 }
   9642 
   9643 /*
   9644  * wm_gmii_i80003_readreg:	[mii interface function]
   9645  *
    9646  *	Read a PHY register on the Kumeran interface (80003).
    9647  * This could be handled by the PHY layer if we didn't have to lock the
    9648  * resource ...
   9649  */
   9650 static int
   9651 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9652 {
   9653 	struct wm_softc *sc = device_private(self);
   9654 	int rv;
   9655 
   9656 	if (phy != 1) /* only one PHY on kumeran bus */
   9657 		return 0;
   9658 
   9659 	if (sc->phy.acquire(sc)) {
   9660 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9661 		    __func__);
   9662 		return 0;
   9663 	}
   9664 
   9665 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9666 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9667 		    reg >> GG82563_PAGE_SHIFT);
   9668 	} else {
   9669 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9670 		    reg >> GG82563_PAGE_SHIFT);
   9671 	}
    9672 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9673 	delay(200);
   9674 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9675 	delay(200);
   9676 	sc->phy.release(sc);
   9677 
   9678 	return rv;
   9679 }
   9680 
   9681 /*
   9682  * wm_gmii_i80003_writereg:	[mii interface function]
   9683  *
    9684  *	Write a PHY register on the Kumeran interface (80003).
    9685  * This could be handled by the PHY layer if we didn't have to lock the
    9686  * resource ...
   9687  */
   9688 static void
   9689 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9690 {
   9691 	struct wm_softc *sc = device_private(self);
   9692 
   9693 	if (phy != 1) /* only one PHY on kumeran bus */
   9694 		return;
   9695 
   9696 	if (sc->phy.acquire(sc)) {
   9697 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9698 		    __func__);
   9699 		return;
   9700 	}
   9701 
   9702 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9703 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9704 		    reg >> GG82563_PAGE_SHIFT);
   9705 	} else {
   9706 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9707 		    reg >> GG82563_PAGE_SHIFT);
   9708 	}
    9709 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9710 	delay(200);
   9711 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9712 	delay(200);
   9713 
   9714 	sc->phy.release(sc);
   9715 }
   9716 
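/*
 * For reference: in the two functions above, "reg" encodes both a page
 * and an in-page offset, i.e. reg = (page << GG82563_PAGE_SHIFT) | offset.
 * The page is written to GG82563_PHY_PAGE_SELECT (or to the ALT variant
 * for offsets at or above GG82563_MIN_ALT_REG), and the access itself
 * then uses only the low bits (reg & MII_ADDRMASK).
 */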
   9717 /*
   9718  * wm_gmii_bm_readreg:	[mii interface function]
   9719  *
    9720  *	Read a PHY register on the BM PHY.
    9721  * This could be handled by the PHY layer if we didn't have to lock the
    9722  * resource ...
   9723  */
   9724 static int
   9725 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9726 {
   9727 	struct wm_softc *sc = device_private(self);
   9728 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9729 	uint16_t val;
   9730 	int rv;
   9731 
   9732 	if (sc->phy.acquire(sc)) {
   9733 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9734 		    __func__);
   9735 		return 0;
   9736 	}
   9737 
   9738 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9739 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9740 		    || (reg == 31)) ? 1 : phy;
   9741 	/* Page 800 works differently than the rest so it has its own func */
   9742 	if (page == BM_WUC_PAGE) {
   9743 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9744 		rv = val;
   9745 		goto release;
   9746 	}
   9747 
   9748 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9749 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9750 		    && (sc->sc_type != WM_T_82583))
   9751 			wm_gmii_mdic_writereg(self, phy,
   9752 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9753 		else
   9754 			wm_gmii_mdic_writereg(self, phy,
   9755 			    BME1000_PHY_PAGE_SELECT, page);
   9756 	}
   9757 
   9758 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9759 
   9760 release:
   9761 	sc->phy.release(sc);
   9762 	return rv;
   9763 }
   9764 
   9765 /*
   9766  * wm_gmii_bm_writereg:	[mii interface function]
   9767  *
    9768  *	Write a PHY register on the BM PHY.
    9769  * This could be handled by the PHY layer if we didn't have to lock the
    9770  * resource ...
   9771  */
   9772 static void
   9773 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9774 {
   9775 	struct wm_softc *sc = device_private(self);
   9776 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9777 
   9778 	if (sc->phy.acquire(sc)) {
   9779 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9780 		    __func__);
   9781 		return;
   9782 	}
   9783 
   9784 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9785 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9786 		    || (reg == 31)) ? 1 : phy;
   9787 	/* Page 800 works differently than the rest so it has its own func */
   9788 	if (page == BM_WUC_PAGE) {
   9789 		uint16_t tmp;
   9790 
   9791 		tmp = val;
   9792 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9793 		goto release;
   9794 	}
   9795 
   9796 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9797 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9798 		    && (sc->sc_type != WM_T_82583))
   9799 			wm_gmii_mdic_writereg(self, phy,
   9800 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9801 		else
   9802 			wm_gmii_mdic_writereg(self, phy,
   9803 			    BME1000_PHY_PAGE_SELECT, page);
   9804 	}
   9805 
   9806 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9807 
   9808 release:
   9809 	sc->phy.release(sc);
   9810 }
   9811 
   9812 static void
    9813 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9814 {
   9815 	struct wm_softc *sc = device_private(self);
   9816 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9817 	uint16_t wuce, reg;
   9818 
   9819 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9820 		device_xname(sc->sc_dev), __func__));
   9821 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9822 	if (sc->sc_type == WM_T_PCH) {
    9823 		/* XXX The e1000 driver does nothing here... why? */
   9824 	}
   9825 
   9826 	/*
   9827 	 * 1) Enable PHY wakeup register first.
   9828 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9829 	 */
   9830 
   9831 	/* Set page 769 */
   9832 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9833 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9834 
   9835 	/* Read WUCE and save it */
   9836 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9837 
   9838 	reg = wuce | BM_WUC_ENABLE_BIT;
   9839 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9840 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9841 
   9842 	/* Select page 800 */
   9843 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9844 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9845 
   9846 	/*
   9847 	 * 2) Access PHY wakeup register.
   9848 	 * See e1000_access_phy_wakeup_reg_bm.
   9849 	 */
   9850 
    9851 	/* Write the target register number via the address opcode */
   9852 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9853 
   9854 	if (rd)
   9855 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9856 	else
   9857 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9858 
   9859 	/*
   9860 	 * 3) Disable PHY wakeup register.
   9861 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9862 	 */
   9863 	/* Set page 769 */
   9864 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9865 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9866 
   9867 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9868 }
   9869 
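#if 0
	/*
	 * Sketch only (not compiled): a wakeup-page register is reached
	 * through the normal BM read path by encoding BM_WUC_PAGE in the
	 * register argument, which routes the access through the
	 * three-step sequence above.  "offset" is a hypothetical in-page
	 * register number chosen purely for illustration.
	 */
	int offset = 1;		/* hypothetical */
	int v = wm_gmii_bm_readreg(sc->sc_dev, 1,
	    (BM_WUC_PAGE << BME1000_PAGE_SHIFT) | offset);
#endif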
   9870 /*
   9871  * wm_gmii_hv_readreg:	[mii interface function]
   9872  *
    9873  *	Read a PHY register on the HV (PCH) PHY.
    9874  * This could be handled by the PHY layer if we didn't have to lock the
    9875  * resource ...
   9876  */
   9877 static int
   9878 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9879 {
   9880 	struct wm_softc *sc = device_private(self);
   9881 	int rv;
   9882 
   9883 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9884 		device_xname(sc->sc_dev), __func__));
   9885 	if (sc->phy.acquire(sc)) {
   9886 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9887 		    __func__);
   9888 		return 0;
   9889 	}
   9890 
   9891 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9892 	sc->phy.release(sc);
   9893 	return rv;
   9894 }
   9895 
   9896 static int
   9897 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9898 {
   9899 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9900 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9901 	uint16_t val;
   9902 	int rv;
   9903 
   9904 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9905 
   9906 	/* Page 800 works differently than the rest so it has its own func */
   9907 	if (page == BM_WUC_PAGE) {
   9908 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9909 		return val;
   9910 	}
   9911 
   9912 	/*
    9913 	 * Pages below 768 (other than 0) work differently and would need
    9914 	 * their own function; they are not handled here.
   9915 	 */
   9916 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9917 		printf("gmii_hv_readreg!!!\n");
   9918 		return 0;
   9919 	}
   9920 
   9921 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9922 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9923 		    page << BME1000_PAGE_SHIFT);
   9924 	}
   9925 
   9926 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9927 	return rv;
   9928 }
   9929 
   9930 /*
   9931  * wm_gmii_hv_writereg:	[mii interface function]
   9932  *
    9933  *	Write a PHY register on the HV (PCH) PHY.
    9934  * This could be handled by the PHY layer if we didn't have to lock the
    9935  * resource ...
   9936  */
   9937 static void
   9938 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9939 {
   9940 	struct wm_softc *sc = device_private(self);
   9941 
   9942 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9943 		device_xname(sc->sc_dev), __func__));
   9944 
   9945 	if (sc->phy.acquire(sc)) {
   9946 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9947 		    __func__);
   9948 		return;
   9949 	}
   9950 
   9951 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9952 	sc->phy.release(sc);
   9953 }
   9954 
   9955 static void
   9956 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9957 {
   9958 	struct wm_softc *sc = device_private(self);
   9959 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9960 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9961 
   9962 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9963 
   9964 	/* Page 800 works differently than the rest so it has its own func */
   9965 	if (page == BM_WUC_PAGE) {
   9966 		uint16_t tmp;
   9967 
   9968 		tmp = val;
   9969 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9970 		return;
   9971 	}
   9972 
   9973 	/*
    9974 	 * Pages below 768 (other than 0) work differently and would need
    9975 	 * their own function; they are not handled here.
   9976 	 */
   9977 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9978 		printf("gmii_hv_writereg!!!\n");
   9979 		return;
   9980 	}
   9981 
   9982 	{
   9983 		/*
   9984 		 * XXX Workaround MDIO accesses being disabled after entering
   9985 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9986 		 * register is set)
   9987 		 */
   9988 		if (sc->sc_phytype == WMPHY_82578) {
   9989 			struct mii_softc *child;
   9990 
   9991 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9992 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   9993 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   9994 			    && ((val & (1 << 11)) != 0)) {
   9995 				printf("XXX need workaround\n");
   9996 			}
   9997 		}
   9998 
   9999 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10000 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   10001 			    page << BME1000_PAGE_SHIFT);
   10002 		}
   10003 	}
   10004 
   10005 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10006 }
   10007 
   10008 /*
   10009  * wm_gmii_82580_readreg:	[mii interface function]
   10010  *
   10011  *	Read a PHY register on the 82580 and I350.
   10012  * This could be handled by the PHY layer if we didn't have to lock the
    10013  * resource ...
   10014  */
   10015 static int
   10016 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10017 {
   10018 	struct wm_softc *sc = device_private(self);
   10019 	int rv;
   10020 
   10021 	if (sc->phy.acquire(sc) != 0) {
   10022 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10023 		    __func__);
   10024 		return 0;
   10025 	}
   10026 
   10027 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10028 
   10029 	sc->phy.release(sc);
   10030 	return rv;
   10031 }
   10032 
   10033 /*
   10034  * wm_gmii_82580_writereg:	[mii interface function]
   10035  *
   10036  *	Write a PHY register on the 82580 and I350.
   10037  * This could be handled by the PHY layer if we didn't have to lock the
    10038  * resource ...
   10039  */
   10040 static void
   10041 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10042 {
   10043 	struct wm_softc *sc = device_private(self);
   10044 
   10045 	if (sc->phy.acquire(sc) != 0) {
   10046 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10047 		    __func__);
   10048 		return;
   10049 	}
   10050 
   10051 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10052 
   10053 	sc->phy.release(sc);
   10054 }
   10055 
   10056 /*
   10057  * wm_gmii_gs40g_readreg:	[mii interface function]
   10058  *
    10059  *	Read a PHY register on the I210 and I211.
    10060  * This could be handled by the PHY layer if we didn't have to lock the
    10061  * resource ...
   10062  */
   10063 static int
   10064 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10065 {
   10066 	struct wm_softc *sc = device_private(self);
   10067 	int page, offset;
   10068 	int rv;
   10069 
   10070 	/* Acquire semaphore */
   10071 	if (sc->phy.acquire(sc)) {
   10072 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10073 		    __func__);
   10074 		return 0;
   10075 	}
   10076 
   10077 	/* Page select */
   10078 	page = reg >> GS40G_PAGE_SHIFT;
   10079 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10080 
   10081 	/* Read reg */
   10082 	offset = reg & GS40G_OFFSET_MASK;
   10083 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10084 
   10085 	sc->phy.release(sc);
   10086 	return rv;
   10087 }
   10088 
   10089 /*
   10090  * wm_gmii_gs40g_writereg:	[mii interface function]
   10091  *
   10092  *	Write a PHY register on the I210 and I211.
   10093  * This could be handled by the PHY layer if we didn't have to lock the
    10094  * resource ...
   10095  */
   10096 static void
   10097 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10098 {
   10099 	struct wm_softc *sc = device_private(self);
   10100 	int page, offset;
   10101 
   10102 	/* Acquire semaphore */
   10103 	if (sc->phy.acquire(sc)) {
   10104 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10105 		    __func__);
   10106 		return;
   10107 	}
   10108 
   10109 	/* Page select */
   10110 	page = reg >> GS40G_PAGE_SHIFT;
   10111 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10112 
   10113 	/* Write reg */
   10114 	offset = reg & GS40G_OFFSET_MASK;
   10115 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10116 
   10117 	/* Release semaphore */
   10118 	sc->phy.release(sc);
   10119 }
   10120 
   10121 /*
   10122  * wm_gmii_statchg:	[mii interface function]
   10123  *
   10124  *	Callback from MII layer when media changes.
   10125  */
   10126 static void
   10127 wm_gmii_statchg(struct ifnet *ifp)
   10128 {
   10129 	struct wm_softc *sc = ifp->if_softc;
   10130 	struct mii_data *mii = &sc->sc_mii;
   10131 
   10132 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10133 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10134 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10135 
   10136 	/*
   10137 	 * Get flow control negotiation result.
   10138 	 */
   10139 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10140 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10141 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10142 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10143 	}
   10144 
   10145 	if (sc->sc_flowflags & IFM_FLOW) {
   10146 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10147 			sc->sc_ctrl |= CTRL_TFCE;
   10148 			sc->sc_fcrtl |= FCRTL_XONE;
   10149 		}
   10150 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10151 			sc->sc_ctrl |= CTRL_RFCE;
   10152 	}
   10153 
   10154 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10155 		DPRINTF(WM_DEBUG_LINK,
   10156 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10157 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10158 	} else {
   10159 		DPRINTF(WM_DEBUG_LINK,
   10160 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10161 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10162 	}
   10163 
   10164 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10165 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10166 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10167 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10168 	if (sc->sc_type == WM_T_80003) {
   10169 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10170 		case IFM_1000_T:
   10171 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10172 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10173 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10174 			break;
   10175 		default:
   10176 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10177 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10178 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10179 			break;
   10180 		}
   10181 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10182 	}
   10183 }
   10184 
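/*
 * For example: if autonegotiation in wm_gmii_statchg() above resolves to
 * symmetric pause, sc_flowflags contains
 * IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, so both CTRL_TFCE and
 * CTRL_RFCE are set and FCRTL_XONE enables XON frames.
 */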
   10185 /* kumeran related (80003, ICH* and PCH*) */
   10186 
   10187 /*
   10188  * wm_kmrn_readreg:
   10189  *
   10190  *	Read a kumeran register
   10191  */
   10192 static int
   10193 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10194 {
   10195 	int rv;
   10196 
   10197 	if (sc->sc_type == WM_T_80003)
   10198 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10199 	else
   10200 		rv = sc->phy.acquire(sc);
   10201 	if (rv != 0) {
   10202 		aprint_error_dev(sc->sc_dev,
   10203 		    "%s: failed to get semaphore\n", __func__);
   10204 		return 0;
   10205 	}
   10206 
   10207 	rv = wm_kmrn_readreg_locked(sc, reg);
   10208 
   10209 	if (sc->sc_type == WM_T_80003)
   10210 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10211 	else
   10212 		sc->phy.release(sc);
   10213 
   10214 	return rv;
   10215 }
   10216 
   10217 static int
   10218 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10219 {
   10220 	int rv;
   10221 
   10222 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10223 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10224 	    KUMCTRLSTA_REN);
   10225 	CSR_WRITE_FLUSH(sc);
   10226 	delay(2);
   10227 
   10228 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10229 
   10230 	return rv;
   10231 }
   10232 
   10233 /*
   10234  * wm_kmrn_writereg:
   10235  *
   10236  *	Write a kumeran register
   10237  */
   10238 static void
   10239 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10240 {
   10241 	int rv;
   10242 
   10243 	if (sc->sc_type == WM_T_80003)
   10244 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10245 	else
   10246 		rv = sc->phy.acquire(sc);
   10247 	if (rv != 0) {
   10248 		aprint_error_dev(sc->sc_dev,
   10249 		    "%s: failed to get semaphore\n", __func__);
   10250 		return;
   10251 	}
   10252 
   10253 	wm_kmrn_writereg_locked(sc, reg, val);
   10254 
   10255 	if (sc->sc_type == WM_T_80003)
   10256 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10257 	else
   10258 		sc->phy.release(sc);
   10259 }
   10260 
   10261 static void
   10262 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10263 {
   10264 
   10265 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10266 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10267 	    (val & KUMCTRLSTA_MASK));
   10268 }
   10269 
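#if 0
	/*
	 * Sketch only (not compiled): a read-modify-write of a Kumeran
	 * register through the helpers above, mirroring what
	 * wm_gmii_statchg() does with KUMCTRLSTA_OFFSET_HD_CTRL.  The
	 * cleared bit is hypothetical and only for illustration.
	 */
	int tmp = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	tmp &= ~0x0010;		/* hypothetical bit */
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, tmp);
#endif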
   10270 /* SGMII related */
   10271 
   10272 /*
   10273  * wm_sgmii_uses_mdio
   10274  *
   10275  * Check whether the transaction is to the internal PHY or the external
   10276  * MDIO interface. Return true if it's MDIO.
   10277  */
   10278 static bool
   10279 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10280 {
   10281 	uint32_t reg;
   10282 	bool ismdio = false;
   10283 
   10284 	switch (sc->sc_type) {
   10285 	case WM_T_82575:
   10286 	case WM_T_82576:
   10287 		reg = CSR_READ(sc, WMREG_MDIC);
   10288 		ismdio = ((reg & MDIC_DEST) != 0);
   10289 		break;
   10290 	case WM_T_82580:
   10291 	case WM_T_I350:
   10292 	case WM_T_I354:
   10293 	case WM_T_I210:
   10294 	case WM_T_I211:
   10295 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10296 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10297 		break;
   10298 	default:
   10299 		break;
   10300 	}
   10301 
   10302 	return ismdio;
   10303 }
   10304 
   10305 /*
   10306  * wm_sgmii_readreg:	[mii interface function]
   10307  *
    10308  *	Read a PHY register on the SGMII (through the I2CCMD interface).
    10309  * This could be handled by the PHY layer if we didn't have to lock the
    10310  * resource ...
   10311  */
   10312 static int
   10313 wm_sgmii_readreg(device_t self, int phy, int reg)
   10314 {
   10315 	struct wm_softc *sc = device_private(self);
   10316 	uint32_t i2ccmd;
   10317 	int i, rv;
   10318 
   10319 	if (sc->phy.acquire(sc)) {
   10320 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10321 		    __func__);
   10322 		return 0;
   10323 	}
   10324 
   10325 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10326 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10327 	    | I2CCMD_OPCODE_READ;
   10328 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10329 
   10330 	/* Poll the ready bit */
   10331 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10332 		delay(50);
   10333 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10334 		if (i2ccmd & I2CCMD_READY)
   10335 			break;
   10336 	}
   10337 	if ((i2ccmd & I2CCMD_READY) == 0)
   10338 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10339 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10340 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10341 
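	/* Swap the data bytes back from I2C order, cf. wm_sgmii_writereg() */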
   10342 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10343 
   10344 	sc->phy.release(sc);
   10345 	return rv;
   10346 }
   10347 
   10348 /*
   10349  * wm_sgmii_writereg:	[mii interface function]
   10350  *
   10351  *	Write a PHY register on the SGMII.
   10352  * This could be handled by the PHY layer if we didn't have to lock the
    10353  * resource ...
   10354  */
   10355 static void
   10356 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10357 {
   10358 	struct wm_softc *sc = device_private(self);
   10359 	uint32_t i2ccmd;
   10360 	int i;
   10361 	int val_swapped;
   10362 
   10363 	if (sc->phy.acquire(sc) != 0) {
   10364 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10365 		    __func__);
   10366 		return;
   10367 	}
   10368 	/* Swap the data bytes for the I2C interface */
   10369 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10370 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10371 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10372 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10373 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10374 
   10375 	/* Poll the ready bit */
   10376 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10377 		delay(50);
   10378 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10379 		if (i2ccmd & I2CCMD_READY)
   10380 			break;
   10381 	}
   10382 	if ((i2ccmd & I2CCMD_READY) == 0)
   10383 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10384 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10385 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10386 
   10387 	sc->phy.release(sc);
   10388 }
   10389 
   10390 /* TBI related */
   10391 
   10392 /*
   10393  * wm_tbi_mediainit:
   10394  *
   10395  *	Initialize media for use on 1000BASE-X devices.
   10396  */
   10397 static void
   10398 wm_tbi_mediainit(struct wm_softc *sc)
   10399 {
   10400 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10401 	const char *sep = "";
   10402 
   10403 	if (sc->sc_type < WM_T_82543)
   10404 		sc->sc_tipg = TIPG_WM_DFLT;
   10405 	else
   10406 		sc->sc_tipg = TIPG_LG_DFLT;
   10407 
   10408 	sc->sc_tbi_serdes_anegticks = 5;
   10409 
   10410 	/* Initialize our media structures */
   10411 	sc->sc_mii.mii_ifp = ifp;
   10412 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10413 
   10414 	if ((sc->sc_type >= WM_T_82575)
   10415 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10416 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10417 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10418 	else
   10419 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10420 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10421 
   10422 	/*
   10423 	 * SWD Pins:
   10424 	 *
   10425 	 *	0 = Link LED (output)
   10426 	 *	1 = Loss Of Signal (input)
   10427 	 */
   10428 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10429 
   10430 	/* XXX Perhaps this is only for TBI */
   10431 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10432 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10433 
   10434 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10435 		sc->sc_ctrl &= ~CTRL_LRST;
   10436 
   10437 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10438 
   10439 #define	ADD(ss, mm, dd)							\
   10440 do {									\
   10441 	aprint_normal("%s%s", sep, ss);					\
   10442 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10443 	sep = ", ";							\
   10444 } while (/*CONSTCOND*/0)
   10445 
   10446 	aprint_normal_dev(sc->sc_dev, "");
   10447 
   10448 	if (sc->sc_type == WM_T_I354) {
   10449 		uint32_t status;
   10450 
   10451 		status = CSR_READ(sc, WMREG_STATUS);
   10452 		if (((status & STATUS_2P5_SKU) != 0)
   10453 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10454 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    10455 		} else
    10456 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10457 	} else if (sc->sc_type == WM_T_82545) {
   10458 		/* Only 82545 is LX (XXX except SFP) */
   10459 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10460 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10461 	} else {
   10462 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10463 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10464 	}
   10465 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10466 	aprint_normal("\n");
   10467 
   10468 #undef ADD
   10469 
   10470 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10471 }
   10472 
   10473 /*
   10474  * wm_tbi_mediachange:	[ifmedia interface function]
   10475  *
   10476  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10477  */
   10478 static int
   10479 wm_tbi_mediachange(struct ifnet *ifp)
   10480 {
   10481 	struct wm_softc *sc = ifp->if_softc;
   10482 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10483 	uint32_t status;
   10484 	int i;
   10485 
   10486 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10487 		/* XXX need some work for >= 82571 and < 82575 */
   10488 		if (sc->sc_type < WM_T_82575)
   10489 			return 0;
   10490 	}
   10491 
   10492 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10493 	    || (sc->sc_type >= WM_T_82575))
   10494 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10495 
   10496 	sc->sc_ctrl &= ~CTRL_LRST;
   10497 	sc->sc_txcw = TXCW_ANE;
   10498 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10499 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10500 	else if (ife->ifm_media & IFM_FDX)
   10501 		sc->sc_txcw |= TXCW_FD;
   10502 	else
   10503 		sc->sc_txcw |= TXCW_HD;
   10504 
   10505 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10506 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10507 
   10508 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10509 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10510 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10511 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10512 	CSR_WRITE_FLUSH(sc);
   10513 	delay(1000);
   10514 
   10515 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10516 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10517 
   10518 	/*
    10519 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set if
    10520 	 * the optics detect a signal, and clear if they don't.
   10521 	 */
   10522 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10523 		/* Have signal; wait for the link to come up. */
   10524 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10525 			delay(10000);
   10526 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10527 				break;
   10528 		}
   10529 
   10530 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10531 			    device_xname(sc->sc_dev),i));
   10532 
   10533 		status = CSR_READ(sc, WMREG_STATUS);
   10534 		DPRINTF(WM_DEBUG_LINK,
   10535 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10536 			device_xname(sc->sc_dev),status, STATUS_LU));
   10537 		if (status & STATUS_LU) {
   10538 			/* Link is up. */
   10539 			DPRINTF(WM_DEBUG_LINK,
   10540 			    ("%s: LINK: set media -> link up %s\n",
   10541 			    device_xname(sc->sc_dev),
   10542 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10543 
   10544 			/*
    10545 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10546 			 * automatically, so resync sc->sc_ctrl from the register.
   10547 			 */
   10548 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10549 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10550 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10551 			if (status & STATUS_FD)
   10552 				sc->sc_tctl |=
   10553 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10554 			else
   10555 				sc->sc_tctl |=
   10556 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10557 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10558 				sc->sc_fcrtl |= FCRTL_XONE;
   10559 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10560 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10561 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10562 				      sc->sc_fcrtl);
   10563 			sc->sc_tbi_linkup = 1;
   10564 		} else {
   10565 			if (i == WM_LINKUP_TIMEOUT)
   10566 				wm_check_for_link(sc);
   10567 			/* Link is down. */
   10568 			DPRINTF(WM_DEBUG_LINK,
   10569 			    ("%s: LINK: set media -> link down\n",
   10570 			    device_xname(sc->sc_dev)));
   10571 			sc->sc_tbi_linkup = 0;
   10572 		}
   10573 	} else {
   10574 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10575 		    device_xname(sc->sc_dev)));
   10576 		sc->sc_tbi_linkup = 0;
   10577 	}
   10578 
   10579 	wm_tbi_serdes_set_linkled(sc);
   10580 
   10581 	return 0;
   10582 }
   10583 
   10584 /*
   10585  * wm_tbi_mediastatus:	[ifmedia interface function]
   10586  *
   10587  *	Get the current interface media status on a 1000BASE-X device.
   10588  */
   10589 static void
   10590 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10591 {
   10592 	struct wm_softc *sc = ifp->if_softc;
   10593 	uint32_t ctrl, status;
   10594 
   10595 	ifmr->ifm_status = IFM_AVALID;
   10596 	ifmr->ifm_active = IFM_ETHER;
   10597 
   10598 	status = CSR_READ(sc, WMREG_STATUS);
   10599 	if ((status & STATUS_LU) == 0) {
   10600 		ifmr->ifm_active |= IFM_NONE;
   10601 		return;
   10602 	}
   10603 
   10604 	ifmr->ifm_status |= IFM_ACTIVE;
   10605 	/* Only 82545 is LX */
   10606 	if (sc->sc_type == WM_T_82545)
   10607 		ifmr->ifm_active |= IFM_1000_LX;
   10608 	else
   10609 		ifmr->ifm_active |= IFM_1000_SX;
   10610 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10611 		ifmr->ifm_active |= IFM_FDX;
   10612 	else
   10613 		ifmr->ifm_active |= IFM_HDX;
   10614 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10615 	if (ctrl & CTRL_RFCE)
   10616 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10617 	if (ctrl & CTRL_TFCE)
   10618 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10619 }
   10620 
   10621 /* XXX TBI only */
   10622 static int
   10623 wm_check_for_link(struct wm_softc *sc)
   10624 {
   10625 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10626 	uint32_t rxcw;
   10627 	uint32_t ctrl;
   10628 	uint32_t status;
   10629 	uint32_t sig;
   10630 
   10631 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10632 		/* XXX need some work for >= 82571 */
   10633 		if (sc->sc_type >= WM_T_82571) {
   10634 			sc->sc_tbi_linkup = 1;
   10635 			return 0;
   10636 		}
   10637 	}
   10638 
   10639 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10640 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10641 	status = CSR_READ(sc, WMREG_STATUS);
   10642 
   10643 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10644 
   10645 	DPRINTF(WM_DEBUG_LINK,
   10646 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10647 		device_xname(sc->sc_dev), __func__,
   10648 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10649 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10650 
   10651 	/*
   10652 	 * SWDPIN   LU RXCW
   10653 	 *      0    0    0
   10654 	 *      0    0    1	(should not happen)
   10655 	 *      0    1    0	(should not happen)
   10656 	 *      0    1    1	(should not happen)
   10657 	 *      1    0    0	Disable autonego and force linkup
   10658 	 *      1    0    1	got /C/ but not linkup yet
   10659 	 *      1    1    0	(linkup)
   10660 	 *      1    1    1	If IFM_AUTO, back to autonego
   10661 	 *
   10662 	 */
   10663 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10664 	    && ((status & STATUS_LU) == 0)
   10665 	    && ((rxcw & RXCW_C) == 0)) {
   10666 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10667 			__func__));
   10668 		sc->sc_tbi_linkup = 0;
   10669 		/* Disable auto-negotiation in the TXCW register */
   10670 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10671 
   10672 		/*
   10673 		 * Force link-up and also force full-duplex.
   10674 		 *
    10675 		 * NOTE: the hardware may have updated TFCE and RFCE in
    10676 		 * CTRL automatically, so resync sc->sc_ctrl from it.
   10677 		 */
   10678 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10679 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10680 	} else if (((status & STATUS_LU) != 0)
   10681 	    && ((rxcw & RXCW_C) != 0)
   10682 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10683 		sc->sc_tbi_linkup = 1;
   10684 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10685 			__func__));
   10686 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10687 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10688 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10689 	    && ((rxcw & RXCW_C) != 0)) {
   10690 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10691 	} else {
   10692 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10693 			status));
   10694 	}
   10695 
   10696 	return 0;
   10697 }
   10698 
   10699 /*
   10700  * wm_tbi_tick:
   10701  *
   10702  *	Check the link on TBI devices.
   10703  *	This function acts as mii_tick().
   10704  */
   10705 static void
   10706 wm_tbi_tick(struct wm_softc *sc)
   10707 {
   10708 	struct mii_data *mii = &sc->sc_mii;
   10709 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10710 	uint32_t status;
   10711 
   10712 	KASSERT(WM_CORE_LOCKED(sc));
   10713 
   10714 	status = CSR_READ(sc, WMREG_STATUS);
   10715 
   10716 	/* XXX is this needed? */
   10717 	(void)CSR_READ(sc, WMREG_RXCW);
   10718 	(void)CSR_READ(sc, WMREG_CTRL);
   10719 
   10720 	/* set link status */
   10721 	if ((status & STATUS_LU) == 0) {
   10722 		DPRINTF(WM_DEBUG_LINK,
   10723 		    ("%s: LINK: checklink -> down\n",
   10724 			device_xname(sc->sc_dev)));
   10725 		sc->sc_tbi_linkup = 0;
   10726 	} else if (sc->sc_tbi_linkup == 0) {
   10727 		DPRINTF(WM_DEBUG_LINK,
   10728 		    ("%s: LINK: checklink -> up %s\n",
   10729 			device_xname(sc->sc_dev),
   10730 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10731 		sc->sc_tbi_linkup = 1;
   10732 		sc->sc_tbi_serdes_ticks = 0;
   10733 	}
   10734 
   10735 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10736 		goto setled;
   10737 
   10738 	if ((status & STATUS_LU) == 0) {
   10739 		sc->sc_tbi_linkup = 0;
   10740 		/* If the timer expired, retry autonegotiation */
   10741 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10742 		    && (++sc->sc_tbi_serdes_ticks
   10743 			>= sc->sc_tbi_serdes_anegticks)) {
   10744 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10745 			sc->sc_tbi_serdes_ticks = 0;
   10746 			/*
   10747 			 * Reset the link, and let autonegotiation do
   10748 			 * its thing
   10749 			 */
   10750 			sc->sc_ctrl |= CTRL_LRST;
   10751 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10752 			CSR_WRITE_FLUSH(sc);
   10753 			delay(1000);
   10754 			sc->sc_ctrl &= ~CTRL_LRST;
   10755 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10756 			CSR_WRITE_FLUSH(sc);
   10757 			delay(1000);
   10758 			CSR_WRITE(sc, WMREG_TXCW,
   10759 			    sc->sc_txcw & ~TXCW_ANE);
   10760 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10761 		}
   10762 	}
   10763 
   10764 setled:
   10765 	wm_tbi_serdes_set_linkled(sc);
   10766 }
   10767 
   10768 /* SERDES related */
   10769 static void
   10770 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10771 {
   10772 	uint32_t reg;
   10773 
   10774 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10775 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10776 		return;
   10777 
   10778 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10779 	reg |= PCS_CFG_PCS_EN;
   10780 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10781 
   10782 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10783 	reg &= ~CTRL_EXT_SWDPIN(3);
   10784 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10785 	CSR_WRITE_FLUSH(sc);
   10786 }
   10787 
   10788 static int
   10789 wm_serdes_mediachange(struct ifnet *ifp)
   10790 {
   10791 	struct wm_softc *sc = ifp->if_softc;
   10792 	bool pcs_autoneg = true; /* XXX */
   10793 	uint32_t ctrl_ext, pcs_lctl, reg;
   10794 
   10795 	/* XXX Currently, this function is not called on 8257[12] */
   10796 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10797 	    || (sc->sc_type >= WM_T_82575))
   10798 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10799 
   10800 	wm_serdes_power_up_link_82575(sc);
   10801 
   10802 	sc->sc_ctrl |= CTRL_SLU;
   10803 
   10804 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10805 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10806 
   10807 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10808 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10809 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10810 	case CTRL_EXT_LINK_MODE_SGMII:
   10811 		pcs_autoneg = true;
   10812 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10813 		break;
   10814 	case CTRL_EXT_LINK_MODE_1000KX:
   10815 		pcs_autoneg = false;
   10816 		/* FALLTHROUGH */
   10817 	default:
   10818 		if ((sc->sc_type == WM_T_82575)
   10819 		    || (sc->sc_type == WM_T_82576)) {
   10820 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10821 				pcs_autoneg = false;
   10822 		}
   10823 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10824 		    | CTRL_FRCFDX;
   10825 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10826 	}
   10827 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10828 
   10829 	if (pcs_autoneg) {
   10830 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10831 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10832 
   10833 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10834 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10835 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10836 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10837 	} else
   10838 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10839 
   10840 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    10841 
   10843 	return 0;
   10844 }
   10845 
   10846 static void
   10847 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10848 {
   10849 	struct wm_softc *sc = ifp->if_softc;
   10850 	struct mii_data *mii = &sc->sc_mii;
   10851 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10852 	uint32_t pcs_adv, pcs_lpab, reg;
   10853 
   10854 	ifmr->ifm_status = IFM_AVALID;
   10855 	ifmr->ifm_active = IFM_ETHER;
   10856 
   10857 	/* Check PCS */
   10858 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10859 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10860 		ifmr->ifm_active |= IFM_NONE;
   10861 		sc->sc_tbi_linkup = 0;
   10862 		goto setled;
   10863 	}
   10864 
   10865 	sc->sc_tbi_linkup = 1;
   10866 	ifmr->ifm_status |= IFM_ACTIVE;
   10867 	if (sc->sc_type == WM_T_I354) {
   10868 		uint32_t status;
   10869 
   10870 		status = CSR_READ(sc, WMREG_STATUS);
   10871 		if (((status & STATUS_2P5_SKU) != 0)
   10872 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10873 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10874 		} else
   10875 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10876 	} else {
   10877 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10878 		case PCS_LSTS_SPEED_10:
   10879 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10880 			break;
   10881 		case PCS_LSTS_SPEED_100:
   10882 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10883 			break;
   10884 		case PCS_LSTS_SPEED_1000:
   10885 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10886 			break;
   10887 		default:
   10888 			device_printf(sc->sc_dev, "Unknown speed\n");
   10889 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10890 			break;
   10891 		}
   10892 	}
   10893 	if ((reg & PCS_LSTS_FDX) != 0)
   10894 		ifmr->ifm_active |= IFM_FDX;
   10895 	else
   10896 		ifmr->ifm_active |= IFM_HDX;
   10897 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10898 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10899 		/* Check flow */
   10900 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10901 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10902 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10903 			goto setled;
   10904 		}
   10905 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10906 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10907 		DPRINTF(WM_DEBUG_LINK,
   10908 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10909 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10910 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10911 			mii->mii_media_active |= IFM_FLOW
   10912 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10913 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10914 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10915 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10916 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10917 			mii->mii_media_active |= IFM_FLOW
   10918 			    | IFM_ETH_TXPAUSE;
   10919 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10920 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10921 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10922 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10923 			mii->mii_media_active |= IFM_FLOW
   10924 			    | IFM_ETH_RXPAUSE;
   10925 		}
   10926 	}
   10927 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10928 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10929 setled:
   10930 	wm_tbi_serdes_set_linkled(sc);
   10931 }
   10932 
   10933 /*
   10934  * wm_serdes_tick:
   10935  *
   10936  *	Check the link on serdes devices.
   10937  */
   10938 static void
   10939 wm_serdes_tick(struct wm_softc *sc)
   10940 {
   10941 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10942 	struct mii_data *mii = &sc->sc_mii;
   10943 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10944 	uint32_t reg;
   10945 
   10946 	KASSERT(WM_CORE_LOCKED(sc));
   10947 
   10948 	mii->mii_media_status = IFM_AVALID;
   10949 	mii->mii_media_active = IFM_ETHER;
   10950 
   10951 	/* Check PCS */
   10952 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10953 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10954 		mii->mii_media_status |= IFM_ACTIVE;
   10955 		sc->sc_tbi_linkup = 1;
   10956 		sc->sc_tbi_serdes_ticks = 0;
   10957 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10958 		if ((reg & PCS_LSTS_FDX) != 0)
   10959 			mii->mii_media_active |= IFM_FDX;
   10960 		else
   10961 			mii->mii_media_active |= IFM_HDX;
   10962 	} else {
    10963 		mii->mii_media_active |= IFM_NONE;
   10964 		sc->sc_tbi_linkup = 0;
   10965 		/* If the timer expired, retry autonegotiation */
   10966 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10967 		    && (++sc->sc_tbi_serdes_ticks
   10968 			>= sc->sc_tbi_serdes_anegticks)) {
   10969 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10970 			sc->sc_tbi_serdes_ticks = 0;
   10971 			/* XXX */
   10972 			wm_serdes_mediachange(ifp);
   10973 		}
   10974 	}
   10975 
   10976 	wm_tbi_serdes_set_linkled(sc);
   10977 }
   10978 
   10979 /* SFP related */
   10980 
   10981 static int
   10982 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10983 {
   10984 	uint32_t i2ccmd;
   10985 	int i;
   10986 
   10987 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10988 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10989 
   10990 	/* Poll the ready bit */
   10991 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10992 		delay(50);
   10993 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10994 		if (i2ccmd & I2CCMD_READY)
   10995 			break;
   10996 	}
   10997 	if ((i2ccmd & I2CCMD_READY) == 0)
   10998 		return -1;
   10999 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11000 		return -1;
   11001 
   11002 	*data = i2ccmd & 0x00ff;
   11003 
   11004 	return 0;
   11005 }
   11006 
   11007 static uint32_t
   11008 wm_sfp_get_media_type(struct wm_softc *sc)
   11009 {
   11010 	uint32_t ctrl_ext;
   11011 	uint8_t val = 0;
   11012 	int timeout = 3;
   11013 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11014 	int rv = -1;
   11015 
   11016 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11017 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11018 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11019 	CSR_WRITE_FLUSH(sc);
   11020 
   11021 	/* Read SFP module data */
   11022 	while (timeout) {
   11023 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11024 		if (rv == 0)
   11025 			break;
   11026 		delay(100*1000); /* XXX too big */
   11027 		timeout--;
   11028 	}
   11029 	if (rv != 0)
   11030 		goto out;
   11031 	switch (val) {
   11032 	case SFF_SFP_ID_SFF:
   11033 		aprint_normal_dev(sc->sc_dev,
   11034 		    "Module/Connector soldered to board\n");
   11035 		break;
   11036 	case SFF_SFP_ID_SFP:
   11037 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11038 		break;
   11039 	case SFF_SFP_ID_UNKNOWN:
   11040 		goto out;
   11041 	default:
   11042 		break;
   11043 	}
   11044 
   11045 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11046 	if (rv != 0) {
   11047 		goto out;
   11048 	}
   11049 
   11050 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11051 		mediatype = WM_MEDIATYPE_SERDES;
    11052 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   11053 		sc->sc_flags |= WM_F_SGMII;
   11054 		mediatype = WM_MEDIATYPE_COPPER;
    11055 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11056 		sc->sc_flags |= WM_F_SGMII;
   11057 		mediatype = WM_MEDIATYPE_SERDES;
   11058 	}
   11059 
   11060 out:
   11061 	/* Restore I2C interface setting */
   11062 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11063 
   11064 	return mediatype;
   11065 }
   11066 
   11067 /*
   11068  * NVM related.
   11069  * Microwire, SPI (w/wo EERD) and Flash.
   11070  */
   11071 
   11072 /* Both spi and uwire */
   11073 
   11074 /*
   11075  * wm_eeprom_sendbits:
   11076  *
   11077  *	Send a series of bits to the EEPROM.
   11078  */
   11079 static void
   11080 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11081 {
   11082 	uint32_t reg;
   11083 	int x;
   11084 
   11085 	reg = CSR_READ(sc, WMREG_EECD);
   11086 
   11087 	for (x = nbits; x > 0; x--) {
   11088 		if (bits & (1U << (x - 1)))
   11089 			reg |= EECD_DI;
   11090 		else
   11091 			reg &= ~EECD_DI;
   11092 		CSR_WRITE(sc, WMREG_EECD, reg);
   11093 		CSR_WRITE_FLUSH(sc);
   11094 		delay(2);
   11095 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11096 		CSR_WRITE_FLUSH(sc);
   11097 		delay(2);
   11098 		CSR_WRITE(sc, WMREG_EECD, reg);
   11099 		CSR_WRITE_FLUSH(sc);
   11100 		delay(2);
   11101 	}
   11102 }
   11103 
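/*
 * For reference: each bit shifted out above costs three 2us phases (data
 * setup with SK low, SK high, SK low again), i.e. roughly 6us per bit on
 * top of the register writes themselves.
 */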
   11104 /*
   11105  * wm_eeprom_recvbits:
   11106  *
   11107  *	Receive a series of bits from the EEPROM.
   11108  */
   11109 static void
   11110 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11111 {
   11112 	uint32_t reg, val;
   11113 	int x;
   11114 
   11115 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11116 
   11117 	val = 0;
   11118 	for (x = nbits; x > 0; x--) {
   11119 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11120 		CSR_WRITE_FLUSH(sc);
   11121 		delay(2);
   11122 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11123 			val |= (1U << (x - 1));
   11124 		CSR_WRITE(sc, WMREG_EECD, reg);
   11125 		CSR_WRITE_FLUSH(sc);
   11126 		delay(2);
   11127 	}
   11128 	*valp = val;
   11129 }
   11130 
   11131 /* Microwire */
   11132 
   11133 /*
   11134  * wm_nvm_read_uwire:
   11135  *
   11136  *	Read a word from the EEPROM using the MicroWire protocol.
   11137  */
   11138 static int
   11139 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11140 {
   11141 	uint32_t reg, val;
   11142 	int i;
   11143 
   11144 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11145 		device_xname(sc->sc_dev), __func__));
   11146 
   11147 	for (i = 0; i < wordcnt; i++) {
   11148 		/* Clear SK and DI. */
   11149 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11150 		CSR_WRITE(sc, WMREG_EECD, reg);
   11151 
   11152 		/*
   11153 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11154 		 * and Xen.
   11155 		 *
   11156 		 * We use this workaround only for 82540 because qemu's
    11157 		 * e1000 acts as an 82540.
   11158 		 */
   11159 		if (sc->sc_type == WM_T_82540) {
   11160 			reg |= EECD_SK;
   11161 			CSR_WRITE(sc, WMREG_EECD, reg);
   11162 			reg &= ~EECD_SK;
   11163 			CSR_WRITE(sc, WMREG_EECD, reg);
   11164 			CSR_WRITE_FLUSH(sc);
   11165 			delay(2);
   11166 		}
   11167 		/* XXX: end of workaround */
   11168 
   11169 		/* Set CHIP SELECT. */
   11170 		reg |= EECD_CS;
   11171 		CSR_WRITE(sc, WMREG_EECD, reg);
   11172 		CSR_WRITE_FLUSH(sc);
   11173 		delay(2);
   11174 
   11175 		/* Shift in the READ command. */
   11176 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11177 
   11178 		/* Shift in address. */
   11179 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11180 
   11181 		/* Shift out the data. */
   11182 		wm_eeprom_recvbits(sc, &val, 16);
   11183 		data[i] = val & 0xffff;
   11184 
   11185 		/* Clear CHIP SELECT. */
   11186 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11187 		CSR_WRITE(sc, WMREG_EECD, reg);
   11188 		CSR_WRITE_FLUSH(sc);
   11189 		delay(2);
   11190 	}
   11191 
   11192 	return 0;
   11193 }
   11194 
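/*
 * For reference: each Microwire read above is CS high, a 3-bit READ
 * opcode (UWIRE_OPC_READ), sc_nvm_addrbits address bits, and then 16
 * data bits shifted back out before CS drops again.
 */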
   11195 /* SPI */
   11196 
   11197 /*
   11198  * Set SPI and FLASH related information from the EECD register.
   11199  * For 82541 and 82547, the word size is taken from EEPROM.
   11200  */
   11201 static int
   11202 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11203 {
   11204 	int size;
   11205 	uint32_t reg;
   11206 	uint16_t data;
   11207 
   11208 	reg = CSR_READ(sc, WMREG_EECD);
   11209 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11210 
   11211 	/* Read the size of NVM from EECD by default */
   11212 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11213 	switch (sc->sc_type) {
   11214 	case WM_T_82541:
   11215 	case WM_T_82541_2:
   11216 	case WM_T_82547:
   11217 	case WM_T_82547_2:
   11218 		/* Set dummy value to access EEPROM */
   11219 		sc->sc_nvm_wordsize = 64;
   11220 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11221 		reg = data;
   11222 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11223 		if (size == 0)
   11224 			size = 6; /* 64 word size */
   11225 		else
   11226 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11227 		break;
   11228 	case WM_T_80003:
   11229 	case WM_T_82571:
   11230 	case WM_T_82572:
   11231 	case WM_T_82573: /* SPI case */
   11232 	case WM_T_82574: /* SPI case */
   11233 	case WM_T_82583: /* SPI case */
   11234 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11235 		if (size > 14)
   11236 			size = 14;
   11237 		break;
   11238 	case WM_T_82575:
   11239 	case WM_T_82576:
   11240 	case WM_T_82580:
   11241 	case WM_T_I350:
   11242 	case WM_T_I354:
   11243 	case WM_T_I210:
   11244 	case WM_T_I211:
   11245 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11246 		if (size > 15)
   11247 			size = 15;
   11248 		break;
   11249 	default:
   11250 		aprint_error_dev(sc->sc_dev,
   11251 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
    11252 		return -1;
   11254 	}
   11255 
   11256 	sc->sc_nvm_wordsize = 1 << size;
   11257 
   11258 	return 0;
   11259 }
   11260 
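/*
 * Worked example for the 82541/82547 path above: sc_nvm_wordsize is
 * first forced to 64 words just so NVM_OFF_EEPROM_SIZE can be read.  If
 * the size field read back is 0, the 64-word (1 << 6) assumption is
 * kept; otherwise the field plus NVM_WORD_SIZE_BASE_SHIFT + 1 gives the
 * final power-of-two word count.
 */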
   11261 /*
   11262  * wm_nvm_ready_spi:
   11263  *
   11264  *	Wait for a SPI EEPROM to be ready for commands.
   11265  */
   11266 static int
   11267 wm_nvm_ready_spi(struct wm_softc *sc)
   11268 {
   11269 	uint32_t val;
   11270 	int usec;
   11271 
   11272 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11273 		device_xname(sc->sc_dev), __func__));
   11274 
   11275 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11276 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11277 		wm_eeprom_recvbits(sc, &val, 8);
   11278 		if ((val & SPI_SR_RDY) == 0)
   11279 			break;
   11280 	}
   11281 	if (usec >= SPI_MAX_RETRIES) {
    11282 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11283 		return 1;
   11284 	}
   11285 	return 0;
   11286 }
   11287 
   11288 /*
   11289  * wm_nvm_read_spi:
   11290  *
    11291  *	Read a word from the EEPROM using the SPI protocol.
   11292  */
   11293 static int
   11294 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11295 {
   11296 	uint32_t reg, val;
   11297 	int i;
   11298 	uint8_t opc;
   11299 
   11300 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11301 		device_xname(sc->sc_dev), __func__));
   11302 
   11303 	/* Clear SK and CS. */
   11304 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11305 	CSR_WRITE(sc, WMREG_EECD, reg);
   11306 	CSR_WRITE_FLUSH(sc);
   11307 	delay(2);
   11308 
   11309 	if (wm_nvm_ready_spi(sc))
   11310 		return 1;
   11311 
   11312 	/* Toggle CS to flush commands. */
   11313 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11314 	CSR_WRITE_FLUSH(sc);
   11315 	delay(2);
   11316 	CSR_WRITE(sc, WMREG_EECD, reg);
   11317 	CSR_WRITE_FLUSH(sc);
   11318 	delay(2);
   11319 
   11320 	opc = SPI_OPC_READ;
   11321 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11322 		opc |= SPI_OPC_A8;
   11323 
   11324 	wm_eeprom_sendbits(sc, opc, 8);
   11325 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11326 
   11327 	for (i = 0; i < wordcnt; i++) {
   11328 		wm_eeprom_recvbits(sc, &val, 16);
   11329 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11330 	}
   11331 
   11332 	/* Raise CS and clear SK. */
   11333 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11334 	CSR_WRITE(sc, WMREG_EECD, reg);
   11335 	CSR_WRITE_FLUSH(sc);
   11336 	delay(2);
   11337 
   11338 	return 0;
   11339 }
   11340 
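/*
 * Two details of the transfer above worth spelling out: the address
 * is sent as "word << 1" because the SPI part is byte addressed, and
 * each received 16-bit value is byte swapped because the EEPROM
 * shifts data out most significant byte first while the NVM words
 * are consumed in little-endian order.
 */
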
    11341 /* Reading with the EERD register */
   11342 
   11343 static int
   11344 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11345 {
   11346 	uint32_t attempts = 100000;
   11347 	uint32_t i, reg = 0;
   11348 	int32_t done = -1;
   11349 
   11350 	for (i = 0; i < attempts; i++) {
   11351 		reg = CSR_READ(sc, rw);
   11352 
   11353 		if (reg & EERD_DONE) {
   11354 			done = 0;
   11355 			break;
   11356 		}
   11357 		delay(5);
   11358 	}
   11359 
   11360 	return done;
   11361 }
   11362 
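/*
 * The helper above is shared by the EERD (read) and EEWR (write)
 * paths, hence the "rw" register offset argument.  With 100000
 * attempts at 5us apiece it allows the hardware roughly half a
 * second to set the DONE bit before giving up.
 */
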
   11363 static int
   11364 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11365     uint16_t *data)
   11366 {
   11367 	int i, eerd = 0;
   11368 	int error = 0;
   11369 
   11370 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11371 		device_xname(sc->sc_dev), __func__));
   11372 
   11373 	for (i = 0; i < wordcnt; i++) {
   11374 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11375 
   11376 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11377 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11378 		if (error != 0)
   11379 			break;
   11380 
   11381 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11382 	}
   11383 
   11384 	return error;
   11385 }
   11386 
   11387 /* Flash */
   11388 
   11389 static int
   11390 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11391 {
   11392 	uint32_t eecd;
   11393 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11394 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11395 	uint8_t sig_byte = 0;
   11396 
   11397 	switch (sc->sc_type) {
   11398 	case WM_T_PCH_SPT:
   11399 		/*
   11400 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11401 		 * sector valid bits from the NVM.
   11402 		 */
   11403 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11404 		if ((*bank == 0) || (*bank == 1)) {
   11405 			aprint_error_dev(sc->sc_dev,
   11406 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11407 				*bank);
   11408 			return -1;
   11409 		} else {
   11410 			*bank = *bank - 2;
   11411 			return 0;
   11412 		}
   11413 	case WM_T_ICH8:
   11414 	case WM_T_ICH9:
   11415 		eecd = CSR_READ(sc, WMREG_EECD);
   11416 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11417 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11418 			return 0;
   11419 		}
   11420 		/* FALLTHROUGH */
   11421 	default:
   11422 		/* Default to 0 */
   11423 		*bank = 0;
   11424 
   11425 		/* Check bank 0 */
   11426 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11427 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11428 			*bank = 0;
   11429 			return 0;
   11430 		}
   11431 
   11432 		/* Check bank 1 */
   11433 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11434 		    &sig_byte);
   11435 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11436 			*bank = 1;
   11437 			return 0;
   11438 		}
   11439 	}
   11440 
   11441 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11442 		device_xname(sc->sc_dev)));
   11443 	return -1;
   11444 }
   11445 
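/*
 * Background for the detection above: ICH8-family flash carries two
 * copies (banks) of the NVM image, each sc_ich8_flash_bank_size
 * words long, and the high byte of word ICH_NVM_SIG_WORD holds a
 * signature; the bank whose signature byte matches ICH_NVM_SIG_VALUE
 * after masking is the valid one.  On ICH8/ICH9 the EECD SEC1VAL bit
 * can short-circuit the signature check, and on PCH_SPT the selection
 * is published in CTRL_EXT with the valid banks encoded as 2 and 3.
 */
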
   11446 /******************************************************************************
   11447  * This function does initial flash setup so that a new read/write/erase cycle
   11448  * can be started.
   11449  *
   11450  * sc - The pointer to the hw structure
   11451  ****************************************************************************/
   11452 static int32_t
   11453 wm_ich8_cycle_init(struct wm_softc *sc)
   11454 {
   11455 	uint16_t hsfsts;
   11456 	int32_t error = 1;
   11457 	int32_t i     = 0;
   11458 
   11459 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11460 
    11461 	/* First, check the Flash Descriptor Valid bit in HW status */
   11462 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11463 		return error;
   11464 	}
   11465 
    11466 	/* Clear FCERR in HW status by writing a 1 (write-1-to-clear) */
    11467 	/* Clear DAEL in HW status the same way */
   11468 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11469 
   11470 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11471 
   11472 	/*
    11473 	 * To start a new cycle safely we need either a hardware "cycle
    11474 	 * in progress" bit to check against, or an FDONE bit that the
    11475 	 * hardware sets to 1 after reset, so that one of them can tell
    11476 	 * us whether a cycle is in progress or has completed.  We
    11477 	 * should also have a software semaphore mechanism guarding
    11478 	 * FDONE or the cycle-in-progress bit, so that accesses by two
    11479 	 * threads are serialized and two threads cannot start a cycle
    11480 	 * at the same time.
   11481 	 */
   11482 
   11483 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11484 		/*
   11485 		 * There is no cycle running at present, so we can start a
   11486 		 * cycle
   11487 		 */
   11488 
   11489 		/* Begin by setting Flash Cycle Done. */
   11490 		hsfsts |= HSFSTS_DONE;
   11491 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11492 		error = 0;
   11493 	} else {
   11494 		/*
    11495 		 * Otherwise poll for a while so the current cycle has a
    11496 		 * chance to end before giving up.
   11497 		 */
   11498 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11499 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11500 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11501 				error = 0;
   11502 				break;
   11503 			}
   11504 			delay(1);
   11505 		}
   11506 		if (error == 0) {
   11507 			/*
    11508 			 * The previous cycle finished in time; now set the
    11509 			 * Flash Cycle Done bit.
   11510 			 */
   11511 			hsfsts |= HSFSTS_DONE;
   11512 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11513 		}
   11514 	}
   11515 	return error;
   11516 }
   11517 
   11518 /******************************************************************************
   11519  * This function starts a flash cycle and waits for its completion
   11520  *
   11521  * sc - The pointer to the hw structure
   11522  ****************************************************************************/
   11523 static int32_t
   11524 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11525 {
   11526 	uint16_t hsflctl;
   11527 	uint16_t hsfsts;
   11528 	int32_t error = 1;
   11529 	uint32_t i = 0;
   11530 
   11531 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11532 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11533 	hsflctl |= HSFCTL_GO;
   11534 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11535 
   11536 	/* Wait till FDONE bit is set to 1 */
   11537 	do {
   11538 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11539 		if (hsfsts & HSFSTS_DONE)
   11540 			break;
   11541 		delay(1);
   11542 		i++;
   11543 	} while (i < timeout);
    11544 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   11545 		error = 0;
   11546 
   11547 	return error;
   11548 }
   11549 
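/*
 * Flash cycle handshake used by the two helpers above, in short:
 * wm_ich8_cycle_init() clears stale FCERR/DAEL status and waits for
 * any cycle already in progress (FLINPRO) to drain, then
 * wm_ich8_flash_cycle() sets the GO bit in HSFCTL and polls HSFSTS
 * until FDONE is set without FCERR.  Each poll step is 1us, so the
 * timeout argument is roughly a microsecond budget.
 */
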
   11550 /******************************************************************************
   11551  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11552  *
   11553  * sc - The pointer to the hw structure
    11554  * index - The index of the byte, word or dword to read.
    11555  * size - Size of data to read: 1=byte, 2=word, 4=dword
   11556  * data - Pointer to the word to store the value read.
   11557  *****************************************************************************/
   11558 static int32_t
   11559 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11560     uint32_t size, uint32_t *data)
   11561 {
   11562 	uint16_t hsfsts;
   11563 	uint16_t hsflctl;
   11564 	uint32_t flash_linear_address;
   11565 	uint32_t flash_data = 0;
   11566 	int32_t error = 1;
   11567 	int32_t count = 0;
   11568 
    11569 	if (size < 1 || size > 4 || data == NULL ||
   11570 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11571 		return error;
   11572 
   11573 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11574 	    sc->sc_ich8_flash_base;
   11575 
   11576 	do {
   11577 		delay(1);
   11578 		/* Steps */
   11579 		error = wm_ich8_cycle_init(sc);
   11580 		if (error)
   11581 			break;
   11582 
   11583 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11584 		/* BCOUNT holds size - 1 (0: 1 byte, 1: 2 bytes, 3: 4 bytes) */
    11585 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11586 		    & HSFCTL_BCOUNT_MASK;
   11587 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11588 		if (sc->sc_type == WM_T_PCH_SPT) {
   11589 			/*
   11590 			 * In SPT, This register is in Lan memory space, not
   11591 			 * flash. Therefore, only 32 bit access is supported.
   11592 			 */
   11593 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11594 			    (uint32_t)hsflctl);
   11595 		} else
   11596 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11597 
   11598 		/*
   11599 		 * Write the last 24 bits of index into Flash Linear address
   11600 		 * field in Flash Address
   11601 		 */
    11602 		/* TODO: check the index against the size of the flash */
   11603 
   11604 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11605 
   11606 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11607 
   11608 		/*
    11609 		 * If FCERR is set, clear it and retry the whole sequence
    11610 		 * a few more times; otherwise read the result out of
    11611 		 * Flash Data0, which returns the data least significant
    11612 		 * byte first.
   11613 		 */
   11614 		if (error == 0) {
   11615 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11616 			if (size == 1)
   11617 				*data = (uint8_t)(flash_data & 0x000000FF);
   11618 			else if (size == 2)
   11619 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11620 			else if (size == 4)
   11621 				*data = (uint32_t)flash_data;
   11622 			break;
   11623 		} else {
   11624 			/*
    11625 			 * If we got here, things are probably completely
    11626 			 * hosed, but if the error condition is detected,
    11627 			 * it won't hurt to retry: the outer loop runs up
    11628 			 * to ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11629 			 */
   11630 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11631 			if (hsfsts & HSFSTS_ERR) {
   11632 				/* Repeat for some time before giving up. */
   11633 				continue;
   11634 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11635 				break;
   11636 		}
   11637 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11638 
   11639 	return error;
   11640 }
   11641 
   11642 /******************************************************************************
   11643  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11644  *
    11645  * sc - pointer to the wm_softc structure
   11646  * index - The index of the byte to read.
   11647  * data - Pointer to a byte to store the value read.
   11648  *****************************************************************************/
   11649 static int32_t
   11650 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11651 {
   11652 	int32_t status;
   11653 	uint32_t word = 0;
   11654 
   11655 	status = wm_read_ich8_data(sc, index, 1, &word);
   11656 	if (status == 0)
   11657 		*data = (uint8_t)word;
   11658 	else
   11659 		*data = 0;
   11660 
   11661 	return status;
   11662 }
   11663 
   11664 /******************************************************************************
   11665  * Reads a word from the NVM using the ICH8 flash access registers.
   11666  *
    11667  * sc - pointer to the wm_softc structure
   11668  * index - The starting byte index of the word to read.
   11669  * data - Pointer to a word to store the value read.
   11670  *****************************************************************************/
   11671 static int32_t
   11672 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11673 {
   11674 	int32_t status;
   11675 	uint32_t word = 0;
   11676 
   11677 	status = wm_read_ich8_data(sc, index, 2, &word);
   11678 	if (status == 0)
   11679 		*data = (uint16_t)word;
   11680 	else
   11681 		*data = 0;
   11682 
   11683 	return status;
   11684 }
   11685 
   11686 /******************************************************************************
   11687  * Reads a dword from the NVM using the ICH8 flash access registers.
   11688  *
    11689  * sc - pointer to the wm_softc structure
    11690  * index - The starting byte index of the dword to read.
    11691  * data - Pointer to a dword to store the value read.
   11692  *****************************************************************************/
   11693 static int32_t
   11694 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11695 {
   11696 	int32_t status;
   11697 
   11698 	status = wm_read_ich8_data(sc, index, 4, data);
   11699 	return status;
   11700 }
   11701 
   11702 /******************************************************************************
   11703  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11704  * register.
   11705  *
   11706  * sc - Struct containing variables accessed by shared code
   11707  * offset - offset of word in the EEPROM to read
   11708  * data - word read from the EEPROM
   11709  * words - number of words to read
   11710  *****************************************************************************/
   11711 static int
   11712 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11713 {
   11714 	int32_t  error = 0;
   11715 	uint32_t flash_bank = 0;
   11716 	uint32_t act_offset = 0;
   11717 	uint32_t bank_offset = 0;
   11718 	uint16_t word = 0;
   11719 	uint16_t i = 0;
   11720 
   11721 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11722 		device_xname(sc->sc_dev), __func__));
   11723 
   11724 	/*
   11725 	 * We need to know which is the valid flash bank.  In the event
   11726 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11727 	 * managing flash_bank.  So it cannot be trusted and needs
   11728 	 * to be updated with each read.
   11729 	 */
   11730 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11731 	if (error) {
   11732 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11733 			device_xname(sc->sc_dev)));
   11734 		flash_bank = 0;
   11735 	}
   11736 
   11737 	/*
   11738 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11739 	 * size
   11740 	 */
   11741 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11742 
   11743 	error = wm_get_swfwhw_semaphore(sc);
   11744 	if (error) {
   11745 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11746 		    __func__);
   11747 		return error;
   11748 	}
   11749 
   11750 	for (i = 0; i < words; i++) {
   11751 		/* The NVM part needs a byte offset, hence * 2 */
   11752 		act_offset = bank_offset + ((offset + i) * 2);
   11753 		error = wm_read_ich8_word(sc, act_offset, &word);
   11754 		if (error) {
   11755 			aprint_error_dev(sc->sc_dev,
   11756 			    "%s: failed to read NVM\n", __func__);
   11757 			break;
   11758 		}
   11759 		data[i] = word;
   11760 	}
   11761 
   11762 	wm_put_swfwhw_semaphore(sc);
   11763 	return error;
   11764 }
   11765 
   11766 /******************************************************************************
   11767  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11768  * register.
   11769  *
   11770  * sc - Struct containing variables accessed by shared code
   11771  * offset - offset of word in the EEPROM to read
   11772  * data - word read from the EEPROM
   11773  * words - number of words to read
   11774  *****************************************************************************/
   11775 static int
   11776 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11777 {
   11778 	int32_t  error = 0;
   11779 	uint32_t flash_bank = 0;
   11780 	uint32_t act_offset = 0;
   11781 	uint32_t bank_offset = 0;
   11782 	uint32_t dword = 0;
   11783 	uint16_t i = 0;
   11784 
   11785 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11786 		device_xname(sc->sc_dev), __func__));
   11787 
   11788 	/*
   11789 	 * We need to know which is the valid flash bank.  In the event
   11790 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11791 	 * managing flash_bank.  So it cannot be trusted and needs
   11792 	 * to be updated with each read.
   11793 	 */
   11794 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11795 	if (error) {
   11796 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11797 			device_xname(sc->sc_dev)));
   11798 		flash_bank = 0;
   11799 	}
   11800 
   11801 	/*
   11802 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11803 	 * size
   11804 	 */
   11805 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11806 
   11807 	error = wm_get_swfwhw_semaphore(sc);
   11808 	if (error) {
   11809 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11810 		    __func__);
   11811 		return error;
   11812 	}
   11813 
   11814 	for (i = 0; i < words; i++) {
   11815 		/* The NVM part needs a byte offset, hence * 2 */
   11816 		act_offset = bank_offset + ((offset + i) * 2);
   11817 		/* but we must read dword aligned, so mask ... */
   11818 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11819 		if (error) {
   11820 			aprint_error_dev(sc->sc_dev,
   11821 			    "%s: failed to read NVM\n", __func__);
   11822 			break;
   11823 		}
   11824 		/* ... and pick out low or high word */
   11825 		if ((act_offset & 0x2) == 0)
   11826 			data[i] = (uint16_t)(dword & 0xFFFF);
   11827 		else
   11828 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11829 	}
   11830 
   11831 	wm_put_swfwhw_semaphore(sc);
   11832 	return error;
   11833 }
   11834 
   11835 /* iNVM */
   11836 
   11837 static int
   11838 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11839 {
    11840 	int32_t  rv = -1;	/* "not found" until a matching record is seen */
   11841 	uint32_t invm_dword;
   11842 	uint16_t i;
   11843 	uint8_t record_type, word_address;
   11844 
   11845 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11846 		device_xname(sc->sc_dev), __func__));
   11847 
   11848 	for (i = 0; i < INVM_SIZE; i++) {
   11849 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11850 		/* Get record type */
   11851 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11852 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11853 			break;
   11854 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11855 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11856 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11857 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11858 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11859 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11860 			if (word_address == address) {
   11861 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11862 				rv = 0;
   11863 				break;
   11864 			}
   11865 		}
   11866 	}
   11867 
   11868 	return rv;
   11869 }
   11870 
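/*
 * iNVM layout assumed by the scan above: the I210/I211 iNVM is a
 * flat array of INVM_SIZE dwords, each tagged with a record type.
 * CSR-autoload and RSA-key records are skipped by advancing the
 * index past their fixed payload sizes; a word-autoload record
 * carries one 16-bit word plus its word address, and the scan stops
 * at the first uninitialized record.  Words that are absent are
 * patched up with per-word defaults by wm_nvm_read_invm() below.
 */
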
   11871 static int
   11872 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11873 {
   11874 	int rv = 0;
   11875 	int i;
   11876 
   11877 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11878 		device_xname(sc->sc_dev), __func__));
   11879 
   11880 	for (i = 0; i < words; i++) {
   11881 		switch (offset + i) {
   11882 		case NVM_OFF_MACADDR:
   11883 		case NVM_OFF_MACADDR1:
   11884 		case NVM_OFF_MACADDR2:
   11885 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11886 			if (rv != 0) {
   11887 				data[i] = 0xffff;
   11888 				rv = -1;
   11889 			}
   11890 			break;
   11891 		case NVM_OFF_CFG2:
   11892 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11893 			if (rv != 0) {
   11894 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11895 				rv = 0;
   11896 			}
   11897 			break;
   11898 		case NVM_OFF_CFG4:
   11899 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11900 			if (rv != 0) {
   11901 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11902 				rv = 0;
   11903 			}
   11904 			break;
   11905 		case NVM_OFF_LED_1_CFG:
   11906 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11907 			if (rv != 0) {
   11908 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11909 				rv = 0;
   11910 			}
   11911 			break;
   11912 		case NVM_OFF_LED_0_2_CFG:
   11913 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11914 			if (rv != 0) {
   11915 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11916 				rv = 0;
   11917 			}
   11918 			break;
   11919 		case NVM_OFF_ID_LED_SETTINGS:
   11920 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11921 			if (rv != 0) {
   11922 				*data = ID_LED_RESERVED_FFFF;
   11923 				rv = 0;
   11924 			}
   11925 			break;
   11926 		default:
   11927 			DPRINTF(WM_DEBUG_NVM,
   11928 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11929 			*data = NVM_RESERVED_WORD;
   11930 			break;
   11931 		}
   11932 	}
   11933 
   11934 	return rv;
   11935 }
    11936 /* Locking, NVM type detection, checksum validation, version and read */
   11937 /* Lock, detecting NVM type, validate checksum, version and read */
   11938 
   11939 /*
   11940  * wm_nvm_acquire:
   11941  *
   11942  *	Perform the EEPROM handshake required on some chips.
   11943  */
   11944 static int
   11945 wm_nvm_acquire(struct wm_softc *sc)
   11946 {
   11947 	uint32_t reg;
   11948 	int x;
   11949 	int ret = 0;
   11950 
   11951 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11952 		device_xname(sc->sc_dev), __func__));
   11953 
   11954 	if (sc->sc_type >= WM_T_ICH8) {
   11955 		ret = wm_get_nvm_ich8lan(sc);
   11956 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11957 		ret = wm_get_swfwhw_semaphore(sc);
   11958 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11959 		/* This will also do wm_get_swsm_semaphore() if needed */
   11960 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11961 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11962 		ret = wm_get_swsm_semaphore(sc);
   11963 	}
   11964 
   11965 	if (ret) {
   11966 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11967 			__func__);
   11968 		return 1;
   11969 	}
   11970 
   11971 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11972 		reg = CSR_READ(sc, WMREG_EECD);
   11973 
   11974 		/* Request EEPROM access. */
   11975 		reg |= EECD_EE_REQ;
   11976 		CSR_WRITE(sc, WMREG_EECD, reg);
   11977 
    11978 		/* ...and wait for it to be granted. */
   11979 		for (x = 0; x < 1000; x++) {
   11980 			reg = CSR_READ(sc, WMREG_EECD);
   11981 			if (reg & EECD_EE_GNT)
   11982 				break;
   11983 			delay(5);
   11984 		}
   11985 		if ((reg & EECD_EE_GNT) == 0) {
   11986 			aprint_error_dev(sc->sc_dev,
   11987 			    "could not acquire EEPROM GNT\n");
   11988 			reg &= ~EECD_EE_REQ;
   11989 			CSR_WRITE(sc, WMREG_EECD, reg);
   11990 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11991 				wm_put_swfwhw_semaphore(sc);
   11992 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   11993 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   11994 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   11995 				wm_put_swsm_semaphore(sc);
   11996 			return 1;
   11997 		}
   11998 	}
   11999 
   12000 	return 0;
   12001 }
   12002 
   12003 /*
   12004  * wm_nvm_release:
   12005  *
   12006  *	Release the EEPROM mutex.
   12007  */
   12008 static void
   12009 wm_nvm_release(struct wm_softc *sc)
   12010 {
   12011 	uint32_t reg;
   12012 
   12013 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12014 		device_xname(sc->sc_dev), __func__));
   12015 
   12016 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12017 		reg = CSR_READ(sc, WMREG_EECD);
   12018 		reg &= ~EECD_EE_REQ;
   12019 		CSR_WRITE(sc, WMREG_EECD, reg);
   12020 	}
   12021 
   12022 	if (sc->sc_type >= WM_T_ICH8) {
   12023 		wm_put_nvm_ich8lan(sc);
   12024 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12025 		wm_put_swfwhw_semaphore(sc);
   12026 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   12027 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12028 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12029 		wm_put_swsm_semaphore(sc);
   12030 }
   12031 
   12032 static int
   12033 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12034 {
   12035 	uint32_t eecd = 0;
   12036 
   12037 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12038 	    || sc->sc_type == WM_T_82583) {
   12039 		eecd = CSR_READ(sc, WMREG_EECD);
   12040 
   12041 		/* Isolate bits 15 & 16 */
   12042 		eecd = ((eecd >> 15) & 0x03);
   12043 
   12044 		/* If both bits are set, device is Flash type */
   12045 		if (eecd == 0x03)
   12046 			return 0;
   12047 	}
   12048 	return 1;
   12049 }
   12050 
   12051 static int
   12052 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12053 {
   12054 	uint32_t eec;
   12055 
   12056 	eec = CSR_READ(sc, WMREG_EEC);
   12057 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12058 		return 1;
   12059 
   12060 	return 0;
   12061 }
   12062 
   12063 /*
   12064  * wm_nvm_validate_checksum
   12065  *
   12066  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12067  */
   12068 static int
   12069 wm_nvm_validate_checksum(struct wm_softc *sc)
   12070 {
   12071 	uint16_t checksum;
   12072 	uint16_t eeprom_data;
   12073 #ifdef WM_DEBUG
   12074 	uint16_t csum_wordaddr, valid_checksum;
   12075 #endif
   12076 	int i;
   12077 
   12078 	checksum = 0;
   12079 
   12080 	/* Don't check for I211 */
   12081 	if (sc->sc_type == WM_T_I211)
   12082 		return 0;
   12083 
   12084 #ifdef WM_DEBUG
   12085 	if (sc->sc_type == WM_T_PCH_LPT) {
   12086 		csum_wordaddr = NVM_OFF_COMPAT;
   12087 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12088 	} else {
   12089 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12090 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12091 	}
   12092 
   12093 	/* Dump EEPROM image for debug */
   12094 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12095 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12096 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12097 		/* XXX PCH_SPT? */
   12098 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12099 		if ((eeprom_data & valid_checksum) == 0) {
   12100 			DPRINTF(WM_DEBUG_NVM,
    12101 			    ("%s: NVM needs to be updated (%04x != %04x)\n",
   12102 				device_xname(sc->sc_dev), eeprom_data,
   12103 				    valid_checksum));
   12104 		}
   12105 	}
   12106 
   12107 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12108 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12109 		for (i = 0; i < NVM_SIZE; i++) {
   12110 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12111 				printf("XXXX ");
   12112 			else
   12113 				printf("%04hx ", eeprom_data);
   12114 			if (i % 8 == 7)
   12115 				printf("\n");
   12116 		}
   12117 	}
   12118 
   12119 #endif /* WM_DEBUG */
   12120 
   12121 	for (i = 0; i < NVM_SIZE; i++) {
   12122 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12123 			return 1;
   12124 		checksum += eeprom_data;
   12125 	}
   12126 
   12127 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12128 #ifdef WM_DEBUG
   12129 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12130 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12131 #endif
   12132 	}
   12133 
   12134 	return 0;
   12135 }
   12136 
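/*
 * Checksum example: the 16-bit sum of words 0x00-0x3f must equal
 * NVM_CHECKSUM (0xbaba on Intel parts), so the checksum word is
 * chosen at provisioning time as 0xbaba minus the 16-bit sum of all
 * the other words in the checksummed area.  Note that the function
 * above only logs a mismatch under WM_DEBUG and still returns
 * success.
 */
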
   12137 static void
   12138 wm_nvm_version_invm(struct wm_softc *sc)
   12139 {
   12140 	uint32_t dword;
   12141 
   12142 	/*
    12143 	 * Linux's code to decode the version is very strange, so we
    12144 	 * don't follow that algorithm; we just use word 61 as the
    12145 	 * document describes.  It may not be perfect, though...
   12146 	 *
   12147 	 * Example:
   12148 	 *
   12149 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12150 	 */
   12151 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12152 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12153 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12154 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12155 }
   12156 
   12157 static void
   12158 wm_nvm_version(struct wm_softc *sc)
   12159 {
   12160 	uint16_t major, minor, build, patch;
   12161 	uint16_t uid0, uid1;
   12162 	uint16_t nvm_data;
   12163 	uint16_t off;
   12164 	bool check_version = false;
   12165 	bool check_optionrom = false;
   12166 	bool have_build = false;
   12167 
   12168 	/*
   12169 	 * Version format:
   12170 	 *
   12171 	 * XYYZ
   12172 	 * X0YZ
   12173 	 * X0YY
   12174 	 *
   12175 	 * Example:
   12176 	 *
   12177 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12178 	 *	82571	0x50a6	5.10.6?
   12179 	 *	82572	0x506a	5.6.10?
   12180 	 *	82572EI	0x5069	5.6.9?
   12181 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12182 	 *		0x2013	2.1.3?
    12183 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12184 	 */
   12185 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12186 	switch (sc->sc_type) {
   12187 	case WM_T_82571:
   12188 	case WM_T_82572:
   12189 	case WM_T_82574:
   12190 	case WM_T_82583:
   12191 		check_version = true;
   12192 		check_optionrom = true;
   12193 		have_build = true;
   12194 		break;
   12195 	case WM_T_82575:
   12196 	case WM_T_82576:
   12197 	case WM_T_82580:
   12198 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12199 			check_version = true;
   12200 		break;
   12201 	case WM_T_I211:
   12202 		wm_nvm_version_invm(sc);
   12203 		goto printver;
   12204 	case WM_T_I210:
   12205 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12206 			wm_nvm_version_invm(sc);
   12207 			goto printver;
   12208 		}
   12209 		/* FALLTHROUGH */
   12210 	case WM_T_I350:
   12211 	case WM_T_I354:
   12212 		check_version = true;
   12213 		check_optionrom = true;
   12214 		break;
   12215 	default:
   12216 		return;
   12217 	}
   12218 	if (check_version) {
   12219 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12220 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12221 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12222 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12223 			build = nvm_data & NVM_BUILD_MASK;
   12224 			have_build = true;
   12225 		} else
   12226 			minor = nvm_data & 0x00ff;
   12227 
    12228 		/* Convert the BCD-coded minor to decimal */
   12229 		minor = (minor / 16) * 10 + (minor % 16);
   12230 		sc->sc_nvm_ver_major = major;
   12231 		sc->sc_nvm_ver_minor = minor;
   12232 
   12233 printver:
   12234 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12235 		    sc->sc_nvm_ver_minor);
   12236 		if (have_build) {
   12237 			sc->sc_nvm_ver_build = build;
   12238 			aprint_verbose(".%d", build);
   12239 		}
   12240 	}
   12241 	if (check_optionrom) {
   12242 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12243 		/* Option ROM Version */
   12244 		if ((off != 0x0000) && (off != 0xffff)) {
   12245 			off += NVM_COMBO_VER_OFF;
   12246 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12247 			wm_nvm_read(sc, off, 1, &uid0);
   12248 			if ((uid0 != 0) && (uid0 != 0xffff)
   12249 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12250 				/* 16bits */
   12251 				major = uid0 >> 8;
   12252 				build = (uid0 << 8) | (uid1 >> 8);
   12253 				patch = uid1 & 0x00ff;
   12254 				aprint_verbose(", option ROM Version %d.%d.%d",
   12255 				    major, build, patch);
   12256 			}
   12257 		}
   12258 	}
   12259 
   12260 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12261 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12262 }
   12263 
   12264 /*
   12265  * wm_nvm_read:
   12266  *
   12267  *	Read data from the serial EEPROM.
   12268  */
   12269 static int
   12270 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12271 {
   12272 	int rv;
   12273 
   12274 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12275 		device_xname(sc->sc_dev), __func__));
   12276 
   12277 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12278 		return 1;
   12279 
   12280 	if (wm_nvm_acquire(sc))
   12281 		return 1;
   12282 
   12283 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12284 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12285 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12286 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12287 	else if (sc->sc_type == WM_T_PCH_SPT)
   12288 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12289 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12290 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12291 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12292 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12293 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12294 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12295 	else
   12296 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12297 
   12298 	wm_nvm_release(sc);
   12299 	return rv;
   12300 }
   12301 
   12302 /*
   12303  * Hardware semaphores.
    12304  * Very complex...
   12305  */
   12306 
   12307 static int
   12308 wm_get_null(struct wm_softc *sc)
   12309 {
   12310 
   12311 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12312 		device_xname(sc->sc_dev), __func__));
   12313 	return 0;
   12314 }
   12315 
   12316 static void
   12317 wm_put_null(struct wm_softc *sc)
   12318 {
   12319 
   12320 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12321 		device_xname(sc->sc_dev), __func__));
   12322 	return;
   12323 }
   12324 
   12325 /*
   12326  * Get hardware semaphore.
   12327  * Same as e1000_get_hw_semaphore_generic()
   12328  */
   12329 static int
   12330 wm_get_swsm_semaphore(struct wm_softc *sc)
   12331 {
   12332 	int32_t timeout;
   12333 	uint32_t swsm;
   12334 
   12335 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12336 		device_xname(sc->sc_dev), __func__));
   12337 	KASSERT(sc->sc_nvm_wordsize > 0);
   12338 
   12339 	/* Get the SW semaphore. */
   12340 	timeout = sc->sc_nvm_wordsize + 1;
   12341 	while (timeout) {
   12342 		swsm = CSR_READ(sc, WMREG_SWSM);
   12343 
   12344 		if ((swsm & SWSM_SMBI) == 0)
   12345 			break;
   12346 
   12347 		delay(50);
   12348 		timeout--;
   12349 	}
   12350 
   12351 	if (timeout == 0) {
   12352 		aprint_error_dev(sc->sc_dev,
   12353 		    "could not acquire SWSM SMBI\n");
   12354 		return 1;
   12355 	}
   12356 
   12357 	/* Get the FW semaphore. */
   12358 	timeout = sc->sc_nvm_wordsize + 1;
   12359 	while (timeout) {
   12360 		swsm = CSR_READ(sc, WMREG_SWSM);
   12361 		swsm |= SWSM_SWESMBI;
   12362 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12363 		/* If we managed to set the bit we got the semaphore. */
   12364 		swsm = CSR_READ(sc, WMREG_SWSM);
   12365 		if (swsm & SWSM_SWESMBI)
   12366 			break;
   12367 
   12368 		delay(50);
   12369 		timeout--;
   12370 	}
   12371 
   12372 	if (timeout == 0) {
   12373 		aprint_error_dev(sc->sc_dev,
   12374 		    "could not acquire SWSM SWESMBI\n");
   12375 		/* Release semaphores */
   12376 		wm_put_swsm_semaphore(sc);
   12377 		return 1;
   12378 	}
   12379 	return 0;
   12380 }
   12381 
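/*
 * The SWSM handshake above is the usual two-stage e1000 scheme:
 * SMBI arbitrates among software agents (the hardware latches it on
 * read, so the loop spins until it reads back clear), and SWESMBI
 * then arbitrates between software and firmware: software writes
 * the bit and owns the semaphore only if it reads back as set.
 * Both bits are dropped together in wm_put_swsm_semaphore().
 */
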
   12382 /*
   12383  * Put hardware semaphore.
   12384  * Same as e1000_put_hw_semaphore_generic()
   12385  */
   12386 static void
   12387 wm_put_swsm_semaphore(struct wm_softc *sc)
   12388 {
   12389 	uint32_t swsm;
   12390 
   12391 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12392 		device_xname(sc->sc_dev), __func__));
   12393 
   12394 	swsm = CSR_READ(sc, WMREG_SWSM);
   12395 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12396 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12397 }
   12398 
   12399 /*
   12400  * Get SW/FW semaphore.
   12401  * Same as e1000_acquire_swfw_sync_82575().
   12402  */
   12403 static int
   12404 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12405 {
   12406 	uint32_t swfw_sync;
   12407 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12408 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12409 	int timeout = 200;
   12410 
   12411 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12412 		device_xname(sc->sc_dev), __func__));
   12413 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12414 
   12415 	for (timeout = 0; timeout < 200; timeout++) {
   12416 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12417 			if (wm_get_swsm_semaphore(sc)) {
   12418 				aprint_error_dev(sc->sc_dev,
   12419 				    "%s: failed to get semaphore\n",
   12420 				    __func__);
   12421 				return 1;
   12422 			}
   12423 		}
   12424 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12425 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12426 			swfw_sync |= swmask;
   12427 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12428 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12429 				wm_put_swsm_semaphore(sc);
   12430 			return 0;
   12431 		}
   12432 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12433 			wm_put_swsm_semaphore(sc);
   12434 		delay(5000);
   12435 	}
   12436 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12437 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12438 	return 1;
   12439 }
   12440 
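/*
 * SW_FW_SYNC layout assumed above: each resource has a software
 * claim bit in the low half (mask << SWFW_SOFT_SHIFT) and a
 * firmware claim bit in the high half (mask << SWFW_FIRM_SHIFT),
 * and a resource is free only when both are clear.  The register
 * itself may only be modified while holding the SWSM semaphore,
 * which is why each attempt brackets the update with
 * wm_get_swsm_semaphore()/wm_put_swsm_semaphore().
 */
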
   12441 static void
   12442 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12443 {
   12444 	uint32_t swfw_sync;
   12445 
   12446 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12447 		device_xname(sc->sc_dev), __func__));
   12448 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12449 
   12450 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12451 		while (wm_get_swsm_semaphore(sc) != 0)
   12452 			continue;
   12453 	}
   12454 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12455 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12456 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12457 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12458 		wm_put_swsm_semaphore(sc);
   12459 }
   12460 
   12461 static int
   12462 wm_get_phy_82575(struct wm_softc *sc)
   12463 {
   12464 
   12465 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12466 		device_xname(sc->sc_dev), __func__));
   12467 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12468 }
   12469 
   12470 static void
   12471 wm_put_phy_82575(struct wm_softc *sc)
   12472 {
   12473 
   12474 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12475 		device_xname(sc->sc_dev), __func__));
   12476 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12477 }
   12478 
   12479 static int
   12480 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12481 {
   12482 	uint32_t ext_ctrl;
   12483 	int timeout = 200;
   12484 
   12485 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12486 		device_xname(sc->sc_dev), __func__));
   12487 
   12488 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12489 	for (timeout = 0; timeout < 200; timeout++) {
   12490 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12491 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12492 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12493 
   12494 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12495 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12496 			return 0;
   12497 		delay(5000);
   12498 	}
   12499 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12500 	    device_xname(sc->sc_dev), ext_ctrl);
   12501 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12502 	return 1;
   12503 }
   12504 
   12505 static void
   12506 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12507 {
   12508 	uint32_t ext_ctrl;
   12509 
   12510 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12511 		device_xname(sc->sc_dev), __func__));
   12512 
   12513 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12514 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12515 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12516 
   12517 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12518 }
   12519 
   12520 static int
   12521 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12522 {
   12523 	uint32_t ext_ctrl;
   12524 	int timeout;
   12525 
   12526 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12527 		device_xname(sc->sc_dev), __func__));
   12528 	mutex_enter(sc->sc_ich_phymtx);
   12529 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12530 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12531 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12532 			break;
   12533 		delay(1000);
   12534 	}
   12535 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12536 		printf("%s: SW has already locked the resource\n",
   12537 		    device_xname(sc->sc_dev));
   12538 		goto out;
   12539 	}
   12540 
   12541 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12542 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12543 	for (timeout = 0; timeout < 1000; timeout++) {
   12544 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12545 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12546 			break;
   12547 		delay(1000);
   12548 	}
   12549 	if (timeout >= 1000) {
   12550 		printf("%s: failed to acquire semaphore\n",
   12551 		    device_xname(sc->sc_dev));
   12552 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12553 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12554 		goto out;
   12555 	}
   12556 	return 0;
   12557 
   12558 out:
   12559 	mutex_exit(sc->sc_ich_phymtx);
   12560 	return 1;
   12561 }
   12562 
   12563 static void
   12564 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12565 {
   12566 	uint32_t ext_ctrl;
   12567 
   12568 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12569 		device_xname(sc->sc_dev), __func__));
   12570 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12571 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12572 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12573 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12574 	} else {
   12575 		printf("%s: Semaphore unexpectedly released\n",
   12576 		    device_xname(sc->sc_dev));
   12577 	}
   12578 
   12579 	mutex_exit(sc->sc_ich_phymtx);
   12580 }
   12581 
   12582 static int
   12583 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12584 {
   12585 
   12586 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12587 		device_xname(sc->sc_dev), __func__));
   12588 	mutex_enter(sc->sc_ich_nvmmtx);
   12589 
   12590 	return 0;
   12591 }
   12592 
   12593 static void
   12594 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12595 {
   12596 
   12597 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12598 		device_xname(sc->sc_dev), __func__));
   12599 	mutex_exit(sc->sc_ich_nvmmtx);
   12600 }
   12601 
   12602 static int
   12603 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12604 {
   12605 	int i = 0;
   12606 	uint32_t reg;
   12607 
   12608 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12609 		device_xname(sc->sc_dev), __func__));
   12610 
   12611 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12612 	do {
   12613 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12614 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12615 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12616 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12617 			break;
   12618 		delay(2*1000);
   12619 		i++;
   12620 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12621 
   12622 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12623 		wm_put_hw_semaphore_82573(sc);
   12624 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12625 		    device_xname(sc->sc_dev));
   12626 		return -1;
   12627 	}
   12628 
   12629 	return 0;
   12630 }
   12631 
   12632 static void
   12633 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12634 {
   12635 	uint32_t reg;
   12636 
   12637 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12638 		device_xname(sc->sc_dev), __func__));
   12639 
   12640 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12641 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12642 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12643 }
   12644 
   12645 /*
   12646  * Management mode and power management related subroutines.
   12647  * BMC, AMT, suspend/resume and EEE.
   12648  */
   12649 
   12650 #ifdef WM_WOL
   12651 static int
   12652 wm_check_mng_mode(struct wm_softc *sc)
   12653 {
   12654 	int rv;
   12655 
   12656 	switch (sc->sc_type) {
   12657 	case WM_T_ICH8:
   12658 	case WM_T_ICH9:
   12659 	case WM_T_ICH10:
   12660 	case WM_T_PCH:
   12661 	case WM_T_PCH2:
   12662 	case WM_T_PCH_LPT:
   12663 	case WM_T_PCH_SPT:
   12664 		rv = wm_check_mng_mode_ich8lan(sc);
   12665 		break;
   12666 	case WM_T_82574:
   12667 	case WM_T_82583:
   12668 		rv = wm_check_mng_mode_82574(sc);
   12669 		break;
   12670 	case WM_T_82571:
   12671 	case WM_T_82572:
   12672 	case WM_T_82573:
   12673 	case WM_T_80003:
   12674 		rv = wm_check_mng_mode_generic(sc);
   12675 		break;
   12676 	default:
    12677 		/* nothing to do */
   12678 		rv = 0;
   12679 		break;
   12680 	}
   12681 
   12682 	return rv;
   12683 }
   12684 
   12685 static int
   12686 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12687 {
   12688 	uint32_t fwsm;
   12689 
   12690 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12691 
   12692 	if (((fwsm & FWSM_FW_VALID) != 0)
   12693 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12694 		return 1;
   12695 
   12696 	return 0;
   12697 }
   12698 
   12699 static int
   12700 wm_check_mng_mode_82574(struct wm_softc *sc)
   12701 {
   12702 	uint16_t data;
   12703 
   12704 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12705 
   12706 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12707 		return 1;
   12708 
   12709 	return 0;
   12710 }
   12711 
   12712 static int
   12713 wm_check_mng_mode_generic(struct wm_softc *sc)
   12714 {
   12715 	uint32_t fwsm;
   12716 
   12717 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12718 
   12719 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12720 		return 1;
   12721 
   12722 	return 0;
   12723 }
   12724 #endif /* WM_WOL */
   12725 
   12726 static int
   12727 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12728 {
   12729 	uint32_t manc, fwsm, factps;
   12730 
   12731 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12732 		return 0;
   12733 
   12734 	manc = CSR_READ(sc, WMREG_MANC);
   12735 
   12736 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12737 		device_xname(sc->sc_dev), manc));
   12738 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12739 		return 0;
   12740 
   12741 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12742 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12743 		factps = CSR_READ(sc, WMREG_FACTPS);
   12744 		if (((factps & FACTPS_MNGCG) == 0)
   12745 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12746 			return 1;
    12747 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12748 		uint16_t data;
   12749 
   12750 		factps = CSR_READ(sc, WMREG_FACTPS);
   12751 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12752 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12753 			device_xname(sc->sc_dev), factps, data));
   12754 		if (((factps & FACTPS_MNGCG) == 0)
   12755 		    && ((data & NVM_CFG2_MNGM_MASK)
   12756 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12757 			return 1;
   12758 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12759 	    && ((manc & MANC_ASF_EN) == 0))
   12760 		return 1;
   12761 
   12762 	return 0;
   12763 }
   12764 
   12765 static bool
   12766 wm_phy_resetisblocked(struct wm_softc *sc)
   12767 {
   12768 	bool blocked = false;
   12769 	uint32_t reg;
   12770 	int i = 0;
   12771 
   12772 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12773 		device_xname(sc->sc_dev), __func__));
   12774 
   12775 	switch (sc->sc_type) {
   12776 	case WM_T_ICH8:
   12777 	case WM_T_ICH9:
   12778 	case WM_T_ICH10:
   12779 	case WM_T_PCH:
   12780 	case WM_T_PCH2:
   12781 	case WM_T_PCH_LPT:
   12782 	case WM_T_PCH_SPT:
   12783 		do {
   12784 			reg = CSR_READ(sc, WMREG_FWSM);
   12785 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12786 				blocked = true;
   12787 				delay(10*1000);
   12788 				continue;
   12789 			}
   12790 			blocked = false;
   12791 		} while (blocked && (i++ < 30));
   12792 		return blocked;
   12793 		break;
   12794 	case WM_T_82571:
   12795 	case WM_T_82572:
   12796 	case WM_T_82573:
   12797 	case WM_T_82574:
   12798 	case WM_T_82583:
   12799 	case WM_T_80003:
   12800 		reg = CSR_READ(sc, WMREG_MANC);
   12801 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12802 			return true;
   12803 		else
   12804 			return false;
   12805 		break;
   12806 	default:
   12807 		/* no problem */
   12808 		break;
   12809 	}
   12810 
   12811 	return false;
   12812 }
   12813 
   12814 static void
   12815 wm_get_hw_control(struct wm_softc *sc)
   12816 {
   12817 	uint32_t reg;
   12818 
   12819 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12820 		device_xname(sc->sc_dev), __func__));
   12821 
   12822 	if (sc->sc_type == WM_T_82573) {
   12823 		reg = CSR_READ(sc, WMREG_SWSM);
   12824 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12825 	} else if (sc->sc_type >= WM_T_82571) {
   12826 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12827 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12828 	}
   12829 }
   12830 
   12831 static void
   12832 wm_release_hw_control(struct wm_softc *sc)
   12833 {
   12834 	uint32_t reg;
   12835 
   12836 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12837 		device_xname(sc->sc_dev), __func__));
   12838 
   12839 	if (sc->sc_type == WM_T_82573) {
   12840 		reg = CSR_READ(sc, WMREG_SWSM);
   12841 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12842 	} else if (sc->sc_type >= WM_T_82571) {
   12843 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12844 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12845 	}
   12846 }
   12847 
   12848 static void
   12849 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12850 {
   12851 	uint32_t reg;
   12852 
   12853 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12854 		device_xname(sc->sc_dev), __func__));
   12855 
   12856 	if (sc->sc_type < WM_T_PCH2)
   12857 		return;
   12858 
   12859 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12860 
   12861 	if (gate)
   12862 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12863 	else
   12864 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12865 
   12866 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12867 }
   12868 
   12869 static void
   12870 wm_smbustopci(struct wm_softc *sc)
   12871 {
   12872 	uint32_t fwsm, reg;
   12873 	int rv = 0;
   12874 
   12875 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12876 		device_xname(sc->sc_dev), __func__));
   12877 
   12878 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12879 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12880 
   12881 	/* Disable ULP */
   12882 	wm_ulp_disable(sc);
   12883 
   12884 	/* Acquire PHY semaphore */
   12885 	sc->phy.acquire(sc);
   12886 
   12887 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12888 	switch (sc->sc_type) {
   12889 	case WM_T_PCH_LPT:
   12890 	case WM_T_PCH_SPT:
   12891 		if (wm_phy_is_accessible_pchlan(sc))
   12892 			break;
   12893 
   12894 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12895 		reg |= CTRL_EXT_FORCE_SMBUS;
   12896 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12897 #if 0
   12898 		/* XXX Isn't this required??? */
   12899 		CSR_WRITE_FLUSH(sc);
   12900 #endif
   12901 		delay(50 * 1000);
   12902 		/* FALLTHROUGH */
   12903 	case WM_T_PCH2:
   12904 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12905 			break;
   12906 		/* FALLTHROUGH */
   12907 	case WM_T_PCH:
   12908 		if (sc->sc_type == WM_T_PCH)
   12909 			if ((fwsm & FWSM_FW_VALID) != 0)
   12910 				break;
   12911 
   12912 		if (wm_phy_resetisblocked(sc) == true) {
   12913 			printf("XXX reset is blocked(3)\n");
   12914 			break;
   12915 		}
   12916 
   12917 		wm_toggle_lanphypc_pch_lpt(sc);
   12918 
   12919 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12920 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12921 				break;
   12922 
   12923 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12924 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12925 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12926 
   12927 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12928 				break;
   12929 			rv = -1;
   12930 		}
   12931 		break;
   12932 	default:
   12933 		break;
   12934 	}
   12935 
   12936 	/* Release semaphore */
   12937 	sc->phy.release(sc);
   12938 
   12939 	if (rv == 0) {
   12940 		if (wm_phy_resetisblocked(sc)) {
   12941 			printf("XXX reset is blocked(4)\n");
   12942 			goto out;
   12943 		}
   12944 		wm_reset_phy(sc);
   12945 		if (wm_phy_resetisblocked(sc))
    12946 			printf("XXX reset is blocked(5)\n");
   12947 	}
   12948 
   12949 out:
   12950 	/*
   12951 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12952 	 */
   12953 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12954 		delay(10*1000);
   12955 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12956 	}
   12957 }
   12958 
   12959 static void
   12960 wm_init_manageability(struct wm_softc *sc)
   12961 {
   12962 
   12963 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12964 		device_xname(sc->sc_dev), __func__));
   12965 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12966 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12967 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12968 
   12969 		/* Disable hardware interception of ARP */
   12970 		manc &= ~MANC_ARP_EN;
   12971 
   12972 		/* Enable receiving management packets to the host */
   12973 		if (sc->sc_type >= WM_T_82571) {
   12974 			manc |= MANC_EN_MNG2HOST;
    12975 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12976 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12977 		}
   12978 
   12979 		CSR_WRITE(sc, WMREG_MANC, manc);
   12980 	}
   12981 }
   12982 
   12983 static void
   12984 wm_release_manageability(struct wm_softc *sc)
   12985 {
   12986 
   12987 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12988 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12989 
   12990 		manc |= MANC_ARP_EN;
   12991 		if (sc->sc_type >= WM_T_82571)
   12992 			manc &= ~MANC_EN_MNG2HOST;
   12993 
   12994 		CSR_WRITE(sc, WMREG_MANC, manc);
   12995 	}
   12996 }
   12997 
   12998 static void
   12999 wm_get_wakeup(struct wm_softc *sc)
   13000 {
   13001 
   13002 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13003 	switch (sc->sc_type) {
   13004 	case WM_T_82573:
   13005 	case WM_T_82583:
   13006 		sc->sc_flags |= WM_F_HAS_AMT;
   13007 		/* FALLTHROUGH */
   13008 	case WM_T_80003:
   13009 	case WM_T_82575:
   13010 	case WM_T_82576:
   13011 	case WM_T_82580:
   13012 	case WM_T_I350:
   13013 	case WM_T_I354:
   13014 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13015 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13016 		/* FALLTHROUGH */
   13017 	case WM_T_82541:
   13018 	case WM_T_82541_2:
   13019 	case WM_T_82547:
   13020 	case WM_T_82547_2:
   13021 	case WM_T_82571:
   13022 	case WM_T_82572:
   13023 	case WM_T_82574:
   13024 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13025 		break;
   13026 	case WM_T_ICH8:
   13027 	case WM_T_ICH9:
   13028 	case WM_T_ICH10:
   13029 	case WM_T_PCH:
   13030 	case WM_T_PCH2:
   13031 	case WM_T_PCH_LPT:
   13032 	case WM_T_PCH_SPT:
   13033 		sc->sc_flags |= WM_F_HAS_AMT;
   13034 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13035 		break;
   13036 	default:
   13037 		break;
   13038 	}
   13039 
    13040 	/* Step 1: set HAS_MANAGE */
   13041 	if (wm_enable_mng_pass_thru(sc) != 0)
   13042 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13043 
   13044 #ifdef WM_DEBUG
   13045 	printf("\n");
   13046 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   13047 		printf("HAS_AMT,");
   13048 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   13049 		printf("ARC_SUBSYS_VALID,");
   13050 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   13051 		printf("ASF_FIRMWARE_PRES,");
   13052 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   13053 		printf("HAS_MANAGE,");
   13054 	printf("\n");
   13055 #endif
    13056 	/*
    13057 	 * Note that the WOL flags are set after the EEPROM settings have
    13058 	 * been reset.
    13059 	 */
   13060 }
   13061 
    13062 /*
    13063  * Unconfigure Ultra Low Power (ULP) mode.
    13064  * Only for PCH_LPT (I217) and newer; some I217/I218 variants are
           * excluded below.
    13065  */
   13066 static void
   13067 wm_ulp_disable(struct wm_softc *sc)
   13068 {
   13069 	uint32_t reg;
   13070 	int i = 0;
   13071 
   13072 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13073 		device_xname(sc->sc_dev), __func__));
    13074 	/* Exclude old devices and some I217/I218 variants */
   13075 	if ((sc->sc_type < WM_T_PCH_LPT)
   13076 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13077 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13078 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13079 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13080 		return;
   13081 
   13082 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    13083 		/* Ask the ME firmware to unconfigure ULP mode in the PHY */
   13084 		reg = CSR_READ(sc, WMREG_H2ME);
   13085 		reg &= ~H2ME_ULP;
   13086 		reg |= H2ME_ENFORCE_SETTINGS;
   13087 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13088 
   13089 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13090 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13091 			if (i++ == 30) {
   13092 				printf("%s timed out\n", __func__);
   13093 				return;
   13094 			}
   13095 			delay(10 * 1000);
   13096 		}
   13097 		reg = CSR_READ(sc, WMREG_H2ME);
   13098 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13099 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13100 
   13101 		return;
   13102 	}
   13103 
   13104 	/* Acquire semaphore */
   13105 	sc->phy.acquire(sc);
   13106 
   13107 	/* Toggle LANPHYPC */
   13108 	wm_toggle_lanphypc_pch_lpt(sc);
   13109 
   13110 	/* Unforce SMBus mode in PHY */
   13111 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13112 	if (reg == 0x0000 || reg == 0xffff) {
   13113 		uint32_t reg2;
   13114 
   13115 		printf("%s: Force SMBus first.\n", __func__);
   13116 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13117 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13118 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13119 		delay(50 * 1000);
   13120 
   13121 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13122 	}
   13123 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13124 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13125 
   13126 	/* Unforce SMBus mode in MAC */
   13127 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13128 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13129 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13130 
   13131 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13132 	reg |= HV_PM_CTRL_K1_ENA;
   13133 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13134 
   13135 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13136 	reg &= ~(I218_ULP_CONFIG1_IND
   13137 	    | I218_ULP_CONFIG1_STICKY_ULP
   13138 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13139 	    | I218_ULP_CONFIG1_WOL_HOST
   13140 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13141 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13142 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13143 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13144 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13145 	reg |= I218_ULP_CONFIG1_START;
   13146 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13147 
   13148 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13149 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13150 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13151 
   13152 	/* Release semaphore */
   13153 	sc->phy.release(sc);
   13154 	wm_gmii_reset(sc);
   13155 	delay(50 * 1000);
   13156 }
   13157 
   13158 /* WOL in the newer chipset interfaces (pchlan) */
   13159 static void
   13160 wm_enable_phy_wakeup(struct wm_softc *sc)
   13161 {
   13162 #if 0
   13163 	uint16_t preg;
   13164 
   13165 	/* Copy MAC RARs to PHY RARs */
   13166 
   13167 	/* Copy MAC MTA to PHY MTA */
   13168 
   13169 	/* Configure PHY Rx Control register */
   13170 
   13171 	/* Enable PHY wakeup in MAC register */
   13172 
   13173 	/* Configure and enable PHY wakeup in PHY registers */
   13174 
   13175 	/* Activate PHY wakeup */
   13176 
   13177 	/* XXX */
   13178 #endif
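          	/*
          	 * XXX Nothing is programmed yet: until the steps above are
          	 * implemented, WOL on PCH and newer devices (which take this
          	 * path from wm_enable_wakeup()) is incomplete.
          	 */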
   13179 }
   13180 
    13181 /* IGP3 PHY power-down workaround for D3 */
   13182 static void
   13183 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13184 {
   13185 	uint32_t reg;
   13186 	int i;
   13187 
   13188 	for (i = 0; i < 2; i++) {
   13189 		/* Disable link */
   13190 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13191 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13192 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13193 
   13194 		/*
   13195 		 * Call gig speed drop workaround on Gig disable before
   13196 		 * accessing any PHY registers
   13197 		 */
   13198 		if (sc->sc_type == WM_T_ICH8)
   13199 			wm_gig_downshift_workaround_ich8lan(sc);
   13200 
   13201 		/* Write VR power-down enable */
   13202 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13203 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13204 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13205 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13206 
   13207 		/* Read it back and test */
   13208 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13209 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13210 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13211 			break;
   13212 
   13213 		/* Issue PHY reset and repeat at most one more time */
   13214 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13215 	}
   13216 }
   13217 
   13218 static void
   13219 wm_enable_wakeup(struct wm_softc *sc)
   13220 {
   13221 	uint32_t reg, pmreg;
   13222 	pcireg_t pmode;
   13223 
   13224 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13225 		device_xname(sc->sc_dev), __func__));
   13226 
   13227 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13228 		&pmreg, NULL) == 0)
   13229 		return;
   13230 
   13231 	/* Advertise the wakeup capability */
   13232 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13233 	    | CTRL_SWDPIN(3));
   13234 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13235 
   13236 	/* ICH workaround */
   13237 	switch (sc->sc_type) {
   13238 	case WM_T_ICH8:
   13239 	case WM_T_ICH9:
   13240 	case WM_T_ICH10:
   13241 	case WM_T_PCH:
   13242 	case WM_T_PCH2:
   13243 	case WM_T_PCH_LPT:
   13244 	case WM_T_PCH_SPT:
   13245 		/* Disable gig during WOL */
   13246 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13247 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13248 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13249 		if (sc->sc_type == WM_T_PCH)
   13250 			wm_gmii_reset(sc);
   13251 
   13252 		/* Power down workaround */
   13253 		if (sc->sc_phytype == WMPHY_82577) {
   13254 			struct mii_softc *child;
   13255 
   13256 			/* Assume that the PHY is copper */
   13257 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13258 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13259 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
    13260 				    (768 << 5) | 25, 0x0444); /* page 768, reg 25: magic value */
   13261 		}
   13262 		break;
   13263 	default:
   13264 		break;
   13265 	}
   13266 
   13267 	/* Keep the laser running on fiber adapters */
   13268 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13269 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13270 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13271 		reg |= CTRL_EXT_SWDPIN(3);
   13272 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13273 	}
   13274 
   13275 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13276 #if 0	/* for multicast packets */
   13277 	reg |= WUFC_MC;
   13278 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13279 #endif
   13280 
   13281 	if (sc->sc_type >= WM_T_PCH)
   13282 		wm_enable_phy_wakeup(sc);
   13283 	else {
   13284 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13285 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13286 	}
   13287 
   13288 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13289 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13290 		|| (sc->sc_type == WM_T_PCH2))
   13291 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13292 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13293 
   13294 	/* Request PME */
   13295 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13296 #if 0
   13297 	/* Disable WOL */
   13298 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13299 #else
   13300 	/* For WOL */
   13301 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13302 #endif
   13303 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13304 }
   13305 
    13306 /* LPLU (Low Power Link Up) */
   13307 
   13308 static void
   13309 wm_lplu_d0_disable(struct wm_softc *sc)
   13310 {
   13311 	uint32_t reg;
   13312 
   13313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13314 		device_xname(sc->sc_dev), __func__));
   13315 
   13316 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13317 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13318 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13319 }
   13320 
   13321 static void
   13322 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13323 {
   13324 	uint32_t reg;
   13325 
   13326 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13327 		device_xname(sc->sc_dev), __func__));
   13328 
   13329 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13330 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13331 	reg |= HV_OEM_BITS_ANEGNOW;
   13332 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13333 }
   13334 
   13335 /* EEE */
   13336 
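          /*
           * Advertise or withdraw Energy Efficient Ethernet at 1G/100M and
           * enable or disable LPI (Low Power Idle) signalling, depending on
           * the WM_F_EEE flag.
           */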
   13337 static void
   13338 wm_set_eee_i350(struct wm_softc *sc)
   13339 {
   13340 	uint32_t ipcnfg, eeer;
   13341 
   13342 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13343 	eeer = CSR_READ(sc, WMREG_EEER);
   13344 
   13345 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13346 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13347 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13348 		    | EEER_LPI_FC);
   13349 	} else {
   13350 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13351 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13352 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13353 		    | EEER_LPI_FC);
   13354 	}
   13355 
   13356 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13357 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13358 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13359 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13360 }
   13361 
    13362 /*
    13363  * Workarounds (mainly PHY related).
    13364  * Most PHY workarounds live in the PHY drivers; the ones below are
           * kept in the MAC driver.
    13365  */
   13366 
   13367 /* Work-around for 82566 Kumeran PCS lock loss */
   13368 static void
   13369 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13370 {
   13371 #if 0
   13372 	int miistatus, active, i;
   13373 	int reg;
   13374 
   13375 	miistatus = sc->sc_mii.mii_media_status;
   13376 
   13377 	/* If the link is not up, do nothing */
   13378 	if ((miistatus & IFM_ACTIVE) == 0)
   13379 		return;
   13380 
   13381 	active = sc->sc_mii.mii_media_active;
   13382 
    13383 	/* Nothing to do if the link speed is other than 1Gbps */
   13384 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13385 		return;
   13386 
   13387 	for (i = 0; i < 10; i++) {
   13388 		/* read twice */
   13389 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13390 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13391 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13392 			goto out;	/* GOOD! */
   13393 
   13394 		/* Reset the PHY */
   13395 		wm_gmii_reset(sc);
   13396 		delay(5*1000);
   13397 	}
   13398 
   13399 	/* Disable GigE link negotiation */
   13400 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13401 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13402 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13403 
   13404 	/*
   13405 	 * Call gig speed drop workaround on Gig disable before accessing
   13406 	 * any PHY registers.
   13407 	 */
   13408 	wm_gig_downshift_workaround_ich8lan(sc);
   13409 
   13410 out:
   13411 	return;
   13412 #endif
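          	/* XXX This whole workaround is currently compiled out. */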
   13413 }
   13414 
    13415 /* Workaround: WOL from S5 stops working without a gig downshift */
   13416 static void
   13417 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13418 {
   13419 	uint16_t kmrn_reg;
   13420 
   13421 	/* Only for igp3 */
   13422 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13423 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13424 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13425 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13426 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13427 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13428 	}
   13429 }
   13430 
   13431 /*
   13432  * Workaround for pch's PHYs
   13433  * XXX should be moved to new PHY driver?
   13434  */
   13435 static void
   13436 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13437 {
   13438 
   13439 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13440 		device_xname(sc->sc_dev), __func__));
   13441 	KASSERT(sc->sc_type == WM_T_PCH);
   13442 
   13443 	if (sc->sc_phytype == WMPHY_82577)
   13444 		wm_set_mdio_slow_mode_hv(sc);
   13445 
    13446 	/* XXX TODO: (PCH rev. 2) && (82577 && (PHY rev. 2 or 3)) */
    13447 
    13448 	/* XXX TODO: (82577 && (PHY rev. 1 or 2)) || (82578 && PHY rev. 1) */
   13449 
   13450 	/* 82578 */
   13451 	if (sc->sc_phytype == WMPHY_82578) {
   13452 		struct mii_softc *child;
   13453 
   13454 		/*
   13455 		 * Return registers to default by doing a soft reset then
   13456 		 * writing 0x3140 to the control register
   13457 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13458 		 */
   13459 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13460 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13461 			PHY_RESET(child);
   13462 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13463 			    0x3140);
   13464 		}
   13465 	}
   13466 
   13467 	/* Select page 0 */
   13468 	sc->phy.acquire(sc);
   13469 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13470 	sc->phy.release(sc);
   13471 
    13472 	/*
    13473 	 * Configure the K1 Si workaround during PHY reset, assuming the
    13474 	 * link is up, so that K1 gets disabled if the link runs at 1Gbps.
    13475 	 */
   13476 	wm_k1_gig_workaround_hv(sc, 1);
   13477 }
   13478 
   13479 static void
   13480 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13481 {
   13482 
   13483 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13484 		device_xname(sc->sc_dev), __func__));
   13485 	KASSERT(sc->sc_type == WM_T_PCH2);
   13486 
   13487 	wm_set_mdio_slow_mode_hv(sc);
   13488 }
   13489 
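          /*
           * K1 is a power-saving state of the Kumeran interconnect between
           * the MAC and the PHY.  It must not be used while the link runs at
           * 1Gbps, so force it off in that case; otherwise follow the NVM
           * default (sc_nvm_k1_enabled).
           */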
   13490 static int
   13491 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13492 {
   13493 	int k1_enable = sc->sc_nvm_k1_enabled;
   13494 
   13495 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13496 		device_xname(sc->sc_dev), __func__));
   13497 
   13498 	if (sc->phy.acquire(sc) != 0)
   13499 		return -1;
   13500 
   13501 	if (link) {
   13502 		k1_enable = 0;
   13503 
   13504 		/* Link stall fix for link up */
   13505 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   13506 	} else {
   13507 		/* Link stall fix for link down */
   13508 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   13509 	}
   13510 
   13511 	wm_configure_k1_ich8lan(sc, k1_enable);
   13512 	sc->phy.release(sc);
   13513 
   13514 	return 0;
   13515 }
   13516 
   13517 static void
   13518 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13519 {
   13520 	uint32_t reg;
   13521 
   13522 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13523 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13524 	    reg | HV_KMRN_MDIO_SLOW);
   13525 }
   13526 
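          /*
           * Write the new K1 setting over the Kumeran interface, then force
           * the MAC speed briefly (with the speed-bypass bit set) so the
           * change takes effect, and finally restore CTRL/CTRL_EXT.
           */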
   13527 static void
   13528 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13529 {
   13530 	uint32_t ctrl, ctrl_ext, tmp;
   13531 	uint16_t kmrn_reg;
   13532 
   13533 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13534 
   13535 	if (k1_enable)
   13536 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13537 	else
   13538 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13539 
   13540 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13541 
   13542 	delay(20);
   13543 
   13544 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13545 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13546 
   13547 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13548 	tmp |= CTRL_FRCSPD;
   13549 
   13550 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13551 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13552 	CSR_WRITE_FLUSH(sc);
   13553 	delay(20);
   13554 
   13555 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13556 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13557 	CSR_WRITE_FLUSH(sc);
   13558 	delay(20);
   13559 }
   13560 
    13561 /* Special case for the 82575: manual init is required ... */
   13562 static void
   13563 wm_reset_init_script_82575(struct wm_softc *sc)
   13564 {
    13565 	/*
    13566 	 * Remark: this is untested code - we have no board without EEPROM.
    13567 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13568 	 */
   13569 
   13570 	/* SerDes configuration via SERDESCTRL */
   13571 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13572 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13573 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13574 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13575 
   13576 	/* CCM configuration via CCMCTL register */
   13577 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13578 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13579 
   13580 	/* PCIe lanes configuration */
   13581 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13582 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13583 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13584 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13585 
   13586 	/* PCIe PLL Configuration */
   13587 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13588 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13589 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13590 }
   13591 
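          /*
           * After a reset, restore the MDICNFG destination and shared-MDIO
           * bits from the CFG3 word in the NVM; only needed in SGMII mode.
           */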
   13592 static void
   13593 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13594 {
   13595 	uint32_t reg;
   13596 	uint16_t nvmword;
   13597 	int rv;
   13598 
   13599 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13600 		return;
   13601 
   13602 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13603 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13604 	if (rv != 0) {
   13605 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13606 		    __func__);
   13607 		return;
   13608 	}
   13609 
   13610 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13611 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13612 		reg |= MDICNFG_DEST;
   13613 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13614 		reg |= MDICNFG_COM_MDIO;
   13615 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13616 }
   13617 
   13618 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13619 
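          /*
           * Check whether the PHY responds on the MDIO bus: read the PHY ID
           * registers up to twice and, on pre-PCH_LPT parts, retry once more
           * in MDIO slow mode.  On PCH_LPT/PCH_SPT, once the PHY is known to
           * be reachable, unforce SMBus mode unless the ME firmware is
           * active.
           */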
   13620 static bool
   13621 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13622 {
   13623 	int i;
   13624 	uint32_t reg;
   13625 	uint16_t id1, id2;
   13626 
   13627 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13628 		device_xname(sc->sc_dev), __func__));
   13629 	id1 = id2 = 0xffff;
   13630 	for (i = 0; i < 2; i++) {
   13631 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13632 		if (MII_INVALIDID(id1))
   13633 			continue;
   13634 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13635 		if (MII_INVALIDID(id2))
   13636 			continue;
   13637 		break;
   13638 	}
   13639 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
   13640 		goto out;
   13641 	}
   13642 
   13643 	if (sc->sc_type < WM_T_PCH_LPT) {
   13644 		sc->phy.release(sc);
   13645 		wm_set_mdio_slow_mode_hv(sc);
   13646 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13647 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13648 		sc->phy.acquire(sc);
   13649 	}
   13650 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13651 		printf("XXX return with false\n");
   13652 		return false;
   13653 	}
   13654 out:
   13655 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13656 		/* Only unforce SMBus if ME is not active */
   13657 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13658 			/* Unforce SMBus mode in PHY */
   13659 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13660 			    CV_SMB_CTRL);
   13661 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13662 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13663 			    CV_SMB_CTRL, reg);
   13664 
   13665 			/* Unforce SMBus mode in MAC */
   13666 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13667 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13668 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13669 		}
   13670 	}
   13671 	return true;
   13672 }
   13673 
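          /*
           * Toggle the LANPHYPC pin to power-cycle the PHY: drive the value
           * low with the override bit set, release the override, then wait a
           * fixed 50ms (pre-PCH_LPT) or poll CTRL_EXT_LPCD for completion.
           */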
   13674 static void
   13675 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13676 {
   13677 	uint32_t reg;
   13678 	int i;
   13679 
   13680 	/* Set PHY Config Counter to 50msec */
   13681 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13682 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13683 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13684 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13685 
   13686 	/* Toggle LANPHYPC */
   13687 	reg = CSR_READ(sc, WMREG_CTRL);
   13688 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13689 	reg &= ~CTRL_LANPHYPC_VALUE;
   13690 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13691 	CSR_WRITE_FLUSH(sc);
   13692 	delay(1000);
   13693 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13694 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13695 	CSR_WRITE_FLUSH(sc);
   13696 
   13697 	if (sc->sc_type < WM_T_PCH_LPT)
   13698 		delay(50 * 1000);
   13699 	else {
   13700 		i = 20;
   13701 
   13702 		do {
   13703 			delay(5 * 1000);
   13704 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13705 		    && i--);
   13706 
   13707 		delay(30 * 1000);
   13708 	}
   13709 }
   13710 
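          /*
           * Program PCIe LTR (Latency Tolerance Reporting) on PCH_LPT: with
           * link up, derive the tolerable latency from the Rx buffer size
           * and the link speed, clamp it to the platform maximum found in
           * PCI config space, and publish it through the LTRV register.
           */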
   13711 static int
   13712 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13713 {
   13714 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13715 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13716 	uint32_t rxa;
   13717 	uint16_t scale = 0, lat_enc = 0;
   13718 	int64_t lat_ns, value;
   13719 
   13720 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13721 		device_xname(sc->sc_dev), __func__));
   13722 
   13723 	if (link) {
   13724 		pcireg_t preg;
   13725 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13726 
   13727 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13728 
   13729 		/*
   13730 		 * Determine the maximum latency tolerated by the device.
   13731 		 *
   13732 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13733 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13734 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13735 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13736 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13737 		 */
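          		/*
          		 * Worked example, assuming LTRV_VALUE is the 10-bit
          		 * maximum (1023): lat_ns = 100000 needs two divisions
          		 * by 2^5 (100000 -> 3125 -> 98), giving scale = 2 and
          		 * value = 98, i.e. 98 * 2^10 ns (~100us; howmany()
          		 * rounds up, so the encoding never under-reports).
          		 */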
   13738 		lat_ns = ((int64_t)rxa * 1024 -
   13739 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13740 		if (lat_ns < 0)
   13741 			lat_ns = 0;
   13742 		else {
   13743 			uint32_t status;
   13744 			uint16_t speed;
   13745 
   13746 			status = CSR_READ(sc, WMREG_STATUS);
   13747 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13748 			case STATUS_SPEED_10:
   13749 				speed = 10;
   13750 				break;
   13751 			case STATUS_SPEED_100:
   13752 				speed = 100;
   13753 				break;
   13754 			case STATUS_SPEED_1000:
   13755 				speed = 1000;
   13756 				break;
   13757 			default:
   13758 				printf("%s: Unknown speed (status = %08x)\n",
   13759 				    device_xname(sc->sc_dev), status);
   13760 				return -1;
   13761 			}
   13762 			lat_ns /= speed;
   13763 		}
   13764 		value = lat_ns;
   13765 
   13766 		while (value > LTRV_VALUE) {
    13767 			scale++;
   13768 			value = howmany(value, __BIT(5));
   13769 		}
   13770 		if (scale > LTRV_SCALE_MAX) {
   13771 			printf("%s: Invalid LTR latency scale %d\n",
   13772 			    device_xname(sc->sc_dev), scale);
   13773 			return -1;
   13774 		}
   13775 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13776 
   13777 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13778 		    WM_PCI_LTR_CAP_LPT);
   13779 		max_snoop = preg & 0xffff;
   13780 		max_nosnoop = preg >> 16;
   13781 
   13782 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13783 
   13784 		if (lat_enc > max_ltr_enc) {
   13785 			lat_enc = max_ltr_enc;
   13786 		}
   13787 	}
    13788 	/* Set the Snoop and No-Snoop latencies to the same value */
   13789 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13790 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13791 
   13792 	return 0;
   13793 }
   13794 
   13795 /*
   13796  * I210 Errata 25 and I211 Errata 10
   13797  * Slow System Clock.
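           * If the internal PHY's PLL comes up unconfigured, reset the PHY
           * and bounce the device through D3/D0 with a patched iNVM autoload
           * word, retrying up to WM_MAX_PLL_TRIES times.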
   13798  */
   13799 static void
   13800 wm_pll_workaround_i210(struct wm_softc *sc)
   13801 {
   13802 	uint32_t mdicnfg, wuc;
   13803 	uint32_t reg;
   13804 	pcireg_t pcireg;
   13805 	uint32_t pmreg;
   13806 	uint16_t nvmword, tmp_nvmword;
   13807 	int phyval;
   13808 	bool wa_done = false;
   13809 	int i;
   13810 
   13811 	/* Save WUC and MDICNFG registers */
   13812 	wuc = CSR_READ(sc, WMREG_WUC);
   13813 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13814 
   13815 	reg = mdicnfg & ~MDICNFG_DEST;
   13816 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13817 
   13818 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13819 		nvmword = INVM_DEFAULT_AL;
   13820 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13821 
   13822 	/* Get Power Management cap offset */
   13823 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13824 		&pmreg, NULL) == 0)
   13825 		return;
   13826 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13827 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13828 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13829 
   13830 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13831 			break; /* OK */
   13832 		}
   13833 
   13834 		wa_done = true;
   13835 		/* Directly reset the internal PHY */
   13836 		reg = CSR_READ(sc, WMREG_CTRL);
   13837 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13838 
   13839 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13840 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13841 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13842 
   13843 		CSR_WRITE(sc, WMREG_WUC, 0);
   13844 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13845 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13846 
   13847 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13848 		    pmreg + PCI_PMCSR);
   13849 		pcireg |= PCI_PMCSR_STATE_D3;
   13850 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13851 		    pmreg + PCI_PMCSR, pcireg);
   13852 		delay(1000);
   13853 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13854 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13855 		    pmreg + PCI_PMCSR, pcireg);
   13856 
   13857 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13858 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13859 
   13860 		/* Restore WUC register */
   13861 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13862 	}
   13863 
   13864 	/* Restore MDICNFG setting */
   13865 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13866 	if (wa_done)
   13867 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13868 }
   13869