/*	$NetBSD: if_wm.c,v 1.512 2017/06/23 06:10:31 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.512 2017/06/23 06:10:31 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
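
/*
 * Usage sketch (illustrative, not part of the driver): the printf
 * arguments are passed as one parenthesized group so that the whole
 * call compiles away when WM_DEBUG is not defined, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *		device_xname(sc->sc_dev)));
 *
 * Only the categories currently set in wm_debug produce output.
 */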

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * The maximum number of interrupts this driver can use.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
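
/*
 * Worked example (illustrative): since WM_NTXDESC(txq) is a power of
 * two, ring indices wrap with a mask instead of a modulo.  With 4096
 * descriptors, WM_NEXTTX(txq, 4095) == (4096 & 4095) == 0, so the
 * index returns to the start of the ring without a division.  The
 * same applies to the job ring via WM_NEXTTXS().
 */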

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
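
/*
 * Worked arithmetic for the comment above (illustrative, assuming a
 * ~9k jumbo frame): such a frame spans howmany(9018, MCLBYTES) == 5
 * buffers of 2048 bytes each, so 256 descriptors hold 256 / 5 == 51,
 * i.e. roughly 50 in-flight jumbo packets.
 */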

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do{								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	}while(0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
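
/*
 * Expansion sketch (illustrative): WM_Q_EVCNT_DEFINE(txq, txdw)
 * pastes tokens to declare, inside the queue structure,
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * and WM_Q_EVCNT_ATTACH() later formats the name as "txq00txdw" (for
 * queue 0) and registers the counter with evcnt_attach_dynamic(9).
 */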

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by
	 * multiple CPUs.  This queue mediates between those CPUs
	 * without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* a rx descriptor size */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};
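
/*
 * Usage sketch (illustrative; the exact call sites live elsewhere in
 * this file): acquire/release form a tiny ops vtable so that the
 * chip-specific semaphore routines declared below can be plugged in
 * once and used uniformly:
 *
 *	if (sc->phy.acquire(sc) == 0) {
 *		... access the PHY ...
 *		sc->phy.release(sc);
 *	}
 */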

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_ihs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
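
/*
 * Usage sketch (illustrative): the macros tolerate a NULL
 * sc_core_lock, in which case locking degrades to a no-op and
 * WM_CORE_LOCKED() is always true:
 *
 *	WM_CORE_LOCK(sc);
 *	KASSERT(WM_CORE_LOCKED(sc));
 *	WM_CORE_UNLOCK(sc);
 */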

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
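
/*
 * Sketch of the tail-pointer idiom (illustrative): rxq_tailp always
 * points at the m_next slot that the next fragment will fill, so
 * linking is O(1) with no list walk:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head = m1, tailp = &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next = m2, tailp = &m2->m_next
 */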

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
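
/*
 * Note (an assumption about intent, consistent with other NetBSD NIC
 * drivers): PCI writes are posted, so CSR_WRITE_FLUSH() forces them
 * out to the device by issuing a harmless read of WMREG_STATUS, e.g.
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */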

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
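
/*
 * Worked example (illustrative): each ring lives in one DMA area, so
 * the bus address of descriptor x is base + x * descsize, split into
 * 32-bit halves for the hardware.  With 16-byte descriptors at base
 * 0x100000000:
 *
 *	WM_CDTXADDR(txq, 2)    == 0x100000020
 *	WM_CDTXADDR_LO(txq, 2) == 0x00000020
 *	WM_CDTXADDR_HI(txq, 2) == 0x1
 *
 * On systems with a 32-bit bus_addr_t the high half is simply 0.
 */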

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * kumeran specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY's workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
    1487 /*
    1488  * Register read/write functions,
    1489  * other than CSR_{READ|WRITE}().
    1490  */
   1491 
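         /*
          * Indirect register access through the I/O BAR: the register
          * offset is written at BAR offset 0 (IOADDR) and the data is
          * then read or written at BAR offset 4 (IODATA).
          */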
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
   1509 
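         /*
          * Write an 8-bit value to one of the 82575's indexed 8-bit
          * controller registers: the data and target offset are packed
          * into a single register write, after which the READY bit is
          * polled (in 5us steps) until the write is accepted.
          */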
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
   1532 
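         /*
          * Load a bus address into the two little-endian 32-bit halves
          * of a wiseman descriptor address field; the high half is only
          * non-zero when bus addresses are 64 bits wide.
          */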
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
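         /*
          * Sync 'num' Tx descriptors starting at 'start'.  When the range
          * wraps past the end of the ring, it is synced in two pieces:
          * first the tail of the ring, then the remainder from index 0.
          */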
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
   1564 
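         /* Sync the single Rx descriptor at 'start'. */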
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
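         /*
          * Reinitialize the Rx descriptor at 'start' with its mbuf's DMA
          * address, in whichever descriptor format this chip generation
          * expects, then move the hardware tail pointer (RDT) to hand
          * the slot back to the chip.
          */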
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
   1584 	 * to a 4-byte boundary.
   1585 
    1586 	 *
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
    1630 /* Look up the device in the supported-product table */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1678 	pcireg_t preg, memtype;
   1679 	uint16_t eeprom_data, apme_mask;
   1680 	bool force_clear_smbi;
   1681 	uint32_t link_mode;
   1682 	uint32_t reg;
   1683 
   1684 	sc->sc_dev = self;
   1685 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1686 	sc->sc_core_stopping = false;
   1687 
   1688 	wmp = wm_lookup(pa);
   1689 #ifdef DIAGNOSTIC
   1690 	if (wmp == NULL) {
   1691 		printf("\n");
   1692 		panic("wm_attach: impossible");
   1693 	}
   1694 #endif
   1695 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1696 
   1697 	sc->sc_pc = pa->pa_pc;
   1698 	sc->sc_pcitag = pa->pa_tag;
   1699 
   1700 	if (pci_dma64_available(pa))
   1701 		sc->sc_dmat = pa->pa_dmat64;
   1702 	else
   1703 		sc->sc_dmat = pa->pa_dmat;
   1704 
   1705 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1706 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1707 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1708 
   1709 	sc->sc_type = wmp->wmp_type;
   1710 
   1711 	/* Set default function pointers */
   1712 	sc->phy.acquire = wm_get_null;
   1713 	sc->phy.release = wm_put_null;
   1714 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1715 
   1716 	if (sc->sc_type < WM_T_82543) {
   1717 		if (sc->sc_rev < 2) {
   1718 			aprint_error_dev(sc->sc_dev,
   1719 			    "i82542 must be at least rev. 2\n");
   1720 			return;
   1721 		}
   1722 		if (sc->sc_rev < 3)
   1723 			sc->sc_type = WM_T_82542_2_0;
   1724 	}
   1725 
   1726 	/*
   1727 	 * Disable MSI for Errata:
   1728 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1729 	 *
   1730 	 *  82544: Errata 25
   1731 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1732 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1733 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1734 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1735 	 *
   1736 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1737 	 *
   1738 	 *  82571 & 82572: Errata 63
   1739 	 */
   1740 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1741 	    || (sc->sc_type == WM_T_82572))
   1742 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1743 
   1744 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1745 	    || (sc->sc_type == WM_T_82580)
   1746 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1747 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1748 		sc->sc_flags |= WM_F_NEWQUEUE;
   1749 
   1750 	/* Set device properties (mactype) */
   1751 	dict = device_properties(sc->sc_dev);
   1752 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1753 
   1754 	/*
    1755 	 * Map the device.  All devices support memory-mapped access,
   1756 	 * and it is really required for normal operation.
   1757 	 */
   1758 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1759 	switch (memtype) {
   1760 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1762 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1763 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1764 		break;
   1765 	default:
   1766 		memh_valid = 0;
   1767 		break;
   1768 	}
   1769 
   1770 	if (memh_valid) {
   1771 		sc->sc_st = memt;
   1772 		sc->sc_sh = memh;
   1773 		sc->sc_ss = memsize;
   1774 	} else {
   1775 		aprint_error_dev(sc->sc_dev,
   1776 		    "unable to map device registers\n");
   1777 		return;
   1778 	}
   1779 
   1780 	/*
   1781 	 * In addition, i82544 and later support I/O mapped indirect
   1782 	 * register access.  It is not desirable (nor supported in
   1783 	 * this driver) to use it for normal operation, though it is
   1784 	 * required to work around bugs in some chip versions.
   1785 	 */
   1786 	if (sc->sc_type >= WM_T_82544) {
   1787 		/* First we have to find the I/O BAR. */
   1788 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1789 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1790 			if (memtype == PCI_MAPREG_TYPE_IO)
   1791 				break;
   1792 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1793 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1794 				i += 4;	/* skip high bits, too */
   1795 		}
   1796 		if (i < PCI_MAPREG_END) {
   1797 			/*
    1798 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1799 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1800 			 * That's no problem, because newer chips don't have
    1801 			 * this bug.
    1802 			 *
    1803 			 * The i8254x apparently doesn't respond when the
    1804 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1805 			 * been configured.
   1806 			 */
   1807 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1808 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1809 				aprint_error_dev(sc->sc_dev,
   1810 				    "WARNING: I/O BAR at zero.\n");
   1811 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1812 					0, &sc->sc_iot, &sc->sc_ioh,
   1813 					NULL, &sc->sc_ios) == 0) {
   1814 				sc->sc_flags |= WM_F_IOH_VALID;
   1815 			} else {
   1816 				aprint_error_dev(sc->sc_dev,
   1817 				    "WARNING: unable to map I/O space\n");
   1818 			}
   1819 		}
   1820 
   1821 	}
   1822 
   1823 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1824 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1825 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1826 	if (sc->sc_type < WM_T_82542_2_1)
   1827 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1828 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1829 
   1830 	/* power up chip */
   1831 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1832 	    NULL)) && error != EOPNOTSUPP) {
   1833 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1834 		return;
   1835 	}
   1836 
   1837 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1838 
   1839 	/* Allocation settings */
   1840 	max_type = PCI_INTR_TYPE_MSIX;
   1841 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1842 	counts[PCI_INTR_TYPE_MSI] = 1;
   1843 	counts[PCI_INTR_TYPE_INTX] = 1;
   1844 	/* overridden by disable flags */
   1845 	if (wm_disable_msi != 0) {
   1846 		counts[PCI_INTR_TYPE_MSI] = 0;
   1847 		if (wm_disable_msix != 0) {
   1848 			max_type = PCI_INTR_TYPE_INTX;
   1849 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1850 		}
   1851 	} else if (wm_disable_msix != 0) {
   1852 		max_type = PCI_INTR_TYPE_MSI;
   1853 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1854 	}
   1855 
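         	/*
         	 * Interrupt allocation is retried with progressively simpler
         	 * types: if MSI-X setup fails, the vectors are released and
         	 * MSI is tried instead; if that fails too, we fall back to
         	 * plain INTx.
         	 */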
   1856 alloc_retry:
   1857 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1858 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1859 		return;
   1860 	}
   1861 
   1862 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1863 		error = wm_setup_msix(sc);
   1864 		if (error) {
   1865 			pci_intr_release(pc, sc->sc_intrs,
   1866 			    counts[PCI_INTR_TYPE_MSIX]);
   1867 
   1868 			/* Setup for MSI: Disable MSI-X */
   1869 			max_type = PCI_INTR_TYPE_MSI;
   1870 			counts[PCI_INTR_TYPE_MSI] = 1;
   1871 			counts[PCI_INTR_TYPE_INTX] = 1;
   1872 			goto alloc_retry;
   1873 		}
    1874 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1875 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1876 		error = wm_setup_legacy(sc);
   1877 		if (error) {
   1878 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1879 			    counts[PCI_INTR_TYPE_MSI]);
   1880 
   1881 			/* The next try is for INTx: Disable MSI */
   1882 			max_type = PCI_INTR_TYPE_INTX;
   1883 			counts[PCI_INTR_TYPE_INTX] = 1;
   1884 			goto alloc_retry;
   1885 		}
   1886 	} else {
   1887 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1888 		error = wm_setup_legacy(sc);
   1889 		if (error) {
   1890 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1891 			    counts[PCI_INTR_TYPE_INTX]);
   1892 			return;
   1893 		}
   1894 	}
   1895 
   1896 	/*
   1897 	 * Check the function ID (unit number of the chip).
   1898 	 */
   1899 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1900 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1901 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1902 	    || (sc->sc_type == WM_T_82580)
   1903 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1904 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1905 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1906 	else
   1907 		sc->sc_funcid = 0;
   1908 
   1909 	/*
   1910 	 * Determine a few things about the bus we're connected to.
   1911 	 */
   1912 	if (sc->sc_type < WM_T_82543) {
   1913 		/* We don't really know the bus characteristics here. */
   1914 		sc->sc_bus_speed = 33;
   1915 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1916 		/*
   1917 		 * CSA (Communication Streaming Architecture) is about as fast
    1918 		 * as a 32-bit 66MHz PCI bus.
   1919 		 */
   1920 		sc->sc_flags |= WM_F_CSA;
   1921 		sc->sc_bus_speed = 66;
   1922 		aprint_verbose_dev(sc->sc_dev,
   1923 		    "Communication Streaming Architecture\n");
   1924 		if (sc->sc_type == WM_T_82547) {
   1925 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1926 			callout_setfunc(&sc->sc_txfifo_ch,
   1927 					wm_82547_txfifo_stall, sc);
   1928 			aprint_verbose_dev(sc->sc_dev,
   1929 			    "using 82547 Tx FIFO stall work-around\n");
   1930 		}
   1931 	} else if (sc->sc_type >= WM_T_82571) {
   1932 		sc->sc_flags |= WM_F_PCIE;
   1933 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1934 		    && (sc->sc_type != WM_T_ICH10)
   1935 		    && (sc->sc_type != WM_T_PCH)
   1936 		    && (sc->sc_type != WM_T_PCH2)
   1937 		    && (sc->sc_type != WM_T_PCH_LPT)
   1938 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1939 			/* ICH* and PCH* have no PCIe capability registers */
   1940 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1941 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1942 				NULL) == 0)
   1943 				aprint_error_dev(sc->sc_dev,
   1944 				    "unable to find PCIe capability\n");
   1945 		}
   1946 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1947 	} else {
   1948 		reg = CSR_READ(sc, WMREG_STATUS);
   1949 		if (reg & STATUS_BUS64)
   1950 			sc->sc_flags |= WM_F_BUS64;
   1951 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1952 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1953 
   1954 			sc->sc_flags |= WM_F_PCIX;
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1957 				aprint_error_dev(sc->sc_dev,
   1958 				    "unable to find PCIX capability\n");
   1959 			else if (sc->sc_type != WM_T_82545_3 &&
   1960 				 sc->sc_type != WM_T_82546_3) {
   1961 				/*
   1962 				 * Work around a problem caused by the BIOS
   1963 				 * setting the max memory read byte count
   1964 				 * incorrectly.
   1965 				 */
   1966 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1967 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1968 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1969 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1970 
   1971 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1972 				    PCIX_CMD_BYTECNT_SHIFT;
   1973 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1974 				    PCIX_STATUS_MAXB_SHIFT;
   1975 				if (bytecnt > maxb) {
   1976 					aprint_verbose_dev(sc->sc_dev,
   1977 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1978 					    512 << bytecnt, 512 << maxb);
   1979 					pcix_cmd = (pcix_cmd &
   1980 					    ~PCIX_CMD_BYTECNT_MASK) |
   1981 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1982 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1983 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1984 					    pcix_cmd);
   1985 				}
   1986 			}
   1987 		}
   1988 		/*
   1989 		 * The quad port adapter is special; it has a PCIX-PCIX
   1990 		 * bridge on the board, and can run the secondary bus at
   1991 		 * a higher speed.
   1992 		 */
   1993 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1994 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1995 								      : 66;
   1996 		} else if (sc->sc_flags & WM_F_PCIX) {
   1997 			switch (reg & STATUS_PCIXSPD_MASK) {
   1998 			case STATUS_PCIXSPD_50_66:
   1999 				sc->sc_bus_speed = 66;
   2000 				break;
   2001 			case STATUS_PCIXSPD_66_100:
   2002 				sc->sc_bus_speed = 100;
   2003 				break;
   2004 			case STATUS_PCIXSPD_100_133:
   2005 				sc->sc_bus_speed = 133;
   2006 				break;
   2007 			default:
   2008 				aprint_error_dev(sc->sc_dev,
   2009 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2010 				    reg & STATUS_PCIXSPD_MASK);
   2011 				sc->sc_bus_speed = 66;
   2012 				break;
   2013 			}
   2014 		} else
   2015 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2016 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2017 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2018 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2019 	}
   2020 
   2021 	/* clear interesting stat counters */
   2022 	CSR_READ(sc, WMREG_COLC);
   2023 	CSR_READ(sc, WMREG_RXERRC);
   2024 
   2025 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2026 	    || (sc->sc_type >= WM_T_ICH8))
   2027 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2028 	if (sc->sc_type >= WM_T_ICH8)
   2029 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2030 
    2031 	/* Set up NVM parameters and PHY/NVM locking functions */
   2032 	switch (sc->sc_type) {
   2033 	case WM_T_82542_2_0:
   2034 	case WM_T_82542_2_1:
   2035 	case WM_T_82543:
   2036 	case WM_T_82544:
   2037 		/* Microwire */
   2038 		sc->sc_nvm_wordsize = 64;
   2039 		sc->sc_nvm_addrbits = 6;
   2040 		break;
   2041 	case WM_T_82540:
   2042 	case WM_T_82545:
   2043 	case WM_T_82545_3:
   2044 	case WM_T_82546:
   2045 	case WM_T_82546_3:
   2046 		/* Microwire */
   2047 		reg = CSR_READ(sc, WMREG_EECD);
   2048 		if (reg & EECD_EE_SIZE) {
   2049 			sc->sc_nvm_wordsize = 256;
   2050 			sc->sc_nvm_addrbits = 8;
   2051 		} else {
   2052 			sc->sc_nvm_wordsize = 64;
   2053 			sc->sc_nvm_addrbits = 6;
   2054 		}
   2055 		sc->sc_flags |= WM_F_LOCK_EECD;
   2056 		break;
   2057 	case WM_T_82541:
   2058 	case WM_T_82541_2:
   2059 	case WM_T_82547:
   2060 	case WM_T_82547_2:
   2061 		sc->sc_flags |= WM_F_LOCK_EECD;
   2062 		reg = CSR_READ(sc, WMREG_EECD);
   2063 		if (reg & EECD_EE_TYPE) {
   2064 			/* SPI */
   2065 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2066 			wm_nvm_set_addrbits_size_eecd(sc);
   2067 		} else {
   2068 			/* Microwire */
   2069 			if ((reg & EECD_EE_ABITS) != 0) {
   2070 				sc->sc_nvm_wordsize = 256;
   2071 				sc->sc_nvm_addrbits = 8;
   2072 			} else {
   2073 				sc->sc_nvm_wordsize = 64;
   2074 				sc->sc_nvm_addrbits = 6;
   2075 			}
   2076 		}
   2077 		break;
   2078 	case WM_T_82571:
   2079 	case WM_T_82572:
   2080 		/* SPI */
   2081 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2082 		wm_nvm_set_addrbits_size_eecd(sc);
   2083 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2084 		sc->phy.acquire = wm_get_swsm_semaphore;
   2085 		sc->phy.release = wm_put_swsm_semaphore;
   2086 		break;
   2087 	case WM_T_82573:
   2088 	case WM_T_82574:
   2089 	case WM_T_82583:
   2090 		if (sc->sc_type == WM_T_82573) {
   2091 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2092 			sc->phy.acquire = wm_get_swsm_semaphore;
   2093 			sc->phy.release = wm_put_swsm_semaphore;
   2094 		} else {
   2095 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2096 			/* Both PHY and NVM use the same semaphore. */
    2097 			sc->phy.acquire = wm_get_swfwhw_semaphore;
    2098 			sc->phy.release = wm_put_swfwhw_semaphore;
   2101 		}
   2102 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2103 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2104 			sc->sc_nvm_wordsize = 2048;
   2105 		} else {
   2106 			/* SPI */
   2107 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2108 			wm_nvm_set_addrbits_size_eecd(sc);
   2109 		}
   2110 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2111 		break;
   2112 	case WM_T_82575:
   2113 	case WM_T_82576:
   2114 	case WM_T_82580:
   2115 	case WM_T_I350:
   2116 	case WM_T_I354:
   2117 	case WM_T_80003:
   2118 		/* SPI */
   2119 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2120 		wm_nvm_set_addrbits_size_eecd(sc);
   2121 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2122 		    | WM_F_LOCK_SWSM;
   2123 		sc->phy.acquire = wm_get_phy_82575;
   2124 		sc->phy.release = wm_put_phy_82575;
   2125 		break;
   2126 	case WM_T_ICH8:
   2127 	case WM_T_ICH9:
   2128 	case WM_T_ICH10:
   2129 	case WM_T_PCH:
   2130 	case WM_T_PCH2:
   2131 	case WM_T_PCH_LPT:
   2132 		/* FLASH */
   2133 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2134 		sc->sc_nvm_wordsize = 2048;
   2135 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2136 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2137 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2138 			aprint_error_dev(sc->sc_dev,
   2139 			    "can't map FLASH registers\n");
   2140 			goto out;
   2141 		}
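         		/*
         		 * GFPREG describes the NVM region of the flash in
         		 * sector units: the low bits hold the base sector
         		 * and the field at bit 16 the limit.  Convert that
         		 * to a byte base address and a per-bank size in
         		 * 16-bit words (the region holds two banks).
         		 */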
   2142 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2143 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2144 		    ICH_FLASH_SECTOR_SIZE;
   2145 		sc->sc_ich8_flash_bank_size =
   2146 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2147 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2148 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2149 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2150 		sc->sc_flashreg_offset = 0;
   2151 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2152 		sc->phy.release = wm_put_swflag_ich8lan;
   2153 		break;
   2154 	case WM_T_PCH_SPT:
   2155 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2156 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2157 		sc->sc_flasht = sc->sc_st;
   2158 		sc->sc_flashh = sc->sc_sh;
   2159 		sc->sc_ich8_flash_base = 0;
   2160 		sc->sc_nvm_wordsize =
   2161 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2162 			* NVM_SIZE_MULTIPLIER;
   2163 		/* It is size in bytes, we want words */
   2164 		sc->sc_nvm_wordsize /= 2;
   2165 		/* assume 2 banks */
   2166 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2167 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2168 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2169 		sc->phy.release = wm_put_swflag_ich8lan;
   2170 		break;
   2171 	case WM_T_I210:
   2172 	case WM_T_I211:
   2173 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2174 			wm_nvm_set_addrbits_size_eecd(sc);
   2175 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2176 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2177 		} else {
   2178 			sc->sc_nvm_wordsize = INVM_SIZE;
   2179 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2180 		}
   2181 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2182 		sc->phy.acquire = wm_get_phy_82575;
   2183 		sc->phy.release = wm_put_phy_82575;
   2184 		break;
   2185 	default:
   2186 		break;
   2187 	}
   2188 
   2189 	/* Reset the chip to a known state. */
   2190 	wm_reset(sc);
   2191 
   2192 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2193 	switch (sc->sc_type) {
   2194 	case WM_T_82571:
   2195 	case WM_T_82572:
   2196 		reg = CSR_READ(sc, WMREG_SWSM2);
   2197 		if ((reg & SWSM2_LOCK) == 0) {
   2198 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2199 			force_clear_smbi = true;
   2200 		} else
   2201 			force_clear_smbi = false;
   2202 		break;
   2203 	case WM_T_82573:
   2204 	case WM_T_82574:
   2205 	case WM_T_82583:
   2206 		force_clear_smbi = true;
   2207 		break;
   2208 	default:
   2209 		force_clear_smbi = false;
   2210 		break;
   2211 	}
   2212 	if (force_clear_smbi) {
   2213 		reg = CSR_READ(sc, WMREG_SWSM);
   2214 		if ((reg & SWSM_SMBI) != 0)
   2215 			aprint_error_dev(sc->sc_dev,
   2216 			    "Please update the Bootagent\n");
   2217 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2218 	}
   2219 
   2220 	/*
   2221 	 * Defer printing the EEPROM type until after verifying the checksum
    2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * that no EEPROM is attached.
   2224 	 */
   2225 	/*
   2226 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2227 	 * this for later, so we can fail future reads from the EEPROM.
   2228 	 */
   2229 	if (wm_nvm_validate_checksum(sc)) {
   2230 		/*
    2231 		 * Check again, because some PCI-e parts fail the
    2232 		 * first check when the link is still in a sleep state.
   2233 		 */
   2234 		if (wm_nvm_validate_checksum(sc))
   2235 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2236 	}
   2237 
   2238 	/* Set device properties (macflags) */
   2239 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2240 
   2241 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2242 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2243 	else {
   2244 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2245 		    sc->sc_nvm_wordsize);
   2246 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2247 			aprint_verbose("iNVM");
   2248 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2249 			aprint_verbose("FLASH(HW)");
   2250 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2251 			aprint_verbose("FLASH");
   2252 		else {
   2253 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2254 				eetype = "SPI";
   2255 			else
   2256 				eetype = "MicroWire";
   2257 			aprint_verbose("(%d address bits) %s EEPROM",
   2258 			    sc->sc_nvm_addrbits, eetype);
   2259 		}
   2260 	}
   2261 	wm_nvm_version(sc);
   2262 	aprint_verbose("\n");
   2263 
   2264 	/* Check for I21[01] PLL workaround */
   2265 	if (sc->sc_type == WM_T_I210)
   2266 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2267 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2268 		/* NVM image release 3.25 has a workaround */
   2269 		if ((sc->sc_nvm_ver_major < 3)
   2270 		    || ((sc->sc_nvm_ver_major == 3)
   2271 			&& (sc->sc_nvm_ver_minor < 25))) {
   2272 			aprint_verbose_dev(sc->sc_dev,
   2273 			    "ROM image version %d.%d is older than 3.25\n",
   2274 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2275 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2276 		}
   2277 	}
   2278 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2279 		wm_pll_workaround_i210(sc);
   2280 
   2281 	wm_get_wakeup(sc);
   2282 
   2283 	/* Non-AMT based hardware can now take control from firmware */
   2284 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2285 		wm_get_hw_control(sc);
   2286 
   2287 	/*
   2288 	 * Read the Ethernet address from the EEPROM, if not first found
   2289 	 * in device properties.
   2290 	 */
   2291 	ea = prop_dictionary_get(dict, "mac-address");
   2292 	if (ea != NULL) {
   2293 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2294 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2295 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2296 	} else {
   2297 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2298 			aprint_error_dev(sc->sc_dev,
   2299 			    "unable to read Ethernet address\n");
   2300 			goto out;
   2301 		}
   2302 	}
   2303 
   2304 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2305 	    ether_sprintf(enaddr));
   2306 
   2307 	/*
   2308 	 * Read the config info from the EEPROM, and set up various
   2309 	 * bits in the control registers based on their contents.
   2310 	 */
   2311 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2312 	if (pn != NULL) {
   2313 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2314 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2315 	} else {
   2316 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2317 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2318 			goto out;
   2319 		}
   2320 	}
   2321 
   2322 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2323 	if (pn != NULL) {
   2324 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2325 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2326 	} else {
   2327 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2328 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2329 			goto out;
   2330 		}
   2331 	}
   2332 
   2333 	/* check for WM_F_WOL */
   2334 	switch (sc->sc_type) {
   2335 	case WM_T_82542_2_0:
   2336 	case WM_T_82542_2_1:
   2337 	case WM_T_82543:
   2338 		/* dummy? */
   2339 		eeprom_data = 0;
   2340 		apme_mask = NVM_CFG3_APME;
   2341 		break;
   2342 	case WM_T_82544:
   2343 		apme_mask = NVM_CFG2_82544_APM_EN;
   2344 		eeprom_data = cfg2;
   2345 		break;
   2346 	case WM_T_82546:
   2347 	case WM_T_82546_3:
   2348 	case WM_T_82571:
   2349 	case WM_T_82572:
   2350 	case WM_T_82573:
   2351 	case WM_T_82574:
   2352 	case WM_T_82583:
   2353 	case WM_T_80003:
   2354 	default:
   2355 		apme_mask = NVM_CFG3_APME;
   2356 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2357 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2358 		break;
   2359 	case WM_T_82575:
   2360 	case WM_T_82576:
   2361 	case WM_T_82580:
   2362 	case WM_T_I350:
   2363 	case WM_T_I354: /* XXX ok? */
   2364 	case WM_T_ICH8:
   2365 	case WM_T_ICH9:
   2366 	case WM_T_ICH10:
   2367 	case WM_T_PCH:
   2368 	case WM_T_PCH2:
   2369 	case WM_T_PCH_LPT:
   2370 	case WM_T_PCH_SPT:
   2371 		/* XXX The funcid should be checked on some devices */
   2372 		apme_mask = WUC_APME;
   2373 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2374 		break;
   2375 	}
   2376 
    2377 	/* Set the WM_F_WOL flag if the APME bit read above is set */
   2378 	if ((eeprom_data & apme_mask) != 0)
   2379 		sc->sc_flags |= WM_F_WOL;
   2380 #ifdef WM_DEBUG
   2381 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2382 		printf("WOL\n");
   2383 #endif
   2384 
   2385 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2386 		/* Check NVM for autonegotiation */
   2387 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2388 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2389 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2390 		}
   2391 	}
   2392 
   2393 	/*
    2394 	 * XXX need special handling for some multiple-port cards
    2395 	 * to disable a particular port.
   2396 	 */
   2397 
   2398 	if (sc->sc_type >= WM_T_82544) {
   2399 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2400 		if (pn != NULL) {
   2401 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2402 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2403 		} else {
   2404 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2405 				aprint_error_dev(sc->sc_dev,
   2406 				    "unable to read SWDPIN\n");
   2407 				goto out;
   2408 			}
   2409 		}
   2410 	}
   2411 
   2412 	if (cfg1 & NVM_CFG1_ILOS)
   2413 		sc->sc_ctrl |= CTRL_ILOS;
   2414 
   2415 	/*
   2416 	 * XXX
    2417 	 * This code isn't correct, because pins 2 and 3 are located
    2418 	 * in different positions on newer chips. Check all the datasheets.
    2419 	 *
    2420 	 * Until this is resolved, only apply it to chips up to the 82580.
   2421 	 */
   2422 	if (sc->sc_type <= WM_T_82580) {
   2423 		if (sc->sc_type >= WM_T_82544) {
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPIO_SHIFT;
   2427 			sc->sc_ctrl |=
   2428 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2429 			    CTRL_SWDPINS_SHIFT;
   2430 		} else {
   2431 			sc->sc_ctrl |=
   2432 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2433 			    CTRL_SWDPIO_SHIFT;
   2434 		}
   2435 	}
   2436 
    2437 	/* XXX Does this also apply to chips other than the 82580? */
   2438 	if (sc->sc_type == WM_T_82580) {
   2439 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2440 		if (nvmword & __BIT(13))
   2441 			sc->sc_ctrl |= CTRL_ILOS;
   2442 	}
   2443 
   2444 #if 0
   2445 	if (sc->sc_type >= WM_T_82544) {
   2446 		if (cfg1 & NVM_CFG1_IPS0)
   2447 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2448 		if (cfg1 & NVM_CFG1_IPS1)
   2449 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPIO_SHIFT;
   2453 		sc->sc_ctrl_ext |=
   2454 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2455 		    CTRL_EXT_SWDPINS_SHIFT;
   2456 	} else {
   2457 		sc->sc_ctrl_ext |=
   2458 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2459 		    CTRL_EXT_SWDPIO_SHIFT;
   2460 	}
   2461 #endif
   2462 
   2463 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2464 #if 0
   2465 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2466 #endif
   2467 
   2468 	if (sc->sc_type == WM_T_PCH) {
   2469 		uint16_t val;
   2470 
   2471 		/* Save the NVM K1 bit setting */
   2472 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2473 
   2474 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2475 			sc->sc_nvm_k1_enabled = 1;
   2476 		else
   2477 			sc->sc_nvm_k1_enabled = 0;
   2478 	}
   2479 
   2480 	/*
    2481 	 * Determine whether we're in TBI, GMII or SGMII mode, and
    2482 	 * initialize the media structures accordingly.
   2483 	 */
   2484 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2485 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2486 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2487 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2488 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2489 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2490 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2491 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2492 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2493 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2494 	    || (sc->sc_type == WM_T_I211)) {
   2495 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2496 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2497 		switch (link_mode) {
   2498 		case CTRL_EXT_LINK_MODE_1000KX:
   2499 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2500 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2501 			break;
   2502 		case CTRL_EXT_LINK_MODE_SGMII:
   2503 			if (wm_sgmii_uses_mdio(sc)) {
   2504 				aprint_verbose_dev(sc->sc_dev,
   2505 				    "SGMII(MDIO)\n");
   2506 				sc->sc_flags |= WM_F_SGMII;
   2507 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2508 				break;
   2509 			}
   2510 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2511 			/*FALLTHROUGH*/
   2512 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2513 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2514 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2515 				if (link_mode
   2516 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2518 					sc->sc_flags |= WM_F_SGMII;
   2519 				} else {
   2520 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2521 					aprint_verbose_dev(sc->sc_dev,
   2522 					    "SERDES\n");
   2523 				}
   2524 				break;
   2525 			}
   2526 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2527 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2528 
   2529 			/* Change current link mode setting */
   2530 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2531 			switch (sc->sc_mediatype) {
   2532 			case WM_MEDIATYPE_COPPER:
   2533 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2534 				break;
   2535 			case WM_MEDIATYPE_SERDES:
   2536 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2537 				break;
   2538 			default:
   2539 				break;
   2540 			}
   2541 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2542 			break;
   2543 		case CTRL_EXT_LINK_MODE_GMII:
   2544 		default:
   2545 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2546 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2547 			break;
   2548 		}
   2549 
    2551 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2552 			reg |= CTRL_EXT_I2C_ENA;
    2553 		else
    2554 			reg &= ~CTRL_EXT_I2C_ENA;
   2555 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2556 
   2557 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2558 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2559 		else
   2560 			wm_tbi_mediainit(sc);
   2561 	} else if (sc->sc_type < WM_T_82543 ||
   2562 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2563 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2564 			aprint_error_dev(sc->sc_dev,
   2565 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2566 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2567 		}
   2568 		wm_tbi_mediainit(sc);
   2569 	} else {
   2570 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2571 			aprint_error_dev(sc->sc_dev,
   2572 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2573 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2574 		}
   2575 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2576 	}
   2577 
   2578 	ifp = &sc->sc_ethercom.ec_if;
   2579 	xname = device_xname(sc->sc_dev);
   2580 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2581 	ifp->if_softc = sc;
   2582 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2583 #ifdef WM_MPSAFE
   2584 	ifp->if_extflags = IFEF_START_MPSAFE;
   2585 #endif
   2586 	ifp->if_ioctl = wm_ioctl;
   2587 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2588 		ifp->if_start = wm_nq_start;
   2589 		/*
    2590 		 * When the number of CPUs is one and the controller can use
    2591 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2592 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2593 		 * the other for link status changes.
   2594 		 * In this situation, wm_nq_transmit() is disadvantageous
   2595 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2596 		 */
   2597 		if (wm_is_using_multiqueue(sc))
   2598 			ifp->if_transmit = wm_nq_transmit;
   2599 	} else {
   2600 		ifp->if_start = wm_start;
   2601 		/*
    2602 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2603 		 */
   2604 		if (wm_is_using_multiqueue(sc))
   2605 			ifp->if_transmit = wm_transmit;
   2606 	}
   2607 	ifp->if_watchdog = wm_watchdog;
   2608 	ifp->if_init = wm_init;
   2609 	ifp->if_stop = wm_stop;
   2610 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2611 	IFQ_SET_READY(&ifp->if_snd);
   2612 
    2613 	/* Check for jumbo frame support */
   2614 	switch (sc->sc_type) {
   2615 	case WM_T_82573:
   2616 		/* XXX limited to 9234 if ASPM is disabled */
   2617 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2618 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2619 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2620 		break;
   2621 	case WM_T_82571:
   2622 	case WM_T_82572:
   2623 	case WM_T_82574:
   2624 	case WM_T_82575:
   2625 	case WM_T_82576:
   2626 	case WM_T_82580:
   2627 	case WM_T_I350:
   2628 	case WM_T_I354: /* XXXX ok? */
   2629 	case WM_T_I210:
   2630 	case WM_T_I211:
   2631 	case WM_T_80003:
   2632 	case WM_T_ICH9:
   2633 	case WM_T_ICH10:
   2634 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2635 	case WM_T_PCH_LPT:
   2636 	case WM_T_PCH_SPT:
   2637 		/* XXX limited to 9234 */
   2638 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2639 		break;
   2640 	case WM_T_PCH:
   2641 		/* XXX limited to 4096 */
   2642 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2643 		break;
   2644 	case WM_T_82542_2_0:
   2645 	case WM_T_82542_2_1:
   2646 	case WM_T_82583:
   2647 	case WM_T_ICH8:
   2648 		/* No support for jumbo frame */
   2649 		break;
   2650 	default:
   2651 		/* ETHER_MAX_LEN_JUMBO */
   2652 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2653 		break;
   2654 	}
   2655 
    2656 	/* If we're an i82543 or greater, we can support VLANs. */
   2657 	if (sc->sc_type >= WM_T_82543)
   2658 		sc->sc_ethercom.ec_capabilities |=
   2659 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2660 
   2661 	/*
    2662 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2663 	 * on i82543 and later.
   2664 	 */
   2665 	if (sc->sc_type >= WM_T_82543) {
   2666 		ifp->if_capabilities |=
   2667 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2668 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2669 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2670 		    IFCAP_CSUM_TCPv6_Tx |
   2671 		    IFCAP_CSUM_UDPv6_Tx;
   2672 	}
   2673 
   2674 	/*
   2675 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2676 	 *
   2677 	 *	82541GI (8086:1076) ... no
   2678 	 *	82572EI (8086:10b9) ... yes
   2679 	 */
   2680 	if (sc->sc_type >= WM_T_82571) {
   2681 		ifp->if_capabilities |=
   2682 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2683 	}
   2684 
   2685 	/*
    2686 	 * If we're an i82544 or greater (except i82547), we can do
   2687 	 * TCP segmentation offload.
   2688 	 */
   2689 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2690 		ifp->if_capabilities |= IFCAP_TSOv4;
   2691 	}
   2692 
   2693 	if (sc->sc_type >= WM_T_82571) {
   2694 		ifp->if_capabilities |= IFCAP_TSOv6;
   2695 	}
   2696 
   2697 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2698 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2699 
   2700 #ifdef WM_MPSAFE
   2701 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2702 #else
   2703 	sc->sc_core_lock = NULL;
   2704 #endif
   2705 
   2706 	/* Attach the interface. */
   2707 	if_initialize(ifp);
   2708 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2709 	ether_ifattach(ifp, enaddr);
   2710 	if_register(ifp);
   2711 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2712 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2713 			  RND_FLAG_DEFAULT);
   2714 
   2715 #ifdef WM_EVENT_COUNTERS
   2716 	/* Attach event counters. */
   2717 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2718 	    NULL, xname, "linkintr");
   2719 
   2720 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2721 	    NULL, xname, "tx_xoff");
   2722 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2723 	    NULL, xname, "tx_xon");
   2724 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2725 	    NULL, xname, "rx_xoff");
   2726 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2727 	    NULL, xname, "rx_xon");
   2728 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2729 	    NULL, xname, "rx_macctl");
   2730 #endif /* WM_EVENT_COUNTERS */
   2731 
   2732 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2733 		pmf_class_network_register(self, ifp);
   2734 	else
   2735 		aprint_error_dev(self, "couldn't establish power handler\n");
   2736 
   2737 	sc->sc_flags |= WM_F_ATTACHED;
   2738  out:
   2739 	return;
   2740 }
   2741 
   2742 /* The detach function (ca_detach) */
   2743 static int
   2744 wm_detach(device_t self, int flags __unused)
   2745 {
   2746 	struct wm_softc *sc = device_private(self);
   2747 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2748 	int i;
   2749 
   2750 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2751 		return 0;
   2752 
   2753 	/* Stop the interface. Callouts are stopped in it. */
   2754 	wm_stop(ifp, 1);
   2755 
   2756 	pmf_device_deregister(self);
   2757 
   2758 #ifdef WM_EVENT_COUNTERS
   2759 	evcnt_detach(&sc->sc_ev_linkintr);
   2760 
   2761 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2762 	evcnt_detach(&sc->sc_ev_tx_xon);
   2763 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2764 	evcnt_detach(&sc->sc_ev_rx_xon);
   2765 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2766 #endif /* WM_EVENT_COUNTERS */
   2767 
   2768 	/* Tell the firmware about the release */
   2769 	WM_CORE_LOCK(sc);
   2770 	wm_release_manageability(sc);
   2771 	wm_release_hw_control(sc);
   2772 	wm_enable_wakeup(sc);
   2773 	WM_CORE_UNLOCK(sc);
   2774 
   2775 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2776 
   2777 	/* Delete all remaining media. */
   2778 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2779 
   2780 	ether_ifdetach(ifp);
   2781 	if_detach(ifp);
   2782 	if_percpuq_destroy(sc->sc_ipq);
   2783 
   2784 	/* Unload RX dmamaps and free mbufs */
   2785 	for (i = 0; i < sc->sc_nqueues; i++) {
   2786 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2787 		mutex_enter(rxq->rxq_lock);
   2788 		wm_rxdrain(rxq);
   2789 		mutex_exit(rxq->rxq_lock);
   2790 	}
   2791 	/* Must unlock here */
   2792 
   2793 	/* Disestablish the interrupt handler */
   2794 	for (i = 0; i < sc->sc_nintrs; i++) {
   2795 		if (sc->sc_ihs[i] != NULL) {
   2796 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2797 			sc->sc_ihs[i] = NULL;
   2798 		}
   2799 	}
   2800 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2801 
   2802 	wm_free_txrx_queues(sc);
   2803 
   2804 	/* Unmap the registers */
   2805 	if (sc->sc_ss) {
   2806 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2807 		sc->sc_ss = 0;
   2808 	}
   2809 	if (sc->sc_ios) {
   2810 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2811 		sc->sc_ios = 0;
   2812 	}
   2813 	if (sc->sc_flashs) {
   2814 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2815 		sc->sc_flashs = 0;
   2816 	}
   2817 
   2818 	if (sc->sc_core_lock)
   2819 		mutex_obj_free(sc->sc_core_lock);
   2820 	if (sc->sc_ich_phymtx)
   2821 		mutex_obj_free(sc->sc_ich_phymtx);
   2822 	if (sc->sc_ich_nvmmtx)
   2823 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2824 
   2825 	return 0;
   2826 }
   2827 
   2828 static bool
   2829 wm_suspend(device_t self, const pmf_qual_t *qual)
   2830 {
   2831 	struct wm_softc *sc = device_private(self);
   2832 
   2833 	wm_release_manageability(sc);
   2834 	wm_release_hw_control(sc);
   2835 	wm_enable_wakeup(sc);
   2836 
   2837 	return true;
   2838 }
   2839 
   2840 static bool
   2841 wm_resume(device_t self, const pmf_qual_t *qual)
   2842 {
   2843 	struct wm_softc *sc = device_private(self);
   2844 
   2845 	wm_init_manageability(sc);
   2846 
   2847 	return true;
   2848 }
   2849 
   2850 /*
   2851  * wm_watchdog:		[ifnet interface function]
   2852  *
   2853  *	Watchdog timer handler.
   2854  */
   2855 static void
   2856 wm_watchdog(struct ifnet *ifp)
   2857 {
   2858 	int qid;
   2859 	struct wm_softc *sc = ifp->if_softc;
   2860 
   2861 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2862 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2863 
   2864 		wm_watchdog_txq(ifp, txq);
   2865 	}
   2866 
   2867 	/* Reset the interface. */
   2868 	(void) wm_init(ifp);
   2869 
   2870 	/*
    2871 	 * Some upper-layer processing, e.g. ALTQ or a single-CPU system,
    2872 	 * may still call ifp->if_start().
   2873 	 */
   2874 	/* Try to get more packets going. */
   2875 	ifp->if_start(ifp);
   2876 }
   2877 
   2878 static void
   2879 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2880 {
   2881 	struct wm_softc *sc = ifp->if_softc;
   2882 
   2883 	/*
   2884 	 * Since we're using delayed interrupts, sweep up
   2885 	 * before we report an error.
   2886 	 */
   2887 	mutex_enter(txq->txq_lock);
   2888 	wm_txeof(sc, txq);
   2889 	mutex_exit(txq->txq_lock);
   2890 
   2891 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2892 #ifdef WM_DEBUG
   2893 		int i, j;
   2894 		struct wm_txsoft *txs;
   2895 #endif
   2896 		log(LOG_ERR,
   2897 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2898 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2899 		    txq->txq_next);
   2900 		ifp->if_oerrors++;
   2901 #ifdef WM_DEBUG
    2902 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2903 		    i = WM_NEXTTXS(txq, i)) {
    2904 			txs = &txq->txq_soft[i];
    2905 			printf("txs %d tx %d -> %d\n",
    2906 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2907 			for (j = txs->txs_firstdesc; ;
    2908 			    j = WM_NEXTTX(txq, j)) {
    2909 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2910 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2911 				printf("\t %#08x%08x\n",
    2912 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2913 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2914 				if (j == txs->txs_lastdesc)
    2915 					break;
    2916 			}
    2917 		}
   2918 #endif
   2919 	}
   2920 }
   2921 
   2922 /*
   2923  * wm_tick:
   2924  *
   2925  *	One second timer, used to check link status, sweep up
   2926  *	completed transmit jobs, etc.
   2927  */
   2928 static void
   2929 wm_tick(void *arg)
   2930 {
   2931 	struct wm_softc *sc = arg;
   2932 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2933 #ifndef WM_MPSAFE
   2934 	int s = splnet();
   2935 #endif
   2936 
   2937 	WM_CORE_LOCK(sc);
   2938 
   2939 	if (sc->sc_core_stopping)
   2940 		goto out;
   2941 
   2942 	if (sc->sc_type >= WM_T_82542_2_1) {
   2943 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2947 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2948 	}
   2949 
   2950 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2951 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2952 	    + CSR_READ(sc, WMREG_CRCERRS)
   2953 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2954 	    + CSR_READ(sc, WMREG_SYMERRC)
   2955 	    + CSR_READ(sc, WMREG_RXERRC)
   2956 	    + CSR_READ(sc, WMREG_SEC)
   2957 	    + CSR_READ(sc, WMREG_CEXTERR)
   2958 	    + CSR_READ(sc, WMREG_RLEC);
    2959 	/*
    2960 	 * WMREG_RNBC is incremented when there are no available buffers
    2961 	 * in host memory. It does not count dropped packets, because the
    2962 	 * Ethernet controller can still receive packets in that case as
    2963 	 * long as there is space in the PHY's FIFO.
    2964 	 *
    2965 	 * To track the WMREG_RNBC count, use a dedicated EVCNT instead
    2966 	 * of if_iqdrops.
    2967 	 */
   2968 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
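         	/*
         	 * A minimal sketch of such a counter (illustrative only; the
         	 * sc_ev_rx_nobuf member is hypothetical and would have to be
         	 * attached with evcnt_attach_dynamic() like the other event
         	 * counters in wm_attach()):
         	 */
         #if 0
         	WM_EVCNT_ADD(&sc->sc_ev_rx_nobuf, CSR_READ(sc, WMREG_RNBC));
         #endif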
   2969 
   2970 	if (sc->sc_flags & WM_F_HAS_MII)
   2971 		mii_tick(&sc->sc_mii);
   2972 	else if ((sc->sc_type >= WM_T_82575)
   2973 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2974 		wm_serdes_tick(sc);
   2975 	else
   2976 		wm_tbi_tick(sc);
   2977 
   2978 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2979 out:
   2980 	WM_CORE_UNLOCK(sc);
   2981 #ifndef WM_MPSAFE
   2982 	splx(s);
   2983 #endif
   2984 }
   2985 
   2986 static int
   2987 wm_ifflags_cb(struct ethercom *ec)
   2988 {
   2989 	struct ifnet *ifp = &ec->ec_if;
   2990 	struct wm_softc *sc = ifp->if_softc;
   2991 	int rc = 0;
   2992 
   2993 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2994 		device_xname(sc->sc_dev), __func__));
   2995 
   2996 	WM_CORE_LOCK(sc);
   2997 
   2998 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2999 	sc->sc_if_flags = ifp->if_flags;
   3000 
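         	/*
         	 * If any flag other than the unchangeable ones (IFF_CANTCHANGE)
         	 * or IFF_DEBUG changed, a full reinitialization is required;
         	 * promiscuous/allmulti changes alone are handled below by just
         	 * reprogramming the receive filter.
         	 */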
   3001 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3002 		rc = ENETRESET;
   3003 		goto out;
   3004 	}
   3005 
   3006 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3007 		wm_set_filter(sc);
   3008 
   3009 	wm_set_vlan(sc);
   3010 
   3011 out:
   3012 	WM_CORE_UNLOCK(sc);
   3013 
   3014 	return rc;
   3015 }
   3016 
   3017 /*
   3018  * wm_ioctl:		[ifnet interface function]
   3019  *
   3020  *	Handle control requests from the operator.
   3021  */
   3022 static int
   3023 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3024 {
   3025 	struct wm_softc *sc = ifp->if_softc;
   3026 	struct ifreq *ifr = (struct ifreq *) data;
   3027 	struct ifaddr *ifa = (struct ifaddr *)data;
   3028 	struct sockaddr_dl *sdl;
   3029 	int s, error;
   3030 
   3031 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3032 		device_xname(sc->sc_dev), __func__));
   3033 
   3034 #ifndef WM_MPSAFE
   3035 	s = splnet();
   3036 #endif
   3037 	switch (cmd) {
   3038 	case SIOCSIFMEDIA:
   3039 	case SIOCGIFMEDIA:
   3040 		WM_CORE_LOCK(sc);
   3041 		/* Flow control requires full-duplex mode. */
   3042 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3043 		    (ifr->ifr_media & IFM_FDX) == 0)
   3044 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3045 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3046 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3047 				/* We can do both TXPAUSE and RXPAUSE. */
   3048 				ifr->ifr_media |=
   3049 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3050 			}
   3051 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3052 		}
   3053 		WM_CORE_UNLOCK(sc);
   3054 #ifdef WM_MPSAFE
   3055 		s = splnet();
   3056 #endif
   3057 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3058 #ifdef WM_MPSAFE
   3059 		splx(s);
   3060 #endif
   3061 		break;
   3062 	case SIOCINITIFADDR:
   3063 		WM_CORE_LOCK(sc);
   3064 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3065 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3066 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3067 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3068 			/* unicast address is first multicast entry */
   3069 			wm_set_filter(sc);
   3070 			error = 0;
   3071 			WM_CORE_UNLOCK(sc);
   3072 			break;
   3073 		}
   3074 		WM_CORE_UNLOCK(sc);
   3075 		/*FALLTHROUGH*/
   3076 	default:
   3077 #ifdef WM_MPSAFE
   3078 		s = splnet();
   3079 #endif
   3080 		/* It may call wm_start, so unlock here */
   3081 		error = ether_ioctl(ifp, cmd, data);
   3082 #ifdef WM_MPSAFE
   3083 		splx(s);
   3084 #endif
   3085 		if (error != ENETRESET)
   3086 			break;
   3087 
   3088 		error = 0;
   3089 
   3090 		if (cmd == SIOCSIFCAP) {
   3091 			error = (*ifp->if_init)(ifp);
   3092 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3093 			;
   3094 		else if (ifp->if_flags & IFF_RUNNING) {
   3095 			/*
   3096 			 * Multicast list has changed; set the hardware filter
   3097 			 * accordingly.
   3098 			 */
   3099 			WM_CORE_LOCK(sc);
   3100 			wm_set_filter(sc);
   3101 			WM_CORE_UNLOCK(sc);
   3102 		}
   3103 		break;
   3104 	}
   3105 
   3106 #ifndef WM_MPSAFE
   3107 	splx(s);
   3108 #endif
   3109 	return error;
   3110 }
   3111 
   3112 /* MAC address related */
   3113 
   3114 /*
    3115  * Get the offset of the MAC address and return it.
    3116  * If an error occurs, offset 0 is used.
   3117  */
   3118 static uint16_t
   3119 wm_check_alt_mac_addr(struct wm_softc *sc)
   3120 {
   3121 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3122 	uint16_t offset = NVM_OFF_MACADDR;
   3123 
   3124 	/* Try to read alternative MAC address pointer */
   3125 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3126 		return 0;
   3127 
    3128 	/* Check whether the pointer is valid. */
   3129 	if ((offset == 0x0000) || (offset == 0xffff))
   3130 		return 0;
   3131 
   3132 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
    3133 	/*
    3134 	 * Check whether the alternative MAC address is valid.
    3135 	 * Some cards have a non-0xffff pointer but don't actually
    3136 	 * use an alternative MAC address.
    3137 	 *
    3138 	 * Here we check that the multicast (group) bit is not set.
    3139 	 */
   3140 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3141 		if (((myea[0] & 0xff) & 0x01) == 0)
   3142 			return offset; /* Found */
   3143 
   3144 	/* Not found */
   3145 	return 0;
   3146 }
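         /*
          * Illustration of the check above (not driver code): NVM words are
          * little-endian, so (myea[0] & 0xff) is the first octet of the MAC
          * address on the wire, and bit 0 of that octet is the IEEE group
          * (multicast) bit. A valid station address must have it clear, e.g.
          * myea[0] == 0x1100 gives first octet 0x00 (accepted), while
          * myea[0] == 0x1101 gives 0x01 with the group bit set (rejected).
          */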
   3147 
   3148 static int
   3149 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3150 {
   3151 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3152 	uint16_t offset = NVM_OFF_MACADDR;
   3153 	int do_invert = 0;
   3154 
   3155 	switch (sc->sc_type) {
   3156 	case WM_T_82580:
   3157 	case WM_T_I350:
   3158 	case WM_T_I354:
   3159 		/* EEPROM Top Level Partitioning */
   3160 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3161 		break;
   3162 	case WM_T_82571:
   3163 	case WM_T_82575:
   3164 	case WM_T_82576:
   3165 	case WM_T_80003:
   3166 	case WM_T_I210:
   3167 	case WM_T_I211:
   3168 		offset = wm_check_alt_mac_addr(sc);
   3169 		if (offset == 0)
   3170 			if ((sc->sc_funcid & 0x01) == 1)
   3171 				do_invert = 1;
   3172 		break;
   3173 	default:
   3174 		if ((sc->sc_funcid & 0x01) == 1)
   3175 			do_invert = 1;
   3176 		break;
   3177 	}
   3178 
   3179 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3180 		goto bad;
   3181 
   3182 	enaddr[0] = myea[0] & 0xff;
   3183 	enaddr[1] = myea[0] >> 8;
   3184 	enaddr[2] = myea[1] & 0xff;
   3185 	enaddr[3] = myea[1] >> 8;
   3186 	enaddr[4] = myea[2] & 0xff;
   3187 	enaddr[5] = myea[2] >> 8;
   3188 
   3189 	/*
   3190 	 * Toggle the LSB of the MAC address on the second port
   3191 	 * of some dual port cards.
   3192 	 */
   3193 	if (do_invert != 0)
   3194 		enaddr[5] ^= 1;
   3195 
   3196 	return 0;
   3197 
   3198  bad:
   3199 	return -1;
   3200 }
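         /*
          * A standalone sketch of the word-to-byte unpacking above (illustrative
          * only, not driver code): with NVM words being little-endian,
          * myea[] = { 0x1100, 0x3322, 0x5544 } yields the MAC address
          * 00:11:22:33:44:55.
          */
         #if 0
         #include <stdio.h>
         #include <stdint.h>
         
         int
         main(void)
         {
         	uint16_t myea[3] = { 0x1100, 0x3322, 0x5544 };
         	uint8_t enaddr[6];
         	int i;
         
         	for (i = 0; i < 3; i++) {
         		/* The low byte of each word comes first on the wire. */
         		enaddr[i * 2] = myea[i] & 0xff;
         		enaddr[i * 2 + 1] = myea[i] >> 8;
         	}
         	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", enaddr[0], enaddr[1],
         	    enaddr[2], enaddr[3], enaddr[4], enaddr[5]);
         	return 0;
         }
         #endif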
   3201 
   3202 /*
   3203  * wm_set_ral:
   3204  *
    3205  *	Set an entry in the receive address list.
   3206  */
   3207 static void
   3208 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3209 {
   3210 	uint32_t ral_lo, ral_hi;
   3211 
   3212 	if (enaddr != NULL) {
   3213 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3214 		    (enaddr[3] << 24);
   3215 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3216 		ral_hi |= RAL_AV;
   3217 	} else {
   3218 		ral_lo = 0;
   3219 		ral_hi = 0;
   3220 	}
   3221 
   3222 	if (sc->sc_type >= WM_T_82544) {
   3223 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3224 		    ral_lo);
   3225 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3226 		    ral_hi);
   3227 	} else {
   3228 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3229 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3230 	}
   3231 }
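         /*
          * Example of the packing above: for enaddr 00:11:22:33:44:55,
          * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV, i.e. the first
          * octet on the wire ends up in the least significant byte of RAL_LO
          * and RAL_AV marks the entry as valid.
          */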
   3232 
   3233 /*
   3234  * wm_mchash:
   3235  *
    3236  *	Compute the hash of the multicast address for the multicast
    3237  *	filter (4096-bit, or 1024-bit on ICH/PCH variants).
   3238  */
   3239 static uint32_t
   3240 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3241 {
   3242 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3243 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3244 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3245 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3246 	uint32_t hash;
   3247 
   3248 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3249 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3250 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3251 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3252 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3253 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3254 		return (hash & 0x3ff);
   3255 	}
   3256 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3257 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3258 
   3259 	return (hash & 0xfff);
   3260 }
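         /*
          * Worked example (non-ICH path, sc_mchash_type == 0): for the multicast
          * address 01:00:5e:00:00:01, enaddr[4] = 0x00 and enaddr[5] = 0x01, so
          * hash = (0x00 >> 4) | (0x01 << 4) = 0x010. In wm_set_filter() this
          * selects MTA register hash >> 5 = 0 and bit hash & 0x1f = 16.
          */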
   3261 
   3262 /*
   3263  * wm_set_filter:
   3264  *
   3265  *	Set up the receive filter.
   3266  */
   3267 static void
   3268 wm_set_filter(struct wm_softc *sc)
   3269 {
   3270 	struct ethercom *ec = &sc->sc_ethercom;
   3271 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3272 	struct ether_multi *enm;
   3273 	struct ether_multistep step;
   3274 	bus_addr_t mta_reg;
   3275 	uint32_t hash, reg, bit;
   3276 	int i, size, ralmax;
   3277 
   3278 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3279 		device_xname(sc->sc_dev), __func__));
   3280 
   3281 	if (sc->sc_type >= WM_T_82544)
   3282 		mta_reg = WMREG_CORDOVA_MTA;
   3283 	else
   3284 		mta_reg = WMREG_MTA;
   3285 
   3286 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3287 
   3288 	if (ifp->if_flags & IFF_BROADCAST)
   3289 		sc->sc_rctl |= RCTL_BAM;
   3290 	if (ifp->if_flags & IFF_PROMISC) {
   3291 		sc->sc_rctl |= RCTL_UPE;
   3292 		goto allmulti;
   3293 	}
   3294 
   3295 	/*
   3296 	 * Set the station address in the first RAL slot, and
   3297 	 * clear the remaining slots.
   3298 	 */
   3299 	if (sc->sc_type == WM_T_ICH8)
    3300 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3301 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3302 	    || (sc->sc_type == WM_T_PCH))
   3303 		size = WM_RAL_TABSIZE_ICH8;
   3304 	else if (sc->sc_type == WM_T_PCH2)
   3305 		size = WM_RAL_TABSIZE_PCH2;
    3306 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3307 		size = WM_RAL_TABSIZE_PCH_LPT;
   3308 	else if (sc->sc_type == WM_T_82575)
   3309 		size = WM_RAL_TABSIZE_82575;
   3310 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3311 		size = WM_RAL_TABSIZE_82576;
   3312 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3313 		size = WM_RAL_TABSIZE_I350;
   3314 	else
   3315 		size = WM_RAL_TABSIZE;
   3316 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3317 
   3318 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3319 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3320 		switch (i) {
   3321 		case 0:
   3322 			/* We can use all entries */
   3323 			ralmax = size;
   3324 			break;
   3325 		case 1:
   3326 			/* Only RAR[0] */
   3327 			ralmax = 1;
   3328 			break;
   3329 		default:
   3330 			/* available SHRA + RAR[0] */
   3331 			ralmax = i + 1;
   3332 		}
   3333 	} else
   3334 		ralmax = size;
   3335 	for (i = 1; i < size; i++) {
   3336 		if (i < ralmax)
   3337 			wm_set_ral(sc, NULL, i);
   3338 	}
   3339 
   3340 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3341 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3342 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3343 	    || (sc->sc_type == WM_T_PCH_SPT))
   3344 		size = WM_ICH8_MC_TABSIZE;
   3345 	else
   3346 		size = WM_MC_TABSIZE;
   3347 	/* Clear out the multicast table. */
   3348 	for (i = 0; i < size; i++)
   3349 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3350 
   3351 	ETHER_LOCK(ec);
   3352 	ETHER_FIRST_MULTI(step, ec, enm);
   3353 	while (enm != NULL) {
   3354 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3355 			ETHER_UNLOCK(ec);
   3356 			/*
   3357 			 * We must listen to a range of multicast addresses.
   3358 			 * For now, just accept all multicasts, rather than
   3359 			 * trying to set only those filter bits needed to match
   3360 			 * the range.  (At this time, the only use of address
   3361 			 * ranges is for IP multicast routing, for which the
   3362 			 * range is big enough to require all bits set.)
   3363 			 */
   3364 			goto allmulti;
   3365 		}
   3366 
   3367 		hash = wm_mchash(sc, enm->enm_addrlo);
   3368 
   3369 		reg = (hash >> 5);
   3370 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3371 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3372 		    || (sc->sc_type == WM_T_PCH2)
   3373 		    || (sc->sc_type == WM_T_PCH_LPT)
   3374 		    || (sc->sc_type == WM_T_PCH_SPT))
   3375 			reg &= 0x1f;
   3376 		else
   3377 			reg &= 0x7f;
   3378 		bit = hash & 0x1f;
   3379 
   3380 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3381 		hash |= 1U << bit;
   3382 
   3383 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3384 			/*
   3385 			 * 82544 Errata 9: Certain register cannot be written
   3386 			 * with particular alignments in PCI-X bus operation
   3387 			 * (FCAH, MTA and VFTA).
   3388 			 */
   3389 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3390 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3391 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3392 		} else
   3393 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3394 
   3395 		ETHER_NEXT_MULTI(step, enm);
   3396 	}
   3397 	ETHER_UNLOCK(ec);
   3398 
   3399 	ifp->if_flags &= ~IFF_ALLMULTI;
   3400 	goto setit;
   3401 
   3402  allmulti:
   3403 	ifp->if_flags |= IFF_ALLMULTI;
   3404 	sc->sc_rctl |= RCTL_MPE;
   3405 
   3406  setit:
   3407 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3408 }
   3409 
   3410 /* Reset and init related */
   3411 
   3412 static void
   3413 wm_set_vlan(struct wm_softc *sc)
   3414 {
   3415 
   3416 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3417 		device_xname(sc->sc_dev), __func__));
   3418 
   3419 	/* Deal with VLAN enables. */
   3420 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3421 		sc->sc_ctrl |= CTRL_VME;
   3422 	else
   3423 		sc->sc_ctrl &= ~CTRL_VME;
   3424 
   3425 	/* Write the control registers. */
   3426 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3427 }
   3428 
   3429 static void
   3430 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3431 {
   3432 	uint32_t gcr;
   3433 	pcireg_t ctrl2;
   3434 
   3435 	gcr = CSR_READ(sc, WMREG_GCR);
   3436 
   3437 	/* Only take action if timeout value is defaulted to 0 */
   3438 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3439 		goto out;
   3440 
   3441 	if ((gcr & GCR_CAP_VER2) == 0) {
   3442 		gcr |= GCR_CMPL_TMOUT_10MS;
   3443 		goto out;
   3444 	}
   3445 
   3446 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3447 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3448 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3449 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3450 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3451 
   3452 out:
   3453 	/* Disable completion timeout resend */
   3454 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3455 
   3456 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3457 }
   3458 
   3459 void
   3460 wm_get_auto_rd_done(struct wm_softc *sc)
   3461 {
   3462 	int i;
   3463 
   3464 	/* wait for eeprom to reload */
   3465 	switch (sc->sc_type) {
   3466 	case WM_T_82571:
   3467 	case WM_T_82572:
   3468 	case WM_T_82573:
   3469 	case WM_T_82574:
   3470 	case WM_T_82583:
   3471 	case WM_T_82575:
   3472 	case WM_T_82576:
   3473 	case WM_T_82580:
   3474 	case WM_T_I350:
   3475 	case WM_T_I354:
   3476 	case WM_T_I210:
   3477 	case WM_T_I211:
   3478 	case WM_T_80003:
   3479 	case WM_T_ICH8:
   3480 	case WM_T_ICH9:
   3481 		for (i = 0; i < 10; i++) {
   3482 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3483 				break;
   3484 			delay(1000);
   3485 		}
   3486 		if (i == 10) {
   3487 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3488 			    "complete\n", device_xname(sc->sc_dev));
   3489 		}
   3490 		break;
   3491 	default:
   3492 		break;
   3493 	}
   3494 }
   3495 
   3496 void
   3497 wm_lan_init_done(struct wm_softc *sc)
   3498 {
   3499 	uint32_t reg = 0;
   3500 	int i;
   3501 
   3502 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3503 		device_xname(sc->sc_dev), __func__));
   3504 
   3505 	/* Wait for eeprom to reload */
   3506 	switch (sc->sc_type) {
   3507 	case WM_T_ICH10:
   3508 	case WM_T_PCH:
   3509 	case WM_T_PCH2:
   3510 	case WM_T_PCH_LPT:
   3511 	case WM_T_PCH_SPT:
   3512 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3513 			reg = CSR_READ(sc, WMREG_STATUS);
   3514 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3515 				break;
   3516 			delay(100);
   3517 		}
   3518 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3519 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3520 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3521 		}
   3522 		break;
   3523 	default:
   3524 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3525 		    __func__);
   3526 		break;
   3527 	}
   3528 
   3529 	reg &= ~STATUS_LAN_INIT_DONE;
   3530 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3531 }
   3532 
   3533 void
   3534 wm_get_cfg_done(struct wm_softc *sc)
   3535 {
   3536 	int mask;
   3537 	uint32_t reg;
   3538 	int i;
   3539 
   3540 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3541 		device_xname(sc->sc_dev), __func__));
   3542 
   3543 	/* Wait for eeprom to reload */
   3544 	switch (sc->sc_type) {
   3545 	case WM_T_82542_2_0:
   3546 	case WM_T_82542_2_1:
   3547 		/* null */
   3548 		break;
   3549 	case WM_T_82543:
   3550 	case WM_T_82544:
   3551 	case WM_T_82540:
   3552 	case WM_T_82545:
   3553 	case WM_T_82545_3:
   3554 	case WM_T_82546:
   3555 	case WM_T_82546_3:
   3556 	case WM_T_82541:
   3557 	case WM_T_82541_2:
   3558 	case WM_T_82547:
   3559 	case WM_T_82547_2:
   3560 	case WM_T_82573:
   3561 	case WM_T_82574:
   3562 	case WM_T_82583:
   3563 		/* generic */
   3564 		delay(10*1000);
   3565 		break;
   3566 	case WM_T_80003:
   3567 	case WM_T_82571:
   3568 	case WM_T_82572:
   3569 	case WM_T_82575:
   3570 	case WM_T_82576:
   3571 	case WM_T_82580:
   3572 	case WM_T_I350:
   3573 	case WM_T_I354:
   3574 	case WM_T_I210:
   3575 	case WM_T_I211:
   3576 		if (sc->sc_type == WM_T_82571) {
   3577 			/* Only 82571 shares port 0 */
   3578 			mask = EEMNGCTL_CFGDONE_0;
   3579 		} else
   3580 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3581 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3582 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3583 				break;
   3584 			delay(1000);
   3585 		}
   3586 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3587 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3588 				device_xname(sc->sc_dev), __func__));
   3589 		}
   3590 		break;
   3591 	case WM_T_ICH8:
   3592 	case WM_T_ICH9:
   3593 	case WM_T_ICH10:
   3594 	case WM_T_PCH:
   3595 	case WM_T_PCH2:
   3596 	case WM_T_PCH_LPT:
   3597 	case WM_T_PCH_SPT:
   3598 		delay(10*1000);
   3599 		if (sc->sc_type >= WM_T_ICH10)
   3600 			wm_lan_init_done(sc);
   3601 		else
   3602 			wm_get_auto_rd_done(sc);
   3603 
   3604 		reg = CSR_READ(sc, WMREG_STATUS);
   3605 		if ((reg & STATUS_PHYRA) != 0)
   3606 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3607 		break;
   3608 	default:
   3609 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3610 		    __func__);
   3611 		break;
   3612 	}
   3613 }
   3614 
   3615 /* Init hardware bits */
   3616 void
   3617 wm_initialize_hardware_bits(struct wm_softc *sc)
   3618 {
   3619 	uint32_t tarc0, tarc1, reg;
   3620 
   3621 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3622 		device_xname(sc->sc_dev), __func__));
   3623 
   3624 	/* For 82571 variant, 80003 and ICHs */
   3625 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3626 	    || (sc->sc_type >= WM_T_80003)) {
   3627 
   3628 		/* Transmit Descriptor Control 0 */
   3629 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3630 		reg |= TXDCTL_COUNT_DESC;
   3631 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3632 
   3633 		/* Transmit Descriptor Control 1 */
   3634 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3635 		reg |= TXDCTL_COUNT_DESC;
   3636 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3637 
   3638 		/* TARC0 */
   3639 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3640 		switch (sc->sc_type) {
   3641 		case WM_T_82571:
   3642 		case WM_T_82572:
   3643 		case WM_T_82573:
   3644 		case WM_T_82574:
   3645 		case WM_T_82583:
   3646 		case WM_T_80003:
   3647 			/* Clear bits 30..27 */
   3648 			tarc0 &= ~__BITS(30, 27);
   3649 			break;
   3650 		default:
   3651 			break;
   3652 		}
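         		/*
         		 * Note: __BITS(h, l) builds a mask with bits l..h set
         		 * inclusive, so __BITS(30, 27) above is 0x78000000 and
         		 * the "&= ~" clears exactly bits 27-30.
         		 */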
   3653 
   3654 		switch (sc->sc_type) {
   3655 		case WM_T_82571:
   3656 		case WM_T_82572:
   3657 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3658 
   3659 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3660 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3661 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3662 			/* 8257[12] Errata No.7 */
    3663 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3664 
   3665 			/* TARC1 bit 28 */
   3666 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3667 				tarc1 &= ~__BIT(28);
   3668 			else
   3669 				tarc1 |= __BIT(28);
   3670 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3671 
   3672 			/*
   3673 			 * 8257[12] Errata No.13
    3674 			 * Disable Dynamic Clock Gating.
   3675 			 */
   3676 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3677 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3678 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3679 			break;
   3680 		case WM_T_82573:
   3681 		case WM_T_82574:
   3682 		case WM_T_82583:
   3683 			if ((sc->sc_type == WM_T_82574)
   3684 			    || (sc->sc_type == WM_T_82583))
   3685 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3686 
   3687 			/* Extended Device Control */
   3688 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3689 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3690 			reg |= __BIT(22);	/* Set bit 22 */
   3691 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3692 
   3693 			/* Device Control */
   3694 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3695 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3696 
   3697 			/* PCIe Control Register */
   3698 			/*
   3699 			 * 82573 Errata (unknown).
   3700 			 *
   3701 			 * 82574 Errata 25 and 82583 Errata 12
   3702 			 * "Dropped Rx Packets":
    3703 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3704 			 */
   3705 			reg = CSR_READ(sc, WMREG_GCR);
   3706 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3707 			CSR_WRITE(sc, WMREG_GCR, reg);
   3708 
   3709 			if ((sc->sc_type == WM_T_82574)
   3710 			    || (sc->sc_type == WM_T_82583)) {
   3711 				/*
   3712 				 * Document says this bit must be set for
   3713 				 * proper operation.
   3714 				 */
   3715 				reg = CSR_READ(sc, WMREG_GCR);
   3716 				reg |= __BIT(22);
   3717 				CSR_WRITE(sc, WMREG_GCR, reg);
   3718 
   3719 				/*
    3720 				 * Apply a workaround for a hardware erratum
    3721 				 * documented in the errata docs. It fixes an
    3722 				 * issue where error-prone or unreliable PCIe
    3723 				 * completions occur, particularly with ASPM
    3724 				 * enabled. Without the fix, the issue can
    3725 				 * cause Tx timeouts.
   3726 				 */
   3727 				reg = CSR_READ(sc, WMREG_GCR2);
   3728 				reg |= __BIT(0);
   3729 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3730 			}
   3731 			break;
   3732 		case WM_T_80003:
   3733 			/* TARC0 */
   3734 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3735 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3736 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3737 
   3738 			/* TARC1 bit 28 */
   3739 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3740 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3741 				tarc1 &= ~__BIT(28);
   3742 			else
   3743 				tarc1 |= __BIT(28);
   3744 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3745 			break;
   3746 		case WM_T_ICH8:
   3747 		case WM_T_ICH9:
   3748 		case WM_T_ICH10:
   3749 		case WM_T_PCH:
   3750 		case WM_T_PCH2:
   3751 		case WM_T_PCH_LPT:
   3752 		case WM_T_PCH_SPT:
   3753 			/* TARC0 */
   3754 			if ((sc->sc_type == WM_T_ICH8)
   3755 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3756 				/* Set TARC0 bits 29 and 28 */
   3757 				tarc0 |= __BITS(29, 28);
   3758 			}
   3759 			/* Set TARC0 bits 23,24,26,27 */
   3760 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3761 
   3762 			/* CTRL_EXT */
   3763 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3764 			reg |= __BIT(22);	/* Set bit 22 */
   3765 			/*
   3766 			 * Enable PHY low-power state when MAC is at D3
   3767 			 * w/o WoL
   3768 			 */
   3769 			if (sc->sc_type >= WM_T_PCH)
   3770 				reg |= CTRL_EXT_PHYPDEN;
   3771 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3772 
   3773 			/* TARC1 */
   3774 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3775 			/* bit 28 */
   3776 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3777 				tarc1 &= ~__BIT(28);
   3778 			else
   3779 				tarc1 |= __BIT(28);
   3780 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3781 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3782 
   3783 			/* Device Status */
   3784 			if (sc->sc_type == WM_T_ICH8) {
   3785 				reg = CSR_READ(sc, WMREG_STATUS);
   3786 				reg &= ~__BIT(31);
   3787 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3788 
   3789 			}
   3790 
   3791 			/* IOSFPC */
   3792 			if (sc->sc_type == WM_T_PCH_SPT) {
   3793 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3794 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3795 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3796 			}
   3797 			/*
   3798 			 * Work-around descriptor data corruption issue during
   3799 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3800 			 * capability.
   3801 			 */
   3802 			reg = CSR_READ(sc, WMREG_RFCTL);
   3803 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3804 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3805 			break;
   3806 		default:
   3807 			break;
   3808 		}
   3809 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3810 
   3811 		switch (sc->sc_type) {
   3812 		/*
   3813 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3814 		 * Avoid RSS Hash Value bug.
   3815 		 */
   3816 		case WM_T_82571:
   3817 		case WM_T_82572:
   3818 		case WM_T_82573:
   3819 		case WM_T_80003:
   3820 		case WM_T_ICH8:
   3821 			reg = CSR_READ(sc, WMREG_RFCTL);
    3822 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3823 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3824 			break;
   3825 		case WM_T_82574:
    3826 			/* Use extended Rx descriptors. */
   3827 			reg = CSR_READ(sc, WMREG_RFCTL);
   3828 			reg |= WMREG_RFCTL_EXSTEN;
   3829 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3830 			break;
   3831 		default:
   3832 			break;
   3833 		}
   3834 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3835 		/*
   3836 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3837 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3838 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3839 		 * Correctly by the Device"
   3840 		 *
   3841 		 * I354(C2000) Errata AVR53:
   3842 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3843 		 * Hang"
   3844 		 */
   3845 		reg = CSR_READ(sc, WMREG_RFCTL);
   3846 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3847 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3848 	}
   3849 }
   3850 
   3851 static uint32_t
   3852 wm_rxpbs_adjust_82580(uint32_t val)
   3853 {
   3854 	uint32_t rv = 0;
   3855 
   3856 	if (val < __arraycount(wm_82580_rxpbs_table))
   3857 		rv = wm_82580_rxpbs_table[val];
   3858 
   3859 	return rv;
   3860 }
   3861 
   3862 /*
   3863  * wm_reset_phy:
   3864  *
    3865  *	Generic PHY reset function.
   3866  *	Same as e1000_phy_hw_reset_generic()
   3867  */
   3868 static void
   3869 wm_reset_phy(struct wm_softc *sc)
   3870 {
   3871 	uint32_t reg;
   3872 
   3873 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3874 		device_xname(sc->sc_dev), __func__));
   3875 	if (wm_phy_resetisblocked(sc))
   3876 		return;
   3877 
   3878 	sc->phy.acquire(sc);
   3879 
   3880 	reg = CSR_READ(sc, WMREG_CTRL);
   3881 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3882 	CSR_WRITE_FLUSH(sc);
   3883 
   3884 	delay(sc->phy.reset_delay_us);
   3885 
   3886 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3887 	CSR_WRITE_FLUSH(sc);
   3888 
   3889 	delay(150);
   3890 
   3891 	sc->phy.release(sc);
   3892 
   3893 	wm_get_cfg_done(sc);
   3894 }
   3895 
   3896 static void
   3897 wm_flush_desc_rings(struct wm_softc *sc)
   3898 {
   3899 	pcireg_t preg;
   3900 	uint32_t reg;
   3901 	int nexttx;
   3902 
   3903 	/* First, disable MULR fix in FEXTNVM11 */
   3904 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3905 	reg |= FEXTNVM11_DIS_MULRFIX;
   3906 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3907 
   3908 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3909 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3910 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3911 		struct wm_txqueue *txq;
   3912 		wiseman_txdesc_t *txd;
   3913 
   3914 		/* TX */
   3915 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3916 		    device_xname(sc->sc_dev), preg, reg);
   3917 		reg = CSR_READ(sc, WMREG_TCTL);
   3918 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3919 
   3920 		txq = &sc->sc_queue[0].wmq_txq;
   3921 		nexttx = txq->txq_next;
   3922 		txd = &txq->txq_descs[nexttx];
   3923 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3924 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3925 		txd->wtx_fields.wtxu_status = 0;
   3926 		txd->wtx_fields.wtxu_options = 0;
   3927 		txd->wtx_fields.wtxu_vlan = 0;
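         		/*
         		 * The descriptor built above is a dummy: a single
         		 * 512-byte transmit with only IFCS set, queued purely so
         		 * the hardware advances and drains the stuck descriptor
         		 * ring. No mbuf is ever associated with it.
         		 */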
   3928 
   3929 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3930 			BUS_SPACE_BARRIER_WRITE);
   3931 
   3932 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3933 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3934 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3935 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3936 		delay(250);
   3937 	}
   3938 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3939 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3940 		uint32_t rctl;
   3941 
   3942 		/* RX */
   3943 		printf("%s: Need RX flush (reg = %08x)\n",
   3944 		    device_xname(sc->sc_dev), preg);
   3945 		rctl = CSR_READ(sc, WMREG_RCTL);
   3946 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3947 		CSR_WRITE_FLUSH(sc);
   3948 		delay(150);
   3949 
   3950 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3951 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3952 		reg &= 0xffffc000;
   3953 		/*
   3954 		 * update thresholds: prefetch threshold to 31, host threshold
   3955 		 * to 1 and make sure the granularity is "descriptors" and not
   3956 		 * "cache lines"
   3957 		 */
   3958 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3959 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3960 
   3961 		/*
   3962 		 * momentarily enable the RX ring for the changes to take
   3963 		 * effect
   3964 		 */
   3965 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3966 		CSR_WRITE_FLUSH(sc);
   3967 		delay(150);
   3968 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3969 	}
   3970 }
   3971 
   3972 /*
   3973  * wm_reset:
   3974  *
   3975  *	Reset the i82542 chip.
   3976  */
   3977 static void
   3978 wm_reset(struct wm_softc *sc)
   3979 {
   3980 	int phy_reset = 0;
   3981 	int i, error = 0;
   3982 	uint32_t reg;
   3983 
   3984 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3985 		device_xname(sc->sc_dev), __func__));
   3986 	KASSERT(sc->sc_type != 0);
   3987 
   3988 	/*
   3989 	 * Allocate on-chip memory according to the MTU size.
   3990 	 * The Packet Buffer Allocation register must be written
   3991 	 * before the chip is reset.
   3992 	 */
   3993 	switch (sc->sc_type) {
   3994 	case WM_T_82547:
   3995 	case WM_T_82547_2:
   3996 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3997 		    PBA_22K : PBA_30K;
   3998 		for (i = 0; i < sc->sc_nqueues; i++) {
   3999 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4000 			txq->txq_fifo_head = 0;
   4001 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4002 			txq->txq_fifo_size =
   4003 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4004 			txq->txq_fifo_stall = 0;
   4005 		}
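         		/*
         		 * Example of the split above, assuming the 82547's 40KB
         		 * packet buffer (PBA_40K): with a standard MTU,
         		 * sc_pba = PBA_30K leaves 30KB for receive, so the
         		 * transmit FIFO is the remaining
         		 * (PBA_40K - PBA_30K) = 10KB placed right after the
         		 * receive area.
         		 */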
   4006 		break;
   4007 	case WM_T_82571:
   4008 	case WM_T_82572:
   4009 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4010 	case WM_T_80003:
   4011 		sc->sc_pba = PBA_32K;
   4012 		break;
   4013 	case WM_T_82573:
   4014 		sc->sc_pba = PBA_12K;
   4015 		break;
   4016 	case WM_T_82574:
   4017 	case WM_T_82583:
   4018 		sc->sc_pba = PBA_20K;
   4019 		break;
   4020 	case WM_T_82576:
   4021 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4022 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4023 		break;
   4024 	case WM_T_82580:
   4025 	case WM_T_I350:
   4026 	case WM_T_I354:
   4027 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4028 		break;
   4029 	case WM_T_I210:
   4030 	case WM_T_I211:
   4031 		sc->sc_pba = PBA_34K;
   4032 		break;
   4033 	case WM_T_ICH8:
   4034 		/* Workaround for a bit corruption issue in FIFO memory */
   4035 		sc->sc_pba = PBA_8K;
   4036 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4037 		break;
   4038 	case WM_T_ICH9:
   4039 	case WM_T_ICH10:
   4040 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4041 		    PBA_14K : PBA_10K;
   4042 		break;
   4043 	case WM_T_PCH:
   4044 	case WM_T_PCH2:
   4045 	case WM_T_PCH_LPT:
   4046 	case WM_T_PCH_SPT:
   4047 		sc->sc_pba = PBA_26K;
   4048 		break;
   4049 	default:
   4050 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4051 		    PBA_40K : PBA_48K;
   4052 		break;
   4053 	}
   4054 	/*
    4055 	 * Only old or non-multiqueue devices have the PBA register.
    4056 	 * XXX Need special handling for 82575.
   4057 	 */
   4058 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4059 	    || (sc->sc_type == WM_T_82575))
   4060 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4061 
   4062 	/* Prevent the PCI-E bus from sticking */
   4063 	if (sc->sc_flags & WM_F_PCIE) {
   4064 		int timeout = 800;
   4065 
   4066 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4067 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4068 
   4069 		while (timeout--) {
   4070 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4071 			    == 0)
   4072 				break;
   4073 			delay(100);
   4074 		}
   4075 		if (timeout == 0)
   4076 			device_printf(sc->sc_dev,
   4077 			    "failed to disable busmastering\n");
   4078 	}
   4079 
   4080 	/* Set the completion timeout for interface */
   4081 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4082 	    || (sc->sc_type == WM_T_82580)
   4083 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4084 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4085 		wm_set_pcie_completion_timeout(sc);
   4086 
   4087 	/* Clear interrupt */
   4088 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4089 	if (wm_is_using_msix(sc)) {
   4090 		if (sc->sc_type != WM_T_82574) {
   4091 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4092 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4093 		} else {
   4094 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4095 		}
   4096 	}
   4097 
   4098 	/* Stop the transmit and receive processes. */
   4099 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4100 	sc->sc_rctl &= ~RCTL_EN;
   4101 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4102 	CSR_WRITE_FLUSH(sc);
   4103 
   4104 	/* XXX set_tbi_sbp_82543() */
   4105 
   4106 	delay(10*1000);
   4107 
   4108 	/* Must acquire the MDIO ownership before MAC reset */
   4109 	switch (sc->sc_type) {
   4110 	case WM_T_82573:
   4111 	case WM_T_82574:
   4112 	case WM_T_82583:
   4113 		error = wm_get_hw_semaphore_82573(sc);
   4114 		break;
   4115 	default:
   4116 		break;
   4117 	}
   4118 
   4119 	/*
   4120 	 * 82541 Errata 29? & 82547 Errata 28?
   4121 	 * See also the description about PHY_RST bit in CTRL register
   4122 	 * in 8254x_GBe_SDM.pdf.
   4123 	 */
   4124 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4125 		CSR_WRITE(sc, WMREG_CTRL,
   4126 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4127 		CSR_WRITE_FLUSH(sc);
   4128 		delay(5000);
   4129 	}
   4130 
   4131 	switch (sc->sc_type) {
   4132 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4133 	case WM_T_82541:
   4134 	case WM_T_82541_2:
   4135 	case WM_T_82547:
   4136 	case WM_T_82547_2:
   4137 		/*
   4138 		 * On some chipsets, a reset through a memory-mapped write
   4139 		 * cycle can cause the chip to reset before completing the
    4140 		 * write cycle.  This causes a major headache that can be
   4141 		 * avoided by issuing the reset via indirect register writes
   4142 		 * through I/O space.
   4143 		 *
   4144 		 * So, if we successfully mapped the I/O BAR at attach time,
   4145 		 * use that.  Otherwise, try our luck with a memory-mapped
   4146 		 * reset.
   4147 		 */
   4148 		if (sc->sc_flags & WM_F_IOH_VALID)
   4149 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4150 		else
   4151 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4152 		break;
   4153 	case WM_T_82545_3:
   4154 	case WM_T_82546_3:
   4155 		/* Use the shadow control register on these chips. */
   4156 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4157 		break;
   4158 	case WM_T_80003:
   4159 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4160 		sc->phy.acquire(sc);
   4161 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4162 		sc->phy.release(sc);
   4163 		break;
   4164 	case WM_T_ICH8:
   4165 	case WM_T_ICH9:
   4166 	case WM_T_ICH10:
   4167 	case WM_T_PCH:
   4168 	case WM_T_PCH2:
   4169 	case WM_T_PCH_LPT:
   4170 	case WM_T_PCH_SPT:
   4171 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4172 		if (wm_phy_resetisblocked(sc) == false) {
   4173 			/*
   4174 			 * Gate automatic PHY configuration by hardware on
   4175 			 * non-managed 82579
   4176 			 */
   4177 			if ((sc->sc_type == WM_T_PCH2)
   4178 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4179 				== 0))
   4180 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4181 
   4182 			reg |= CTRL_PHY_RESET;
   4183 			phy_reset = 1;
   4184 		} else
   4185 			printf("XXX reset is blocked!!!\n");
   4186 		sc->phy.acquire(sc);
   4187 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4188 		/* Don't insert a completion barrier when reset */
   4189 		delay(20*1000);
   4190 		mutex_exit(sc->sc_ich_phymtx);
   4191 		break;
   4192 	case WM_T_82580:
   4193 	case WM_T_I350:
   4194 	case WM_T_I354:
   4195 	case WM_T_I210:
   4196 	case WM_T_I211:
   4197 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4198 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4199 			CSR_WRITE_FLUSH(sc);
   4200 		delay(5000);
   4201 		break;
   4202 	case WM_T_82542_2_0:
   4203 	case WM_T_82542_2_1:
   4204 	case WM_T_82543:
   4205 	case WM_T_82540:
   4206 	case WM_T_82545:
   4207 	case WM_T_82546:
   4208 	case WM_T_82571:
   4209 	case WM_T_82572:
   4210 	case WM_T_82573:
   4211 	case WM_T_82574:
   4212 	case WM_T_82575:
   4213 	case WM_T_82576:
   4214 	case WM_T_82583:
   4215 	default:
   4216 		/* Everything else can safely use the documented method. */
   4217 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4218 		break;
   4219 	}
   4220 
   4221 	/* Must release the MDIO ownership after MAC reset */
   4222 	switch (sc->sc_type) {
   4223 	case WM_T_82573:
   4224 	case WM_T_82574:
   4225 	case WM_T_82583:
   4226 		if (error == 0)
   4227 			wm_put_hw_semaphore_82573(sc);
   4228 		break;
   4229 	default:
   4230 		break;
   4231 	}
   4232 
   4233 	if (phy_reset != 0)
   4234 		wm_get_cfg_done(sc);
   4235 
   4236 	/* reload EEPROM */
   4237 	switch (sc->sc_type) {
   4238 	case WM_T_82542_2_0:
   4239 	case WM_T_82542_2_1:
   4240 	case WM_T_82543:
   4241 	case WM_T_82544:
   4242 		delay(10);
   4243 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4244 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4245 		CSR_WRITE_FLUSH(sc);
   4246 		delay(2000);
   4247 		break;
   4248 	case WM_T_82540:
   4249 	case WM_T_82545:
   4250 	case WM_T_82545_3:
   4251 	case WM_T_82546:
   4252 	case WM_T_82546_3:
   4253 		delay(5*1000);
   4254 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4255 		break;
   4256 	case WM_T_82541:
   4257 	case WM_T_82541_2:
   4258 	case WM_T_82547:
   4259 	case WM_T_82547_2:
   4260 		delay(20000);
   4261 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4262 		break;
   4263 	case WM_T_82571:
   4264 	case WM_T_82572:
   4265 	case WM_T_82573:
   4266 	case WM_T_82574:
   4267 	case WM_T_82583:
   4268 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4269 			delay(10);
   4270 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4271 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4272 			CSR_WRITE_FLUSH(sc);
   4273 		}
   4274 		/* check EECD_EE_AUTORD */
   4275 		wm_get_auto_rd_done(sc);
   4276 		/*
    4277 		 * PHY configuration from the NVM starts only after
    4278 		 * EECD_AUTO_RD is set.
   4279 		 */
   4280 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4281 		    || (sc->sc_type == WM_T_82583))
   4282 			delay(25*1000);
   4283 		break;
   4284 	case WM_T_82575:
   4285 	case WM_T_82576:
   4286 	case WM_T_82580:
   4287 	case WM_T_I350:
   4288 	case WM_T_I354:
   4289 	case WM_T_I210:
   4290 	case WM_T_I211:
   4291 	case WM_T_80003:
   4292 		/* check EECD_EE_AUTORD */
   4293 		wm_get_auto_rd_done(sc);
   4294 		break;
   4295 	case WM_T_ICH8:
   4296 	case WM_T_ICH9:
   4297 	case WM_T_ICH10:
   4298 	case WM_T_PCH:
   4299 	case WM_T_PCH2:
   4300 	case WM_T_PCH_LPT:
   4301 	case WM_T_PCH_SPT:
   4302 		break;
   4303 	default:
   4304 		panic("%s: unknown type\n", __func__);
   4305 	}
   4306 
   4307 	/* Check whether EEPROM is present or not */
   4308 	switch (sc->sc_type) {
   4309 	case WM_T_82575:
   4310 	case WM_T_82576:
   4311 	case WM_T_82580:
   4312 	case WM_T_I350:
   4313 	case WM_T_I354:
   4314 	case WM_T_ICH8:
   4315 	case WM_T_ICH9:
   4316 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4317 			/* Not found */
   4318 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4319 			if (sc->sc_type == WM_T_82575)
   4320 				wm_reset_init_script_82575(sc);
   4321 		}
   4322 		break;
   4323 	default:
   4324 		break;
   4325 	}
   4326 
   4327 	if ((sc->sc_type == WM_T_82580)
   4328 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4329 		/* clear global device reset status bit */
   4330 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4331 	}
   4332 
   4333 	/* Clear any pending interrupt events. */
   4334 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4335 	reg = CSR_READ(sc, WMREG_ICR);
   4336 	if (wm_is_using_msix(sc)) {
   4337 		if (sc->sc_type != WM_T_82574) {
   4338 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4339 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4340 		} else
   4341 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4342 	}
   4343 
   4344 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4345 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4346 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4347 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4348 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4349 		reg |= KABGTXD_BGSQLBIAS;
   4350 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4351 	}
   4352 
   4353 	/* reload sc_ctrl */
   4354 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4355 
   4356 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4357 		wm_set_eee_i350(sc);
   4358 
   4359 	/* Clear the host wakeup bit after lcd reset */
   4360 	if (sc->sc_type >= WM_T_PCH) {
   4361 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4362 		    BM_PORT_GEN_CFG);
   4363 		reg &= ~BM_WUC_HOST_WU_BIT;
   4364 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4365 		    BM_PORT_GEN_CFG, reg);
   4366 	}
   4367 
   4368 	/*
   4369 	 * For PCH, this write will make sure that any noise will be detected
   4370 	 * as a CRC error and be dropped rather than show up as a bad packet
    4371 	 * to the DMA engine.
   4372 	 */
   4373 	if (sc->sc_type == WM_T_PCH)
   4374 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4375 
   4376 	if (sc->sc_type >= WM_T_82544)
   4377 		CSR_WRITE(sc, WMREG_WUC, 0);
   4378 
   4379 	wm_reset_mdicnfg_82580(sc);
   4380 
   4381 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4382 		wm_pll_workaround_i210(sc);
   4383 }
   4384 
   4385 /*
   4386  * wm_add_rxbuf:
   4387  *
    4388  *	Add a receive buffer to the indicated descriptor.
   4389  */
   4390 static int
   4391 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4392 {
   4393 	struct wm_softc *sc = rxq->rxq_sc;
   4394 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4395 	struct mbuf *m;
   4396 	int error;
   4397 
   4398 	KASSERT(mutex_owned(rxq->rxq_lock));
   4399 
   4400 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4401 	if (m == NULL)
   4402 		return ENOBUFS;
   4403 
   4404 	MCLGET(m, M_DONTWAIT);
   4405 	if ((m->m_flags & M_EXT) == 0) {
   4406 		m_freem(m);
   4407 		return ENOBUFS;
   4408 	}
   4409 
   4410 	if (rxs->rxs_mbuf != NULL)
   4411 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4412 
   4413 	rxs->rxs_mbuf = m;
   4414 
   4415 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4416 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4417 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4418 	if (error) {
   4419 		/* XXX XXX XXX */
   4420 		aprint_error_dev(sc->sc_dev,
   4421 		    "unable to load rx DMA map %d, error = %d\n",
   4422 		    idx, error);
   4423 		panic("wm_add_rxbuf");
   4424 	}
   4425 
   4426 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4427 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4428 
   4429 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4430 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4431 			wm_init_rxdesc(rxq, idx);
   4432 	} else
   4433 		wm_init_rxdesc(rxq, idx);
   4434 
   4435 	return 0;
   4436 }
   4437 
   4438 /*
   4439  * wm_rxdrain:
   4440  *
   4441  *	Drain the receive queue.
   4442  */
   4443 static void
   4444 wm_rxdrain(struct wm_rxqueue *rxq)
   4445 {
   4446 	struct wm_softc *sc = rxq->rxq_sc;
   4447 	struct wm_rxsoft *rxs;
   4448 	int i;
   4449 
   4450 	KASSERT(mutex_owned(rxq->rxq_lock));
   4451 
   4452 	for (i = 0; i < WM_NRXDESC; i++) {
   4453 		rxs = &rxq->rxq_soft[i];
   4454 		if (rxs->rxs_mbuf != NULL) {
   4455 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4456 			m_freem(rxs->rxs_mbuf);
   4457 			rxs->rxs_mbuf = NULL;
   4458 		}
   4459 	}
   4460 }
   4461 
   4462 
   4463 /*
   4464  * XXX copy from FreeBSD's sys/net/rss_config.c
   4465  */
   4466 /*
   4467  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4468  * effectiveness may be limited by algorithm choice and available entropy
   4469  * during the boot.
   4470  *
   4471  * XXXRW: And that we don't randomize it yet!
   4472  *
   4473  * This is the default Microsoft RSS specification key which is also
   4474  * the Chelsio T5 firmware default key.
   4475  */
   4476 #define RSS_KEYSIZE 40
   4477 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4478 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4479 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4480 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4481 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4482 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4483 };
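         /*
          * The 40-byte key above fills the ten 32-bit RSSRK registers written
          * in wm_init_rss() below, four bytes per register.
          */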
   4484 
   4485 /*
   4486  * Caller must pass an array of size sizeof(rss_key).
   4487  *
   4488  * XXX
    4489  * As if_ixgbe may also use this function, it should not be
    4490  * an if_wm-specific function.
   4491  */
   4492 static void
   4493 wm_rss_getkey(uint8_t *key)
   4494 {
   4495 
   4496 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4497 }
   4498 
   4499 /*
   4500  * Setup registers for RSS.
   4501  *
    4502  * XXX VMDq is not supported yet.
   4503  */
   4504 static void
   4505 wm_init_rss(struct wm_softc *sc)
   4506 {
   4507 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4508 	int i;
   4509 
   4510 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4511 
   4512 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4513 		int qid, reta_ent;
   4514 
   4515 		qid  = i % sc->sc_nqueues;
    4516 		switch (sc->sc_type) {
   4517 		case WM_T_82574:
   4518 			reta_ent = __SHIFTIN(qid,
   4519 			    RETA_ENT_QINDEX_MASK_82574);
   4520 			break;
   4521 		case WM_T_82575:
   4522 			reta_ent = __SHIFTIN(qid,
   4523 			    RETA_ENT_QINDEX1_MASK_82575);
   4524 			break;
   4525 		default:
   4526 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4527 			break;
   4528 		}
   4529 
   4530 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4531 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4532 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4533 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4534 	}
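         	/*
         	 * The loop above spreads the queues round-robin over the
         	 * redirection table: with sc_nqueues == 4, RETA entries
         	 * 0, 1, 2, ... map to queues 0, 1, 2, 3, 0, 1, ... so the low
         	 * bits of a packet's RSS hash select its receive queue.
         	 */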
   4535 
   4536 	wm_rss_getkey((uint8_t *)rss_key);
   4537 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4538 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4539 
   4540 	if (sc->sc_type == WM_T_82574)
   4541 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4542 	else
   4543 		mrqc = MRQC_ENABLE_RSS_MQ;
   4544 
   4545 	/*
   4546 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4547 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4548 	 */
   4549 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4550 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4551 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4552 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4553 
   4554 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4555 }
   4556 
   4557 /*
    4558  * Adjust the numbers of TX and RX queues which the system actually uses.
    4559  *
    4560  * The numbers are affected by the parameters below:
    4561  *     - The number of hardware queues
   4562  *     - The number of MSI-X vectors (= "nvectors" argument)
   4563  *     - ncpu
   4564  */
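         /*
          * Worked example (illustrative): an 82576 has 16 hardware queues;
          * attached with 5 MSI-X vectors on an 8-CPU machine, sc_nqueues
          * becomes 5 - 1 = 4, since one vector is kept for the link interrupt
          * and using more queues than the remaining vectors (or than CPUs)
          * would not improve scaling.
          */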
   4565 static void
   4566 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4567 {
   4568 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4569 
   4570 	if (nvectors < 2) {
   4571 		sc->sc_nqueues = 1;
   4572 		return;
   4573 	}
   4574 
    4575 	switch (sc->sc_type) {
   4576 	case WM_T_82572:
   4577 		hw_ntxqueues = 2;
   4578 		hw_nrxqueues = 2;
   4579 		break;
   4580 	case WM_T_82574:
   4581 		hw_ntxqueues = 2;
   4582 		hw_nrxqueues = 2;
   4583 		break;
   4584 	case WM_T_82575:
   4585 		hw_ntxqueues = 4;
   4586 		hw_nrxqueues = 4;
   4587 		break;
   4588 	case WM_T_82576:
   4589 		hw_ntxqueues = 16;
   4590 		hw_nrxqueues = 16;
   4591 		break;
   4592 	case WM_T_82580:
   4593 	case WM_T_I350:
   4594 	case WM_T_I354:
   4595 		hw_ntxqueues = 8;
   4596 		hw_nrxqueues = 8;
   4597 		break;
   4598 	case WM_T_I210:
   4599 		hw_ntxqueues = 4;
   4600 		hw_nrxqueues = 4;
   4601 		break;
   4602 	case WM_T_I211:
   4603 		hw_ntxqueues = 2;
   4604 		hw_nrxqueues = 2;
   4605 		break;
   4606 		/*
    4607 		 * As the following ethernet controllers do not support
    4608 		 * MSI-X, this driver does not use multiqueue on them:
   4609 		 *     - WM_T_80003
   4610 		 *     - WM_T_ICH8
   4611 		 *     - WM_T_ICH9
   4612 		 *     - WM_T_ICH10
   4613 		 *     - WM_T_PCH
   4614 		 *     - WM_T_PCH2
   4615 		 *     - WM_T_PCH_LPT
   4616 		 */
   4617 	default:
   4618 		hw_ntxqueues = 1;
   4619 		hw_nrxqueues = 1;
   4620 		break;
   4621 	}
   4622 
   4623 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4624 
   4625 	/*
    4626 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4627 	 * limit the number of queues actually used.
   4628 	 */
   4629 	if (nvectors < hw_nqueues + 1) {
   4630 		sc->sc_nqueues = nvectors - 1;
   4631 	} else {
   4632 		sc->sc_nqueues = hw_nqueues;
   4633 	}
   4634 
   4635 	/*
    4636 	 * As more queues than CPUs cannot improve scaling, we limit
    4637 	 * the number of queues actually used.
   4638 	 */
   4639 	if (ncpu < sc->sc_nqueues)
   4640 		sc->sc_nqueues = ncpu;
   4641 }
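
         /*
          * Worked example for wm_adjust_qnum() (illustrative figures only):
          * on an 82576 (hw_ntxqueues = hw_nrxqueues = 16) attached with
          * nvectors = 5 on an 8-CPU machine, hw_nqueues = 16, so
          * nvectors < hw_nqueues + 1 gives sc_nqueues = 5 - 1 = 4, and the
          * ncpu clamp leaves it at 4: four Tx/Rx queue pairs plus one link
          * vector.
          */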
   4642 
   4643 static inline bool
   4644 wm_is_using_msix(struct wm_softc *sc)
   4645 {
   4646 
   4647 	return (sc->sc_nintrs > 1);
   4648 }
   4649 
   4650 static inline bool
   4651 wm_is_using_multiqueue(struct wm_softc *sc)
   4652 {
   4653 
   4654 	return (sc->sc_nqueues > 1);
   4655 }
   4656 
   4657 static int
   4658 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4659 {
   4660 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4661 	wmq->wmq_id = qidx;
   4662 	wmq->wmq_intr_idx = intr_idx;
   4663 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4664 #ifdef WM_MPSAFE
   4665 	    | SOFTINT_MPSAFE
   4666 #endif
   4667 	    , wm_handle_queue, wmq);
   4668 	if (wmq->wmq_si != NULL)
   4669 		return 0;
   4670 
   4671 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4672 	    wmq->wmq_id);
   4673 
   4674 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4675 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4676 	return ENOMEM;
   4677 }
   4678 
   4679 /*
   4680  * Both single interrupt MSI and INTx can use this function.
   4681  */
   4682 static int
   4683 wm_setup_legacy(struct wm_softc *sc)
   4684 {
   4685 	pci_chipset_tag_t pc = sc->sc_pc;
   4686 	const char *intrstr = NULL;
   4687 	char intrbuf[PCI_INTRSTR_LEN];
   4688 	int error;
   4689 
   4690 	error = wm_alloc_txrx_queues(sc);
   4691 	if (error) {
   4692 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4693 		    error);
   4694 		return ENOMEM;
   4695 	}
   4696 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4697 	    sizeof(intrbuf));
   4698 #ifdef WM_MPSAFE
   4699 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4700 #endif
   4701 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4702 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4703 	if (sc->sc_ihs[0] == NULL) {
    4704 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4705 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4706 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4707 		return ENOMEM;
   4708 	}
   4709 
   4710 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4711 	sc->sc_nintrs = 1;
   4712 
   4713 	return wm_softint_establish(sc, 0, 0);
   4714 }
   4715 
   4716 static int
   4717 wm_setup_msix(struct wm_softc *sc)
   4718 {
   4719 	void *vih;
   4720 	kcpuset_t *affinity;
   4721 	int qidx, error, intr_idx, txrx_established;
   4722 	pci_chipset_tag_t pc = sc->sc_pc;
   4723 	const char *intrstr = NULL;
   4724 	char intrbuf[PCI_INTRSTR_LEN];
   4725 	char intr_xname[INTRDEVNAMEBUF];
   4726 
   4727 	if (sc->sc_nqueues < ncpu) {
   4728 		/*
    4729 		 * To avoid contention with other devices' interrupts, the
    4730 		 * affinity of Tx/Rx interrupts starts from CPU#1.
   4731 		 */
   4732 		sc->sc_affinity_offset = 1;
   4733 	} else {
   4734 		/*
    4735 		 * In this case, this device uses all CPUs, so for readability
    4736 		 * we unify each affinitized cpu_index with its MSI-X vector number.
   4737 		 */
   4738 		sc->sc_affinity_offset = 0;
   4739 	}
   4740 
   4741 	error = wm_alloc_txrx_queues(sc);
   4742 	if (error) {
   4743 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4744 		    error);
   4745 		return ENOMEM;
   4746 	}
   4747 
   4748 	kcpuset_create(&affinity, false);
   4749 	intr_idx = 0;
   4750 
   4751 	/*
   4752 	 * TX and RX
   4753 	 */
   4754 	txrx_established = 0;
   4755 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4756 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4757 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4758 
   4759 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4760 		    sizeof(intrbuf));
   4761 #ifdef WM_MPSAFE
   4762 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4763 		    PCI_INTR_MPSAFE, true);
   4764 #endif
   4765 		memset(intr_xname, 0, sizeof(intr_xname));
   4766 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4767 		    device_xname(sc->sc_dev), qidx);
   4768 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4769 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4770 		if (vih == NULL) {
   4771 			aprint_error_dev(sc->sc_dev,
   4772 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4773 			    intrstr ? " at " : "",
   4774 			    intrstr ? intrstr : "");
   4775 
   4776 			goto fail;
   4777 		}
   4778 		kcpuset_zero(affinity);
   4779 		/* Round-robin affinity */
   4780 		kcpuset_set(affinity, affinity_to);
   4781 		error = interrupt_distribute(vih, affinity, NULL);
   4782 		if (error == 0) {
   4783 			aprint_normal_dev(sc->sc_dev,
   4784 			    "for TX and RX interrupting at %s affinity to %u\n",
   4785 			    intrstr, affinity_to);
   4786 		} else {
   4787 			aprint_normal_dev(sc->sc_dev,
   4788 			    "for TX and RX interrupting at %s\n", intrstr);
   4789 		}
   4790 		sc->sc_ihs[intr_idx] = vih;
   4791 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4792 			goto fail;
   4793 		txrx_established++;
   4794 		intr_idx++;
   4795 	}
   4796 
   4797 	/*
   4798 	 * LINK
   4799 	 */
   4800 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4801 	    sizeof(intrbuf));
   4802 #ifdef WM_MPSAFE
   4803 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4804 #endif
   4805 	memset(intr_xname, 0, sizeof(intr_xname));
   4806 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4807 	    device_xname(sc->sc_dev));
   4808 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4809 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4810 	if (vih == NULL) {
   4811 		aprint_error_dev(sc->sc_dev,
   4812 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4813 		    intrstr ? " at " : "",
   4814 		    intrstr ? intrstr : "");
   4815 
   4816 		goto fail;
   4817 	}
    4818 	/* Keep the default affinity for the LINK interrupt */
   4819 	aprint_normal_dev(sc->sc_dev,
   4820 	    "for LINK interrupting at %s\n", intrstr);
   4821 	sc->sc_ihs[intr_idx] = vih;
   4822 	sc->sc_link_intr_idx = intr_idx;
   4823 
   4824 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4825 	kcpuset_destroy(affinity);
   4826 	return 0;
   4827 
   4828  fail:
   4829 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4830 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    4831 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4832 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4833 	}
   4834 
   4835 	kcpuset_destroy(affinity);
   4836 	return ENOMEM;
   4837 }
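
         /*
          * To summarize the layout established by wm_setup_msix(): vectors
          * 0 .. sc_nqueues - 1 each service one Tx/Rx queue pair (with
          * affinity distributed round-robin over the CPUs starting at
          * sc_affinity_offset), and the last vector, sc_link_intr_idx ==
          * sc_nqueues, services link status changes; hence
          * sc_nintrs = sc_nqueues + 1.
          */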
   4838 
   4839 static void
   4840 wm_turnon(struct wm_softc *sc)
   4841 {
   4842 	int i;
   4843 
   4844 	KASSERT(WM_CORE_LOCKED(sc));
   4845 
   4846 	/*
   4847 	 * must unset stopping flags in ascending order.
   4848 	 */
    4849 	for (i = 0; i < sc->sc_nqueues; i++) {
   4850 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4851 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4852 
   4853 		mutex_enter(txq->txq_lock);
   4854 		txq->txq_stopping = false;
   4855 		mutex_exit(txq->txq_lock);
   4856 
   4857 		mutex_enter(rxq->rxq_lock);
   4858 		rxq->rxq_stopping = false;
   4859 		mutex_exit(rxq->rxq_lock);
   4860 	}
   4861 
   4862 	sc->sc_core_stopping = false;
   4863 }
   4864 
   4865 static void
   4866 wm_turnoff(struct wm_softc *sc)
   4867 {
   4868 	int i;
   4869 
   4870 	KASSERT(WM_CORE_LOCKED(sc));
   4871 
   4872 	sc->sc_core_stopping = true;
   4873 
   4874 	/*
   4875 	 * must set stopping flags in ascending order.
   4876 	 */
    4877 	for (i = 0; i < sc->sc_nqueues; i++) {
   4878 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4879 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4880 
   4881 		mutex_enter(rxq->rxq_lock);
   4882 		rxq->rxq_stopping = true;
   4883 		mutex_exit(rxq->rxq_lock);
   4884 
   4885 		mutex_enter(txq->txq_lock);
   4886 		txq->txq_stopping = true;
   4887 		mutex_exit(txq->txq_lock);
   4888 	}
   4889 }
   4890 
   4891 /*
    4892  * Write the interrupt interval value to the ITR or EITR register.
   4893  */
   4894 static void
   4895 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4896 {
   4897 
   4898 	if (!wmq->wmq_set_itr)
   4899 		return;
   4900 
   4901 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4902 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4903 
   4904 		/*
    4905 		 * The 82575 doesn't have the CNT_INGR field,
    4906 		 * so overwrite the counter field by software.
   4907 		 */
   4908 		if (sc->sc_type == WM_T_82575)
   4909 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4910 		else
   4911 			eitr |= EITR_CNT_INGR;
   4912 
   4913 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4914 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   4915 		/*
    4916 		 * The 82574 has both ITR and EITR.  Set EITR when we use
    4917 		 * the multiqueue function with MSI-X.
   4918 		 */
   4919 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4920 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4921 	} else {
   4922 		KASSERT(wmq->wmq_id == 0);
   4923 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4924 	}
   4925 
   4926 	wmq->wmq_set_itr = false;
   4927 }
   4928 
   4929 /*
   4930  * TODO
    4931  * The dynamic itr calculation below is almost the same as Linux igb's,
    4932  * however it does not fit wm(4).  So, AIM is kept disabled until we
    4933  * find an appropriate itr calculation for wm(4).
   4934  */
   4935 /*
    4936  * Calculate the interrupt interval value to be written to the register
    4937  * in wm_itrs_writereg().  This function does not write the ITR/EITR register.
   4938  */
   4939 static void
   4940 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4941 {
   4942 #ifdef NOTYET
   4943 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4944 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4945 	uint32_t avg_size = 0;
   4946 	uint32_t new_itr;
   4947 
   4948 	if (rxq->rxq_packets)
    4949 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   4950 	if (txq->txq_packets)
   4951 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4952 
   4953 	if (avg_size == 0) {
   4954 		new_itr = 450; /* restore default value */
   4955 		goto out;
   4956 	}
   4957 
   4958 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4959 	avg_size += 24;
   4960 
   4961 	/* Don't starve jumbo frames */
   4962 	avg_size = min(avg_size, 3000);
   4963 
   4964 	/* Give a little boost to mid-size frames */
   4965 	if ((avg_size > 300) && (avg_size < 1200))
   4966 		new_itr = avg_size / 3;
   4967 	else
   4968 		new_itr = avg_size / 2;
   4969 
   4970 out:
   4971 	/*
    4972 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
   4973 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   4974 	 */
   4975 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4976 		new_itr *= 4;
   4977 
   4978 	if (new_itr != wmq->wmq_itr) {
   4979 		wmq->wmq_itr = new_itr;
   4980 		wmq->wmq_set_itr = true;
   4981 	} else
   4982 		wmq->wmq_set_itr = false;
   4983 
   4984 	rxq->rxq_packets = 0;
   4985 	rxq->rxq_bytes = 0;
   4986 	txq->txq_packets = 0;
   4987 	txq->txq_bytes = 0;
   4988 #endif
   4989 }
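
         /*
          * Worked example for the disabled AIM path above (illustrative
          * figures only): 10 received packets totalling 9000 bytes give
          * avg_size = 900; adding 24 for CRC/preamble/gap gives 924, which
          * falls in the mid-size boost range (300..1200), so
          * new_itr = 924 / 3 = 308; on anything but the 82575 this is then
          * multiplied by 4, giving 1232.
          */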
   4990 
   4991 /*
   4992  * wm_init:		[ifnet interface function]
   4993  *
   4994  *	Initialize the interface.
   4995  */
   4996 static int
   4997 wm_init(struct ifnet *ifp)
   4998 {
   4999 	struct wm_softc *sc = ifp->if_softc;
   5000 	int ret;
   5001 
   5002 	WM_CORE_LOCK(sc);
   5003 	ret = wm_init_locked(ifp);
   5004 	WM_CORE_UNLOCK(sc);
   5005 
   5006 	return ret;
   5007 }
   5008 
   5009 static int
   5010 wm_init_locked(struct ifnet *ifp)
   5011 {
   5012 	struct wm_softc *sc = ifp->if_softc;
   5013 	int i, j, trynum, error = 0;
   5014 	uint32_t reg;
   5015 
   5016 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5017 		device_xname(sc->sc_dev), __func__));
   5018 	KASSERT(WM_CORE_LOCKED(sc));
   5019 
   5020 	/*
    5021 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5022 	 * There is a small but measurable benefit to avoiding the adjustment
   5023 	 * of the descriptor so that the headers are aligned, for normal mtu,
   5024 	 * on such platforms.  One possibility is that the DMA itself is
   5025 	 * slightly more efficient if the front of the entire packet (instead
   5026 	 * of the front of the headers) is aligned.
   5027 	 *
   5028 	 * Note we must always set align_tweak to 0 if we are using
   5029 	 * jumbo frames.
   5030 	 */
   5031 #ifdef __NO_STRICT_ALIGNMENT
   5032 	sc->sc_align_tweak = 0;
   5033 #else
   5034 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5035 		sc->sc_align_tweak = 0;
   5036 	else
   5037 		sc->sc_align_tweak = 2;
   5038 #endif /* __NO_STRICT_ALIGNMENT */
   5039 
   5040 	/* Cancel any pending I/O. */
   5041 	wm_stop_locked(ifp, 0);
   5042 
   5043 	/* update statistics before reset */
   5044 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5045 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5046 
   5047 	/* PCH_SPT hardware workaround */
   5048 	if (sc->sc_type == WM_T_PCH_SPT)
   5049 		wm_flush_desc_rings(sc);
   5050 
   5051 	/* Reset the chip to a known state. */
   5052 	wm_reset(sc);
   5053 
   5054 	/* AMT based hardware can now take control from firmware */
   5055 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5056 		wm_get_hw_control(sc);
   5057 
   5058 	/* Init hardware bits */
   5059 	wm_initialize_hardware_bits(sc);
   5060 
   5061 	/* Reset the PHY. */
   5062 	if (sc->sc_flags & WM_F_HAS_MII)
   5063 		wm_gmii_reset(sc);
   5064 
   5065 	/* Calculate (E)ITR value */
   5066 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5067 		/*
    5068 		 * For NEWQUEUE devices' EITR (except for the 82575).
    5069 		 * The 82575's EITR should be set to the same throttling
    5070 		 * value as the other old controllers' ITR because the
    5071 		 * interrupts/sec calculation is the same, that is,
    5072 		 * 1,000,000,000 / (N * 256).
    5073 		 *
    5074 		 * The 82574's EITR should be set to the same throttling
    5075 		 * value as its ITR.  For N interrupts/sec, set this value to
    5076 		 * 1,000,000 / N, in contrast to the ITR throttling value.
   5077 		 */
   5078 		sc->sc_itr_init = 450;
   5079 	} else if (sc->sc_type >= WM_T_82543) {
   5080 		/*
   5081 		 * Set up the interrupt throttling register (units of 256ns)
   5082 		 * Note that a footnote in Intel's documentation says this
   5083 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5084 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5085 		 * that that is also true for the 1024ns units of the other
   5086 		 * interrupt-related timer registers -- so, really, we ought
   5087 		 * to divide this value by 4 when the link speed is low.
   5088 		 *
   5089 		 * XXX implement this division at link speed change!
   5090 		 */
   5091 
   5092 		/*
   5093 		 * For N interrupts/sec, set this value to:
   5094 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5095 		 * absolute and packet timer values to this value
   5096 		 * divided by 4 to get "simple timer" behavior.
   5097 		 */
   5098 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5099 	}
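
         	/*
         	 * A sanity check on the figures above (commentary only): with
         	 * the EITR formula, 450 corresponds to roughly 1,000,000 / 450
         	 * ~= 2222 interrupts/sec; with the ITR formula, 1500 corresponds
         	 * to 1,000,000,000 / (1500 * 256) ~= 2604 interrupts/sec.
         	 */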
   5100 
   5101 	error = wm_init_txrx_queues(sc);
   5102 	if (error)
   5103 		goto out;
   5104 
   5105 	/*
   5106 	 * Clear out the VLAN table -- we don't use it (yet).
   5107 	 */
   5108 	CSR_WRITE(sc, WMREG_VET, 0);
   5109 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5110 		trynum = 10; /* Due to hw errata */
   5111 	else
   5112 		trynum = 1;
   5113 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5114 		for (j = 0; j < trynum; j++)
   5115 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5116 
   5117 	/*
   5118 	 * Set up flow-control parameters.
   5119 	 *
   5120 	 * XXX Values could probably stand some tuning.
   5121 	 */
   5122 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5123 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5124 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5125 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5126 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5127 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5128 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5129 	}
   5130 
   5131 	sc->sc_fcrtl = FCRTL_DFLT;
   5132 	if (sc->sc_type < WM_T_82543) {
   5133 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5134 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5135 	} else {
   5136 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5137 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5138 	}
   5139 
   5140 	if (sc->sc_type == WM_T_80003)
   5141 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5142 	else
   5143 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5144 
   5145 	/* Writes the control register. */
   5146 	wm_set_vlan(sc);
   5147 
   5148 	if (sc->sc_flags & WM_F_HAS_MII) {
   5149 		int val;
   5150 
   5151 		switch (sc->sc_type) {
   5152 		case WM_T_80003:
   5153 		case WM_T_ICH8:
   5154 		case WM_T_ICH9:
   5155 		case WM_T_ICH10:
   5156 		case WM_T_PCH:
   5157 		case WM_T_PCH2:
   5158 		case WM_T_PCH_LPT:
   5159 		case WM_T_PCH_SPT:
   5160 			/*
   5161 			 * Set the mac to wait the maximum time between each
   5162 			 * iteration and increase the max iterations when
   5163 			 * polling the phy; this fixes erroneous timeouts at
   5164 			 * 10Mbps.
   5165 			 */
   5166 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5167 			    0xFFFF);
   5168 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5169 			val |= 0x3F;
   5170 			wm_kmrn_writereg(sc,
   5171 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5172 			break;
   5173 		default:
   5174 			break;
   5175 		}
   5176 
   5177 		if (sc->sc_type == WM_T_80003) {
   5178 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5179 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5180 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5181 
   5182 			/* Bypass RX and TX FIFO's */
   5183 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5184 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5185 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5186 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5187 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5188 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5189 		}
   5190 	}
   5191 #if 0
   5192 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5193 #endif
   5194 
   5195 	/* Set up checksum offload parameters. */
   5196 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5197 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5198 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5199 		reg |= RXCSUM_IPOFL;
   5200 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5201 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5202 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5203 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5204 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5205 
   5206 	/* Set registers about MSI-X */
   5207 	if (wm_is_using_msix(sc)) {
   5208 		uint32_t ivar;
   5209 		struct wm_queue *wmq;
   5210 		int qid, qintr_idx;
   5211 
   5212 		if (sc->sc_type == WM_T_82575) {
   5213 			/* Interrupt control */
   5214 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5215 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5216 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5217 
   5218 			/* TX and RX */
   5219 			for (i = 0; i < sc->sc_nqueues; i++) {
   5220 				wmq = &sc->sc_queue[i];
   5221 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5222 				    EITR_TX_QUEUE(wmq->wmq_id)
   5223 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5224 			}
   5225 			/* Link status */
   5226 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5227 			    EITR_OTHER);
   5228 		} else if (sc->sc_type == WM_T_82574) {
   5229 			/* Interrupt control */
   5230 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5231 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5232 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5233 
   5234 			/*
    5235 			 * Work around an issue with spurious interrupts
    5236 			 * in MSI-X mode.
    5237 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5238 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
   5239 			 */
   5240 			reg = CSR_READ(sc, WMREG_RFCTL);
   5241 			reg |= WMREG_RFCTL_ACKDIS;
   5242 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5243 
   5244 			ivar = 0;
   5245 			/* TX and RX */
   5246 			for (i = 0; i < sc->sc_nqueues; i++) {
   5247 				wmq = &sc->sc_queue[i];
   5248 				qid = wmq->wmq_id;
   5249 				qintr_idx = wmq->wmq_intr_idx;
   5250 
   5251 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5252 				    IVAR_TX_MASK_Q_82574(qid));
   5253 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5254 				    IVAR_RX_MASK_Q_82574(qid));
   5255 			}
   5256 			/* Link status */
   5257 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5258 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5259 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5260 		} else {
   5261 			/* Interrupt control */
   5262 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5263 			    | GPIE_EIAME | GPIE_PBA);
   5264 
   5265 			switch (sc->sc_type) {
   5266 			case WM_T_82580:
   5267 			case WM_T_I350:
   5268 			case WM_T_I354:
   5269 			case WM_T_I210:
   5270 			case WM_T_I211:
   5271 				/* TX and RX */
   5272 				for (i = 0; i < sc->sc_nqueues; i++) {
   5273 					wmq = &sc->sc_queue[i];
   5274 					qid = wmq->wmq_id;
   5275 					qintr_idx = wmq->wmq_intr_idx;
   5276 
   5277 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5278 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5279 					ivar |= __SHIFTIN((qintr_idx
   5280 						| IVAR_VALID),
   5281 					    IVAR_TX_MASK_Q(qid));
   5282 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5283 					ivar |= __SHIFTIN((qintr_idx
   5284 						| IVAR_VALID),
   5285 					    IVAR_RX_MASK_Q(qid));
   5286 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5287 				}
   5288 				break;
   5289 			case WM_T_82576:
   5290 				/* TX and RX */
   5291 				for (i = 0; i < sc->sc_nqueues; i++) {
   5292 					wmq = &sc->sc_queue[i];
   5293 					qid = wmq->wmq_id;
   5294 					qintr_idx = wmq->wmq_intr_idx;
   5295 
   5296 					ivar = CSR_READ(sc,
   5297 					    WMREG_IVAR_Q_82576(qid));
   5298 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5299 					ivar |= __SHIFTIN((qintr_idx
   5300 						| IVAR_VALID),
   5301 					    IVAR_TX_MASK_Q_82576(qid));
   5302 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5303 					ivar |= __SHIFTIN((qintr_idx
   5304 						| IVAR_VALID),
   5305 					    IVAR_RX_MASK_Q_82576(qid));
   5306 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5307 					    ivar);
   5308 				}
   5309 				break;
   5310 			default:
   5311 				break;
   5312 			}
   5313 
   5314 			/* Link status */
   5315 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5316 			    IVAR_MISC_OTHER);
   5317 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5318 		}
   5319 
   5320 		if (wm_is_using_multiqueue(sc)) {
   5321 			wm_init_rss(sc);
   5322 
    5323 			/*
    5324 			 * NOTE: Receive Full-Packet Checksum Offload
    5325 			 * is mutually exclusive with Multiqueue.  However,
    5326 			 * this is not the same as TCP/IP checksums, which
    5327 			 * still work.
    5328 			 */
   5329 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5330 			reg |= RXCSUM_PCSD;
   5331 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5332 		}
   5333 	}
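
         	/*
         	 * For example, with MSI-X and sc_nqueues = 2, the programming
         	 * above routes TX/RX completions of queue 0 to vector 0 and of
         	 * queue 1 to vector 1, while link status events go to vector
         	 * sc_link_intr_idx (here 2), matching the handlers established
         	 * in wm_setup_msix().
         	 */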
   5334 
   5335 	/* Set up the interrupt registers. */
   5336 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5337 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5338 	    ICR_RXO | ICR_RXT0;
   5339 	if (wm_is_using_msix(sc)) {
   5340 		uint32_t mask;
   5341 		struct wm_queue *wmq;
   5342 
   5343 		switch (sc->sc_type) {
   5344 		case WM_T_82574:
   5345 			mask = 0;
   5346 			for (i = 0; i < sc->sc_nqueues; i++) {
   5347 				wmq = &sc->sc_queue[i];
   5348 				mask |= ICR_TXQ(wmq->wmq_id);
   5349 				mask |= ICR_RXQ(wmq->wmq_id);
   5350 			}
   5351 			mask |= ICR_OTHER;
   5352 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5353 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5354 			break;
   5355 		default:
   5356 			if (sc->sc_type == WM_T_82575) {
   5357 				mask = 0;
   5358 				for (i = 0; i < sc->sc_nqueues; i++) {
   5359 					wmq = &sc->sc_queue[i];
   5360 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5361 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5362 				}
   5363 				mask |= EITR_OTHER;
   5364 			} else {
   5365 				mask = 0;
   5366 				for (i = 0; i < sc->sc_nqueues; i++) {
   5367 					wmq = &sc->sc_queue[i];
   5368 					mask |= 1 << wmq->wmq_intr_idx;
   5369 				}
   5370 				mask |= 1 << sc->sc_link_intr_idx;
   5371 			}
   5372 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5373 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5374 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5375 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5376 			break;
   5377 		}
   5378 	} else
   5379 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5380 
   5381 	/* Set up the inter-packet gap. */
   5382 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5383 
   5384 	if (sc->sc_type >= WM_T_82543) {
   5385 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5386 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5387 			wm_itrs_writereg(sc, wmq);
   5388 		}
   5389 		/*
    5390 		 * Link interrupts occur much less frequently than TX
    5391 		 * and RX interrupts.  So, we don't
   5392 		 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
   5393 		 * FreeBSD's if_igb.
   5394 		 */
   5395 	}
   5396 
   5397 	/* Set the VLAN ethernetype. */
   5398 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5399 
   5400 	/*
   5401 	 * Set up the transmit control register; we start out with
    5402 	 * a collision distance suitable for FDX, but update it when
   5403 	 * we resolve the media type.
   5404 	 */
   5405 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5406 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5407 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5408 	if (sc->sc_type >= WM_T_82571)
   5409 		sc->sc_tctl |= TCTL_MULR;
   5410 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5411 
   5412 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5413 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   5414 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5415 	}
   5416 
   5417 	if (sc->sc_type == WM_T_80003) {
   5418 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5419 		reg &= ~TCTL_EXT_GCEX_MASK;
   5420 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5421 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5422 	}
   5423 
   5424 	/* Set the media. */
   5425 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5426 		goto out;
   5427 
   5428 	/* Configure for OS presence */
   5429 	wm_init_manageability(sc);
   5430 
   5431 	/*
   5432 	 * Set up the receive control register; we actually program
   5433 	 * the register when we set the receive filter.  Use multicast
   5434 	 * address offset type 0.
   5435 	 *
   5436 	 * Only the i82544 has the ability to strip the incoming
   5437 	 * CRC, so we don't enable that feature.
   5438 	 */
   5439 	sc->sc_mchash_type = 0;
   5440 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5441 	    | RCTL_MO(sc->sc_mchash_type);
   5442 
   5443 	/*
    5444 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5445 	 */
   5446 	if (sc->sc_type == WM_T_82574)
   5447 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5448 
   5449 	/*
   5450 	 * The I350 has a bug where it always strips the CRC whether
    5451 	 * asked to or not.  So ask for stripped CRC here and cope in rxeof.
   5452 	 */
   5453 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5454 	    || (sc->sc_type == WM_T_I210))
   5455 		sc->sc_rctl |= RCTL_SECRC;
   5456 
   5457 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5458 	    && (ifp->if_mtu > ETHERMTU)) {
   5459 		sc->sc_rctl |= RCTL_LPE;
   5460 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5461 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5462 	}
   5463 
   5464 	if (MCLBYTES == 2048) {
   5465 		sc->sc_rctl |= RCTL_2k;
   5466 	} else {
   5467 		if (sc->sc_type >= WM_T_82543) {
   5468 			switch (MCLBYTES) {
   5469 			case 4096:
   5470 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5471 				break;
   5472 			case 8192:
   5473 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5474 				break;
   5475 			case 16384:
   5476 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5477 				break;
   5478 			default:
   5479 				panic("wm_init: MCLBYTES %d unsupported",
   5480 				    MCLBYTES);
   5481 				break;
   5482 			}
   5483 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5484 	}
   5485 
   5486 	/* Set the receive filter. */
   5487 	wm_set_filter(sc);
   5488 
   5489 	/* Enable ECC */
   5490 	switch (sc->sc_type) {
   5491 	case WM_T_82571:
   5492 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5493 		reg |= PBA_ECC_CORR_EN;
   5494 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5495 		break;
   5496 	case WM_T_PCH_LPT:
   5497 	case WM_T_PCH_SPT:
   5498 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5499 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5500 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5501 
   5502 		sc->sc_ctrl |= CTRL_MEHE;
   5503 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5504 		break;
   5505 	default:
   5506 		break;
   5507 	}
   5508 
   5509 	/* On 575 and later set RDT only if RX enabled */
   5510 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5511 		int qidx;
   5512 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5513 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5514 			for (i = 0; i < WM_NRXDESC; i++) {
   5515 				mutex_enter(rxq->rxq_lock);
   5516 				wm_init_rxdesc(rxq, i);
   5517 				mutex_exit(rxq->rxq_lock);
   5518 
   5519 			}
   5520 		}
   5521 	}
   5522 
   5523 	wm_turnon(sc);
   5524 
   5525 	/* Start the one second link check clock. */
   5526 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5527 
   5528 	/* ...all done! */
   5529 	ifp->if_flags |= IFF_RUNNING;
   5530 	ifp->if_flags &= ~IFF_OACTIVE;
   5531 
   5532  out:
   5533 	sc->sc_if_flags = ifp->if_flags;
   5534 	if (error)
   5535 		log(LOG_ERR, "%s: interface not running\n",
   5536 		    device_xname(sc->sc_dev));
   5537 	return error;
   5538 }
   5539 
   5540 /*
   5541  * wm_stop:		[ifnet interface function]
   5542  *
   5543  *	Stop transmission on the interface.
   5544  */
   5545 static void
   5546 wm_stop(struct ifnet *ifp, int disable)
   5547 {
   5548 	struct wm_softc *sc = ifp->if_softc;
   5549 
   5550 	WM_CORE_LOCK(sc);
   5551 	wm_stop_locked(ifp, disable);
   5552 	WM_CORE_UNLOCK(sc);
   5553 }
   5554 
   5555 static void
   5556 wm_stop_locked(struct ifnet *ifp, int disable)
   5557 {
   5558 	struct wm_softc *sc = ifp->if_softc;
   5559 	struct wm_txsoft *txs;
   5560 	int i, qidx;
   5561 
   5562 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5563 		device_xname(sc->sc_dev), __func__));
   5564 	KASSERT(WM_CORE_LOCKED(sc));
   5565 
   5566 	wm_turnoff(sc);
   5567 
   5568 	/* Stop the one second clock. */
   5569 	callout_stop(&sc->sc_tick_ch);
   5570 
   5571 	/* Stop the 82547 Tx FIFO stall check timer. */
   5572 	if (sc->sc_type == WM_T_82547)
   5573 		callout_stop(&sc->sc_txfifo_ch);
   5574 
   5575 	if (sc->sc_flags & WM_F_HAS_MII) {
   5576 		/* Down the MII. */
   5577 		mii_down(&sc->sc_mii);
   5578 	} else {
   5579 #if 0
   5580 		/* Should we clear PHY's status properly? */
   5581 		wm_reset(sc);
   5582 #endif
   5583 	}
   5584 
   5585 	/* Stop the transmit and receive processes. */
   5586 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5587 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5588 	sc->sc_rctl &= ~RCTL_EN;
   5589 
   5590 	/*
   5591 	 * Clear the interrupt mask to ensure the device cannot assert its
   5592 	 * interrupt line.
   5593 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5594 	 * service any currently pending or shared interrupt.
   5595 	 */
   5596 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5597 	sc->sc_icr = 0;
   5598 	if (wm_is_using_msix(sc)) {
   5599 		if (sc->sc_type != WM_T_82574) {
   5600 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5601 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5602 		} else
   5603 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5604 	}
   5605 
   5606 	/* Release any queued transmit buffers. */
   5607 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5608 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5609 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5610 		mutex_enter(txq->txq_lock);
   5611 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5612 			txs = &txq->txq_soft[i];
   5613 			if (txs->txs_mbuf != NULL) {
    5614 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5615 				m_freem(txs->txs_mbuf);
   5616 				txs->txs_mbuf = NULL;
   5617 			}
   5618 		}
   5619 		mutex_exit(txq->txq_lock);
   5620 	}
   5621 
   5622 	/* Mark the interface as down and cancel the watchdog timer. */
   5623 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5624 	ifp->if_timer = 0;
   5625 
   5626 	if (disable) {
   5627 		for (i = 0; i < sc->sc_nqueues; i++) {
   5628 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5629 			mutex_enter(rxq->rxq_lock);
   5630 			wm_rxdrain(rxq);
   5631 			mutex_exit(rxq->rxq_lock);
   5632 		}
   5633 	}
   5634 
   5635 #if 0 /* notyet */
   5636 	if (sc->sc_type >= WM_T_82544)
   5637 		CSR_WRITE(sc, WMREG_WUC, 0);
   5638 #endif
   5639 }
   5640 
   5641 static void
   5642 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5643 {
   5644 	struct mbuf *m;
   5645 	int i;
   5646 
   5647 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5648 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5649 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5650 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5651 		    m->m_data, m->m_len, m->m_flags);
   5652 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5653 	    i, i == 1 ? "" : "s");
   5654 }
   5655 
   5656 /*
   5657  * wm_82547_txfifo_stall:
   5658  *
   5659  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5660  *	reset the FIFO pointers, and restart packet transmission.
   5661  */
   5662 static void
   5663 wm_82547_txfifo_stall(void *arg)
   5664 {
   5665 	struct wm_softc *sc = arg;
   5666 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5667 
   5668 	mutex_enter(txq->txq_lock);
   5669 
   5670 	if (txq->txq_stopping)
   5671 		goto out;
   5672 
   5673 	if (txq->txq_fifo_stall) {
   5674 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5675 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5676 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5677 			/*
   5678 			 * Packets have drained.  Stop transmitter, reset
   5679 			 * FIFO pointers, restart transmitter, and kick
   5680 			 * the packet queue.
   5681 			 */
   5682 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5683 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5684 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5685 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5686 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5687 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5688 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5689 			CSR_WRITE_FLUSH(sc);
   5690 
   5691 			txq->txq_fifo_head = 0;
   5692 			txq->txq_fifo_stall = 0;
   5693 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5694 		} else {
   5695 			/*
   5696 			 * Still waiting for packets to drain; try again in
   5697 			 * another tick.
   5698 			 */
   5699 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5700 		}
   5701 	}
   5702 
   5703 out:
   5704 	mutex_exit(txq->txq_lock);
   5705 }
   5706 
   5707 /*
   5708  * wm_82547_txfifo_bugchk:
   5709  *
   5710  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5711  *	prevent enqueueing a packet that would wrap around the end
    5712  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5713  *
   5714  *	We do this by checking the amount of space before the end
   5715  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5716  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5717  *	the internal FIFO pointers to the beginning, and restart
   5718  *	transmission on the interface.
   5719  */
   5720 #define	WM_FIFO_HDR		0x10
   5721 #define	WM_82547_PAD_LEN	0x3e0
   5722 static int
   5723 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5724 {
   5725 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5726 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5727 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5728 
   5729 	/* Just return if already stalled. */
   5730 	if (txq->txq_fifo_stall)
   5731 		return 1;
   5732 
   5733 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5734 		/* Stall only occurs in half-duplex mode. */
   5735 		goto send_packet;
   5736 	}
   5737 
   5738 	if (len >= WM_82547_PAD_LEN + space) {
   5739 		txq->txq_fifo_stall = 1;
   5740 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5741 		return 1;
   5742 	}
   5743 
   5744  send_packet:
   5745 	txq->txq_fifo_head += len;
   5746 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5747 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5748 
   5749 	return 0;
   5750 }
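
         /*
          * Worked example (illustrative figures only): a 1514-byte frame
          * rounds up to len = roundup(1514 + 0x10, 0x10) = 1536 bytes of
          * FIFO space.  If only 500 bytes remain before the end of the
          * FIFO, 1536 >= 0x3e0 + 500 holds and the queue stalls until the
          * FIFO drains; with 2000 bytes remaining the packet is sent and
          * txq_fifo_head advances by 1536 (wrapping modulo txq_fifo_size).
          */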
   5751 
   5752 static int
   5753 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5754 {
   5755 	int error;
   5756 
   5757 	/*
   5758 	 * Allocate the control data structures, and create and load the
   5759 	 * DMA map for it.
   5760 	 *
   5761 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5762 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5763 	 * both sets within the same 4G segment.
   5764 	 */
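         	/*
         	 * The 4G constraint is satisfied below by passing 0x100000000
         	 * as the boundary argument to bus_dmamem_alloc(), so the
         	 * allocation never crosses a 4GB boundary.
         	 */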
   5765 	if (sc->sc_type < WM_T_82544)
   5766 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5767 	else
   5768 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5769 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5770 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5771 	else
   5772 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5773 
   5774 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5775 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5776 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5777 		aprint_error_dev(sc->sc_dev,
   5778 		    "unable to allocate TX control data, error = %d\n",
   5779 		    error);
   5780 		goto fail_0;
   5781 	}
   5782 
   5783 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5784 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5785 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5786 		aprint_error_dev(sc->sc_dev,
   5787 		    "unable to map TX control data, error = %d\n", error);
   5788 		goto fail_1;
   5789 	}
   5790 
   5791 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5792 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5793 		aprint_error_dev(sc->sc_dev,
   5794 		    "unable to create TX control data DMA map, error = %d\n",
   5795 		    error);
   5796 		goto fail_2;
   5797 	}
   5798 
   5799 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5800 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5801 		aprint_error_dev(sc->sc_dev,
   5802 		    "unable to load TX control data DMA map, error = %d\n",
   5803 		    error);
   5804 		goto fail_3;
   5805 	}
   5806 
   5807 	return 0;
   5808 
   5809  fail_3:
   5810 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5811  fail_2:
   5812 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5813 	    WM_TXDESCS_SIZE(txq));
   5814  fail_1:
   5815 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5816  fail_0:
   5817 	return error;
   5818 }
   5819 
   5820 static void
   5821 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5822 {
   5823 
   5824 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5825 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5826 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5827 	    WM_TXDESCS_SIZE(txq));
   5828 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5829 }
   5830 
   5831 static int
   5832 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5833 {
   5834 	int error;
   5835 	size_t rxq_descs_size;
   5836 
   5837 	/*
   5838 	 * Allocate the control data structures, and create and load the
   5839 	 * DMA map for it.
   5840 	 *
   5841 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5842 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5843 	 * both sets within the same 4G segment.
   5844 	 */
   5845 	rxq->rxq_ndesc = WM_NRXDESC;
   5846 	if (sc->sc_type == WM_T_82574)
   5847 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5848 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5849 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5850 	else
   5851 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5852 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5853 
   5854 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5855 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5856 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5857 		aprint_error_dev(sc->sc_dev,
   5858 		    "unable to allocate RX control data, error = %d\n",
   5859 		    error);
   5860 		goto fail_0;
   5861 	}
   5862 
   5863 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5864 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5865 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5866 		aprint_error_dev(sc->sc_dev,
   5867 		    "unable to map RX control data, error = %d\n", error);
   5868 		goto fail_1;
   5869 	}
   5870 
   5871 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5872 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5873 		aprint_error_dev(sc->sc_dev,
   5874 		    "unable to create RX control data DMA map, error = %d\n",
   5875 		    error);
   5876 		goto fail_2;
   5877 	}
   5878 
   5879 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5880 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5881 		aprint_error_dev(sc->sc_dev,
   5882 		    "unable to load RX control data DMA map, error = %d\n",
   5883 		    error);
   5884 		goto fail_3;
   5885 	}
   5886 
   5887 	return 0;
   5888 
   5889  fail_3:
   5890 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5891  fail_2:
   5892 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5893 	    rxq_descs_size);
   5894  fail_1:
   5895 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5896  fail_0:
   5897 	return error;
   5898 }
   5899 
   5900 static void
   5901 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5902 {
   5903 
   5904 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5905 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5906 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5907 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5908 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5909 }
   5910 
   5911 
   5912 static int
   5913 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5914 {
   5915 	int i, error;
   5916 
   5917 	/* Create the transmit buffer DMA maps. */
   5918 	WM_TXQUEUELEN(txq) =
   5919 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5920 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5921 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5922 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5923 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5924 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5925 			aprint_error_dev(sc->sc_dev,
   5926 			    "unable to create Tx DMA map %d, error = %d\n",
   5927 			    i, error);
   5928 			goto fail;
   5929 		}
   5930 	}
   5931 
   5932 	return 0;
   5933 
   5934  fail:
   5935 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5936 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5937 			bus_dmamap_destroy(sc->sc_dmat,
   5938 			    txq->txq_soft[i].txs_dmamap);
   5939 	}
   5940 	return error;
   5941 }
   5942 
   5943 static void
   5944 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5945 {
   5946 	int i;
   5947 
   5948 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5949 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5950 			bus_dmamap_destroy(sc->sc_dmat,
   5951 			    txq->txq_soft[i].txs_dmamap);
   5952 	}
   5953 }
   5954 
   5955 static int
   5956 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5957 {
   5958 	int i, error;
   5959 
   5960 	/* Create the receive buffer DMA maps. */
   5961 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5962 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5963 			    MCLBYTES, 0, 0,
   5964 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5965 			aprint_error_dev(sc->sc_dev,
    5966 			    "unable to create Rx DMA map %d, error = %d\n",
   5967 			    i, error);
   5968 			goto fail;
   5969 		}
   5970 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5971 	}
   5972 
   5973 	return 0;
   5974 
   5975  fail:
   5976 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5977 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5978 			bus_dmamap_destroy(sc->sc_dmat,
   5979 			    rxq->rxq_soft[i].rxs_dmamap);
   5980 	}
   5981 	return error;
   5982 }
   5983 
   5984 static void
   5985 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5986 {
   5987 	int i;
   5988 
   5989 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5990 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5991 			bus_dmamap_destroy(sc->sc_dmat,
   5992 			    rxq->rxq_soft[i].rxs_dmamap);
   5993 	}
   5994 }
   5995 
   5996 /*
    5997  * wm_alloc_txrx_queues:
   5998  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5999  */
   6000 static int
   6001 wm_alloc_txrx_queues(struct wm_softc *sc)
   6002 {
   6003 	int i, error, tx_done, rx_done;
   6004 
   6005 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6006 	    KM_SLEEP);
   6007 	if (sc->sc_queue == NULL) {
    6008 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6009 		error = ENOMEM;
   6010 		goto fail_0;
   6011 	}
   6012 
   6013 	/*
   6014 	 * For transmission
   6015 	 */
   6016 	error = 0;
   6017 	tx_done = 0;
   6018 	for (i = 0; i < sc->sc_nqueues; i++) {
   6019 #ifdef WM_EVENT_COUNTERS
   6020 		int j;
   6021 		const char *xname;
   6022 #endif
   6023 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6024 		txq->txq_sc = sc;
   6025 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6026 
   6027 		error = wm_alloc_tx_descs(sc, txq);
   6028 		if (error)
   6029 			break;
   6030 		error = wm_alloc_tx_buffer(sc, txq);
   6031 		if (error) {
   6032 			wm_free_tx_descs(sc, txq);
   6033 			break;
   6034 		}
   6035 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6036 		if (txq->txq_interq == NULL) {
   6037 			wm_free_tx_descs(sc, txq);
   6038 			wm_free_tx_buffer(sc, txq);
   6039 			error = ENOMEM;
   6040 			break;
   6041 		}
   6042 
   6043 #ifdef WM_EVENT_COUNTERS
   6044 		xname = device_xname(sc->sc_dev);
   6045 
   6046 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6047 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6048 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6049 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6050 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6051 
   6052 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6053 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6054 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6055 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6056 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6057 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6058 
   6059 		for (j = 0; j < WM_NTXSEGS; j++) {
   6060 			snprintf(txq->txq_txseg_evcnt_names[j],
   6061 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6062 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6063 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6064 		}
   6065 
   6066 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6067 
   6068 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6069 #endif /* WM_EVENT_COUNTERS */
   6070 
   6071 		tx_done++;
   6072 	}
   6073 	if (error)
   6074 		goto fail_1;
   6075 
   6076 	/*
    6077 	 * For receive
   6078 	 */
   6079 	error = 0;
   6080 	rx_done = 0;
   6081 	for (i = 0; i < sc->sc_nqueues; i++) {
   6082 #ifdef WM_EVENT_COUNTERS
   6083 		const char *xname;
   6084 #endif
   6085 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6086 		rxq->rxq_sc = sc;
   6087 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6088 
   6089 		error = wm_alloc_rx_descs(sc, rxq);
   6090 		if (error)
   6091 			break;
   6092 
   6093 		error = wm_alloc_rx_buffer(sc, rxq);
   6094 		if (error) {
   6095 			wm_free_rx_descs(sc, rxq);
   6096 			break;
   6097 		}
   6098 
   6099 #ifdef WM_EVENT_COUNTERS
   6100 		xname = device_xname(sc->sc_dev);
   6101 
   6102 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6103 
   6104 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6105 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6106 #endif /* WM_EVENT_COUNTERS */
   6107 
   6108 		rx_done++;
   6109 	}
   6110 	if (error)
   6111 		goto fail_2;
   6112 
   6113 	return 0;
   6114 
   6115  fail_2:
   6116 	for (i = 0; i < rx_done; i++) {
   6117 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6118 		wm_free_rx_buffer(sc, rxq);
   6119 		wm_free_rx_descs(sc, rxq);
   6120 		if (rxq->rxq_lock)
   6121 			mutex_obj_free(rxq->rxq_lock);
   6122 	}
   6123  fail_1:
   6124 	for (i = 0; i < tx_done; i++) {
   6125 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6126 		pcq_destroy(txq->txq_interq);
   6127 		wm_free_tx_buffer(sc, txq);
   6128 		wm_free_tx_descs(sc, txq);
   6129 		if (txq->txq_lock)
   6130 			mutex_obj_free(txq->txq_lock);
   6131 	}
   6132 
   6133 	kmem_free(sc->sc_queue,
   6134 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6135  fail_0:
   6136 	return error;
   6137 }
   6138 
   6139 /*
    6140  * wm_free_txrx_queues:
   6141  *	Free {tx,rx}descs and {tx,rx} buffers
   6142  */
   6143 static void
   6144 wm_free_txrx_queues(struct wm_softc *sc)
   6145 {
   6146 	int i;
   6147 
   6148 	for (i = 0; i < sc->sc_nqueues; i++) {
   6149 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6150 
   6151 #ifdef WM_EVENT_COUNTERS
   6152 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6153 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6154 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6155 #endif /* WM_EVENT_COUNTERS */
   6156 
   6157 		wm_free_rx_buffer(sc, rxq);
   6158 		wm_free_rx_descs(sc, rxq);
   6159 		if (rxq->rxq_lock)
   6160 			mutex_obj_free(rxq->rxq_lock);
   6161 	}
   6162 
   6163 	for (i = 0; i < sc->sc_nqueues; i++) {
   6164 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6165 		struct mbuf *m;
   6166 #ifdef WM_EVENT_COUNTERS
   6167 		int j;
   6168 
   6169 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6170 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6171 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6172 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6173 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6174 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6175 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6176 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6177 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6178 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6179 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6180 
   6181 		for (j = 0; j < WM_NTXSEGS; j++)
   6182 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6183 
   6184 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6185 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6186 #endif /* WM_EVENT_COUNTERS */
   6187 
   6188 		/* drain txq_interq */
   6189 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6190 			m_freem(m);
   6191 		pcq_destroy(txq->txq_interq);
   6192 
   6193 		wm_free_tx_buffer(sc, txq);
   6194 		wm_free_tx_descs(sc, txq);
   6195 		if (txq->txq_lock)
   6196 			mutex_obj_free(txq->txq_lock);
   6197 	}
   6198 
   6199 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6200 }
   6201 
   6202 static void
   6203 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6204 {
   6205 
   6206 	KASSERT(mutex_owned(txq->txq_lock));
   6207 
   6208 	/* Initialize the transmit descriptor ring. */
   6209 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6210 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6211 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6212 	txq->txq_free = WM_NTXDESC(txq);
   6213 	txq->txq_next = 0;
   6214 }
   6215 
   6216 static void
   6217 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6218     struct wm_txqueue *txq)
   6219 {
   6220 
   6221 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6222 		device_xname(sc->sc_dev), __func__));
   6223 	KASSERT(mutex_owned(txq->txq_lock));
   6224 
   6225 	if (sc->sc_type < WM_T_82543) {
   6226 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6227 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6228 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6229 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6230 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6231 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6232 	} else {
   6233 		int qid = wmq->wmq_id;
   6234 
   6235 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6236 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6237 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6238 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6239 
   6240 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6241 			/*
   6242 			 * Don't write TDT before TCTL.EN is set.
    6243 			 * See the documentation.
   6244 			 */
   6245 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6246 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6247 			    | TXDCTL_WTHRESH(0));
   6248 		else {
   6249 			/* XXX should update with AIM? */
   6250 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6251 			if (sc->sc_type >= WM_T_82540) {
				/* Should be the same value as TIDV. */
   6253 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6254 			}
   6255 
   6256 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6257 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6258 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6259 		}
   6260 	}
   6261 }
   6262 
   6263 static void
   6264 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6265 {
   6266 	int i;
   6267 
   6268 	KASSERT(mutex_owned(txq->txq_lock));
   6269 
   6270 	/* Initialize the transmit job descriptors. */
   6271 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6272 		txq->txq_soft[i].txs_mbuf = NULL;
   6273 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6274 	txq->txq_snext = 0;
   6275 	txq->txq_sdirty = 0;
   6276 }
   6277 
   6278 static void
   6279 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6280     struct wm_txqueue *txq)
   6281 {
   6282 
   6283 	KASSERT(mutex_owned(txq->txq_lock));
   6284 
   6285 	/*
   6286 	 * Set up some register offsets that are different between
   6287 	 * the i82542 and the i82543 and later chips.
   6288 	 */
   6289 	if (sc->sc_type < WM_T_82543)
   6290 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6291 	else
   6292 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6293 
   6294 	wm_init_tx_descs(sc, txq);
   6295 	wm_init_tx_regs(sc, wmq, txq);
   6296 	wm_init_tx_buffer(sc, txq);
   6297 }
   6298 
   6299 static void
   6300 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6301     struct wm_rxqueue *rxq)
   6302 {
   6303 
   6304 	KASSERT(mutex_owned(rxq->rxq_lock));
   6305 
   6306 	/*
   6307 	 * Initialize the receive descriptor and receive job
   6308 	 * descriptor rings.
   6309 	 */
   6310 	if (sc->sc_type < WM_T_82543) {
   6311 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6312 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6313 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6314 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6315 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6316 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6317 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6318 
   6319 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6320 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6321 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6322 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6323 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6324 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6325 	} else {
   6326 		int qid = wmq->wmq_id;
   6327 
   6328 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6329 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
		CSR_WRITE(sc, WMREG_RDLEN(qid),
		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6331 
   6332 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for 82575 "
				    "or higher\n", __func__, MCLBYTES);

			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
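			/*
			 * A sketch of the arithmetic, assuming
			 * SRRCTL_BSIZEPKT_SHIFT == 10 (1KB units):
			 * MCLBYTES == 2048 passes the multiple-of-1KB
			 * check above and programs a 2KB packet buffer
			 * size.
			 */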
   6339 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6340 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6341 			    | RXDCTL_WTHRESH(1));
   6342 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6343 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6344 		} else {
   6345 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6346 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6347 			/* XXX should update with AIM? */
   6348 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6349 			/* MUST be same */
   6350 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6351 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6352 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6353 		}
   6354 	}
   6355 }
   6356 
   6357 static int
   6358 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6359 {
   6360 	struct wm_rxsoft *rxs;
   6361 	int error, i;
   6362 
   6363 	KASSERT(mutex_owned(rxq->rxq_lock));
   6364 
   6365 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6366 		rxs = &rxq->rxq_soft[i];
   6367 		if (rxs->rxs_mbuf == NULL) {
   6368 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6369 				log(LOG_ERR, "%s: unable to allocate or map "
   6370 				    "rx buffer %d, error = %d\n",
   6371 				    device_xname(sc->sc_dev), i, error);
   6372 				/*
   6373 				 * XXX Should attempt to run with fewer receive
   6374 				 * XXX buffers instead of just failing.
   6375 				 */
   6376 				wm_rxdrain(rxq);
   6377 				return ENOMEM;
   6378 			}
   6379 		} else {
   6380 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6381 				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
			 */
   6387 		}
   6388 	}
   6389 	rxq->rxq_ptr = 0;
   6390 	rxq->rxq_discard = 0;
   6391 	WM_RXCHAIN_RESET(rxq);
   6392 
   6393 	return 0;
   6394 }
   6395 
   6396 static int
   6397 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6398     struct wm_rxqueue *rxq)
   6399 {
   6400 
   6401 	KASSERT(mutex_owned(rxq->rxq_lock));
   6402 
   6403 	/*
   6404 	 * Set up some register offsets that are different between
   6405 	 * the i82542 and the i82543 and later chips.
   6406 	 */
   6407 	if (sc->sc_type < WM_T_82543)
   6408 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6409 	else
   6410 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6411 
   6412 	wm_init_rx_regs(sc, wmq, rxq);
   6413 	return wm_init_rx_buffer(sc, rxq);
   6414 }
   6415 
/*
 * wm_init_txrx_queues:
 *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
 */
   6420 static int
   6421 wm_init_txrx_queues(struct wm_softc *sc)
   6422 {
   6423 	int i, error = 0;
   6424 
   6425 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6426 		device_xname(sc->sc_dev), __func__));
   6427 
   6428 	for (i = 0; i < sc->sc_nqueues; i++) {
   6429 		struct wm_queue *wmq = &sc->sc_queue[i];
   6430 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6431 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6432 
		/*
		 * TODO
		 * Currently, we use a constant ITR value instead of AIM
		 * (adaptive interrupt moderation). Furthermore, the
		 * interrupt interval used by multiqueue (which uses
		 * polling mode) is shorter than the default value.
		 * More tuning, and AIM, are required.
		 */
   6440 		if (wm_is_using_multiqueue(sc))
   6441 			wmq->wmq_itr = 50;
   6442 		else
   6443 			wmq->wmq_itr = sc->sc_itr_init;
   6444 		wmq->wmq_set_itr = true;
   6445 
   6446 		mutex_enter(txq->txq_lock);
   6447 		wm_init_tx_queue(sc, wmq, txq);
   6448 		mutex_exit(txq->txq_lock);
   6449 
   6450 		mutex_enter(rxq->rxq_lock);
   6451 		error = wm_init_rx_queue(sc, wmq, rxq);
   6452 		mutex_exit(rxq->rxq_lock);
   6453 		if (error)
   6454 			break;
   6455 	}
   6456 
   6457 	return error;
   6458 }
   6459 
   6460 /*
   6461  * wm_tx_offload:
   6462  *
   6463  *	Set up TCP/IP checksumming parameters for the
   6464  *	specified packet.
   6465  */
   6466 static int
   6467 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6468     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6469 {
   6470 	struct mbuf *m0 = txs->txs_mbuf;
   6471 	struct livengood_tcpip_ctxdesc *t;
   6472 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6473 	uint32_t ipcse;
   6474 	struct ether_header *eh;
   6475 	int offset, iphl;
   6476 	uint8_t fields;
   6477 
   6478 	/*
   6479 	 * XXX It would be nice if the mbuf pkthdr had offset
   6480 	 * fields for the protocol headers.
   6481 	 */
   6482 
   6483 	eh = mtod(m0, struct ether_header *);
   6484 	switch (htons(eh->ether_type)) {
   6485 	case ETHERTYPE_IP:
   6486 	case ETHERTYPE_IPV6:
   6487 		offset = ETHER_HDR_LEN;
   6488 		break;
   6489 
   6490 	case ETHERTYPE_VLAN:
   6491 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6492 		break;
   6493 
   6494 	default:
   6495 		/*
   6496 		 * Don't support this protocol or encapsulation.
   6497 		 */
   6498 		*fieldsp = 0;
   6499 		*cmdp = 0;
   6500 		return 0;
   6501 	}
   6502 
   6503 	if ((m0->m_pkthdr.csum_flags &
   6504 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6505 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6506 	} else {
   6507 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6508 	}
   6509 	ipcse = offset + iphl - 1;
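	/*
	 * IPCSE as computed here points at the last byte of the IP
	 * header; the hardware's ending offset is inclusive.
	 */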
   6510 
   6511 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6512 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6513 	seg = 0;
   6514 	fields = 0;
   6515 
   6516 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6517 		int hlen = offset + iphl;
   6518 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6519 
   6520 		if (__predict_false(m0->m_len <
   6521 				    (hlen + sizeof(struct tcphdr)))) {
   6522 			/*
   6523 			 * TCP/IP headers are not in the first mbuf; we need
   6524 			 * to do this the slow and painful way.  Let's just
   6525 			 * hope this doesn't happen very often.
   6526 			 */
   6527 			struct tcphdr th;
   6528 
   6529 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6530 
   6531 			m_copydata(m0, hlen, sizeof(th), &th);
   6532 			if (v4) {
   6533 				struct ip ip;
   6534 
   6535 				m_copydata(m0, offset, sizeof(ip), &ip);
   6536 				ip.ip_len = 0;
   6537 				m_copyback(m0,
   6538 				    offset + offsetof(struct ip, ip_len),
   6539 				    sizeof(ip.ip_len), &ip.ip_len);
   6540 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6541 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6542 			} else {
   6543 				struct ip6_hdr ip6;
   6544 
   6545 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6546 				ip6.ip6_plen = 0;
   6547 				m_copyback(m0,
   6548 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6549 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6550 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6551 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6552 			}
   6553 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6554 			    sizeof(th.th_sum), &th.th_sum);
   6555 
   6556 			hlen += th.th_off << 2;
   6557 		} else {
   6558 			/*
   6559 			 * TCP/IP headers are in the first mbuf; we can do
   6560 			 * this the easy way.
   6561 			 */
   6562 			struct tcphdr *th;
   6563 
   6564 			if (v4) {
   6565 				struct ip *ip =
   6566 				    (void *)(mtod(m0, char *) + offset);
   6567 				th = (void *)(mtod(m0, char *) + hlen);
   6568 
   6569 				ip->ip_len = 0;
   6570 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6571 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6572 			} else {
   6573 				struct ip6_hdr *ip6 =
   6574 				    (void *)(mtod(m0, char *) + offset);
   6575 				th = (void *)(mtod(m0, char *) + hlen);
   6576 
   6577 				ip6->ip6_plen = 0;
   6578 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6579 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6580 			}
   6581 			hlen += th->th_off << 2;
   6582 		}
   6583 
   6584 		if (v4) {
   6585 			WM_Q_EVCNT_INCR(txq, txtso);
   6586 			cmdlen |= WTX_TCPIP_CMD_IP;
   6587 		} else {
   6588 			WM_Q_EVCNT_INCR(txq, txtso6);
   6589 			ipcse = 0;
   6590 		}
   6591 		cmd |= WTX_TCPIP_CMD_TSE;
   6592 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6593 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6594 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6595 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6596 	}
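	/*
	 * For instance (hypothetical frame), a TSO send with 14-byte
	 * Ethernet, 20-byte IPv4 and 20-byte TCP headers yields
	 * WTX_TCPIP_SEG_HDRLEN(54), with the MSS taken from segsz.
	 */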
   6597 
   6598 	/*
   6599 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6600 	 * offload feature, if we load the context descriptor, we
   6601 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6602 	 */
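	/*
	 * For a plain (untagged) IPv4 frame, for example, IPCSS is 14,
	 * the end of the Ethernet header, and TUCSS below is 14 plus
	 * the IP header length.
	 */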
   6603 
   6604 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6605 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6606 	    WTX_TCPIP_IPCSE(ipcse);
   6607 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6608 		WM_Q_EVCNT_INCR(txq, txipsum);
   6609 		fields |= WTX_IXSM;
   6610 	}
   6611 
   6612 	offset += iphl;
   6613 
   6614 	if (m0->m_pkthdr.csum_flags &
   6615 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6616 		WM_Q_EVCNT_INCR(txq, txtusum);
   6617 		fields |= WTX_TXSM;
   6618 		tucs = WTX_TCPIP_TUCSS(offset) |
   6619 		    WTX_TCPIP_TUCSO(offset +
   6620 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6621 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6622 	} else if ((m0->m_pkthdr.csum_flags &
   6623 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6624 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6625 		fields |= WTX_TXSM;
   6626 		tucs = WTX_TCPIP_TUCSS(offset) |
   6627 		    WTX_TCPIP_TUCSO(offset +
   6628 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6629 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6630 	} else {
   6631 		/* Just initialize it to a valid TCP context. */
   6632 		tucs = WTX_TCPIP_TUCSS(offset) |
   6633 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6634 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6635 	}
   6636 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: it requires a context descriptor for
	 * every packet when two descriptor queues are used. Writing a
	 * context descriptor for every packet adds overhead, but it
	 * does not cause problems.
	 */
   6644 	/* Fill in the context descriptor. */
   6645 	t = (struct livengood_tcpip_ctxdesc *)
   6646 	    &txq->txq_descs[txq->txq_next];
   6647 	t->tcpip_ipcs = htole32(ipcs);
   6648 	t->tcpip_tucs = htole32(tucs);
   6649 	t->tcpip_cmdlen = htole32(cmdlen);
   6650 	t->tcpip_seg = htole32(seg);
   6651 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6652 
   6653 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6654 	txs->txs_ndesc++;
   6655 
   6656 	*cmdp = cmd;
   6657 	*fieldsp = fields;
   6658 
   6659 	return 0;
   6660 }
   6661 
   6662 static inline int
   6663 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6664 {
   6665 	struct wm_softc *sc = ifp->if_softc;
   6666 	u_int cpuid = cpu_index(curcpu());
   6667 
	/*
	 * Currently, a simple CPU-based distribution strategy.
	 * TODO: distribute by flow ID (the RSS hash value).
	 */
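	/*
	 * Example with hypothetical values: with ncpu == 8,
	 * sc_affinity_offset == 2 and sc_nqueues == 4, a packet sent
	 * from CPU 1 maps to Tx queue (1 + 8 - 2) % 4 == 3.
	 */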
	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6674 }
   6675 
   6676 /*
   6677  * wm_start:		[ifnet interface function]
   6678  *
   6679  *	Start packet transmission on the interface.
   6680  */
   6681 static void
   6682 wm_start(struct ifnet *ifp)
   6683 {
   6684 	struct wm_softc *sc = ifp->if_softc;
   6685 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6686 
   6687 #ifdef WM_MPSAFE
   6688 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6689 #endif
   6690 	/*
   6691 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6692 	 */
   6693 
   6694 	mutex_enter(txq->txq_lock);
   6695 	if (!txq->txq_stopping)
   6696 		wm_start_locked(ifp);
   6697 	mutex_exit(txq->txq_lock);
   6698 }
   6699 
   6700 static void
   6701 wm_start_locked(struct ifnet *ifp)
   6702 {
   6703 	struct wm_softc *sc = ifp->if_softc;
   6704 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6705 
   6706 	wm_send_common_locked(ifp, txq, false);
   6707 }
   6708 
   6709 static int
   6710 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6711 {
   6712 	int qid;
   6713 	struct wm_softc *sc = ifp->if_softc;
   6714 	struct wm_txqueue *txq;
   6715 
   6716 	qid = wm_select_txqueue(ifp, m);
   6717 	txq = &sc->sc_queue[qid].wmq_txq;
   6718 
   6719 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6720 		m_freem(m);
   6721 		WM_Q_EVCNT_INCR(txq, txdrop);
   6722 		return ENOBUFS;
   6723 	}
   6724 
   6725 	/*
   6726 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6727 	 */
   6728 	ifp->if_obytes += m->m_pkthdr.len;
   6729 	if (m->m_flags & M_MCAST)
   6730 		ifp->if_omcasts++;
   6731 
   6732 	if (mutex_tryenter(txq->txq_lock)) {
   6733 		if (!txq->txq_stopping)
   6734 			wm_transmit_locked(ifp, txq);
   6735 		mutex_exit(txq->txq_lock);
   6736 	}
   6737 
   6738 	return 0;
   6739 }
   6740 
   6741 static void
   6742 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6743 {
   6744 
   6745 	wm_send_common_locked(ifp, txq, true);
   6746 }
   6747 
   6748 static void
   6749 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6750     bool is_transmit)
   6751 {
   6752 	struct wm_softc *sc = ifp->if_softc;
   6753 	struct mbuf *m0;
   6754 	struct m_tag *mtag;
   6755 	struct wm_txsoft *txs;
   6756 	bus_dmamap_t dmamap;
   6757 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6758 	bus_addr_t curaddr;
   6759 	bus_size_t seglen, curlen;
   6760 	uint32_t cksumcmd;
   6761 	uint8_t cksumfields;
   6762 
   6763 	KASSERT(mutex_owned(txq->txq_lock));
   6764 
   6765 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6766 		return;
   6767 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6768 		return;
   6769 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6770 		return;
   6771 
   6772 	/* Remember the previous number of free descriptors. */
   6773 	ofree = txq->txq_free;
   6774 
   6775 	/*
   6776 	 * Loop through the send queue, setting up transmit descriptors
   6777 	 * until we drain the queue, or use up all available transmit
   6778 	 * descriptors.
   6779 	 */
   6780 	for (;;) {
   6781 		m0 = NULL;
   6782 
   6783 		/* Get a work queue entry. */
   6784 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6785 			wm_txeof(sc, txq);
   6786 			if (txq->txq_sfree == 0) {
   6787 				DPRINTF(WM_DEBUG_TX,
   6788 				    ("%s: TX: no free job descriptors\n",
   6789 					device_xname(sc->sc_dev)));
   6790 				WM_Q_EVCNT_INCR(txq, txsstall);
   6791 				break;
   6792 			}
   6793 		}
   6794 
   6795 		/* Grab a packet off the queue. */
   6796 		if (is_transmit)
   6797 			m0 = pcq_get(txq->txq_interq);
   6798 		else
   6799 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6800 		if (m0 == NULL)
   6801 			break;
   6802 
   6803 		DPRINTF(WM_DEBUG_TX,
   6804 		    ("%s: TX: have packet to transmit: %p\n",
   6805 		    device_xname(sc->sc_dev), m0));
   6806 
   6807 		txs = &txq->txq_soft[txq->txq_snext];
   6808 		dmamap = txs->txs_dmamap;
   6809 
   6810 		use_tso = (m0->m_pkthdr.csum_flags &
   6811 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6812 
   6813 		/*
   6814 		 * So says the Linux driver:
   6815 		 * The controller does a simple calculation to make sure
   6816 		 * there is enough room in the FIFO before initiating the
   6817 		 * DMA for each buffer.  The calc is:
   6818 		 *	4 = ceil(buffer len / MSS)
   6819 		 * To make sure we don't overrun the FIFO, adjust the max
   6820 		 * buffer len if the MSS drops.
   6821 		 */
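		/*
		 * For example (hypothetical MSS): if segsz == 536,
		 * dm_maxsegsz below becomes 536 << 2 == 2144 bytes,
		 * assuming that is below WTX_MAX_LEN.
		 */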
   6822 		dmamap->dm_maxsegsz =
   6823 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6824 		    ? m0->m_pkthdr.segsz << 2
   6825 		    : WTX_MAX_LEN;
   6826 
   6827 		/*
   6828 		 * Load the DMA map.  If this fails, the packet either
   6829 		 * didn't fit in the allotted number of segments, or we
   6830 		 * were short on resources.  For the too-many-segments
   6831 		 * case, we simply report an error and drop the packet,
   6832 		 * since we can't sanely copy a jumbo packet to a single
   6833 		 * buffer.
   6834 		 */
   6835 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6836 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6837 		if (error) {
   6838 			if (error == EFBIG) {
   6839 				WM_Q_EVCNT_INCR(txq, txdrop);
   6840 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6841 				    "DMA segments, dropping...\n",
   6842 				    device_xname(sc->sc_dev));
   6843 				wm_dump_mbuf_chain(sc, m0);
   6844 				m_freem(m0);
   6845 				continue;
   6846 			}
			/* Short on resources, just stop for now. */
   6848 			DPRINTF(WM_DEBUG_TX,
   6849 			    ("%s: TX: dmamap load failed: %d\n",
   6850 			    device_xname(sc->sc_dev), error));
   6851 			break;
   6852 		}
   6853 
   6854 		segs_needed = dmamap->dm_nsegs;
   6855 		if (use_tso) {
   6856 			/* For sentinel descriptor; see below. */
   6857 			segs_needed++;
   6858 		}
   6859 
   6860 		/*
   6861 		 * Ensure we have enough descriptors free to describe
   6862 		 * the packet.  Note, we always reserve one descriptor
   6863 		 * at the end of the ring due to the semantics of the
   6864 		 * TDT register, plus one more in the event we need
   6865 		 * to load offload context.
   6866 		 */
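		/*
		 * That is, we commit a packet only when at least
		 * segs_needed + 2 descriptors are free: segs_needed for
		 * the data, one kept free because of TDT, and one spare
		 * for a possible offload context descriptor.
		 */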
   6867 		if (segs_needed > txq->txq_free - 2) {
   6868 			/*
   6869 			 * Not enough free descriptors to transmit this
   6870 			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6873 			 * layer that there are no more slots left.
   6874 			 */
   6875 			DPRINTF(WM_DEBUG_TX,
   6876 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6877 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6878 			    segs_needed, txq->txq_free - 1));
   6879 			if (!is_transmit)
   6880 				ifp->if_flags |= IFF_OACTIVE;
   6881 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6882 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6883 			WM_Q_EVCNT_INCR(txq, txdstall);
   6884 			break;
   6885 		}
   6886 
   6887 		/*
   6888 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6889 		 * once we know we can transmit the packet, since we
   6890 		 * do some internal FIFO space accounting here.
   6891 		 */
   6892 		if (sc->sc_type == WM_T_82547 &&
   6893 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6894 			DPRINTF(WM_DEBUG_TX,
   6895 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6896 			    device_xname(sc->sc_dev)));
   6897 			if (!is_transmit)
   6898 				ifp->if_flags |= IFF_OACTIVE;
   6899 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6900 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6901 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6902 			break;
   6903 		}
   6904 
   6905 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6906 
   6907 		DPRINTF(WM_DEBUG_TX,
   6908 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6909 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6910 
   6911 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6912 
   6913 		/*
   6914 		 * Store a pointer to the packet so that we can free it
   6915 		 * later.
   6916 		 *
   6917 		 * Initially, we consider the number of descriptors the
   6918 		 * packet uses the number of DMA segments.  This may be
   6919 		 * incremented by 1 if we do checksum offload (a descriptor
   6920 		 * is used to set the checksum context).
   6921 		 */
   6922 		txs->txs_mbuf = m0;
   6923 		txs->txs_firstdesc = txq->txq_next;
   6924 		txs->txs_ndesc = segs_needed;
   6925 
   6926 		/* Set up offload parameters for this packet. */
   6927 		if (m0->m_pkthdr.csum_flags &
   6928 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6929 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6930 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6931 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6932 					  &cksumfields) != 0) {
   6933 				/* Error message already displayed. */
   6934 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6935 				continue;
   6936 			}
   6937 		} else {
   6938 			cksumcmd = 0;
   6939 			cksumfields = 0;
   6940 		}
   6941 
   6942 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6943 
   6944 		/* Sync the DMA map. */
   6945 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6946 		    BUS_DMASYNC_PREWRITE);
   6947 
   6948 		/* Initialize the transmit descriptor. */
   6949 		for (nexttx = txq->txq_next, seg = 0;
   6950 		     seg < dmamap->dm_nsegs; seg++) {
   6951 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6952 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6953 			     seglen != 0;
   6954 			     curaddr += curlen, seglen -= curlen,
   6955 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6956 				curlen = seglen;
   6957 
   6958 				/*
   6959 				 * So says the Linux driver:
   6960 				 * Work around for premature descriptor
   6961 				 * write-backs in TSO mode.  Append a
   6962 				 * 4-byte sentinel descriptor.
   6963 				 */
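				/*
				 * E.g. a final 1000-byte segment is
				 * emitted as a 996-byte descriptor; the
				 * remaining 4 bytes then form the extra
				 * (sentinel) descriptor reserved via
				 * segs_needed++ above.
				 */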
   6964 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6965 				    curlen > 8)
   6966 					curlen -= 4;
   6967 
   6968 				wm_set_dma_addr(
   6969 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6970 				txq->txq_descs[nexttx].wtx_cmdlen
   6971 				    = htole32(cksumcmd | curlen);
   6972 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6973 				    = 0;
   6974 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6975 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   6977 				lasttx = nexttx;
   6978 
   6979 				DPRINTF(WM_DEBUG_TX,
   6980 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6981 				     "len %#04zx\n",
   6982 				    device_xname(sc->sc_dev), nexttx,
   6983 				    (uint64_t)curaddr, curlen));
   6984 			}
   6985 		}
   6986 
   6987 		KASSERT(lasttx != -1);
   6988 
   6989 		/*
   6990 		 * Set up the command byte on the last descriptor of
   6991 		 * the packet.  If we're in the interrupt delay window,
   6992 		 * delay the interrupt.
   6993 		 */
   6994 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6995 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6996 
   6997 		/*
   6998 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6999 		 * up the descriptor to encapsulate the packet for us.
   7000 		 *
   7001 		 * This is only valid on the last descriptor of the packet.
   7002 		 */
   7003 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7004 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7005 			    htole32(WTX_CMD_VLE);
   7006 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7007 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7008 		}
   7009 
   7010 		txs->txs_lastdesc = lasttx;
   7011 
   7012 		DPRINTF(WM_DEBUG_TX,
   7013 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7014 		    device_xname(sc->sc_dev),
   7015 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7016 
   7017 		/* Sync the descriptors we're using. */
   7018 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7019 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7020 
   7021 		/* Give the packet to the chip. */
   7022 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7023 
   7024 		DPRINTF(WM_DEBUG_TX,
   7025 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7026 
   7027 		DPRINTF(WM_DEBUG_TX,
   7028 		    ("%s: TX: finished transmitting packet, job %d\n",
   7029 		    device_xname(sc->sc_dev), txq->txq_snext));
   7030 
   7031 		/* Advance the tx pointer. */
   7032 		txq->txq_free -= txs->txs_ndesc;
   7033 		txq->txq_next = nexttx;
   7034 
   7035 		txq->txq_sfree--;
   7036 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7037 
   7038 		/* Pass the packet to any BPF listeners. */
   7039 		bpf_mtap(ifp, m0);
   7040 	}
   7041 
   7042 	if (m0 != NULL) {
   7043 		if (!is_transmit)
   7044 			ifp->if_flags |= IFF_OACTIVE;
   7045 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7046 		WM_Q_EVCNT_INCR(txq, txdrop);
   7047 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7048 			__func__));
   7049 		m_freem(m0);
   7050 	}
   7051 
   7052 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7053 		/* No more slots; notify upper layer. */
   7054 		if (!is_transmit)
   7055 			ifp->if_flags |= IFF_OACTIVE;
   7056 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7057 	}
   7058 
   7059 	if (txq->txq_free != ofree) {
   7060 		/* Set a watchdog timer in case the chip flakes out. */
   7061 		ifp->if_timer = 5;
   7062 	}
   7063 }
   7064 
   7065 /*
   7066  * wm_nq_tx_offload:
   7067  *
   7068  *	Set up TCP/IP checksumming parameters for the
   7069  *	specified packet, for NEWQUEUE devices
   7070  */
   7071 static int
   7072 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7073     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7074 {
   7075 	struct mbuf *m0 = txs->txs_mbuf;
   7076 	struct m_tag *mtag;
   7077 	uint32_t vl_len, mssidx, cmdc;
   7078 	struct ether_header *eh;
   7079 	int offset, iphl;
   7080 
   7081 	/*
   7082 	 * XXX It would be nice if the mbuf pkthdr had offset
   7083 	 * fields for the protocol headers.
   7084 	 */
   7085 	*cmdlenp = 0;
   7086 	*fieldsp = 0;
   7087 
   7088 	eh = mtod(m0, struct ether_header *);
   7089 	switch (htons(eh->ether_type)) {
   7090 	case ETHERTYPE_IP:
   7091 	case ETHERTYPE_IPV6:
   7092 		offset = ETHER_HDR_LEN;
   7093 		break;
   7094 
   7095 	case ETHERTYPE_VLAN:
   7096 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7097 		break;
   7098 
   7099 	default:
   7100 		/* Don't support this protocol or encapsulation. */
   7101 		*do_csum = false;
   7102 		return 0;
   7103 	}
   7104 	*do_csum = true;
   7105 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7106 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7107 
   7108 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7109 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7110 
   7111 	if ((m0->m_pkthdr.csum_flags &
   7112 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7113 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7114 	} else {
   7115 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7116 	}
   7117 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7118 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7119 
   7120 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7121 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7122 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7123 		*cmdlenp |= NQTX_CMD_VLE;
   7124 	}
   7125 
   7126 	mssidx = 0;
   7127 
   7128 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7129 		int hlen = offset + iphl;
   7130 		int tcp_hlen;
   7131 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7132 
   7133 		if (__predict_false(m0->m_len <
   7134 				    (hlen + sizeof(struct tcphdr)))) {
   7135 			/*
   7136 			 * TCP/IP headers are not in the first mbuf; we need
   7137 			 * to do this the slow and painful way.  Let's just
   7138 			 * hope this doesn't happen very often.
   7139 			 */
   7140 			struct tcphdr th;
   7141 
   7142 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7143 
   7144 			m_copydata(m0, hlen, sizeof(th), &th);
   7145 			if (v4) {
   7146 				struct ip ip;
   7147 
   7148 				m_copydata(m0, offset, sizeof(ip), &ip);
   7149 				ip.ip_len = 0;
   7150 				m_copyback(m0,
   7151 				    offset + offsetof(struct ip, ip_len),
   7152 				    sizeof(ip.ip_len), &ip.ip_len);
   7153 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7154 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7155 			} else {
   7156 				struct ip6_hdr ip6;
   7157 
   7158 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7159 				ip6.ip6_plen = 0;
   7160 				m_copyback(m0,
   7161 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7162 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7163 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7164 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7165 			}
   7166 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7167 			    sizeof(th.th_sum), &th.th_sum);
   7168 
   7169 			tcp_hlen = th.th_off << 2;
   7170 		} else {
   7171 			/*
   7172 			 * TCP/IP headers are in the first mbuf; we can do
   7173 			 * this the easy way.
   7174 			 */
   7175 			struct tcphdr *th;
   7176 
   7177 			if (v4) {
   7178 				struct ip *ip =
   7179 				    (void *)(mtod(m0, char *) + offset);
   7180 				th = (void *)(mtod(m0, char *) + hlen);
   7181 
   7182 				ip->ip_len = 0;
   7183 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7184 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7185 			} else {
   7186 				struct ip6_hdr *ip6 =
   7187 				    (void *)(mtod(m0, char *) + offset);
   7188 				th = (void *)(mtod(m0, char *) + hlen);
   7189 
   7190 				ip6->ip6_plen = 0;
   7191 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7192 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7193 			}
   7194 			tcp_hlen = th->th_off << 2;
   7195 		}
   7196 		hlen += tcp_hlen;
   7197 		*cmdlenp |= NQTX_CMD_TSE;
   7198 
   7199 		if (v4) {
   7200 			WM_Q_EVCNT_INCR(txq, txtso);
   7201 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7202 		} else {
   7203 			WM_Q_EVCNT_INCR(txq, txtso6);
   7204 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7205 		}
   7206 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7207 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7208 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7209 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7210 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7211 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7212 	} else {
   7213 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7214 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7215 	}
   7216 
   7217 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7218 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7219 		cmdc |= NQTXC_CMD_IP4;
   7220 	}
   7221 
   7222 	if (m0->m_pkthdr.csum_flags &
   7223 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7224 		WM_Q_EVCNT_INCR(txq, txtusum);
   7225 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7226 			cmdc |= NQTXC_CMD_TCP;
   7227 		} else {
   7228 			cmdc |= NQTXC_CMD_UDP;
   7229 		}
   7230 		cmdc |= NQTXC_CMD_IP4;
   7231 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7232 	}
   7233 	if (m0->m_pkthdr.csum_flags &
   7234 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7235 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7236 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7237 			cmdc |= NQTXC_CMD_TCP;
   7238 		} else {
   7239 			cmdc |= NQTXC_CMD_UDP;
   7240 		}
   7241 		cmdc |= NQTXC_CMD_IP6;
   7242 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7243 	}
   7244 
	/*
	 * On NEWQUEUE controllers (82575, 82576, 82580, I350, I354,
	 * I210 and I211), we don't have to write a context descriptor
	 * for every packet; writing one per Tx queue is enough.
	 * Writing a context descriptor for every packet adds overhead,
	 * but it does not cause problems.
	 */
   7253 	/* Fill in the context descriptor. */
   7254 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7255 	    htole32(vl_len);
   7256 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7257 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7258 	    htole32(cmdc);
   7259 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7260 	    htole32(mssidx);
   7261 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7262 	DPRINTF(WM_DEBUG_TX,
   7263 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7264 	    txq->txq_next, 0, vl_len));
   7265 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7266 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7267 	txs->txs_ndesc++;
   7268 	return 0;
   7269 }
   7270 
   7271 /*
   7272  * wm_nq_start:		[ifnet interface function]
   7273  *
   7274  *	Start packet transmission on the interface for NEWQUEUE devices
   7275  */
   7276 static void
   7277 wm_nq_start(struct ifnet *ifp)
   7278 {
   7279 	struct wm_softc *sc = ifp->if_softc;
   7280 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7281 
   7282 #ifdef WM_MPSAFE
   7283 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7284 #endif
   7285 	/*
   7286 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7287 	 */
   7288 
   7289 	mutex_enter(txq->txq_lock);
   7290 	if (!txq->txq_stopping)
   7291 		wm_nq_start_locked(ifp);
   7292 	mutex_exit(txq->txq_lock);
   7293 }
   7294 
   7295 static void
   7296 wm_nq_start_locked(struct ifnet *ifp)
   7297 {
   7298 	struct wm_softc *sc = ifp->if_softc;
   7299 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7300 
   7301 	wm_nq_send_common_locked(ifp, txq, false);
   7302 }
   7303 
   7304 static int
   7305 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7306 {
   7307 	int qid;
   7308 	struct wm_softc *sc = ifp->if_softc;
   7309 	struct wm_txqueue *txq;
   7310 
   7311 	qid = wm_select_txqueue(ifp, m);
   7312 	txq = &sc->sc_queue[qid].wmq_txq;
   7313 
   7314 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7315 		m_freem(m);
   7316 		WM_Q_EVCNT_INCR(txq, txdrop);
   7317 		return ENOBUFS;
   7318 	}
   7319 
   7320 	/*
   7321 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7322 	 */
   7323 	ifp->if_obytes += m->m_pkthdr.len;
   7324 	if (m->m_flags & M_MCAST)
   7325 		ifp->if_omcasts++;
   7326 
	/*
	 * This mutex_tryenter() can fail at run time in two situations:
	 *     (1) contention with the interrupt handler
	 *         (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In both cases, the last packet enqueued to txq->txq_interq is
	 * eventually dequeued by wm_deferred_start_locked(), so it does
	 * not get stuck.
	 */
   7337 	if (mutex_tryenter(txq->txq_lock)) {
   7338 		if (!txq->txq_stopping)
   7339 			wm_nq_transmit_locked(ifp, txq);
   7340 		mutex_exit(txq->txq_lock);
   7341 	}
   7342 
   7343 	return 0;
   7344 }
   7345 
   7346 static void
   7347 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7348 {
   7349 
   7350 	wm_nq_send_common_locked(ifp, txq, true);
   7351 }
   7352 
   7353 static void
   7354 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7355     bool is_transmit)
   7356 {
   7357 	struct wm_softc *sc = ifp->if_softc;
   7358 	struct mbuf *m0;
   7359 	struct m_tag *mtag;
   7360 	struct wm_txsoft *txs;
   7361 	bus_dmamap_t dmamap;
   7362 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7363 	bool do_csum, sent;
   7364 
   7365 	KASSERT(mutex_owned(txq->txq_lock));
   7366 
   7367 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7368 		return;
   7369 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7370 		return;
   7371 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7372 		return;
   7373 
   7374 	sent = false;
   7375 
   7376 	/*
   7377 	 * Loop through the send queue, setting up transmit descriptors
   7378 	 * until we drain the queue, or use up all available transmit
   7379 	 * descriptors.
   7380 	 */
   7381 	for (;;) {
   7382 		m0 = NULL;
   7383 
   7384 		/* Get a work queue entry. */
   7385 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7386 			wm_txeof(sc, txq);
   7387 			if (txq->txq_sfree == 0) {
   7388 				DPRINTF(WM_DEBUG_TX,
   7389 				    ("%s: TX: no free job descriptors\n",
   7390 					device_xname(sc->sc_dev)));
   7391 				WM_Q_EVCNT_INCR(txq, txsstall);
   7392 				break;
   7393 			}
   7394 		}
   7395 
   7396 		/* Grab a packet off the queue. */
   7397 		if (is_transmit)
   7398 			m0 = pcq_get(txq->txq_interq);
   7399 		else
   7400 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7401 		if (m0 == NULL)
   7402 			break;
   7403 
   7404 		DPRINTF(WM_DEBUG_TX,
   7405 		    ("%s: TX: have packet to transmit: %p\n",
   7406 		    device_xname(sc->sc_dev), m0));
   7407 
   7408 		txs = &txq->txq_soft[txq->txq_snext];
   7409 		dmamap = txs->txs_dmamap;
   7410 
   7411 		/*
   7412 		 * Load the DMA map.  If this fails, the packet either
   7413 		 * didn't fit in the allotted number of segments, or we
   7414 		 * were short on resources.  For the too-many-segments
   7415 		 * case, we simply report an error and drop the packet,
   7416 		 * since we can't sanely copy a jumbo packet to a single
   7417 		 * buffer.
   7418 		 */
   7419 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7420 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7421 		if (error) {
   7422 			if (error == EFBIG) {
   7423 				WM_Q_EVCNT_INCR(txq, txdrop);
   7424 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7425 				    "DMA segments, dropping...\n",
   7426 				    device_xname(sc->sc_dev));
   7427 				wm_dump_mbuf_chain(sc, m0);
   7428 				m_freem(m0);
   7429 				continue;
   7430 			}
   7431 			/* Short on resources, just stop for now. */
   7432 			DPRINTF(WM_DEBUG_TX,
   7433 			    ("%s: TX: dmamap load failed: %d\n",
   7434 			    device_xname(sc->sc_dev), error));
   7435 			break;
   7436 		}
   7437 
   7438 		segs_needed = dmamap->dm_nsegs;
   7439 
   7440 		/*
   7441 		 * Ensure we have enough descriptors free to describe
   7442 		 * the packet.  Note, we always reserve one descriptor
   7443 		 * at the end of the ring due to the semantics of the
   7444 		 * TDT register, plus one more in the event we need
   7445 		 * to load offload context.
   7446 		 */
   7447 		if (segs_needed > txq->txq_free - 2) {
   7448 			/*
   7449 			 * Not enough free descriptors to transmit this
   7450 			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7453 			 * layer that there are no more slots left.
   7454 			 */
   7455 			DPRINTF(WM_DEBUG_TX,
   7456 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7457 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7458 			    segs_needed, txq->txq_free - 1));
   7459 			if (!is_transmit)
   7460 				ifp->if_flags |= IFF_OACTIVE;
   7461 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7462 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7463 			WM_Q_EVCNT_INCR(txq, txdstall);
   7464 			break;
   7465 		}
   7466 
   7467 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7468 
   7469 		DPRINTF(WM_DEBUG_TX,
   7470 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7471 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7472 
   7473 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7474 
   7475 		/*
   7476 		 * Store a pointer to the packet so that we can free it
   7477 		 * later.
   7478 		 *
   7479 		 * Initially, we consider the number of descriptors the
   7480 		 * packet uses the number of DMA segments.  This may be
   7481 		 * incremented by 1 if we do checksum offload (a descriptor
   7482 		 * is used to set the checksum context).
   7483 		 */
   7484 		txs->txs_mbuf = m0;
   7485 		txs->txs_firstdesc = txq->txq_next;
   7486 		txs->txs_ndesc = segs_needed;
   7487 
   7488 		/* Set up offload parameters for this packet. */
   7489 		uint32_t cmdlen, fields, dcmdlen;
   7490 		if (m0->m_pkthdr.csum_flags &
   7491 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7492 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7493 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7494 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7495 			    &do_csum) != 0) {
   7496 				/* Error message already displayed. */
   7497 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7498 				continue;
   7499 			}
   7500 		} else {
   7501 			do_csum = false;
   7502 			cmdlen = 0;
   7503 			fields = 0;
   7504 		}
   7505 
   7506 		/* Sync the DMA map. */
   7507 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7508 		    BUS_DMASYNC_PREWRITE);
   7509 
   7510 		/* Initialize the first transmit descriptor. */
   7511 		nexttx = txq->txq_next;
   7512 		if (!do_csum) {
			/* Set up a legacy descriptor. */
   7514 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7515 			    dmamap->dm_segs[0].ds_addr);
   7516 			txq->txq_descs[nexttx].wtx_cmdlen =
   7517 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7518 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7519 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7520 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7521 			    NULL) {
   7522 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7523 				    htole32(WTX_CMD_VLE);
   7524 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7525 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7526 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7528 			}
   7529 			dcmdlen = 0;
   7530 		} else {
			/* Set up an advanced data descriptor. */
   7532 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7533 			    htole64(dmamap->dm_segs[0].ds_addr);
   7534 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7535 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7537 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7538 			    htole32(fields);
   7539 			DPRINTF(WM_DEBUG_TX,
   7540 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7541 			    device_xname(sc->sc_dev), nexttx,
   7542 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7543 			DPRINTF(WM_DEBUG_TX,
   7544 			    ("\t 0x%08x%08x\n", fields,
   7545 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7546 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7547 		}
   7548 
   7549 		lasttx = nexttx;
   7550 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors. The legacy and advanced
		 * formats are the same here.
		 */
   7555 		for (seg = 1; seg < dmamap->dm_nsegs;
   7556 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7557 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7558 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7559 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7560 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7561 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7562 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7563 			lasttx = nexttx;
   7564 
   7565 			DPRINTF(WM_DEBUG_TX,
   7566 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7567 			     "len %#04zx\n",
   7568 			    device_xname(sc->sc_dev), nexttx,
   7569 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7570 			    dmamap->dm_segs[seg].ds_len));
   7571 		}
   7572 
   7573 		KASSERT(lasttx != -1);
   7574 
   7575 		/*
   7576 		 * Set up the command byte on the last descriptor of
   7577 		 * the packet.  If we're in the interrupt delay window,
   7578 		 * delay the interrupt.
   7579 		 */
   7580 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7581 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7582 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7583 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7584 
   7585 		txs->txs_lastdesc = lasttx;
   7586 
   7587 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7588 		    device_xname(sc->sc_dev),
   7589 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7590 
   7591 		/* Sync the descriptors we're using. */
   7592 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7593 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7594 
   7595 		/* Give the packet to the chip. */
   7596 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7597 		sent = true;
   7598 
   7599 		DPRINTF(WM_DEBUG_TX,
   7600 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7601 
   7602 		DPRINTF(WM_DEBUG_TX,
   7603 		    ("%s: TX: finished transmitting packet, job %d\n",
   7604 		    device_xname(sc->sc_dev), txq->txq_snext));
   7605 
   7606 		/* Advance the tx pointer. */
   7607 		txq->txq_free -= txs->txs_ndesc;
   7608 		txq->txq_next = nexttx;
   7609 
   7610 		txq->txq_sfree--;
   7611 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7612 
   7613 		/* Pass the packet to any BPF listeners. */
   7614 		bpf_mtap(ifp, m0);
   7615 	}
   7616 
   7617 	if (m0 != NULL) {
   7618 		if (!is_transmit)
   7619 			ifp->if_flags |= IFF_OACTIVE;
   7620 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7621 		WM_Q_EVCNT_INCR(txq, txdrop);
   7622 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7623 			__func__));
   7624 		m_freem(m0);
   7625 	}
   7626 
   7627 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7628 		/* No more slots; notify upper layer. */
   7629 		if (!is_transmit)
   7630 			ifp->if_flags |= IFF_OACTIVE;
   7631 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7632 	}
   7633 
   7634 	if (sent) {
   7635 		/* Set a watchdog timer in case the chip flakes out. */
   7636 		ifp->if_timer = 5;
   7637 	}
   7638 }
   7639 
   7640 static void
   7641 wm_deferred_start_locked(struct wm_txqueue *txq)
   7642 {
   7643 	struct wm_softc *sc = txq->txq_sc;
   7644 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7645 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7646 	int qid = wmq->wmq_id;
   7647 
   7648 	KASSERT(mutex_owned(txq->txq_lock));
   7649 
   7650 	if (txq->txq_stopping) {
   7651 		mutex_exit(txq->txq_lock);
   7652 		return;
   7653 	}
   7654 
   7655 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   7657 		if (qid == 0)
   7658 			wm_nq_start_locked(ifp);
   7659 		wm_nq_transmit_locked(ifp, txq);
   7660 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   7662 		if (qid == 0)
   7663 			wm_start_locked(ifp);
   7664 		wm_transmit_locked(ifp, txq);
   7665 	}
   7666 }
   7667 
   7668 /* Interrupt */
   7669 
   7670 /*
   7671  * wm_txeof:
   7672  *
   7673  *	Helper; handle transmit interrupts.
   7674  */
   7675 static int
   7676 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7677 {
   7678 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7679 	struct wm_txsoft *txs;
   7680 	bool processed = false;
   7681 	int count = 0;
   7682 	int i;
   7683 	uint8_t status;
   7684 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7685 
   7686 	KASSERT(mutex_owned(txq->txq_lock));
   7687 
   7688 	if (txq->txq_stopping)
   7689 		return 0;
   7690 
   7691 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   7693 	if (wmq->wmq_id == 0)
   7694 		ifp->if_flags &= ~IFF_OACTIVE;
   7695 
   7696 	/*
   7697 	 * Go through the Tx list and free mbufs for those
   7698 	 * frames which have been transmitted.
   7699 	 */
   7700 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7701 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7702 		txs = &txq->txq_soft[i];
   7703 
   7704 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7705 			device_xname(sc->sc_dev), i));
   7706 
   7707 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7708 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7709 
   7710 		status =
   7711 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7712 		if ((status & WTX_ST_DD) == 0) {
   7713 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7714 			    BUS_DMASYNC_PREREAD);
   7715 			break;
   7716 		}
   7717 
   7718 		processed = true;
   7719 		count++;
   7720 		DPRINTF(WM_DEBUG_TX,
   7721 		    ("%s: TX: job %d done: descs %d..%d\n",
   7722 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7723 		    txs->txs_lastdesc));
   7724 
   7725 		/*
   7726 		 * XXX We should probably be using the statistics
   7727 		 * XXX registers, but I don't know if they exist
   7728 		 * XXX on chips before the i82544.
   7729 		 */
   7730 
   7731 #ifdef WM_EVENT_COUNTERS
   7732 		if (status & WTX_ST_TU)
   7733 			WM_Q_EVCNT_INCR(txq, tu);
   7734 #endif /* WM_EVENT_COUNTERS */
   7735 
   7736 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7737 			ifp->if_oerrors++;
   7738 			if (status & WTX_ST_LC)
   7739 				log(LOG_WARNING, "%s: late collision\n",
   7740 				    device_xname(sc->sc_dev));
   7741 			else if (status & WTX_ST_EC) {
   7742 				ifp->if_collisions += 16;
   7743 				log(LOG_WARNING, "%s: excessive collisions\n",
   7744 				    device_xname(sc->sc_dev));
   7745 			}
   7746 		} else
   7747 			ifp->if_opackets++;
   7748 
   7749 		txq->txq_packets++;
   7750 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7751 
   7752 		txq->txq_free += txs->txs_ndesc;
   7753 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7754 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7755 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7756 		m_freem(txs->txs_mbuf);
   7757 		txs->txs_mbuf = NULL;
   7758 	}
   7759 
   7760 	/* Update the dirty transmit buffer pointer. */
   7761 	txq->txq_sdirty = i;
   7762 	DPRINTF(WM_DEBUG_TX,
   7763 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7764 
   7765 	if (count != 0)
   7766 		rnd_add_uint32(&sc->rnd_source, count);
   7767 
   7768 	/*
   7769 	 * If there are no more pending transmissions, cancel the watchdog
   7770 	 * timer.
   7771 	 */
   7772 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7773 		ifp->if_timer = 0;
   7774 
   7775 	return processed;
   7776 }
   7777 
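/*
 * The wm_rxdesc_* accessors below hide the three receive descriptor
 * layouts: the 82574 extended format, the NEWQUEUE (82575 and newer)
 * advanced format, and the original legacy format.
 */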
   7778 static inline uint32_t
   7779 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7780 {
   7781 	struct wm_softc *sc = rxq->rxq_sc;
   7782 
   7783 	if (sc->sc_type == WM_T_82574)
   7784 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7785 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7786 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7787 	else
   7788 		return rxq->rxq_descs[idx].wrx_status;
   7789 }
   7790 
   7791 static inline uint32_t
   7792 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7793 {
   7794 	struct wm_softc *sc = rxq->rxq_sc;
   7795 
   7796 	if (sc->sc_type == WM_T_82574)
   7797 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7798 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7799 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7800 	else
   7801 		return rxq->rxq_descs[idx].wrx_errors;
   7802 }
   7803 
   7804 static inline uint16_t
   7805 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7806 {
   7807 	struct wm_softc *sc = rxq->rxq_sc;
   7808 
   7809 	if (sc->sc_type == WM_T_82574)
   7810 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7811 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7812 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7813 	else
   7814 		return rxq->rxq_descs[idx].wrx_special;
   7815 }
   7816 
   7817 static inline int
   7818 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7819 {
   7820 	struct wm_softc *sc = rxq->rxq_sc;
   7821 
   7822 	if (sc->sc_type == WM_T_82574)
   7823 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7824 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7825 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7826 	else
   7827 		return rxq->rxq_descs[idx].wrx_len;
   7828 }
   7829 
   7830 #ifdef WM_DEBUG
   7831 static inline uint32_t
   7832 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7833 {
   7834 	struct wm_softc *sc = rxq->rxq_sc;
   7835 
   7836 	if (sc->sc_type == WM_T_82574)
   7837 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7838 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7839 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7840 	else
   7841 		return 0;
   7842 }
   7843 
   7844 static inline uint8_t
   7845 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7846 {
   7847 	struct wm_softc *sc = rxq->rxq_sc;
   7848 
   7849 	if (sc->sc_type == WM_T_82574)
   7850 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7851 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7852 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7853 	else
   7854 		return 0;
   7855 }
   7856 #endif /* WM_DEBUG */
   7857 
   7858 static inline bool
   7859 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7860     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7861 {
   7862 
   7863 	if (sc->sc_type == WM_T_82574)
   7864 		return (status & ext_bit) != 0;
   7865 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7866 		return (status & nq_bit) != 0;
   7867 	else
   7868 		return (status & legacy_bit) != 0;
   7869 }
   7870 
   7871 static inline bool
   7872 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7873     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7874 {
   7875 
   7876 	if (sc->sc_type == WM_T_82574)
   7877 		return (error & ext_bit) != 0;
   7878 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7879 		return (error & nq_bit) != 0;
   7880 	else
   7881 		return (error & legacy_bit) != 0;
   7882 }
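
/*
 * Illustrative sketch (not driver code): together, the accessors and
 * predicates above hide the three RX descriptor layouts (legacy, 82574
 * extended, newqueue) so a consumer can stay format-agnostic.  For a
 * filled descriptor at index idx:
 *
 *	status = wm_rxdesc_get_status(rxq, idx);
 *	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
 *	    WRX_ST_DD, EXTRXC_STATUS_DD, NQRXC_STATUS_DD))
 *		len = le16toh(wm_rxdesc_get_pktlen(rxq, idx));
 */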
   7883 
   7884 static inline bool
   7885 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7886 {
   7887 
   7888 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7889 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7890 		return true;
   7891 	else
   7892 		return false;
   7893 }
   7894 
   7895 static inline bool
   7896 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7897 {
   7898 	struct wm_softc *sc = rxq->rxq_sc;
   7899 
   7900 	/* XXXX missing error bit for newqueue? */
   7901 	if (wm_rxdesc_is_set_error(sc, errors,
		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ
		|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
		NQRXC_ERROR_RXE)) {
		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
		    EXTRXC_ERROR_SE, 0))
			log(LOG_WARNING, "%s: symbol error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
		    EXTRXC_ERROR_SEQ, 0))
			log(LOG_WARNING, "%s: receive sequence error\n",
			    device_xname(sc->sc_dev));
		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
		    EXTRXC_ERROR_CE, 0))
   7912 			log(LOG_WARNING, "%s: CRC error\n",
   7913 			    device_xname(sc->sc_dev));
   7914 		return true;
   7915 	}
   7916 
   7917 	return false;
   7918 }
   7919 
   7920 static inline bool
   7921 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7922 {
   7923 	struct wm_softc *sc = rxq->rxq_sc;
   7924 
   7925 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7926 		NQRXC_STATUS_DD)) {
   7927 		/* We have processed all of the receive descriptors. */
   7928 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7929 		return false;
   7930 	}
   7931 
   7932 	return true;
   7933 }
   7934 
   7935 static inline bool
   7936 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7937     struct mbuf *m)
   7938 {
   7939 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7940 
   7941 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7942 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7943 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7944 	}
   7945 
   7946 	return true;
   7947 }
   7948 
   7949 static inline void
   7950 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7951     uint32_t errors, struct mbuf *m)
   7952 {
   7953 	struct wm_softc *sc = rxq->rxq_sc;
   7954 
   7955 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7956 		if (wm_rxdesc_is_set_status(sc, status,
   7957 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7958 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7959 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7960 			if (wm_rxdesc_is_set_error(sc, errors,
   7961 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7962 				m->m_pkthdr.csum_flags |=
   7963 					M_CSUM_IPv4_BAD;
   7964 		}
   7965 		if (wm_rxdesc_is_set_status(sc, status,
   7966 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7967 			/*
   7968 			 * Note: we don't know if this was TCP or UDP,
   7969 			 * so we just set both bits, and expect the
   7970 			 * upper layers to deal.
   7971 			 */
   7972 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7973 			m->m_pkthdr.csum_flags |=
   7974 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7975 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7976 			if (wm_rxdesc_is_set_error(sc, errors,
   7977 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7978 				m->m_pkthdr.csum_flags |=
   7979 					M_CSUM_TCP_UDP_BAD;
   7980 		}
   7981 	}
   7982 }
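
/*
 * Illustrative sketch (not driver code, and only one plausible consumer):
 * an upper layer using the csum_flags set above would typically check
 * them roughly like this for a received mbuf m:
 *
 *	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
 *		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4_BAD)
 *			drop the packet (bad IPv4 header checksum);
 *		else
 *			skip the software checksum verification;
 *	}
 */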
   7983 
   7984 /*
   7985  * wm_rxeof:
   7986  *
   7987  *	Helper; handle receive interrupts.
   7988  */
   7989 static void
   7990 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7991 {
   7992 	struct wm_softc *sc = rxq->rxq_sc;
   7993 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7994 	struct wm_rxsoft *rxs;
   7995 	struct mbuf *m;
   7996 	int i, len;
   7997 	int count = 0;
   7998 	uint32_t status, errors;
   7999 	uint16_t vlantag;
   8000 
   8001 	KASSERT(mutex_owned(rxq->rxq_lock));
   8002 
   8003 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8004 		if (limit-- == 0) {
   8005 			rxq->rxq_ptr = i;
   8006 			break;
   8007 		}
   8008 
   8009 		rxs = &rxq->rxq_soft[i];
   8010 
   8011 		DPRINTF(WM_DEBUG_RX,
   8012 		    ("%s: RX: checking descriptor %d\n",
   8013 		    device_xname(sc->sc_dev), i));
		wm_cdrxsync(rxq, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8015 
   8016 		status = wm_rxdesc_get_status(rxq, i);
   8017 		errors = wm_rxdesc_get_errors(rxq, i);
   8018 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8019 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8020 #ifdef WM_DEBUG
   8021 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8022 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8023 #endif
   8024 
   8025 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8026 			/*
			 * Update the receive pointer while holding rxq_lock
			 * so it stays consistent with the last descriptor
			 * we processed.
   8029 			 */
   8030 			rxq->rxq_ptr = i;
   8031 			break;
   8032 		}
   8033 
   8034 		count++;
   8035 		if (__predict_false(rxq->rxq_discard)) {
   8036 			DPRINTF(WM_DEBUG_RX,
   8037 			    ("%s: RX: discarding contents of descriptor %d\n",
   8038 			    device_xname(sc->sc_dev), i));
   8039 			wm_init_rxdesc(rxq, i);
   8040 			if (wm_rxdesc_is_eop(rxq, status)) {
   8041 				/* Reset our state. */
   8042 				DPRINTF(WM_DEBUG_RX,
   8043 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8044 				    device_xname(sc->sc_dev)));
   8045 				rxq->rxq_discard = 0;
   8046 			}
   8047 			continue;
   8048 		}
   8049 
   8050 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8051 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8052 
   8053 		m = rxs->rxs_mbuf;
   8054 
   8055 		/*
   8056 		 * Add a new receive buffer to the ring, unless of
   8057 		 * course the length is zero. Treat the latter as a
   8058 		 * failed mapping.
   8059 		 */
   8060 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8061 			/*
   8062 			 * Failed, throw away what we've done so
   8063 			 * far, and discard the rest of the packet.
   8064 			 */
   8065 			ifp->if_ierrors++;
   8066 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8067 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8068 			wm_init_rxdesc(rxq, i);
   8069 			if (!wm_rxdesc_is_eop(rxq, status))
   8070 				rxq->rxq_discard = 1;
   8071 			if (rxq->rxq_head != NULL)
   8072 				m_freem(rxq->rxq_head);
   8073 			WM_RXCHAIN_RESET(rxq);
   8074 			DPRINTF(WM_DEBUG_RX,
   8075 			    ("%s: RX: Rx buffer allocation failed, "
   8076 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8077 			    rxq->rxq_discard ? " (discard)" : ""));
   8078 			continue;
   8079 		}
   8080 
   8081 		m->m_len = len;
   8082 		rxq->rxq_len += len;
   8083 		DPRINTF(WM_DEBUG_RX,
   8084 		    ("%s: RX: buffer at %p len %d\n",
   8085 		    device_xname(sc->sc_dev), m->m_data, len));
   8086 
   8087 		/* If this is not the end of the packet, keep looking. */
   8088 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8089 			WM_RXCHAIN_LINK(rxq, m);
   8090 			DPRINTF(WM_DEBUG_RX,
   8091 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8092 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8093 			continue;
   8094 		}
   8095 
   8096 		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS except on I350, I354 and
		 * I21[01] (not all chips can be configured to strip it),
		 * so we need to trim it.  We may also need to adjust the
		 * length of the previous mbuf in the chain if the current
		 * mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on I350, so the hardware strips
		 * the FCS there and we must not trim it again.
   8105 		 */
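		/*
		 * Worked example (illustrative): with ETHER_CRC_LEN == 4,
		 * a 68 byte frame that arrived as a 66 byte mbuf followed
		 * by a 2 byte mbuf has only part of the FCS in the last
		 * mbuf, so its length is zeroed and the remaining
		 * 4 - 2 = 2 bytes come off the previous mbuf, leaving the
		 * 64 byte packet.
		 */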
   8106 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8107 		    && (sc->sc_type != WM_T_I210)
   8108 		    && (sc->sc_type != WM_T_I211)) {
   8109 			if (m->m_len < ETHER_CRC_LEN) {
   8110 				rxq->rxq_tail->m_len
   8111 				    -= (ETHER_CRC_LEN - m->m_len);
   8112 				m->m_len = 0;
   8113 			} else
   8114 				m->m_len -= ETHER_CRC_LEN;
   8115 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8116 		} else
   8117 			len = rxq->rxq_len;
   8118 
   8119 		WM_RXCHAIN_LINK(rxq, m);
   8120 
   8121 		*rxq->rxq_tailp = NULL;
   8122 		m = rxq->rxq_head;
   8123 
   8124 		WM_RXCHAIN_RESET(rxq);
   8125 
   8126 		DPRINTF(WM_DEBUG_RX,
   8127 		    ("%s: RX: have entire packet, len -> %d\n",
   8128 		    device_xname(sc->sc_dev), len));
   8129 
   8130 		/* If an error occurred, update stats and drop the packet. */
   8131 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8132 			m_freem(m);
   8133 			continue;
   8134 		}
   8135 
   8136 		/* No errors.  Receive the packet. */
   8137 		m_set_rcvif(m, ifp);
   8138 		m->m_pkthdr.len = len;
   8139 		/*
   8140 		 * TODO
		 * We should save the rsshash and rsstype in this mbuf.
   8142 		 */
   8143 		DPRINTF(WM_DEBUG_RX,
   8144 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8145 			device_xname(sc->sc_dev), rsstype, rsshash));
   8146 
   8147 		/*
   8148 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8149 		 * for us.  Associate the tag with the packet.
   8150 		 */
   8151 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8152 			continue;
   8153 
   8154 		/* Set up checksum info for this packet. */
   8155 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8156 		/*
		 * Update the receive pointer while holding rxq_lock so it
		 * stays consistent with the last descriptor we processed.
   8159 		 */
   8160 		rxq->rxq_ptr = i;
   8161 		rxq->rxq_packets++;
   8162 		rxq->rxq_bytes += len;
   8163 		mutex_exit(rxq->rxq_lock);
   8164 
   8165 		/* Pass it on. */
   8166 		if_percpuq_enqueue(sc->sc_ipq, m);
   8167 
   8168 		mutex_enter(rxq->rxq_lock);
   8169 
   8170 		if (rxq->rxq_stopping)
   8171 			break;
   8172 	}
   8173 
   8174 	if (count != 0)
   8175 		rnd_add_uint32(&sc->rnd_source, count);
   8176 
   8177 	DPRINTF(WM_DEBUG_RX,
   8178 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8179 }
   8180 
   8181 /*
   8182  * wm_linkintr_gmii:
   8183  *
   8184  *	Helper; handle link interrupts for GMII.
   8185  */
   8186 static void
   8187 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8188 {
   8189 
   8190 	KASSERT(WM_CORE_LOCKED(sc));
   8191 
   8192 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8193 		__func__));
   8194 
   8195 	if (icr & ICR_LSC) {
   8196 		uint32_t reg;
   8197 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8198 
   8199 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8200 			wm_gig_downshift_workaround_ich8lan(sc);
   8201 
   8202 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8203 			device_xname(sc->sc_dev)));
   8204 		mii_pollstat(&sc->sc_mii);
   8205 		if (sc->sc_type == WM_T_82543) {
   8206 			int miistatus, active;
   8207 
   8208 			/*
   8209 			 * With 82543, we need to force speed and
   8210 			 * duplex on the MAC equal to what the PHY
   8211 			 * speed and duplex configuration is.
   8212 			 */
   8213 			miistatus = sc->sc_mii.mii_media_status;
   8214 
   8215 			if (miistatus & IFM_ACTIVE) {
   8216 				active = sc->sc_mii.mii_media_active;
   8217 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8218 				switch (IFM_SUBTYPE(active)) {
   8219 				case IFM_10_T:
   8220 					sc->sc_ctrl |= CTRL_SPEED_10;
   8221 					break;
   8222 				case IFM_100_TX:
   8223 					sc->sc_ctrl |= CTRL_SPEED_100;
   8224 					break;
   8225 				case IFM_1000_T:
   8226 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8227 					break;
   8228 				default:
   8229 					/*
					 * Fiber?
					 * Should not enter here.
   8232 					 */
   8233 					printf("unknown media (%x)\n", active);
   8234 					break;
   8235 				}
   8236 				if (active & IFM_FDX)
   8237 					sc->sc_ctrl |= CTRL_FD;
   8238 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8239 			}
   8240 		} else if ((sc->sc_type == WM_T_ICH8)
   8241 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8242 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8243 		} else if (sc->sc_type == WM_T_PCH) {
   8244 			wm_k1_gig_workaround_hv(sc,
   8245 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8246 		}
   8247 
   8248 		if ((sc->sc_phytype == WMPHY_82578)
   8249 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8250 			== IFM_1000_T)) {
   8251 
   8252 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8253 				delay(200*1000); /* XXX too big */
   8254 
   8255 				/* Link stall fix for link up */
   8256 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8257 				    HV_MUX_DATA_CTRL,
   8258 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8259 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8260 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8261 				    HV_MUX_DATA_CTRL,
   8262 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8263 			}
   8264 		}
   8265 		/*
   8266 		 * I217 Packet Loss issue:
   8267 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8268 		 * on power up.
   8269 		 * Set the Beacon Duration for I217 to 8 usec
   8270 		 */
   8271 		if ((sc->sc_type == WM_T_PCH_LPT)
   8272 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8273 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8274 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8275 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8276 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8277 		}
   8278 
   8279 		/* XXX Work-around I218 hang issue */
   8280 		/* e1000_k1_workaround_lpt_lp() */
   8281 
   8282 		if ((sc->sc_type == WM_T_PCH_LPT)
   8283 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8284 			/*
   8285 			 * Set platform power management values for Latency
   8286 			 * Tolerance Reporting (LTR)
   8287 			 */
   8288 			wm_platform_pm_pch_lpt(sc,
   8289 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8290 				    != 0));
   8291 		}
   8292 
   8293 		/* FEXTNVM6 K1-off workaround */
   8294 		if (sc->sc_type == WM_T_PCH_SPT) {
   8295 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8296 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8297 			    & FEXTNVM6_K1_OFF_ENABLE)
   8298 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8299 			else
   8300 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8301 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8302 		}
   8303 	} else if (icr & ICR_RXSEQ) {
   8304 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   8305 			device_xname(sc->sc_dev)));
   8306 	}
   8307 }
   8308 
   8309 /*
   8310  * wm_linkintr_tbi:
   8311  *
   8312  *	Helper; handle link interrupts for TBI mode.
   8313  */
   8314 static void
   8315 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8316 {
   8317 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8318 	uint32_t status;
   8319 
   8320 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8321 		__func__));
   8322 
   8323 	status = CSR_READ(sc, WMREG_STATUS);
   8324 	if (icr & ICR_LSC) {
   8325 		if (status & STATUS_LU) {
   8326 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8327 			    device_xname(sc->sc_dev),
   8328 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8329 			/*
   8330 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8331 			 * so we should update sc->sc_ctrl
   8332 			 */
   8333 
   8334 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8335 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8336 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8337 			if (status & STATUS_FD)
   8338 				sc->sc_tctl |=
   8339 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8340 			else
   8341 				sc->sc_tctl |=
   8342 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8343 			if (sc->sc_ctrl & CTRL_TFCE)
   8344 				sc->sc_fcrtl |= FCRTL_XONE;
   8345 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8346 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8347 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8348 				      sc->sc_fcrtl);
   8349 			sc->sc_tbi_linkup = 1;
   8350 			if_link_state_change(ifp, LINK_STATE_UP);
   8351 		} else {
   8352 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8353 			    device_xname(sc->sc_dev)));
   8354 			sc->sc_tbi_linkup = 0;
   8355 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8356 		}
   8357 		/* Update LED */
   8358 		wm_tbi_serdes_set_linkled(sc);
   8359 	} else if (icr & ICR_RXSEQ) {
   8360 		DPRINTF(WM_DEBUG_LINK,
   8361 		    ("%s: LINK: Receive sequence error\n",
   8362 		    device_xname(sc->sc_dev)));
   8363 	}
   8364 }
   8365 
   8366 /*
   8367  * wm_linkintr_serdes:
   8368  *
 *	Helper; handle link interrupts for SERDES mode.
   8370  */
   8371 static void
   8372 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8373 {
   8374 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8375 	struct mii_data *mii = &sc->sc_mii;
   8376 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8377 	uint32_t pcs_adv, pcs_lpab, reg;
   8378 
   8379 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8380 		__func__));
   8381 
   8382 	if (icr & ICR_LSC) {
   8383 		/* Check PCS */
   8384 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8385 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8386 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8387 				device_xname(sc->sc_dev)));
   8388 			mii->mii_media_status |= IFM_ACTIVE;
   8389 			sc->sc_tbi_linkup = 1;
   8390 			if_link_state_change(ifp, LINK_STATE_UP);
   8391 		} else {
   8392 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8393 				device_xname(sc->sc_dev)));
   8394 			mii->mii_media_status |= IFM_NONE;
   8395 			sc->sc_tbi_linkup = 0;
   8396 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8397 			wm_tbi_serdes_set_linkled(sc);
   8398 			return;
   8399 		}
   8400 		mii->mii_media_active |= IFM_1000_SX;
   8401 		if ((reg & PCS_LSTS_FDX) != 0)
   8402 			mii->mii_media_active |= IFM_FDX;
   8403 		else
   8404 			mii->mii_media_active |= IFM_HDX;
   8405 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8406 			/* Check flow */
   8407 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8408 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8409 				DPRINTF(WM_DEBUG_LINK,
   8410 				    ("XXX LINKOK but not ACOMP\n"));
   8411 				return;
   8412 			}
   8413 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8414 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8415 			DPRINTF(WM_DEBUG_LINK,
   8416 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8417 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8418 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8419 				mii->mii_media_active |= IFM_FLOW
   8420 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8421 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8422 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8423 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8424 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8425 				mii->mii_media_active |= IFM_FLOW
   8426 				    | IFM_ETH_TXPAUSE;
   8427 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8428 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8429 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8430 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8431 				mii->mii_media_active |= IFM_FLOW
   8432 				    | IFM_ETH_RXPAUSE;
   8433 		}
   8434 		/* Update LED */
   8435 		wm_tbi_serdes_set_linkled(sc);
   8436 	} else {
   8437 		DPRINTF(WM_DEBUG_LINK,
   8438 		    ("%s: LINK: Receive sequence error\n",
   8439 		    device_xname(sc->sc_dev)));
   8440 	}
   8441 }
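
/*
 * The pause resolution above follows the IEEE 802.3 clause 37 priority
 * scheme.  With SYM/ASYM denoting the symmetric and asymmetric pause
 * ability bits advertised locally (PCS_ANADV) and by the link partner
 * (PCS_LPAB):
 *
 *	local SYM ASYM	partner SYM ASYM	result
 *	   1    x	   1    x		TX and RX pause
 *	   0    1	   1    1		TX pause only
 *	   1    1	   0    1		RX pause only
 *	(anything else)				no pause
 */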
   8442 
   8443 /*
   8444  * wm_linkintr:
   8445  *
   8446  *	Helper; handle link interrupts.
   8447  */
   8448 static void
   8449 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8450 {
   8451 
   8452 	KASSERT(WM_CORE_LOCKED(sc));
   8453 
   8454 	if (sc->sc_flags & WM_F_HAS_MII)
   8455 		wm_linkintr_gmii(sc, icr);
   8456 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8457 	    && (sc->sc_type >= WM_T_82575))
   8458 		wm_linkintr_serdes(sc, icr);
   8459 	else
   8460 		wm_linkintr_tbi(sc, icr);
   8461 }
   8462 
   8463 /*
   8464  * wm_intr_legacy:
   8465  *
   8466  *	Interrupt service routine for INTx and MSI.
   8467  */
   8468 static int
   8469 wm_intr_legacy(void *arg)
   8470 {
   8471 	struct wm_softc *sc = arg;
   8472 	struct wm_queue *wmq = &sc->sc_queue[0];
   8473 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8474 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8475 	uint32_t icr, rndval = 0;
   8476 	int handled = 0;
   8477 
   8478 	while (1 /* CONSTCOND */) {
   8479 		icr = CSR_READ(sc, WMREG_ICR);
   8480 		if ((icr & sc->sc_icr) == 0)
   8481 			break;
   8482 		if (handled == 0) {
   8483 			DPRINTF(WM_DEBUG_TX,
   8484 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8485 		}
   8486 		if (rndval == 0)
   8487 			rndval = icr;
   8488 
   8489 		mutex_enter(rxq->rxq_lock);
   8490 
   8491 		if (rxq->rxq_stopping) {
   8492 			mutex_exit(rxq->rxq_lock);
   8493 			break;
   8494 		}
   8495 
   8496 		handled = 1;
   8497 
   8498 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8499 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8500 			DPRINTF(WM_DEBUG_RX,
   8501 			    ("%s: RX: got Rx intr 0x%08x\n",
   8502 			    device_xname(sc->sc_dev),
   8503 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8504 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8505 		}
   8506 #endif
   8507 		wm_rxeof(rxq, UINT_MAX);
   8508 
   8509 		mutex_exit(rxq->rxq_lock);
   8510 		mutex_enter(txq->txq_lock);
   8511 
   8512 		if (txq->txq_stopping) {
   8513 			mutex_exit(txq->txq_lock);
   8514 			break;
   8515 		}
   8516 
   8517 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8518 		if (icr & ICR_TXDW) {
   8519 			DPRINTF(WM_DEBUG_TX,
   8520 			    ("%s: TX: got TXDW interrupt\n",
   8521 			    device_xname(sc->sc_dev)));
   8522 			WM_Q_EVCNT_INCR(txq, txdw);
   8523 		}
   8524 #endif
   8525 		wm_txeof(sc, txq);
   8526 
   8527 		mutex_exit(txq->txq_lock);
   8528 		WM_CORE_LOCK(sc);
   8529 
   8530 		if (sc->sc_core_stopping) {
   8531 			WM_CORE_UNLOCK(sc);
   8532 			break;
   8533 		}
   8534 
   8535 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8536 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8537 			wm_linkintr(sc, icr);
   8538 		}
   8539 
   8540 		WM_CORE_UNLOCK(sc);
   8541 
   8542 		if (icr & ICR_RXO) {
   8543 #if defined(WM_DEBUG)
   8544 			log(LOG_WARNING, "%s: Receive overrun\n",
   8545 			    device_xname(sc->sc_dev));
   8546 #endif /* defined(WM_DEBUG) */
   8547 		}
   8548 	}
   8549 
   8550 	rnd_add_uint32(&sc->rnd_source, rndval);
   8551 
   8552 	if (handled) {
   8553 		/* Try to get more packets going. */
   8554 		softint_schedule(wmq->wmq_si);
   8555 	}
   8556 
   8557 	return handled;
   8558 }
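
/*
 * Note: ICR is read-to-clear on this device family, so each
 * CSR_READ(sc, WMREG_ICR) in the loop above both fetches and
 * acknowledges the pending interrupt causes; the loop exits once no
 * cause enabled in sc->sc_icr remains set.
 */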
   8559 
   8560 static inline void
   8561 wm_txrxintr_disable(struct wm_queue *wmq)
   8562 {
   8563 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8564 
   8565 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMC,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMC,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8569 	else
   8570 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8571 }
   8572 
   8573 static inline void
   8574 wm_txrxintr_enable(struct wm_queue *wmq)
   8575 {
   8576 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8577 
   8578 	wm_itrs_calculate(sc, wmq);
   8579 
   8580 	if (sc->sc_type == WM_T_82574)
		CSR_WRITE(sc, WMREG_IMS,
		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
	else if (sc->sc_type == WM_T_82575)
		CSR_WRITE(sc, WMREG_EIMS,
		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8584 	else
   8585 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8586 }
   8587 
   8588 static int
   8589 wm_txrxintr_msix(void *arg)
   8590 {
   8591 	struct wm_queue *wmq = arg;
   8592 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8593 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8594 	struct wm_softc *sc = txq->txq_sc;
   8595 	u_int limit = sc->sc_rx_intr_process_limit;
   8596 
   8597 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8598 
   8599 	DPRINTF(WM_DEBUG_TX,
   8600 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8601 
   8602 	wm_txrxintr_disable(wmq);
   8603 
   8604 	mutex_enter(txq->txq_lock);
   8605 
   8606 	if (txq->txq_stopping) {
   8607 		mutex_exit(txq->txq_lock);
   8608 		return 0;
   8609 	}
   8610 
   8611 	WM_Q_EVCNT_INCR(txq, txdw);
   8612 	wm_txeof(sc, txq);
   8613 	/* wm_deferred start() is done in wm_handle_queue(). */
   8614 	mutex_exit(txq->txq_lock);
   8615 
   8616 	DPRINTF(WM_DEBUG_RX,
   8617 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8618 	mutex_enter(rxq->rxq_lock);
   8619 
   8620 	if (rxq->rxq_stopping) {
   8621 		mutex_exit(rxq->rxq_lock);
   8622 		return 0;
   8623 	}
   8624 
   8625 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8626 	wm_rxeof(rxq, limit);
   8627 	mutex_exit(rxq->rxq_lock);
   8628 
   8629 	wm_itrs_writereg(sc, wmq);
   8630 
   8631 	softint_schedule(wmq->wmq_si);
   8632 
   8633 	return 1;
   8634 }
   8635 
   8636 static void
   8637 wm_handle_queue(void *arg)
   8638 {
   8639 	struct wm_queue *wmq = arg;
   8640 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8641 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8642 	struct wm_softc *sc = txq->txq_sc;
   8643 	u_int limit = sc->sc_rx_process_limit;
   8644 
   8645 	mutex_enter(txq->txq_lock);
   8646 	if (txq->txq_stopping) {
   8647 		mutex_exit(txq->txq_lock);
   8648 		return;
   8649 	}
   8650 	wm_txeof(sc, txq);
   8651 	wm_deferred_start_locked(txq);
   8652 	mutex_exit(txq->txq_lock);
   8653 
   8654 	mutex_enter(rxq->rxq_lock);
   8655 	if (rxq->rxq_stopping) {
   8656 		mutex_exit(rxq->rxq_lock);
   8657 		return;
   8658 	}
   8659 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8660 	wm_rxeof(rxq, limit);
   8661 	mutex_exit(rxq->rxq_lock);
   8662 
   8663 	wm_txrxintr_enable(wmq);
   8664 }
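
/*
 * Summary of the per-queue MSI-X flow above: wm_txrxintr_msix() masks
 * the queue's interrupt, performs bounded TX/RX cleanup, and schedules
 * wmq_si; wm_handle_queue() then runs in softint context, finishes the
 * deferred work (including deferred start), and re-enables the
 * interrupt with wm_txrxintr_enable().
 */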
   8665 
   8666 /*
   8667  * wm_linkintr_msix:
   8668  *
   8669  *	Interrupt service routine for link status change for MSI-X.
   8670  */
   8671 static int
   8672 wm_linkintr_msix(void *arg)
   8673 {
   8674 	struct wm_softc *sc = arg;
   8675 	uint32_t reg;
   8676 
   8677 	DPRINTF(WM_DEBUG_LINK,
   8678 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8679 
   8680 	reg = CSR_READ(sc, WMREG_ICR);
   8681 	WM_CORE_LOCK(sc);
   8682 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8683 		goto out;
   8684 
   8685 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8686 	wm_linkintr(sc, ICR_LSC);
   8687 
   8688 out:
   8689 	WM_CORE_UNLOCK(sc);
   8690 
   8691 	if (sc->sc_type == WM_T_82574)
   8692 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8693 	else if (sc->sc_type == WM_T_82575)
   8694 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8695 	else
   8696 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8697 
   8698 	return 1;
   8699 }
   8700 
   8701 /*
   8702  * Media related.
   8703  * GMII, SGMII, TBI (and SERDES)
   8704  */
   8705 
   8706 /* Common */
   8707 
   8708 /*
   8709  * wm_tbi_serdes_set_linkled:
   8710  *
   8711  *	Update the link LED on TBI and SERDES devices.
   8712  */
   8713 static void
   8714 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8715 {
   8716 
   8717 	if (sc->sc_tbi_linkup)
   8718 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8719 	else
   8720 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8721 
   8722 	/* 82540 or newer devices are active low */
   8723 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8724 
   8725 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8726 }
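
/*
 * Example of the XOR above: on an 82544 (>= 82540, so the LED is
 * active low) with link up, CTRL_SWDPIN(0) is first set and then
 * inverted by the XOR, driving the pin low and lighting the LED;
 * on an older chip the XOR term is 0 and the pin is driven high.
 */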
   8727 
   8728 /* GMII related */
   8729 
   8730 /*
   8731  * wm_gmii_reset:
   8732  *
   8733  *	Reset the PHY.
   8734  */
   8735 static void
   8736 wm_gmii_reset(struct wm_softc *sc)
   8737 {
   8738 	uint32_t reg;
   8739 	int rv;
   8740 
   8741 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8742 		device_xname(sc->sc_dev), __func__));
   8743 
   8744 	rv = sc->phy.acquire(sc);
   8745 	if (rv != 0) {
   8746 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8747 		    __func__);
   8748 		return;
   8749 	}
   8750 
   8751 	switch (sc->sc_type) {
   8752 	case WM_T_82542_2_0:
   8753 	case WM_T_82542_2_1:
   8754 		/* null */
   8755 		break;
   8756 	case WM_T_82543:
   8757 		/*
   8758 		 * With 82543, we need to force speed and duplex on the MAC
   8759 		 * equal to what the PHY speed and duplex configuration is.
   8760 		 * In addition, we need to perform a hardware reset on the PHY
   8761 		 * to take it out of reset.
   8762 		 */
   8763 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8764 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8765 
   8766 		/* The PHY reset pin is active-low. */
   8767 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8768 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8769 		    CTRL_EXT_SWDPIN(4));
   8770 		reg |= CTRL_EXT_SWDPIO(4);
   8771 
   8772 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8773 		CSR_WRITE_FLUSH(sc);
   8774 		delay(10*1000);
   8775 
   8776 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8777 		CSR_WRITE_FLUSH(sc);
   8778 		delay(150);
   8779 #if 0
   8780 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8781 #endif
   8782 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8783 		break;
   8784 	case WM_T_82544:	/* reset 10000us */
   8785 	case WM_T_82540:
   8786 	case WM_T_82545:
   8787 	case WM_T_82545_3:
   8788 	case WM_T_82546:
   8789 	case WM_T_82546_3:
   8790 	case WM_T_82541:
   8791 	case WM_T_82541_2:
   8792 	case WM_T_82547:
   8793 	case WM_T_82547_2:
   8794 	case WM_T_82571:	/* reset 100us */
   8795 	case WM_T_82572:
   8796 	case WM_T_82573:
   8797 	case WM_T_82574:
   8798 	case WM_T_82575:
   8799 	case WM_T_82576:
   8800 	case WM_T_82580:
   8801 	case WM_T_I350:
   8802 	case WM_T_I354:
   8803 	case WM_T_I210:
   8804 	case WM_T_I211:
   8805 	case WM_T_82583:
   8806 	case WM_T_80003:
   8807 		/* generic reset */
   8808 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8809 		CSR_WRITE_FLUSH(sc);
   8810 		delay(20000);
   8811 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8812 		CSR_WRITE_FLUSH(sc);
   8813 		delay(20000);
   8814 
   8815 		if ((sc->sc_type == WM_T_82541)
   8816 		    || (sc->sc_type == WM_T_82541_2)
   8817 		    || (sc->sc_type == WM_T_82547)
   8818 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
   8820 			/* XXX add code to set LED after phy reset */
   8821 		}
   8822 		break;
   8823 	case WM_T_ICH8:
   8824 	case WM_T_ICH9:
   8825 	case WM_T_ICH10:
   8826 	case WM_T_PCH:
   8827 	case WM_T_PCH2:
   8828 	case WM_T_PCH_LPT:
   8829 	case WM_T_PCH_SPT:
   8830 		/* generic reset */
   8831 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8832 		CSR_WRITE_FLUSH(sc);
   8833 		delay(100);
   8834 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8835 		CSR_WRITE_FLUSH(sc);
   8836 		delay(150);
   8837 		break;
   8838 	default:
   8839 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8840 		    __func__);
   8841 		break;
   8842 	}
   8843 
   8844 	sc->phy.release(sc);
   8845 
   8846 	/* get_cfg_done */
   8847 	wm_get_cfg_done(sc);
   8848 
   8849 	/* extra setup */
   8850 	switch (sc->sc_type) {
   8851 	case WM_T_82542_2_0:
   8852 	case WM_T_82542_2_1:
   8853 	case WM_T_82543:
   8854 	case WM_T_82544:
   8855 	case WM_T_82540:
   8856 	case WM_T_82545:
   8857 	case WM_T_82545_3:
   8858 	case WM_T_82546:
   8859 	case WM_T_82546_3:
   8860 	case WM_T_82541_2:
   8861 	case WM_T_82547_2:
   8862 	case WM_T_82571:
   8863 	case WM_T_82572:
   8864 	case WM_T_82573:
   8865 	case WM_T_82575:
   8866 	case WM_T_82576:
   8867 	case WM_T_82580:
   8868 	case WM_T_I350:
   8869 	case WM_T_I354:
   8870 	case WM_T_I210:
   8871 	case WM_T_I211:
   8872 	case WM_T_80003:
   8873 		/* null */
   8874 		break;
   8875 	case WM_T_82574:
   8876 	case WM_T_82583:
   8877 		wm_lplu_d0_disable(sc);
   8878 		break;
   8879 	case WM_T_82541:
   8880 	case WM_T_82547:
   8881 		/* XXX Configure actively LED after PHY reset */
   8882 		break;
   8883 	case WM_T_ICH8:
   8884 	case WM_T_ICH9:
   8885 	case WM_T_ICH10:
   8886 	case WM_T_PCH:
   8887 	case WM_T_PCH2:
   8888 	case WM_T_PCH_LPT:
   8889 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8891 		delay(10*1000);
   8892 
   8893 		if (sc->sc_type == WM_T_PCH)
   8894 			wm_hv_phy_workaround_ich8lan(sc);
   8895 
   8896 		if (sc->sc_type == WM_T_PCH2)
   8897 			wm_lv_phy_workaround_ich8lan(sc);
   8898 
   8899 		/* Clear the host wakeup bit after lcd reset */
   8900 		if (sc->sc_type >= WM_T_PCH) {
   8901 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8902 			    BM_PORT_GEN_CFG);
   8903 			reg &= ~BM_WUC_HOST_WU_BIT;
   8904 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8905 			    BM_PORT_GEN_CFG, reg);
   8906 		}
   8907 
   8908 		/*
		 * XXX Configure the LCD with the extended configuration
		 * region in NVM
   8911 		 */
   8912 
   8913 		/* Disable D0 LPLU. */
   8914 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8915 			wm_lplu_d0_disable_pch(sc);
   8916 		else
   8917 			wm_lplu_d0_disable(sc);	/* ICH* */
   8918 		break;
   8919 	default:
   8920 		panic("%s: unknown type\n", __func__);
   8921 		break;
   8922 	}
   8923 }
   8924 
   8925 /*
 * Set up sc_phytype and mii_{read|write}reg.
 *
 *  To identify the PHY type, the correct read/write functions must be
 * selected, and to select the correct read/write functions, the PCI ID
 * or the MAC type must be used without accessing the PHY registers.
 *
 *  On the first call of this function, the PHY ID is not known yet.
 * Check the PCI ID or the MAC type. The list of PCI IDs may not be
 * perfect, so the result might be incorrect.
 *
 *  On the second call, the PHY OUI and model are used to identify the
 * PHY type. The result might still not be perfect because comparison
 * entries may be missing, but it should be better than the first call.
 *
 *  If the newly detected result differs from the previous assumption,
 * a diagnostic message is printed.
   8942  */
   8943 static void
   8944 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8945     uint16_t phy_model)
   8946 {
   8947 	device_t dev = sc->sc_dev;
   8948 	struct mii_data *mii = &sc->sc_mii;
   8949 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8950 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8951 	mii_readreg_t new_readreg;
   8952 	mii_writereg_t new_writereg;
   8953 
   8954 	if (mii->mii_readreg == NULL) {
   8955 		/*
   8956 		 *  This is the first call of this function. For ICH and PCH
   8957 		 * variants, it's difficult to determine the PHY access method
   8958 		 * by sc_type, so use the PCI product ID for some devices.
   8959 		 */
   8960 
   8961 		switch (sc->sc_pcidevid) {
   8962 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8963 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8964 			/* 82577 */
   8965 			new_phytype = WMPHY_82577;
   8966 			break;
   8967 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8968 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8969 			/* 82578 */
   8970 			new_phytype = WMPHY_82578;
   8971 			break;
   8972 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8973 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8974 			/* 82579 */
   8975 			new_phytype = WMPHY_82579;
   8976 			break;
   8977 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8978 		case PCI_PRODUCT_INTEL_82801I_BM:
   8979 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8980 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8981 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8982 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8983 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8984 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8985 			/* ICH8, 9, 10 with 82567 */
   8986 			new_phytype = WMPHY_BM;
   8987 			break;
   8988 		default:
   8989 			break;
   8990 		}
   8991 	} else {
   8992 		/* It's not the first call. Use PHY OUI and model */
   8993 		switch (phy_oui) {
   8994 		case MII_OUI_ATHEROS: /* XXX ??? */
   8995 			switch (phy_model) {
   8996 			case 0x0004: /* XXX */
   8997 				new_phytype = WMPHY_82578;
   8998 				break;
   8999 			default:
   9000 				break;
   9001 			}
   9002 			break;
   9003 		case MII_OUI_xxMARVELL:
   9004 			switch (phy_model) {
   9005 			case MII_MODEL_xxMARVELL_I210:
   9006 				new_phytype = WMPHY_I210;
   9007 				break;
   9008 			case MII_MODEL_xxMARVELL_E1011:
   9009 			case MII_MODEL_xxMARVELL_E1000_3:
   9010 			case MII_MODEL_xxMARVELL_E1000_5:
   9011 			case MII_MODEL_xxMARVELL_E1112:
   9012 				new_phytype = WMPHY_M88;
   9013 				break;
   9014 			case MII_MODEL_xxMARVELL_E1149:
   9015 				new_phytype = WMPHY_BM;
   9016 				break;
   9017 			case MII_MODEL_xxMARVELL_E1111:
   9018 			case MII_MODEL_xxMARVELL_I347:
   9019 			case MII_MODEL_xxMARVELL_E1512:
   9020 			case MII_MODEL_xxMARVELL_E1340M:
   9021 			case MII_MODEL_xxMARVELL_E1543:
   9022 				new_phytype = WMPHY_M88;
   9023 				break;
   9024 			case MII_MODEL_xxMARVELL_I82563:
   9025 				new_phytype = WMPHY_GG82563;
   9026 				break;
   9027 			default:
   9028 				break;
   9029 			}
   9030 			break;
   9031 		case MII_OUI_INTEL:
   9032 			switch (phy_model) {
   9033 			case MII_MODEL_INTEL_I82577:
   9034 				new_phytype = WMPHY_82577;
   9035 				break;
   9036 			case MII_MODEL_INTEL_I82579:
   9037 				new_phytype = WMPHY_82579;
   9038 				break;
   9039 			case MII_MODEL_INTEL_I217:
   9040 				new_phytype = WMPHY_I217;
   9041 				break;
   9042 			case MII_MODEL_INTEL_I82580:
   9043 			case MII_MODEL_INTEL_I350:
   9044 				new_phytype = WMPHY_82580;
   9045 				break;
   9046 			default:
   9047 				break;
   9048 			}
   9049 			break;
   9050 		case MII_OUI_yyINTEL:
   9051 			switch (phy_model) {
   9052 			case MII_MODEL_yyINTEL_I82562G:
   9053 			case MII_MODEL_yyINTEL_I82562EM:
   9054 			case MII_MODEL_yyINTEL_I82562ET:
   9055 				new_phytype = WMPHY_IFE;
   9056 				break;
   9057 			case MII_MODEL_yyINTEL_IGP01E1000:
   9058 				new_phytype = WMPHY_IGP;
   9059 				break;
   9060 			case MII_MODEL_yyINTEL_I82566:
   9061 				new_phytype = WMPHY_IGP_3;
   9062 				break;
   9063 			default:
   9064 				break;
   9065 			}
   9066 			break;
   9067 		default:
   9068 			break;
   9069 		}
   9070 		if (new_phytype == WMPHY_UNKNOWN)
   9071 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9072 			    __func__);
   9073 
   9074 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
		    && (sc->sc_phytype != new_phytype)) {
			aprint_error_dev(dev, "Previously assumed PHY type "
			    "(%u) was incorrect. PHY type from PHY ID = %u\n",
   9078 			    sc->sc_phytype, new_phytype);
   9079 		}
   9080 	}
   9081 
   9082 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9083 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9084 		/* SGMII */
   9085 		new_readreg = wm_sgmii_readreg;
   9086 		new_writereg = wm_sgmii_writereg;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9088 		/* BM2 (phyaddr == 1) */
   9089 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9090 		    && (new_phytype != WMPHY_BM)
   9091 		    && (new_phytype != WMPHY_UNKNOWN))
   9092 			doubt_phytype = new_phytype;
   9093 		new_phytype = WMPHY_BM;
   9094 		new_readreg = wm_gmii_bm_readreg;
   9095 		new_writereg = wm_gmii_bm_writereg;
   9096 	} else if (sc->sc_type >= WM_T_PCH) {
   9097 		/* All PCH* use _hv_ */
   9098 		new_readreg = wm_gmii_hv_readreg;
   9099 		new_writereg = wm_gmii_hv_writereg;
   9100 	} else if (sc->sc_type >= WM_T_ICH8) {
   9101 		/* non-82567 ICH8, 9 and 10 */
   9102 		new_readreg = wm_gmii_i82544_readreg;
   9103 		new_writereg = wm_gmii_i82544_writereg;
   9104 	} else if (sc->sc_type >= WM_T_80003) {
   9105 		/* 80003 */
   9106 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9107 		    && (new_phytype != WMPHY_GG82563)
   9108 		    && (new_phytype != WMPHY_UNKNOWN))
   9109 			doubt_phytype = new_phytype;
   9110 		new_phytype = WMPHY_GG82563;
   9111 		new_readreg = wm_gmii_i80003_readreg;
   9112 		new_writereg = wm_gmii_i80003_writereg;
   9113 	} else if (sc->sc_type >= WM_T_I210) {
   9114 		/* I210 and I211 */
   9115 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9116 		    && (new_phytype != WMPHY_I210)
   9117 		    && (new_phytype != WMPHY_UNKNOWN))
   9118 			doubt_phytype = new_phytype;
   9119 		new_phytype = WMPHY_I210;
   9120 		new_readreg = wm_gmii_gs40g_readreg;
   9121 		new_writereg = wm_gmii_gs40g_writereg;
   9122 	} else if (sc->sc_type >= WM_T_82580) {
   9123 		/* 82580, I350 and I354 */
   9124 		new_readreg = wm_gmii_82580_readreg;
   9125 		new_writereg = wm_gmii_82580_writereg;
   9126 	} else if (sc->sc_type >= WM_T_82544) {
		/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   9128 		new_readreg = wm_gmii_i82544_readreg;
   9129 		new_writereg = wm_gmii_i82544_writereg;
   9130 	} else {
   9131 		new_readreg = wm_gmii_i82543_readreg;
   9132 		new_writereg = wm_gmii_i82543_writereg;
   9133 	}
   9134 
   9135 	if (new_phytype == WMPHY_BM) {
   9136 		/* All BM use _bm_ */
   9137 		new_readreg = wm_gmii_bm_readreg;
   9138 		new_writereg = wm_gmii_bm_writereg;
   9139 	}
   9140 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9141 		/* All PCH* use _hv_ */
   9142 		new_readreg = wm_gmii_hv_readreg;
   9143 		new_writereg = wm_gmii_hv_writereg;
   9144 	}
   9145 
   9146 	/* Diag output */
   9147 	if (doubt_phytype != WMPHY_UNKNOWN)
   9148 		aprint_error_dev(dev, "Assumed new PHY type was "
   9149 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9150 		    new_phytype);
   9151 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
	    && (sc->sc_phytype != new_phytype))
		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
		    "was incorrect. New PHY type = %u\n",
   9155 		    sc->sc_phytype, new_phytype);
   9156 
   9157 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9158 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9159 
   9160 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9161 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9162 		    "function was incorrect.\n");
   9163 
   9164 	/* Update now */
   9165 	sc->sc_phytype = new_phytype;
   9166 	mii->mii_readreg = new_readreg;
   9167 	mii->mii_writereg = new_writereg;
   9168 }
   9169 
   9170 /*
   9171  * wm_get_phy_id_82575:
   9172  *
   9173  * Return PHY ID. Return -1 if it failed.
   9174  */
   9175 static int
   9176 wm_get_phy_id_82575(struct wm_softc *sc)
   9177 {
   9178 	uint32_t reg;
   9179 	int phyid = -1;
   9180 
   9181 	/* XXX */
   9182 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9183 		return -1;
   9184 
   9185 	if (wm_sgmii_uses_mdio(sc)) {
   9186 		switch (sc->sc_type) {
   9187 		case WM_T_82575:
   9188 		case WM_T_82576:
   9189 			reg = CSR_READ(sc, WMREG_MDIC);
   9190 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9191 			break;
   9192 		case WM_T_82580:
   9193 		case WM_T_I350:
   9194 		case WM_T_I354:
   9195 		case WM_T_I210:
   9196 		case WM_T_I211:
   9197 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9198 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9199 			break;
   9200 		default:
   9201 			return -1;
   9202 		}
   9203 	}
   9204 
   9205 	return phyid;
   9206 }
   9207 
   9208 
   9209 /*
   9210  * wm_gmii_mediainit:
   9211  *
   9212  *	Initialize media for use on 1000BASE-T devices.
   9213  */
   9214 static void
   9215 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9216 {
   9217 	device_t dev = sc->sc_dev;
   9218 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9219 	struct mii_data *mii = &sc->sc_mii;
   9220 	uint32_t reg;
   9221 
   9222 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9223 		device_xname(sc->sc_dev), __func__));
   9224 
   9225 	/* We have GMII. */
   9226 	sc->sc_flags |= WM_F_HAS_MII;
   9227 
   9228 	if (sc->sc_type == WM_T_80003)
   9229 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9230 	else
   9231 		sc->sc_tipg = TIPG_1000T_DFLT;
   9232 
   9233 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9234 	if ((sc->sc_type == WM_T_82580)
   9235 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9236 	    || (sc->sc_type == WM_T_I211)) {
   9237 		reg = CSR_READ(sc, WMREG_PHPM);
   9238 		reg &= ~PHPM_GO_LINK_D;
   9239 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9240 	}
   9241 
   9242 	/*
   9243 	 * Let the chip set speed/duplex on its own based on
   9244 	 * signals from the PHY.
   9245 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9246 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9247 	 */
   9248 	sc->sc_ctrl |= CTRL_SLU;
   9249 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9250 
   9251 	/* Initialize our media structures and probe the GMII. */
   9252 	mii->mii_ifp = ifp;
   9253 
   9254 	/*
	 * The first call of wm_gmii_setup_phytype. The result might be
   9256 	 * incorrect.
   9257 	 */
   9258 	wm_gmii_setup_phytype(sc, 0, 0);
   9259 
   9260 	mii->mii_statchg = wm_gmii_statchg;
   9261 
   9262 	/* get PHY control from SMBus to PCIe */
   9263 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9264 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9265 		wm_smbustopci(sc);
   9266 
   9267 	wm_gmii_reset(sc);
   9268 
   9269 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9270 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9271 	    wm_gmii_mediastatus);
   9272 
   9273 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9274 	    || (sc->sc_type == WM_T_82580)
   9275 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9276 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9277 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9278 			/* Attach only one port */
   9279 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9280 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9281 		} else {
   9282 			int i, id;
   9283 			uint32_t ctrl_ext;
   9284 
   9285 			id = wm_get_phy_id_82575(sc);
   9286 			if (id != -1) {
   9287 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9288 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9289 			}
   9290 			if ((id == -1)
   9291 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9292 				/* Power on sgmii phy if it is disabled */
   9293 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9294 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9295 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9296 				CSR_WRITE_FLUSH(sc);
   9297 				delay(300*1000); /* XXX too long */
   9298 
				/* Try PHY addresses from 1 to 7 */
   9300 				for (i = 1; i < 8; i++)
   9301 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9302 					    0xffffffff, i, MII_OFFSET_ANY,
   9303 					    MIIF_DOPAUSE);
   9304 
   9305 				/* restore previous sfp cage power state */
   9306 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9307 			}
   9308 		}
   9309 	} else {
   9310 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9311 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9312 	}
   9313 
   9314 	/*
   9315 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9316 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9317 	 */
   9318 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9319 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9320 		wm_set_mdio_slow_mode_hv(sc);
   9321 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9322 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9323 	}
   9324 
   9325 	/*
   9326 	 * (For ICH8 variants)
   9327 	 * If PHY detection failed, use BM's r/w function and retry.
   9328 	 */
   9329 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9330 		/* if failed, retry with *_bm_* */
   9331 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9332 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9333 		    sc->sc_phytype);
   9334 		sc->sc_phytype = WMPHY_BM;
   9335 		mii->mii_readreg = wm_gmii_bm_readreg;
   9336 		mii->mii_writereg = wm_gmii_bm_writereg;
   9337 
   9338 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9339 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9340 	}
   9341 
   9342 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   9344 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9345 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9346 		sc->sc_phytype = WMPHY_NONE;
   9347 	} else {
   9348 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9349 
   9350 		/*
		 * PHY found! Check the PHY type again with the second
		 * call of wm_gmii_setup_phytype.
   9353 		 */
   9354 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9355 		    child->mii_mpd_model);
   9356 
   9357 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9358 	}
   9359 }
   9360 
   9361 /*
   9362  * wm_gmii_mediachange:	[ifmedia interface function]
   9363  *
   9364  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9365  */
   9366 static int
   9367 wm_gmii_mediachange(struct ifnet *ifp)
   9368 {
   9369 	struct wm_softc *sc = ifp->if_softc;
   9370 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9371 	int rc;
   9372 
   9373 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9374 		device_xname(sc->sc_dev), __func__));
   9375 	if ((ifp->if_flags & IFF_UP) == 0)
   9376 		return 0;
   9377 
   9378 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9379 	sc->sc_ctrl |= CTRL_SLU;
   9380 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9381 	    || (sc->sc_type > WM_T_82543)) {
   9382 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9383 	} else {
   9384 		sc->sc_ctrl &= ~CTRL_ASDE;
   9385 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9386 		if (ife->ifm_media & IFM_FDX)
   9387 			sc->sc_ctrl |= CTRL_FD;
   9388 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9389 		case IFM_10_T:
   9390 			sc->sc_ctrl |= CTRL_SPEED_10;
   9391 			break;
   9392 		case IFM_100_TX:
   9393 			sc->sc_ctrl |= CTRL_SPEED_100;
   9394 			break;
   9395 		case IFM_1000_T:
   9396 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9397 			break;
   9398 		default:
   9399 			panic("wm_gmii_mediachange: bad media 0x%x",
   9400 			    ife->ifm_media);
   9401 		}
   9402 	}
   9403 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9404 	if (sc->sc_type <= WM_T_82543)
   9405 		wm_gmii_reset(sc);
   9406 
   9407 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9408 		return 0;
   9409 	return rc;
   9410 }
   9411 
   9412 /*
   9413  * wm_gmii_mediastatus:	[ifmedia interface function]
   9414  *
   9415  *	Get the current interface media status on a 1000BASE-T device.
   9416  */
   9417 static void
   9418 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9419 {
   9420 	struct wm_softc *sc = ifp->if_softc;
   9421 
   9422 	ether_mediastatus(ifp, ifmr);
   9423 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9424 	    | sc->sc_flowflags;
   9425 }
   9426 
   9427 #define	MDI_IO		CTRL_SWDPIN(2)
   9428 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9429 #define	MDI_CLK		CTRL_SWDPIN(3)
   9430 
   9431 static void
   9432 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9433 {
   9434 	uint32_t i, v;
   9435 
   9436 	v = CSR_READ(sc, WMREG_CTRL);
   9437 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9438 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9439 
   9440 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9441 		if (data & i)
   9442 			v |= MDI_IO;
   9443 		else
   9444 			v &= ~MDI_IO;
   9445 		CSR_WRITE(sc, WMREG_CTRL, v);
   9446 		CSR_WRITE_FLUSH(sc);
   9447 		delay(10);
   9448 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9449 		CSR_WRITE_FLUSH(sc);
   9450 		delay(10);
   9451 		CSR_WRITE(sc, WMREG_CTRL, v);
   9452 		CSR_WRITE_FLUSH(sc);
   9453 		delay(10);
   9454 	}
   9455 }
   9456 
   9457 static uint32_t
   9458 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9459 {
   9460 	uint32_t v, i, data = 0;
   9461 
   9462 	v = CSR_READ(sc, WMREG_CTRL);
   9463 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9464 	v |= CTRL_SWDPIO(3);
   9465 
   9466 	CSR_WRITE(sc, WMREG_CTRL, v);
   9467 	CSR_WRITE_FLUSH(sc);
   9468 	delay(10);
   9469 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9470 	CSR_WRITE_FLUSH(sc);
   9471 	delay(10);
   9472 	CSR_WRITE(sc, WMREG_CTRL, v);
   9473 	CSR_WRITE_FLUSH(sc);
   9474 	delay(10);
   9475 
   9476 	for (i = 0; i < 16; i++) {
   9477 		data <<= 1;
   9478 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9479 		CSR_WRITE_FLUSH(sc);
   9480 		delay(10);
   9481 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9482 			data |= 1;
   9483 		CSR_WRITE(sc, WMREG_CTRL, v);
   9484 		CSR_WRITE_FLUSH(sc);
   9485 		delay(10);
   9486 	}
   9487 
   9488 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9489 	CSR_WRITE_FLUSH(sc);
   9490 	delay(10);
   9491 	CSR_WRITE(sc, WMREG_CTRL, v);
   9492 	CSR_WRITE_FLUSH(sc);
   9493 	delay(10);
   9494 
   9495 	return data;
   9496 }
   9497 
   9498 #undef MDI_IO
   9499 #undef MDI_DIR
   9500 #undef MDI_CLK
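
/*
 * The bit-banged transfers below follow the IEEE 802.3 clause 22 MDIO
 * frame format.  For a read, wm_gmii_i82543_readreg() shifts out:
 *
 *	32 bits	preamble (all ones)
 *	 2 bits	start of frame (MII_COMMAND_START)
 *	 2 bits	opcode (MII_COMMAND_READ)
 *	 5 bits	PHY address
 *	 5 bits	register address
 *
 * and then clocks in the turnaround and 16 data bits with
 * wm_i82543_mii_recvbits().
 */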
   9501 
   9502 /*
   9503  * wm_gmii_i82543_readreg:	[mii interface function]
   9504  *
   9505  *	Read a PHY register on the GMII (i82543 version).
   9506  */
   9507 static int
   9508 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9509 {
   9510 	struct wm_softc *sc = device_private(self);
   9511 	int rv;
   9512 
   9513 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9514 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9515 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9516 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9517 
   9518 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9519 	    device_xname(sc->sc_dev), phy, reg, rv));
   9520 
   9521 	return rv;
   9522 }
   9523 
   9524 /*
   9525  * wm_gmii_i82543_writereg:	[mii interface function]
   9526  *
   9527  *	Write a PHY register on the GMII (i82543 version).
   9528  */
   9529 static void
   9530 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9531 {
   9532 	struct wm_softc *sc = device_private(self);
   9533 
   9534 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9535 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9536 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9537 	    (MII_COMMAND_START << 30), 32);
   9538 }
   9539 
   9540 /*
   9541  * wm_gmii_mdic_readreg:	[mii interface function]
   9542  *
   9543  *	Read a PHY register on the GMII.
   9544  */
   9545 static int
   9546 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9547 {
   9548 	struct wm_softc *sc = device_private(self);
   9549 	uint32_t mdic = 0;
   9550 	int i, rv;
   9551 
   9552 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9553 	    MDIC_REGADD(reg));
   9554 
   9555 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9556 		mdic = CSR_READ(sc, WMREG_MDIC);
   9557 		if (mdic & MDIC_READY)
   9558 			break;
   9559 		delay(50);
   9560 	}
   9561 
   9562 	if ((mdic & MDIC_READY) == 0) {
   9563 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9564 		    device_xname(sc->sc_dev), phy, reg);
   9565 		rv = 0;
   9566 	} else if (mdic & MDIC_E) {
   9567 #if 0 /* This is normal if no PHY is present. */
   9568 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9569 		    device_xname(sc->sc_dev), phy, reg);
   9570 #endif
   9571 		rv = 0;
   9572 	} else {
   9573 		rv = MDIC_DATA(mdic);
   9574 		if (rv == 0xffff)
   9575 			rv = 0;
   9576 	}
   9577 
   9578 	return rv;
   9579 }
   9580 
   9581 /*
   9582  * wm_gmii_mdic_writereg:	[mii interface function]
   9583  *
   9584  *	Write a PHY register on the GMII.
   9585  */
   9586 static void
   9587 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9588 {
   9589 	struct wm_softc *sc = device_private(self);
   9590 	uint32_t mdic = 0;
   9591 	int i;
   9592 
   9593 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9594 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9595 
   9596 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9597 		mdic = CSR_READ(sc, WMREG_MDIC);
   9598 		if (mdic & MDIC_READY)
   9599 			break;
   9600 		delay(50);
   9601 	}
   9602 
   9603 	if ((mdic & MDIC_READY) == 0)
   9604 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9605 		    device_xname(sc->sc_dev), phy, reg);
   9606 	else if (mdic & MDIC_E)
   9607 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9608 		    device_xname(sc->sc_dev), phy, reg);
   9609 }
   9610 
   9611 /*
   9612  * wm_gmii_i82544_readreg:	[mii interface function]
   9613  *
   9614  *	Read a PHY register on the GMII.
   9615  */
   9616 static int
   9617 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9618 {
   9619 	struct wm_softc *sc = device_private(self);
   9620 	int rv;
   9621 
   9622 	if (sc->phy.acquire(sc)) {
   9623 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9624 		    __func__);
   9625 		return 0;
   9626 	}
   9627 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9628 	sc->phy.release(sc);
   9629 
   9630 	return rv;
   9631 }
   9632 
   9633 /*
   9634  * wm_gmii_i82544_writereg:	[mii interface function]
   9635  *
   9636  *	Write a PHY register on the GMII.
   9637  */
   9638 static void
   9639 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9640 {
   9641 	struct wm_softc *sc = device_private(self);
   9642 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}
   9647 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9648 	sc->phy.release(sc);
   9649 }
   9650 
   9651 /*
   9652  * wm_gmii_i80003_readreg:	[mii interface function]
   9653  *
 *	Read a PHY register on the kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9657  */
   9658 static int
   9659 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9660 {
   9661 	struct wm_softc *sc = device_private(self);
   9662 	int rv;
   9663 
   9664 	if (phy != 1) /* only one PHY on kumeran bus */
   9665 		return 0;
   9666 
   9667 	if (sc->phy.acquire(sc)) {
   9668 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9669 		    __func__);
   9670 		return 0;
   9671 	}
   9672 
   9673 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9674 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9675 		    reg >> GG82563_PAGE_SHIFT);
   9676 	} else {
   9677 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9678 		    reg >> GG82563_PAGE_SHIFT);
   9679 	}
	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
   9681 	delay(200);
   9682 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9683 	delay(200);
   9684 	sc->phy.release(sc);
   9685 
   9686 	return rv;
   9687 }
   9688 
   9689 /*
   9690  * wm_gmii_i80003_writereg:	[mii interface function]
   9691  *
 *	Write a PHY register on the kumeran interface (80003).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9695  */
   9696 static void
   9697 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9698 {
   9699 	struct wm_softc *sc = device_private(self);
   9700 
   9701 	if (phy != 1) /* only one PHY on kumeran bus */
   9702 		return;
   9703 
   9704 	if (sc->phy.acquire(sc)) {
   9705 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9706 		    __func__);
   9707 		return;
   9708 	}
   9709 
   9710 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9711 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9712 		    reg >> GG82563_PAGE_SHIFT);
   9713 	} else {
   9714 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9715 		    reg >> GG82563_PAGE_SHIFT);
   9716 	}
	/* Wait 200us more to work around a ready-bit bug in the MDIC register */
   9718 	delay(200);
   9719 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9720 	delay(200);
   9721 
   9722 	sc->phy.release(sc);
   9723 }
   9724 
   9725 /*
   9726  * wm_gmii_bm_readreg:	[mii interface function]
   9727  *
 *	Read a PHY register on the BM PHY (82574/82583/ICH variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9731  */
   9732 static int
   9733 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9734 {
   9735 	struct wm_softc *sc = device_private(self);
   9736 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9737 	uint16_t val;
   9738 	int rv;
   9739 
   9740 	if (sc->phy.acquire(sc)) {
   9741 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9742 		    __func__);
   9743 		return 0;
   9744 	}
   9745 
   9746 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9747 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9748 		    || (reg == 31)) ? 1 : phy;
   9749 	/* Page 800 works differently than the rest so it has its own func */
   9750 	if (page == BM_WUC_PAGE) {
   9751 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9752 		rv = val;
   9753 		goto release;
   9754 	}
   9755 
   9756 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9757 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9758 		    && (sc->sc_type != WM_T_82583))
   9759 			wm_gmii_mdic_writereg(self, phy,
   9760 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9761 		else
   9762 			wm_gmii_mdic_writereg(self, phy,
   9763 			    BME1000_PHY_PAGE_SELECT, page);
   9764 	}
   9765 
   9766 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9767 
   9768 release:
   9769 	sc->phy.release(sc);
   9770 	return rv;
   9771 }
   9772 
   9773 /*
   9774  * wm_gmii_bm_writereg:	[mii interface function]
   9775  *
 *	Write a PHY register on the BM PHY (82574/82583/ICH variants).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9779  */
   9780 static void
   9781 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9782 {
   9783 	struct wm_softc *sc = device_private(self);
   9784 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9785 
   9786 	if (sc->phy.acquire(sc)) {
   9787 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9788 		    __func__);
   9789 		return;
   9790 	}
   9791 
   9792 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9793 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9794 		    || (reg == 31)) ? 1 : phy;
   9795 	/* Page 800 works differently than the rest so it has its own func */
   9796 	if (page == BM_WUC_PAGE) {
   9797 		uint16_t tmp;
   9798 
   9799 		tmp = val;
   9800 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9801 		goto release;
   9802 	}
   9803 
   9804 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9805 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9806 		    && (sc->sc_type != WM_T_82583))
   9807 			wm_gmii_mdic_writereg(self, phy,
   9808 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9809 		else
   9810 			wm_gmii_mdic_writereg(self, phy,
   9811 			    BME1000_PHY_PAGE_SELECT, page);
   9812 	}
   9813 
   9814 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9815 
   9816 release:
   9817 	sc->phy.release(sc);
   9818 }
   9819 
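/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write (rd == 0) a BM PHY wakeup register on
 *	page 800. The sequence is: enable wakeup register access on page
 *	769, perform the access on page 800, then restore the original
 *	WUCE value. The caller is expected to hold the PHY semaphore.
 */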
   9820 static void
   9821 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   9822 {
   9823 	struct wm_softc *sc = device_private(self);
   9824 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9825 	uint16_t wuce, reg;
   9826 
   9827 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9828 		device_xname(sc->sc_dev), __func__));
   9829 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9830 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
   9832 	}
   9833 
   9834 	/*
   9835 	 * 1) Enable PHY wakeup register first.
   9836 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9837 	 */
   9838 
   9839 	/* Set page 769 */
   9840 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9841 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9842 
   9843 	/* Read WUCE and save it */
   9844 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9845 
   9846 	reg = wuce | BM_WUC_ENABLE_BIT;
   9847 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9848 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9849 
   9850 	/* Select page 800 */
   9851 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9852 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9853 
   9854 	/*
   9855 	 * 2) Access PHY wakeup register.
   9856 	 * See e1000_access_phy_wakeup_reg_bm.
   9857 	 */
   9858 
	/* Write the wakeup register's page 800 offset (address opcode) */
   9860 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9861 
   9862 	if (rd)
   9863 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9864 	else
   9865 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9866 
   9867 	/*
   9868 	 * 3) Disable PHY wakeup register.
   9869 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9870 	 */
   9871 	/* Set page 769 */
   9872 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9873 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9874 
   9875 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9876 }
   9877 
   9878 /*
   9879  * wm_gmii_hv_readreg:	[mii interface function]
   9880  *
 *	Read a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9884  */
   9885 static int
   9886 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9887 {
   9888 	struct wm_softc *sc = device_private(self);
   9889 	int rv;
   9890 
   9891 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9892 		device_xname(sc->sc_dev), __func__));
   9893 	if (sc->phy.acquire(sc)) {
   9894 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9895 		    __func__);
   9896 		return 0;
   9897 	}
   9898 
   9899 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9900 	sc->phy.release(sc);
   9901 	return rv;
   9902 }
   9903 
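/*
 * wm_gmii_hv_readreg_locked:
 *
 *	Read a PHY register on the HV PHY, assuming the caller already
 *	holds the PHY semaphore.
 */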
   9904 static int
   9905 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9906 {
   9907 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9908 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9909 	uint16_t val;
   9910 	int rv;
   9911 
   9912 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9913 
   9914 	/* Page 800 works differently than the rest so it has its own func */
   9915 	if (page == BM_WUC_PAGE) {
   9916 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9917 		return val;
   9918 	}
   9919 
   9920 	/*
	 * Pages 1 through 767 work differently than the rest and would
	 * need their own function, which is not implemented
   9923 	 */
   9924 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9925 		printf("gmii_hv_readreg!!!\n");
   9926 		return 0;
   9927 	}
   9928 
   9929 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9930 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9931 		    page << BME1000_PAGE_SHIFT);
   9932 	}
   9933 
   9934 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9935 	return rv;
   9936 }
   9937 
   9938 /*
   9939  * wm_gmii_hv_writereg:	[mii interface function]
   9940  *
 *	Write a PHY register on the HV PHY (PCH and newer).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9944  */
   9945 static void
   9946 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9947 {
   9948 	struct wm_softc *sc = device_private(self);
   9949 
   9950 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9951 		device_xname(sc->sc_dev), __func__));
   9952 
   9953 	if (sc->phy.acquire(sc)) {
   9954 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9955 		    __func__);
   9956 		return;
   9957 	}
   9958 
   9959 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9960 	sc->phy.release(sc);
   9961 }
   9962 
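/*
 * wm_gmii_hv_writereg_locked:
 *
 *	Write a PHY register on the HV PHY, assuming the caller already
 *	holds the PHY semaphore.
 */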
   9963 static void
   9964 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9965 {
   9966 	struct wm_softc *sc = device_private(self);
   9967 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9968 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9969 
   9970 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9971 
   9972 	/* Page 800 works differently than the rest so it has its own func */
   9973 	if (page == BM_WUC_PAGE) {
   9974 		uint16_t tmp;
   9975 
   9976 		tmp = val;
   9977 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9978 		return;
   9979 	}
   9980 
   9981 	/*
	 * Pages 1 through 767 work differently than the rest and would
	 * need their own function, which is not implemented
   9984 	 */
   9985 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9986 		printf("gmii_hv_writereg!!!\n");
   9987 		return;
   9988 	}
   9989 
   9990 	{
   9991 		/*
   9992 		 * XXX Workaround MDIO accesses being disabled after entering
   9993 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9994 		 * register is set)
   9995 		 */
   9996 		if (sc->sc_phytype == WMPHY_82578) {
   9997 			struct mii_softc *child;
   9998 
   9999 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10000 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10001 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10002 			    && ((val & (1 << 11)) != 0)) {
   10003 				printf("XXX need workaround\n");
   10004 			}
   10005 		}
   10006 
   10007 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10008 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   10009 			    page << BME1000_PAGE_SHIFT);
   10010 		}
   10011 	}
   10012 
   10013 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10014 }
   10015 
   10016 /*
   10017  * wm_gmii_82580_readreg:	[mii interface function]
   10018  *
   10019  *	Read a PHY register on the 82580 and I350.
   10020  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10022  */
   10023 static int
   10024 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10025 {
   10026 	struct wm_softc *sc = device_private(self);
   10027 	int rv;
   10028 
   10029 	if (sc->phy.acquire(sc) != 0) {
   10030 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10031 		    __func__);
   10032 		return 0;
   10033 	}
   10034 
   10035 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10036 
   10037 	sc->phy.release(sc);
   10038 	return rv;
   10039 }
   10040 
   10041 /*
   10042  * wm_gmii_82580_writereg:	[mii interface function]
   10043  *
   10044  *	Write a PHY register on the 82580 and I350.
   10045  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10047  */
   10048 static void
   10049 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10050 {
   10051 	struct wm_softc *sc = device_private(self);
   10052 
   10053 	if (sc->phy.acquire(sc) != 0) {
   10054 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10055 		    __func__);
   10056 		return;
   10057 	}
   10058 
   10059 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10060 
   10061 	sc->phy.release(sc);
   10062 }
   10063 
   10064 /*
   10065  * wm_gmii_gs40g_readreg:	[mii interface function]
   10066  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10070  */
   10071 static int
   10072 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10073 {
   10074 	struct wm_softc *sc = device_private(self);
   10075 	int page, offset;
   10076 	int rv;
   10077 
   10078 	/* Acquire semaphore */
   10079 	if (sc->phy.acquire(sc)) {
   10080 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10081 		    __func__);
   10082 		return 0;
   10083 	}
   10084 
   10085 	/* Page select */
   10086 	page = reg >> GS40G_PAGE_SHIFT;
   10087 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10088 
   10089 	/* Read reg */
   10090 	offset = reg & GS40G_OFFSET_MASK;
   10091 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10092 
   10093 	sc->phy.release(sc);
   10094 	return rv;
   10095 }
   10096 
   10097 /*
   10098  * wm_gmii_gs40g_writereg:	[mii interface function]
   10099  *
   10100  *	Write a PHY register on the I210 and I211.
   10101  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10103  */
   10104 static void
   10105 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10106 {
   10107 	struct wm_softc *sc = device_private(self);
   10108 	int page, offset;
   10109 
   10110 	/* Acquire semaphore */
   10111 	if (sc->phy.acquire(sc)) {
   10112 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10113 		    __func__);
   10114 		return;
   10115 	}
   10116 
   10117 	/* Page select */
   10118 	page = reg >> GS40G_PAGE_SHIFT;
   10119 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10120 
   10121 	/* Write reg */
   10122 	offset = reg & GS40G_OFFSET_MASK;
   10123 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10124 
   10125 	/* Release semaphore */
   10126 	sc->phy.release(sc);
   10127 }
   10128 
   10129 /*
   10130  * wm_gmii_statchg:	[mii interface function]
   10131  *
   10132  *	Callback from MII layer when media changes.
   10133  */
   10134 static void
   10135 wm_gmii_statchg(struct ifnet *ifp)
   10136 {
   10137 	struct wm_softc *sc = ifp->if_softc;
   10138 	struct mii_data *mii = &sc->sc_mii;
   10139 
   10140 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10141 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10142 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10143 
   10144 	/*
   10145 	 * Get flow control negotiation result.
   10146 	 */
   10147 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10148 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10149 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10150 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10151 	}
   10152 
   10153 	if (sc->sc_flowflags & IFM_FLOW) {
   10154 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10155 			sc->sc_ctrl |= CTRL_TFCE;
   10156 			sc->sc_fcrtl |= FCRTL_XONE;
   10157 		}
   10158 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10159 			sc->sc_ctrl |= CTRL_RFCE;
   10160 	}
   10161 
   10162 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10163 		DPRINTF(WM_DEBUG_LINK,
   10164 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10165 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10166 	} else {
   10167 		DPRINTF(WM_DEBUG_LINK,
   10168 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10169 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10170 	}
   10171 
   10172 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10173 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10174 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10175 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10176 	if (sc->sc_type == WM_T_80003) {
   10177 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10178 		case IFM_1000_T:
   10179 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10180 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10182 			break;
   10183 		default:
   10184 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10185 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10187 			break;
   10188 		}
   10189 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10190 	}
   10191 }
   10192 
   10193 /* kumeran related (80003, ICH* and PCH*) */
   10194 
   10195 /*
   10196  * wm_kmrn_readreg:
   10197  *
   10198  *	Read a kumeran register
   10199  */
   10200 static int
   10201 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10202 {
   10203 	int rv;
   10204 
   10205 	if (sc->sc_type == WM_T_80003)
   10206 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10207 	else
   10208 		rv = sc->phy.acquire(sc);
   10209 	if (rv != 0) {
   10210 		aprint_error_dev(sc->sc_dev,
   10211 		    "%s: failed to get semaphore\n", __func__);
   10212 		return 0;
   10213 	}
   10214 
   10215 	rv = wm_kmrn_readreg_locked(sc, reg);
   10216 
   10217 	if (sc->sc_type == WM_T_80003)
   10218 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10219 	else
   10220 		sc->phy.release(sc);
   10221 
   10222 	return rv;
   10223 }
   10224 
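/*
 * wm_kmrn_readreg_locked:
 *
 *	Read a kumeran register with the semaphore already held. The
 *	register offset is written to KUMCTRLSTA with the REN bit set,
 *	and the result is read back after a short delay.
 */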
   10225 static int
   10226 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10227 {
   10228 	int rv;
   10229 
   10230 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10231 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10232 	    KUMCTRLSTA_REN);
   10233 	CSR_WRITE_FLUSH(sc);
   10234 	delay(2);
   10235 
   10236 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10237 
   10238 	return rv;
   10239 }
   10240 
   10241 /*
   10242  * wm_kmrn_writereg:
   10243  *
   10244  *	Write a kumeran register
   10245  */
   10246 static void
   10247 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10248 {
   10249 	int rv;
   10250 
   10251 	if (sc->sc_type == WM_T_80003)
   10252 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10253 	else
   10254 		rv = sc->phy.acquire(sc);
   10255 	if (rv != 0) {
   10256 		aprint_error_dev(sc->sc_dev,
   10257 		    "%s: failed to get semaphore\n", __func__);
   10258 		return;
   10259 	}
   10260 
   10261 	wm_kmrn_writereg_locked(sc, reg, val);
   10262 
   10263 	if (sc->sc_type == WM_T_80003)
   10264 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10265 	else
   10266 		sc->phy.release(sc);
   10267 }
   10268 
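/*
 * wm_kmrn_writereg_locked:
 *
 *	Write a kumeran register with the semaphore already held.
 */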
   10269 static void
   10270 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10271 {
   10272 
   10273 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10274 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10275 	    (val & KUMCTRLSTA_MASK));
   10276 }
   10277 
   10278 /* SGMII related */
   10279 
   10280 /*
   10281  * wm_sgmii_uses_mdio
   10282  *
   10283  * Check whether the transaction is to the internal PHY or the external
   10284  * MDIO interface. Return true if it's MDIO.
   10285  */
   10286 static bool
   10287 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10288 {
   10289 	uint32_t reg;
   10290 	bool ismdio = false;
   10291 
   10292 	switch (sc->sc_type) {
   10293 	case WM_T_82575:
   10294 	case WM_T_82576:
   10295 		reg = CSR_READ(sc, WMREG_MDIC);
   10296 		ismdio = ((reg & MDIC_DEST) != 0);
   10297 		break;
   10298 	case WM_T_82580:
   10299 	case WM_T_I350:
   10300 	case WM_T_I354:
   10301 	case WM_T_I210:
   10302 	case WM_T_I211:
   10303 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10304 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10305 		break;
   10306 	default:
   10307 		break;
   10308 	}
   10309 
   10310 	return ismdio;
   10311 }
   10312 
   10313 /*
   10314  * wm_sgmii_readreg:	[mii interface function]
   10315  *
   10316  *	Read a PHY register on the SGMII
   10317  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10319  */
   10320 static int
   10321 wm_sgmii_readreg(device_t self, int phy, int reg)
   10322 {
   10323 	struct wm_softc *sc = device_private(self);
   10324 	uint32_t i2ccmd;
   10325 	int i, rv;
   10326 
   10327 	if (sc->phy.acquire(sc)) {
   10328 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10329 		    __func__);
   10330 		return 0;
   10331 	}
   10332 
   10333 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10334 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10335 	    | I2CCMD_OPCODE_READ;
   10336 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10337 
   10338 	/* Poll the ready bit */
   10339 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10340 		delay(50);
   10341 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10342 		if (i2ccmd & I2CCMD_READY)
   10343 			break;
   10344 	}
   10345 	if ((i2ccmd & I2CCMD_READY) == 0)
   10346 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10347 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10348 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10349 
   10350 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10351 
   10352 	sc->phy.release(sc);
   10353 	return rv;
   10354 }
   10355 
   10356 /*
   10357  * wm_sgmii_writereg:	[mii interface function]
   10358  *
   10359  *	Write a PHY register on the SGMII.
   10360  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   10362  */
   10363 static void
   10364 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10365 {
   10366 	struct wm_softc *sc = device_private(self);
   10367 	uint32_t i2ccmd;
   10368 	int i;
   10369 	int val_swapped;
   10370 
   10371 	if (sc->phy.acquire(sc) != 0) {
   10372 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10373 		    __func__);
   10374 		return;
   10375 	}
   10376 	/* Swap the data bytes for the I2C interface */
   10377 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10378 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10379 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10380 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10381 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10382 
   10383 	/* Poll the ready bit */
   10384 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10385 		delay(50);
   10386 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10387 		if (i2ccmd & I2CCMD_READY)
   10388 			break;
   10389 	}
   10390 	if ((i2ccmd & I2CCMD_READY) == 0)
   10391 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10392 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10393 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10394 
   10395 	sc->phy.release(sc);
   10396 }
   10397 
   10398 /* TBI related */
   10399 
   10400 /*
   10401  * wm_tbi_mediainit:
   10402  *
   10403  *	Initialize media for use on 1000BASE-X devices.
   10404  */
   10405 static void
   10406 wm_tbi_mediainit(struct wm_softc *sc)
   10407 {
   10408 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10409 	const char *sep = "";
   10410 
   10411 	if (sc->sc_type < WM_T_82543)
   10412 		sc->sc_tipg = TIPG_WM_DFLT;
   10413 	else
   10414 		sc->sc_tipg = TIPG_LG_DFLT;
   10415 
   10416 	sc->sc_tbi_serdes_anegticks = 5;
   10417 
   10418 	/* Initialize our media structures */
   10419 	sc->sc_mii.mii_ifp = ifp;
   10420 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10421 
   10422 	if ((sc->sc_type >= WM_T_82575)
   10423 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10424 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10425 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10426 	else
   10427 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10428 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10429 
   10430 	/*
   10431 	 * SWD Pins:
   10432 	 *
   10433 	 *	0 = Link LED (output)
   10434 	 *	1 = Loss Of Signal (input)
   10435 	 */
   10436 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10437 
   10438 	/* XXX Perhaps this is only for TBI */
   10439 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10440 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10441 
   10442 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10443 		sc->sc_ctrl &= ~CTRL_LRST;
   10444 
   10445 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10446 
   10447 #define	ADD(ss, mm, dd)							\
   10448 do {									\
   10449 	aprint_normal("%s%s", sep, ss);					\
   10450 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10451 	sep = ", ";							\
   10452 } while (/*CONSTCOND*/0)
   10453 
   10454 	aprint_normal_dev(sc->sc_dev, "");
   10455 
   10456 	if (sc->sc_type == WM_T_I354) {
   10457 		uint32_t status;
   10458 
   10459 		status = CSR_READ(sc, WMREG_STATUS);
   10460 		if (((status & STATUS_2P5_SKU) != 0)
   10461 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
		} else
			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10465 	} else if (sc->sc_type == WM_T_82545) {
   10466 		/* Only 82545 is LX (XXX except SFP) */
   10467 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10468 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10469 	} else {
   10470 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10471 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10472 	}
   10473 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10474 	aprint_normal("\n");
   10475 
   10476 #undef ADD
   10477 
   10478 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10479 }
   10480 
   10481 /*
   10482  * wm_tbi_mediachange:	[ifmedia interface function]
   10483  *
   10484  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10485  */
   10486 static int
   10487 wm_tbi_mediachange(struct ifnet *ifp)
   10488 {
   10489 	struct wm_softc *sc = ifp->if_softc;
   10490 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10491 	uint32_t status;
   10492 	int i;
   10493 
   10494 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10495 		/* XXX need some work for >= 82571 and < 82575 */
   10496 		if (sc->sc_type < WM_T_82575)
   10497 			return 0;
   10498 	}
   10499 
   10500 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10501 	    || (sc->sc_type >= WM_T_82575))
   10502 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10503 
   10504 	sc->sc_ctrl &= ~CTRL_LRST;
   10505 	sc->sc_txcw = TXCW_ANE;
   10506 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10507 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10508 	else if (ife->ifm_media & IFM_FDX)
   10509 		sc->sc_txcw |= TXCW_FD;
   10510 	else
   10511 		sc->sc_txcw |= TXCW_HD;
   10512 
   10513 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10514 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10515 
   10516 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10517 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10518 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10519 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10520 	CSR_WRITE_FLUSH(sc);
   10521 	delay(1000);
   10522 
   10523 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10524 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10525 
   10526 	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set if
	 * the optics detect a signal; on the 82544 and earlier, a reading
	 * of 0 indicates a signal.
   10529 	 */
   10530 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10531 		/* Have signal; wait for the link to come up. */
   10532 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10533 			delay(10000);
   10534 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10535 				break;
   10536 		}
   10537 
   10538 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10539 			    device_xname(sc->sc_dev),i));
   10540 
   10541 		status = CSR_READ(sc, WMREG_STATUS);
   10542 		DPRINTF(WM_DEBUG_LINK,
   10543 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10544 			device_xname(sc->sc_dev),status, STATUS_LU));
   10545 		if (status & STATUS_LU) {
   10546 			/* Link is up. */
   10547 			DPRINTF(WM_DEBUG_LINK,
   10548 			    ("%s: LINK: set media -> link up %s\n",
   10549 			    device_xname(sc->sc_dev),
   10550 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10551 
   10552 			/*
   10553 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10554 			 * so we should update sc->sc_ctrl
   10555 			 */
   10556 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10557 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10558 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10559 			if (status & STATUS_FD)
   10560 				sc->sc_tctl |=
   10561 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10562 			else
   10563 				sc->sc_tctl |=
   10564 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10565 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10566 				sc->sc_fcrtl |= FCRTL_XONE;
   10567 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10568 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10569 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10570 				      sc->sc_fcrtl);
   10571 			sc->sc_tbi_linkup = 1;
   10572 		} else {
   10573 			if (i == WM_LINKUP_TIMEOUT)
   10574 				wm_check_for_link(sc);
   10575 			/* Link is down. */
   10576 			DPRINTF(WM_DEBUG_LINK,
   10577 			    ("%s: LINK: set media -> link down\n",
   10578 			    device_xname(sc->sc_dev)));
   10579 			sc->sc_tbi_linkup = 0;
   10580 		}
   10581 	} else {
   10582 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10583 		    device_xname(sc->sc_dev)));
   10584 		sc->sc_tbi_linkup = 0;
   10585 	}
   10586 
   10587 	wm_tbi_serdes_set_linkled(sc);
   10588 
   10589 	return 0;
   10590 }
   10591 
   10592 /*
   10593  * wm_tbi_mediastatus:	[ifmedia interface function]
   10594  *
   10595  *	Get the current interface media status on a 1000BASE-X device.
   10596  */
   10597 static void
   10598 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10599 {
   10600 	struct wm_softc *sc = ifp->if_softc;
   10601 	uint32_t ctrl, status;
   10602 
   10603 	ifmr->ifm_status = IFM_AVALID;
   10604 	ifmr->ifm_active = IFM_ETHER;
   10605 
   10606 	status = CSR_READ(sc, WMREG_STATUS);
   10607 	if ((status & STATUS_LU) == 0) {
   10608 		ifmr->ifm_active |= IFM_NONE;
   10609 		return;
   10610 	}
   10611 
   10612 	ifmr->ifm_status |= IFM_ACTIVE;
   10613 	/* Only 82545 is LX */
   10614 	if (sc->sc_type == WM_T_82545)
   10615 		ifmr->ifm_active |= IFM_1000_LX;
   10616 	else
   10617 		ifmr->ifm_active |= IFM_1000_SX;
   10618 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10619 		ifmr->ifm_active |= IFM_FDX;
   10620 	else
   10621 		ifmr->ifm_active |= IFM_HDX;
   10622 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10623 	if (ctrl & CTRL_RFCE)
   10624 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10625 	if (ctrl & CTRL_TFCE)
   10626 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10627 }
   10628 
/*
 * wm_check_for_link:
 *
 *	Check for link on TBI devices using the signal-detect pin, the
 *	link-up status bit and received /C/ ordered sets, forcing the
 *	link up or re-enabling autonegotiation as needed. XXX TBI only.
 */
   10630 static int
   10631 wm_check_for_link(struct wm_softc *sc)
   10632 {
   10633 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10634 	uint32_t rxcw;
   10635 	uint32_t ctrl;
   10636 	uint32_t status;
   10637 	uint32_t sig;
   10638 
   10639 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10640 		/* XXX need some work for >= 82571 */
   10641 		if (sc->sc_type >= WM_T_82571) {
   10642 			sc->sc_tbi_linkup = 1;
   10643 			return 0;
   10644 		}
   10645 	}
   10646 
   10647 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10648 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10649 	status = CSR_READ(sc, WMREG_STATUS);
   10650 
   10651 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10652 
   10653 	DPRINTF(WM_DEBUG_LINK,
   10654 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10655 		device_xname(sc->sc_dev), __func__,
   10656 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10657 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10658 
   10659 	/*
   10660 	 * SWDPIN   LU RXCW
   10661 	 *      0    0    0
   10662 	 *      0    0    1	(should not happen)
   10663 	 *      0    1    0	(should not happen)
   10664 	 *      0    1    1	(should not happen)
   10665 	 *      1    0    0	Disable autonego and force linkup
   10666 	 *      1    0    1	got /C/ but not linkup yet
   10667 	 *      1    1    0	(linkup)
   10668 	 *      1    1    1	If IFM_AUTO, back to autonego
   10669 	 *
   10670 	 */
   10671 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10672 	    && ((status & STATUS_LU) == 0)
   10673 	    && ((rxcw & RXCW_C) == 0)) {
   10674 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10675 			__func__));
   10676 		sc->sc_tbi_linkup = 0;
   10677 		/* Disable auto-negotiation in the TXCW register */
   10678 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10679 
   10680 		/*
   10681 		 * Force link-up and also force full-duplex.
   10682 		 *
		 * NOTE: TFCE and RFCE in CTRL were updated automatically,
		 * so we should update sc->sc_ctrl.
   10685 		 */
   10686 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10687 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10688 	} else if (((status & STATUS_LU) != 0)
   10689 	    && ((rxcw & RXCW_C) != 0)
   10690 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10691 		sc->sc_tbi_linkup = 1;
   10692 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10693 			__func__));
   10694 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10695 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10696 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10697 	    && ((rxcw & RXCW_C) != 0)) {
   10698 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10699 	} else {
   10700 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10701 			status));
   10702 	}
   10703 
   10704 	return 0;
   10705 }
   10706 
   10707 /*
   10708  * wm_tbi_tick:
   10709  *
   10710  *	Check the link on TBI devices.
   10711  *	This function acts as mii_tick().
   10712  */
   10713 static void
   10714 wm_tbi_tick(struct wm_softc *sc)
   10715 {
   10716 	struct mii_data *mii = &sc->sc_mii;
   10717 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10718 	uint32_t status;
   10719 
   10720 	KASSERT(WM_CORE_LOCKED(sc));
   10721 
   10722 	status = CSR_READ(sc, WMREG_STATUS);
   10723 
   10724 	/* XXX is this needed? */
   10725 	(void)CSR_READ(sc, WMREG_RXCW);
   10726 	(void)CSR_READ(sc, WMREG_CTRL);
   10727 
   10728 	/* set link status */
   10729 	if ((status & STATUS_LU) == 0) {
   10730 		DPRINTF(WM_DEBUG_LINK,
   10731 		    ("%s: LINK: checklink -> down\n",
   10732 			device_xname(sc->sc_dev)));
   10733 		sc->sc_tbi_linkup = 0;
   10734 	} else if (sc->sc_tbi_linkup == 0) {
   10735 		DPRINTF(WM_DEBUG_LINK,
   10736 		    ("%s: LINK: checklink -> up %s\n",
   10737 			device_xname(sc->sc_dev),
   10738 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10739 		sc->sc_tbi_linkup = 1;
   10740 		sc->sc_tbi_serdes_ticks = 0;
   10741 	}
   10742 
   10743 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10744 		goto setled;
   10745 
   10746 	if ((status & STATUS_LU) == 0) {
   10747 		sc->sc_tbi_linkup = 0;
   10748 		/* If the timer expired, retry autonegotiation */
   10749 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10750 		    && (++sc->sc_tbi_serdes_ticks
   10751 			>= sc->sc_tbi_serdes_anegticks)) {
   10752 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10753 			sc->sc_tbi_serdes_ticks = 0;
   10754 			/*
   10755 			 * Reset the link, and let autonegotiation do
   10756 			 * its thing
   10757 			 */
   10758 			sc->sc_ctrl |= CTRL_LRST;
   10759 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10760 			CSR_WRITE_FLUSH(sc);
   10761 			delay(1000);
   10762 			sc->sc_ctrl &= ~CTRL_LRST;
   10763 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10764 			CSR_WRITE_FLUSH(sc);
   10765 			delay(1000);
   10766 			CSR_WRITE(sc, WMREG_TXCW,
   10767 			    sc->sc_txcw & ~TXCW_ANE);
   10768 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10769 		}
   10770 	}
   10771 
   10772 setled:
   10773 	wm_tbi_serdes_set_linkled(sc);
   10774 }
   10775 
   10776 /* SERDES related */
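/*
 * wm_serdes_power_up_link_82575:
 *
 *	Power up the SERDES link on 82575 and newer devices by enabling
 *	the PCS and clearing SWDPIN(3) in CTRL_EXT. Only applies to
 *	SERDES and SGMII configurations.
 */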
   10777 static void
   10778 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10779 {
   10780 	uint32_t reg;
   10781 
   10782 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10783 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10784 		return;
   10785 
   10786 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10787 	reg |= PCS_CFG_PCS_EN;
   10788 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10789 
   10790 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10791 	reg &= ~CTRL_EXT_SWDPIN(3);
   10792 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10793 	CSR_WRITE_FLUSH(sc);
   10794 }
   10795 
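/*
 * wm_serdes_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a SERDES device.
 *	Depending on the link mode (SGMII or 1000KX), either let the PCS
 *	autonegotiate or force 1000 Mb/s full-duplex.
 */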
   10796 static int
   10797 wm_serdes_mediachange(struct ifnet *ifp)
   10798 {
   10799 	struct wm_softc *sc = ifp->if_softc;
   10800 	bool pcs_autoneg = true; /* XXX */
   10801 	uint32_t ctrl_ext, pcs_lctl, reg;
   10802 
   10803 	/* XXX Currently, this function is not called on 8257[12] */
   10804 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10805 	    || (sc->sc_type >= WM_T_82575))
   10806 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10807 
   10808 	wm_serdes_power_up_link_82575(sc);
   10809 
   10810 	sc->sc_ctrl |= CTRL_SLU;
   10811 
   10812 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10813 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10814 
   10815 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10816 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10817 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10818 	case CTRL_EXT_LINK_MODE_SGMII:
   10819 		pcs_autoneg = true;
   10820 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10821 		break;
   10822 	case CTRL_EXT_LINK_MODE_1000KX:
   10823 		pcs_autoneg = false;
   10824 		/* FALLTHROUGH */
   10825 	default:
   10826 		if ((sc->sc_type == WM_T_82575)
   10827 		    || (sc->sc_type == WM_T_82576)) {
   10828 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10829 				pcs_autoneg = false;
   10830 		}
   10831 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10832 		    | CTRL_FRCFDX;
   10833 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10834 	}
   10835 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10836 
   10837 	if (pcs_autoneg) {
   10838 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10839 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10840 
   10841 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10842 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10843 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10844 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10845 	} else
   10846 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10847 
   10848 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10849 
   10851 	return 0;
   10852 }
   10853 
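/*
 * wm_serdes_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a SERDES device from
 *	the PCS link status register, including speed, duplex and the
 *	negotiated flow-control pause settings.
 */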
   10854 static void
   10855 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10856 {
   10857 	struct wm_softc *sc = ifp->if_softc;
   10858 	struct mii_data *mii = &sc->sc_mii;
   10859 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10860 	uint32_t pcs_adv, pcs_lpab, reg;
   10861 
   10862 	ifmr->ifm_status = IFM_AVALID;
   10863 	ifmr->ifm_active = IFM_ETHER;
   10864 
   10865 	/* Check PCS */
   10866 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10867 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10868 		ifmr->ifm_active |= IFM_NONE;
   10869 		sc->sc_tbi_linkup = 0;
   10870 		goto setled;
   10871 	}
   10872 
   10873 	sc->sc_tbi_linkup = 1;
   10874 	ifmr->ifm_status |= IFM_ACTIVE;
   10875 	if (sc->sc_type == WM_T_I354) {
   10876 		uint32_t status;
   10877 
   10878 		status = CSR_READ(sc, WMREG_STATUS);
   10879 		if (((status & STATUS_2P5_SKU) != 0)
   10880 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10881 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10882 		} else
   10883 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10884 	} else {
   10885 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10886 		case PCS_LSTS_SPEED_10:
   10887 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10888 			break;
   10889 		case PCS_LSTS_SPEED_100:
   10890 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10891 			break;
   10892 		case PCS_LSTS_SPEED_1000:
   10893 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10894 			break;
   10895 		default:
   10896 			device_printf(sc->sc_dev, "Unknown speed\n");
   10897 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10898 			break;
   10899 		}
   10900 	}
   10901 	if ((reg & PCS_LSTS_FDX) != 0)
   10902 		ifmr->ifm_active |= IFM_FDX;
   10903 	else
   10904 		ifmr->ifm_active |= IFM_HDX;
   10905 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10906 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10907 		/* Check flow */
   10908 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10909 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10910 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10911 			goto setled;
   10912 		}
   10913 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10914 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10915 		DPRINTF(WM_DEBUG_LINK,
   10916 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   10917 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10918 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10919 			mii->mii_media_active |= IFM_FLOW
   10920 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10921 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10922 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10923 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10924 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10925 			mii->mii_media_active |= IFM_FLOW
   10926 			    | IFM_ETH_TXPAUSE;
   10927 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10928 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10929 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10930 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10931 			mii->mii_media_active |= IFM_FLOW
   10932 			    | IFM_ETH_RXPAUSE;
   10933 		}
   10934 	}
   10935 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10936 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10937 setled:
   10938 	wm_tbi_serdes_set_linkled(sc);
   10939 }
   10940 
   10941 /*
   10942  * wm_serdes_tick:
   10943  *
   10944  *	Check the link on serdes devices.
   10945  */
   10946 static void
   10947 wm_serdes_tick(struct wm_softc *sc)
   10948 {
   10949 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10950 	struct mii_data *mii = &sc->sc_mii;
   10951 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10952 	uint32_t reg;
   10953 
   10954 	KASSERT(WM_CORE_LOCKED(sc));
   10955 
   10956 	mii->mii_media_status = IFM_AVALID;
   10957 	mii->mii_media_active = IFM_ETHER;
   10958 
   10959 	/* Check PCS */
   10960 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10961 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10962 		mii->mii_media_status |= IFM_ACTIVE;
   10963 		sc->sc_tbi_linkup = 1;
   10964 		sc->sc_tbi_serdes_ticks = 0;
   10965 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10966 		if ((reg & PCS_LSTS_FDX) != 0)
   10967 			mii->mii_media_active |= IFM_FDX;
   10968 		else
   10969 			mii->mii_media_active |= IFM_HDX;
   10970 	} else {
   10971 		mii->mii_media_status |= IFM_NONE;
   10972 		sc->sc_tbi_linkup = 0;
   10973 		/* If the timer expired, retry autonegotiation */
   10974 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10975 		    && (++sc->sc_tbi_serdes_ticks
   10976 			>= sc->sc_tbi_serdes_anegticks)) {
   10977 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10978 			sc->sc_tbi_serdes_ticks = 0;
   10979 			/* XXX */
   10980 			wm_serdes_mediachange(ifp);
   10981 		}
   10982 	}
   10983 
   10984 	wm_tbi_serdes_set_linkled(sc);
   10985 }
   10986 
   10987 /* SFP related */
   10988 
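/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM via the I2CCMD
 *	register. Returns 0 on success and -1 on timeout or error.
 */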
   10989 static int
   10990 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10991 {
   10992 	uint32_t i2ccmd;
   10993 	int i;
   10994 
   10995 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10996 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10997 
   10998 	/* Poll the ready bit */
   10999 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11000 		delay(50);
   11001 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11002 		if (i2ccmd & I2CCMD_READY)
   11003 			break;
   11004 	}
   11005 	if ((i2ccmd & I2CCMD_READY) == 0)
   11006 		return -1;
   11007 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11008 		return -1;
   11009 
   11010 	*data = i2ccmd & 0x00ff;
   11011 
   11012 	return 0;
   11013 }
   11014 
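/*
 * wm_sfp_get_media_type:
 *
 *	Identify the media type of an SFP module from its ID and
 *	Ethernet compliance-code bytes: 1000BASE-SX/LX maps to SERDES,
 *	1000BASE-T to copper over SGMII, and 100BASE-FX to SERDES with
 *	the SGMII flag set.
 */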
   11015 static uint32_t
   11016 wm_sfp_get_media_type(struct wm_softc *sc)
   11017 {
   11018 	uint32_t ctrl_ext;
   11019 	uint8_t val = 0;
   11020 	int timeout = 3;
   11021 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11022 	int rv = -1;
   11023 
   11024 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11025 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11026 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11027 	CSR_WRITE_FLUSH(sc);
   11028 
   11029 	/* Read SFP module data */
   11030 	while (timeout) {
   11031 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11032 		if (rv == 0)
   11033 			break;
   11034 		delay(100*1000); /* XXX too big */
   11035 		timeout--;
   11036 	}
   11037 	if (rv != 0)
   11038 		goto out;
   11039 	switch (val) {
   11040 	case SFF_SFP_ID_SFF:
   11041 		aprint_normal_dev(sc->sc_dev,
   11042 		    "Module/Connector soldered to board\n");
   11043 		break;
   11044 	case SFF_SFP_ID_SFP:
   11045 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11046 		break;
   11047 	case SFF_SFP_ID_UNKNOWN:
   11048 		goto out;
   11049 	default:
   11050 		break;
   11051 	}
   11052 
   11053 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11054 	if (rv != 0) {
   11055 		goto out;
   11056 	}
   11057 
   11058 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11059 		mediatype = WM_MEDIATYPE_SERDES;
	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
		sc->sc_flags |= WM_F_SGMII;
		mediatype = WM_MEDIATYPE_COPPER;
	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11064 		sc->sc_flags |= WM_F_SGMII;
   11065 		mediatype = WM_MEDIATYPE_SERDES;
   11066 	}
   11067 
   11068 out:
   11069 	/* Restore I2C interface setting */
   11070 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11071 
   11072 	return mediatype;
   11073 }
   11074 
   11075 /*
   11076  * NVM related.
   11077  * Microwire, SPI (w/wo EERD) and Flash.
   11078  */
   11079 
   11080 /* Both spi and uwire */
   11081 
   11082 /*
   11083  * wm_eeprom_sendbits:
   11084  *
   11085  *	Send a series of bits to the EEPROM.
   11086  */
   11087 static void
   11088 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11089 {
   11090 	uint32_t reg;
   11091 	int x;
   11092 
   11093 	reg = CSR_READ(sc, WMREG_EECD);
   11094 
   11095 	for (x = nbits; x > 0; x--) {
   11096 		if (bits & (1U << (x - 1)))
   11097 			reg |= EECD_DI;
   11098 		else
   11099 			reg &= ~EECD_DI;
   11100 		CSR_WRITE(sc, WMREG_EECD, reg);
   11101 		CSR_WRITE_FLUSH(sc);
   11102 		delay(2);
   11103 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11104 		CSR_WRITE_FLUSH(sc);
   11105 		delay(2);
   11106 		CSR_WRITE(sc, WMREG_EECD, reg);
   11107 		CSR_WRITE_FLUSH(sc);
   11108 		delay(2);
   11109 	}
   11110 }
   11111 
   11112 /*
   11113  * wm_eeprom_recvbits:
   11114  *
   11115  *	Receive a series of bits from the EEPROM.
   11116  */
   11117 static void
   11118 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11119 {
   11120 	uint32_t reg, val;
   11121 	int x;
   11122 
   11123 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11124 
   11125 	val = 0;
   11126 	for (x = nbits; x > 0; x--) {
   11127 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11128 		CSR_WRITE_FLUSH(sc);
   11129 		delay(2);
   11130 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11131 			val |= (1U << (x - 1));
   11132 		CSR_WRITE(sc, WMREG_EECD, reg);
   11133 		CSR_WRITE_FLUSH(sc);
   11134 		delay(2);
   11135 	}
   11136 	*valp = val;
   11137 }
   11138 
   11139 /* Microwire */
   11140 
   11141 /*
   11142  * wm_nvm_read_uwire:
   11143  *
   11144  *	Read a word from the EEPROM using the MicroWire protocol.
   11145  */
   11146 static int
   11147 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11148 {
   11149 	uint32_t reg, val;
   11150 	int i;
   11151 
   11152 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11153 		device_xname(sc->sc_dev), __func__));
   11154 
   11155 	for (i = 0; i < wordcnt; i++) {
   11156 		/* Clear SK and DI. */
   11157 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11158 		CSR_WRITE(sc, WMREG_EECD, reg);
   11159 
   11160 		/*
   11161 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11162 		 * and Xen.
   11163 		 *
		 * We use this workaround only for the 82540 because qemu's
		 * e1000 acts as an 82540.
   11166 		 */
   11167 		if (sc->sc_type == WM_T_82540) {
   11168 			reg |= EECD_SK;
   11169 			CSR_WRITE(sc, WMREG_EECD, reg);
   11170 			reg &= ~EECD_SK;
   11171 			CSR_WRITE(sc, WMREG_EECD, reg);
   11172 			CSR_WRITE_FLUSH(sc);
   11173 			delay(2);
   11174 		}
   11175 		/* XXX: end of workaround */
   11176 
   11177 		/* Set CHIP SELECT. */
   11178 		reg |= EECD_CS;
   11179 		CSR_WRITE(sc, WMREG_EECD, reg);
   11180 		CSR_WRITE_FLUSH(sc);
   11181 		delay(2);
   11182 
   11183 		/* Shift in the READ command. */
   11184 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11185 
   11186 		/* Shift in address. */
   11187 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11188 
   11189 		/* Shift out the data. */
   11190 		wm_eeprom_recvbits(sc, &val, 16);
   11191 		data[i] = val & 0xffff;
   11192 
   11193 		/* Clear CHIP SELECT. */
   11194 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11195 		CSR_WRITE(sc, WMREG_EECD, reg);
   11196 		CSR_WRITE_FLUSH(sc);
   11197 		delay(2);
   11198 	}
   11199 
   11200 	return 0;
   11201 }
   11202 
   11203 /* SPI */
   11204 
   11205 /*
   11206  * Set SPI and FLASH related information from the EECD register.
   11207  * For 82541 and 82547, the word size is taken from EEPROM.
   11208  */
   11209 static int
   11210 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11211 {
   11212 	int size;
   11213 	uint32_t reg;
   11214 	uint16_t data;
   11215 
   11216 	reg = CSR_READ(sc, WMREG_EECD);
   11217 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11218 
   11219 	/* Read the size of NVM from EECD by default */
   11220 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11221 	switch (sc->sc_type) {
   11222 	case WM_T_82541:
   11223 	case WM_T_82541_2:
   11224 	case WM_T_82547:
   11225 	case WM_T_82547_2:
   11226 		/* Set dummy value to access EEPROM */
   11227 		sc->sc_nvm_wordsize = 64;
   11228 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11229 		reg = data;
   11230 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11231 		if (size == 0)
   11232 			size = 6; /* 64 word size */
   11233 		else
   11234 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11235 		break;
   11236 	case WM_T_80003:
   11237 	case WM_T_82571:
   11238 	case WM_T_82572:
   11239 	case WM_T_82573: /* SPI case */
   11240 	case WM_T_82574: /* SPI case */
   11241 	case WM_T_82583: /* SPI case */
   11242 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11243 		if (size > 14)
   11244 			size = 14;
   11245 		break;
   11246 	case WM_T_82575:
   11247 	case WM_T_82576:
   11248 	case WM_T_82580:
   11249 	case WM_T_I350:
   11250 	case WM_T_I354:
   11251 	case WM_T_I210:
   11252 	case WM_T_I211:
   11253 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11254 		if (size > 15)
   11255 			size = 15;
   11256 		break;
   11257 	default:
   11258 		aprint_error_dev(sc->sc_dev,
   11259 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11260 		return -1;
   11261 		break;
   11262 	}
   11263 
   11264 	sc->sc_nvm_wordsize = 1 << size;
   11265 
   11266 	return 0;
   11267 }
   11268 
   11269 /*
   11270  * wm_nvm_ready_spi:
   11271  *
   11272  *	Wait for a SPI EEPROM to be ready for commands.
   11273  */
   11274 static int
   11275 wm_nvm_ready_spi(struct wm_softc *sc)
   11276 {
   11277 	uint32_t val;
   11278 	int usec;
   11279 
   11280 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11281 		device_xname(sc->sc_dev), __func__));
   11282 
   11283 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11284 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11285 		wm_eeprom_recvbits(sc, &val, 8);
   11286 		if ((val & SPI_SR_RDY) == 0)
   11287 			break;
   11288 	}
   11289 	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11291 		return 1;
   11292 	}
   11293 	return 0;
   11294 }
   11295 
   11296 /*
   11297  * wm_nvm_read_spi:
   11298  *
 *	Read a word from the EEPROM using the SPI protocol.
   11300  */
   11301 static int
   11302 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11303 {
   11304 	uint32_t reg, val;
   11305 	int i;
   11306 	uint8_t opc;
   11307 
   11308 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11309 		device_xname(sc->sc_dev), __func__));
   11310 
   11311 	/* Clear SK and CS. */
   11312 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11313 	CSR_WRITE(sc, WMREG_EECD, reg);
   11314 	CSR_WRITE_FLUSH(sc);
   11315 	delay(2);
   11316 
   11317 	if (wm_nvm_ready_spi(sc))
   11318 		return 1;
   11319 
   11320 	/* Toggle CS to flush commands. */
   11321 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11322 	CSR_WRITE_FLUSH(sc);
   11323 	delay(2);
   11324 	CSR_WRITE(sc, WMREG_EECD, reg);
   11325 	CSR_WRITE_FLUSH(sc);
   11326 	delay(2);
   11327 
   11328 	opc = SPI_OPC_READ;
   11329 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11330 		opc |= SPI_OPC_A8;
   11331 
   11332 	wm_eeprom_sendbits(sc, opc, 8);
   11333 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11334 
   11335 	for (i = 0; i < wordcnt; i++) {
   11336 		wm_eeprom_recvbits(sc, &val, 16);
   11337 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11338 	}
   11339 
   11340 	/* Raise CS and clear SK. */
   11341 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11342 	CSR_WRITE(sc, WMREG_EECD, reg);
   11343 	CSR_WRITE_FLUSH(sc);
   11344 	delay(2);
   11345 
   11346 	return 0;
   11347 }
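
/*
 * The swap above reorders the 16 bits clocked in from the SPI EEPROM so
 * that the first byte received becomes the low byte of the word.  A
 * minimal sketch with a worked value:
 *
 *	val = 0x1234;					first byte in is 0x12
 *	w = ((val >> 8) & 0xff) | ((val & 0xff) << 8);	w == 0x3412
 */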
   11348 
/* Reading via the EERD register */
   11350 
   11351 static int
   11352 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11353 {
   11354 	uint32_t attempts = 100000;
   11355 	uint32_t i, reg = 0;
   11356 	int32_t done = -1;
   11357 
   11358 	for (i = 0; i < attempts; i++) {
   11359 		reg = CSR_READ(sc, rw);
   11360 
   11361 		if (reg & EERD_DONE) {
   11362 			done = 0;
   11363 			break;
   11364 		}
   11365 		delay(5);
   11366 	}
   11367 
   11368 	return done;
   11369 }
   11370 
   11371 static int
   11372 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11373     uint16_t *data)
   11374 {
   11375 	int i, eerd = 0;
   11376 	int error = 0;
   11377 
   11378 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11379 		device_xname(sc->sc_dev), __func__));
   11380 
   11381 	for (i = 0; i < wordcnt; i++) {
   11382 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11383 
   11384 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11385 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11386 		if (error != 0)
   11387 			break;
   11388 
   11389 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11390 	}
   11391 
   11392 	return error;
   11393 }
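
/*
 * The EERD handshake above, reduced to a single-word sketch: write the
 * word address plus the START bit, poll for DONE, then take the data
 * from the upper bits of the same register.
 *
 *	CSR_WRITE(sc, WMREG_EERD, (offset << EERD_ADDR_SHIFT) | EERD_START);
 *	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
 *		delay(5);
 *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
 */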
   11394 
   11395 /* Flash */
   11396 
   11397 static int
   11398 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11399 {
   11400 	uint32_t eecd;
   11401 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11402 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11403 	uint8_t sig_byte = 0;
   11404 
   11405 	switch (sc->sc_type) {
   11406 	case WM_T_PCH_SPT:
   11407 		/*
   11408 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11409 		 * sector valid bits from the NVM.
   11410 		 */
   11411 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11412 		if ((*bank == 0) || (*bank == 1)) {
   11413 			aprint_error_dev(sc->sc_dev,
   11414 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11415 				*bank);
   11416 			return -1;
   11417 		} else {
   11418 			*bank = *bank - 2;
   11419 			return 0;
   11420 		}
   11421 	case WM_T_ICH8:
   11422 	case WM_T_ICH9:
   11423 		eecd = CSR_READ(sc, WMREG_EECD);
   11424 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11425 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11426 			return 0;
   11427 		}
   11428 		/* FALLTHROUGH */
   11429 	default:
   11430 		/* Default to 0 */
   11431 		*bank = 0;
   11432 
   11433 		/* Check bank 0 */
   11434 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11435 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11436 			*bank = 0;
   11437 			return 0;
   11438 		}
   11439 
   11440 		/* Check bank 1 */
   11441 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11442 		    &sig_byte);
   11443 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11444 			*bank = 1;
   11445 			return 0;
   11446 		}
   11447 	}
   11448 
   11449 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11450 		device_xname(sc->sc_dev)));
   11451 	return -1;
   11452 }
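
/*
 * How the probes above find the signature byte: ICH_NVM_SIG_WORD is a
 * word offset, so "* 2" converts it to a byte offset and "+ 1" selects
 * the high byte of that word.  A bank is considered valid when
 *
 *	(sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE
 *
 * holds for the byte read at that offset (plus the bank 1 displacement
 * for the second bank).
 */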
   11453 
   11454 /******************************************************************************
   11455  * This function does initial flash setup so that a new read/write/erase cycle
   11456  * can be started.
   11457  *
   11458  * sc - The pointer to the hw structure
   11459  ****************************************************************************/
   11460 static int32_t
   11461 wm_ich8_cycle_init(struct wm_softc *sc)
   11462 {
   11463 	uint16_t hsfsts;
   11464 	int32_t error = 1;
   11465 	int32_t i     = 0;
   11466 
   11467 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11468 
	/* Check the Flash Descriptor Valid bit in the hardware status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
   11473 
	/* Clear FCERR and DAEL in the hardware status by writing 1s */
   11476 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11477 
   11478 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11479 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have a software
	 * semaphore mechanism guarding FDONE or the cycle-in-progress bit
	 * so that accesses by two threads are serialized and two threads
	 * cannot start a cycle at the same time.
	 */
   11490 
   11491 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11492 		/*
   11493 		 * There is no cycle running at present, so we can start a
   11494 		 * cycle
   11495 		 */
   11496 
   11497 		/* Begin by setting Flash Cycle Done. */
   11498 		hsfsts |= HSFSTS_DONE;
   11499 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11500 		error = 0;
   11501 	} else {
   11502 		/*
   11503 		 * otherwise poll for sometime so the current cycle has a
   11504 		 * chance to end before giving up.
   11505 		 */
   11506 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11507 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11508 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11509 				error = 0;
   11510 				break;
   11511 			}
   11512 			delay(1);
   11513 		}
   11514 		if (error == 0) {
   11515 			/*
   11516 			 * Successful in waiting for previous cycle to timeout,
   11517 			 * now set the Flash Cycle Done.
   11518 			 */
   11519 			hsfsts |= HSFSTS_DONE;
   11520 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11521 		}
   11522 	}
   11523 	return error;
   11524 }
   11525 
   11526 /******************************************************************************
   11527  * This function starts a flash cycle and waits for its completion
   11528  *
   11529  * sc - The pointer to the hw structure
   11530  ****************************************************************************/
   11531 static int32_t
   11532 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11533 {
   11534 	uint16_t hsflctl;
   11535 	uint16_t hsfsts;
   11536 	int32_t error = 1;
   11537 	uint32_t i = 0;
   11538 
   11539 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11540 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11541 	hsflctl |= HSFCTL_GO;
   11542 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11543 
   11544 	/* Wait till FDONE bit is set to 1 */
   11545 	do {
   11546 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11547 		if (hsfsts & HSFSTS_DONE)
   11548 			break;
   11549 		delay(1);
   11550 		i++;
   11551 	} while (i < timeout);
	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11553 		error = 0;
   11554 
   11555 	return error;
   11556 }
   11557 
   11558 /******************************************************************************
   11559  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11560  *
   11561  * sc - The pointer to the hw structure
   11562  * index - The index of the byte or word to read.
 * size - Size of data to read: 1=byte, 2=word, 4=dword
   11564  * data - Pointer to the word to store the value read.
   11565  *****************************************************************************/
   11566 static int32_t
   11567 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11568     uint32_t size, uint32_t *data)
   11569 {
   11570 	uint16_t hsfsts;
   11571 	uint16_t hsflctl;
   11572 	uint32_t flash_linear_address;
   11573 	uint32_t flash_data = 0;
   11574 	int32_t error = 1;
   11575 	int32_t count = 0;
   11576 
	if (size < 1 || size > 4 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11579 		return error;
   11580 
   11581 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11582 	    sc->sc_ich8_flash_base;
   11583 
   11584 	do {
   11585 		delay(1);
   11586 		/* Steps */
   11587 		error = wm_ich8_cycle_init(sc);
   11588 		if (error)
   11589 			break;
   11590 
   11591 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* The byte count field holds the transfer size minus one. */
   11593 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11594 		    & HSFCTL_BCOUNT_MASK;
   11595 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11596 		if (sc->sc_type == WM_T_PCH_SPT) {
			/*
			 * In SPT, this register is in LAN memory space, not
			 * flash.  Therefore, only 32-bit access is supported.
			 */
   11601 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11602 			    (uint32_t)hsflctl);
   11603 		} else
   11604 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11605 
   11606 		/*
   11607 		 * Write the last 24 bits of index into Flash Linear address
   11608 		 * field in Flash Address
   11609 		 */
   11610 		/* TODO: TBD maybe check the index against the size of flash */
   11611 
   11612 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11613 
   11614 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11615 
		/*
		 * If FCERR is set, clear it and retry the whole sequence up
		 * to ICH_FLASH_CYCLE_REPEAT_COUNT times; otherwise read the
		 * data (least significant byte first) from the Flash Data0
		 * register.
		 */
   11622 		if (error == 0) {
   11623 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11624 			if (size == 1)
   11625 				*data = (uint8_t)(flash_data & 0x000000FF);
   11626 			else if (size == 2)
   11627 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11628 			else if (size == 4)
   11629 				*data = (uint32_t)flash_data;
   11630 			break;
   11631 		} else {
   11632 			/*
   11633 			 * If we've gotten here, then things are probably
   11634 			 * completely hosed, but if the error condition is
   11635 			 * detected, it won't hurt to give it another try...
   11636 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11637 			 */
   11638 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11639 			if (hsfsts & HSFSTS_ERR) {
   11640 				/* Repeat for some time before giving up. */
   11641 				continue;
   11642 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11643 				break;
   11644 		}
   11645 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11646 
   11647 	return error;
   11648 }
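
/*
 * The register setup above, condensed into a sketch for a 2-byte read
 * at in-bank byte index 0x10: the flash linear address is the index
 * plus the bank base, and the byte count field holds the transfer size
 * minus one.
 *
 *	faddr = (ICH_FLASH_LINEAR_ADDR_MASK & 0x10) + sc->sc_ich8_flash_base;
 *	hsflctl = ((2 - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
 *	hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
 */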
   11649 
   11650 /******************************************************************************
   11651  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11652  *
   11653  * sc - pointer to wm_hw structure
   11654  * index - The index of the byte to read.
   11655  * data - Pointer to a byte to store the value read.
   11656  *****************************************************************************/
   11657 static int32_t
   11658 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11659 {
   11660 	int32_t status;
   11661 	uint32_t word = 0;
   11662 
   11663 	status = wm_read_ich8_data(sc, index, 1, &word);
   11664 	if (status == 0)
   11665 		*data = (uint8_t)word;
   11666 	else
   11667 		*data = 0;
   11668 
   11669 	return status;
   11670 }
   11671 
   11672 /******************************************************************************
   11673  * Reads a word from the NVM using the ICH8 flash access registers.
   11674  *
   11675  * sc - pointer to wm_hw structure
   11676  * index - The starting byte index of the word to read.
   11677  * data - Pointer to a word to store the value read.
   11678  *****************************************************************************/
   11679 static int32_t
   11680 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11681 {
   11682 	int32_t status;
   11683 	uint32_t word = 0;
   11684 
   11685 	status = wm_read_ich8_data(sc, index, 2, &word);
   11686 	if (status == 0)
   11687 		*data = (uint16_t)word;
   11688 	else
   11689 		*data = 0;
   11690 
   11691 	return status;
   11692 }
   11693 
   11694 /******************************************************************************
   11695  * Reads a dword from the NVM using the ICH8 flash access registers.
   11696  *
   11697  * sc - pointer to wm_hw structure
 * index - The starting byte index of the dword to read.
 * data - Pointer to a dword to store the value read.
   11700  *****************************************************************************/
   11701 static int32_t
   11702 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11703 {
   11704 	int32_t status;
   11705 
   11706 	status = wm_read_ich8_data(sc, index, 4, data);
   11707 	return status;
   11708 }
   11709 
   11710 /******************************************************************************
   11711  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11712  * register.
   11713  *
   11714  * sc - Struct containing variables accessed by shared code
   11715  * offset - offset of word in the EEPROM to read
   11716  * data - word read from the EEPROM
   11717  * words - number of words to read
   11718  *****************************************************************************/
   11719 static int
   11720 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11721 {
   11722 	int32_t  error = 0;
   11723 	uint32_t flash_bank = 0;
   11724 	uint32_t act_offset = 0;
   11725 	uint32_t bank_offset = 0;
   11726 	uint16_t word = 0;
   11727 	uint16_t i = 0;
   11728 
   11729 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11730 		device_xname(sc->sc_dev), __func__));
   11731 
   11732 	/*
   11733 	 * We need to know which is the valid flash bank.  In the event
   11734 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11735 	 * managing flash_bank.  So it cannot be trusted and needs
   11736 	 * to be updated with each read.
   11737 	 */
   11738 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11739 	if (error) {
   11740 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11741 			device_xname(sc->sc_dev)));
   11742 		flash_bank = 0;
   11743 	}
   11744 
	/*
	 * If we're on bank 1, adjust the offset, accounting for the word
	 * size of the bank.
	 */
   11749 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11750 
   11751 	error = wm_get_swfwhw_semaphore(sc);
   11752 	if (error) {
   11753 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11754 		    __func__);
   11755 		return error;
   11756 	}
   11757 
   11758 	for (i = 0; i < words; i++) {
   11759 		/* The NVM part needs a byte offset, hence * 2 */
   11760 		act_offset = bank_offset + ((offset + i) * 2);
   11761 		error = wm_read_ich8_word(sc, act_offset, &word);
   11762 		if (error) {
   11763 			aprint_error_dev(sc->sc_dev,
   11764 			    "%s: failed to read NVM\n", __func__);
   11765 			break;
   11766 		}
   11767 		data[i] = word;
   11768 	}
   11769 
   11770 	wm_put_swfwhw_semaphore(sc);
   11771 	return error;
   11772 }
   11773 
   11774 /******************************************************************************
   11775  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11776  * register.
   11777  *
   11778  * sc - Struct containing variables accessed by shared code
   11779  * offset - offset of word in the EEPROM to read
   11780  * data - word read from the EEPROM
   11781  * words - number of words to read
   11782  *****************************************************************************/
   11783 static int
   11784 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11785 {
   11786 	int32_t  error = 0;
   11787 	uint32_t flash_bank = 0;
   11788 	uint32_t act_offset = 0;
   11789 	uint32_t bank_offset = 0;
   11790 	uint32_t dword = 0;
   11791 	uint16_t i = 0;
   11792 
   11793 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11794 		device_xname(sc->sc_dev), __func__));
   11795 
   11796 	/*
   11797 	 * We need to know which is the valid flash bank.  In the event
   11798 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11799 	 * managing flash_bank.  So it cannot be trusted and needs
   11800 	 * to be updated with each read.
   11801 	 */
   11802 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11803 	if (error) {
   11804 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11805 			device_xname(sc->sc_dev)));
   11806 		flash_bank = 0;
   11807 	}
   11808 
	/*
	 * If we're on bank 1, adjust the offset, accounting for the word
	 * size of the bank.
	 */
   11813 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11814 
   11815 	error = wm_get_swfwhw_semaphore(sc);
   11816 	if (error) {
   11817 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11818 		    __func__);
   11819 		return error;
   11820 	}
   11821 
   11822 	for (i = 0; i < words; i++) {
   11823 		/* The NVM part needs a byte offset, hence * 2 */
   11824 		act_offset = bank_offset + ((offset + i) * 2);
   11825 		/* but we must read dword aligned, so mask ... */
   11826 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11827 		if (error) {
   11828 			aprint_error_dev(sc->sc_dev,
   11829 			    "%s: failed to read NVM\n", __func__);
   11830 			break;
   11831 		}
   11832 		/* ... and pick out low or high word */
   11833 		if ((act_offset & 0x2) == 0)
   11834 			data[i] = (uint16_t)(dword & 0xFFFF);
   11835 		else
   11836 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11837 	}
   11838 
   11839 	wm_put_swfwhw_semaphore(sc);
   11840 	return error;
   11841 }
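
/*
 * Worked example of the alignment logic above: for NVM word 9 in bank
 * 0, act_offset is 9 * 2 == 0x12.  The dword is fetched from
 * 0x12 & ~0x3 == 0x10, and since (0x12 & 0x2) != 0 the word of interest
 * is the high half:
 *
 *	wm_read_ich8_dword(sc, 0x10, &dword);
 *	data = (uint16_t)((dword >> 16) & 0xFFFF);
 */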
   11842 
   11843 /* iNVM */
   11844 
   11845 static int
   11846 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11847 {
	int32_t rv = -1;	/* -1 means "word not found" */
   11849 	uint32_t invm_dword;
   11850 	uint16_t i;
   11851 	uint8_t record_type, word_address;
   11852 
   11853 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11854 		device_xname(sc->sc_dev), __func__));
   11855 
   11856 	for (i = 0; i < INVM_SIZE; i++) {
   11857 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11858 		/* Get record type */
   11859 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11860 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11861 			break;
   11862 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11863 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11864 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11865 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11866 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11867 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11868 			if (word_address == address) {
   11869 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11870 				rv = 0;
   11871 				break;
   11872 			}
   11873 		}
   11874 	}
   11875 
   11876 	return rv;
   11877 }
   11878 
   11879 static int
   11880 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11881 {
   11882 	int rv = 0;
   11883 	int i;
   11884 
   11885 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11886 		device_xname(sc->sc_dev), __func__));
   11887 
   11888 	for (i = 0; i < words; i++) {
   11889 		switch (offset + i) {
   11890 		case NVM_OFF_MACADDR:
   11891 		case NVM_OFF_MACADDR1:
   11892 		case NVM_OFF_MACADDR2:
   11893 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11894 			if (rv != 0) {
   11895 				data[i] = 0xffff;
   11896 				rv = -1;
   11897 			}
   11898 			break;
   11899 		case NVM_OFF_CFG2:
   11900 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11901 			if (rv != 0) {
   11902 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11903 				rv = 0;
   11904 			}
   11905 			break;
   11906 		case NVM_OFF_CFG4:
   11907 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11908 			if (rv != 0) {
   11909 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11910 				rv = 0;
   11911 			}
   11912 			break;
   11913 		case NVM_OFF_LED_1_CFG:
   11914 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11915 			if (rv != 0) {
   11916 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11917 				rv = 0;
   11918 			}
   11919 			break;
   11920 		case NVM_OFF_LED_0_2_CFG:
   11921 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11922 			if (rv != 0) {
   11923 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11924 				rv = 0;
   11925 			}
   11926 			break;
   11927 		case NVM_OFF_ID_LED_SETTINGS:
   11928 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11929 			if (rv != 0) {
   11930 				*data = ID_LED_RESERVED_FFFF;
   11931 				rv = 0;
   11932 			}
   11933 			break;
   11934 		default:
   11935 			DPRINTF(WM_DEBUG_NVM,
   11936 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11937 			*data = NVM_RESERVED_WORD;
   11938 			break;
   11939 		}
   11940 	}
   11941 
   11942 	return rv;
   11943 }
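
/*
 * The walk above treats each OTP dword as a tagged record.  A sketch of
 * the lookup for one word-autoload record (field macros as used above):
 *
 *	invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
 *	if (INVM_DWORD_TO_RECORD_TYPE(invm_dword)
 *	    == INVM_WORD_AUTOLOAD_STRUCTURE
 *	    && INVM_DWORD_TO_WORD_ADDRESS(invm_dword) == address)
 *		*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
 *
 * The other record types merely tell the loop how many dwords to skip.
 */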
   11944 
   11945 /* Lock, detecting NVM type, validate checksum, version and read */
   11946 
   11947 /*
   11948  * wm_nvm_acquire:
   11949  *
   11950  *	Perform the EEPROM handshake required on some chips.
   11951  */
   11952 static int
   11953 wm_nvm_acquire(struct wm_softc *sc)
   11954 {
   11955 	uint32_t reg;
   11956 	int x;
   11957 	int ret = 0;
   11958 
   11959 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11960 		device_xname(sc->sc_dev), __func__));
   11961 
   11962 	if (sc->sc_type >= WM_T_ICH8) {
   11963 		ret = wm_get_nvm_ich8lan(sc);
   11964 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11965 		ret = wm_get_swfwhw_semaphore(sc);
   11966 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11967 		/* This will also do wm_get_swsm_semaphore() if needed */
   11968 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11969 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11970 		ret = wm_get_swsm_semaphore(sc);
   11971 	}
   11972 
   11973 	if (ret) {
   11974 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11975 			__func__);
   11976 		return 1;
   11977 	}
   11978 
   11979 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11980 		reg = CSR_READ(sc, WMREG_EECD);
   11981 
   11982 		/* Request EEPROM access. */
   11983 		reg |= EECD_EE_REQ;
   11984 		CSR_WRITE(sc, WMREG_EECD, reg);
   11985 
   11986 		/* ..and wait for it to be granted. */
   11987 		for (x = 0; x < 1000; x++) {
   11988 			reg = CSR_READ(sc, WMREG_EECD);
   11989 			if (reg & EECD_EE_GNT)
   11990 				break;
   11991 			delay(5);
   11992 		}
   11993 		if ((reg & EECD_EE_GNT) == 0) {
   11994 			aprint_error_dev(sc->sc_dev,
   11995 			    "could not acquire EEPROM GNT\n");
   11996 			reg &= ~EECD_EE_REQ;
   11997 			CSR_WRITE(sc, WMREG_EECD, reg);
   11998 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11999 				wm_put_swfwhw_semaphore(sc);
   12000 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12001 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12002 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12003 				wm_put_swsm_semaphore(sc);
   12004 			return 1;
   12005 		}
   12006 	}
   12007 
   12008 	return 0;
   12009 }
   12010 
   12011 /*
   12012  * wm_nvm_release:
   12013  *
   12014  *	Release the EEPROM mutex.
   12015  */
   12016 static void
   12017 wm_nvm_release(struct wm_softc *sc)
   12018 {
   12019 	uint32_t reg;
   12020 
   12021 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12022 		device_xname(sc->sc_dev), __func__));
   12023 
   12024 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12025 		reg = CSR_READ(sc, WMREG_EECD);
   12026 		reg &= ~EECD_EE_REQ;
   12027 		CSR_WRITE(sc, WMREG_EECD, reg);
   12028 	}
   12029 
   12030 	if (sc->sc_type >= WM_T_ICH8) {
   12031 		wm_put_nvm_ich8lan(sc);
   12032 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12033 		wm_put_swfwhw_semaphore(sc);
   12034 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   12035 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12036 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12037 		wm_put_swsm_semaphore(sc);
   12038 }
   12039 
   12040 static int
   12041 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12042 {
   12043 	uint32_t eecd = 0;
   12044 
   12045 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12046 	    || sc->sc_type == WM_T_82583) {
   12047 		eecd = CSR_READ(sc, WMREG_EECD);
   12048 
   12049 		/* Isolate bits 15 & 16 */
   12050 		eecd = ((eecd >> 15) & 0x03);
   12051 
   12052 		/* If both bits are set, device is Flash type */
   12053 		if (eecd == 0x03)
   12054 			return 0;
   12055 	}
   12056 	return 1;
   12057 }
   12058 
   12059 static int
   12060 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12061 {
   12062 	uint32_t eec;
   12063 
   12064 	eec = CSR_READ(sc, WMREG_EEC);
   12065 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12066 		return 1;
   12067 
   12068 	return 0;
   12069 }
   12070 
   12071 /*
   12072  * wm_nvm_validate_checksum
   12073  *
   12074  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12075  */
   12076 static int
   12077 wm_nvm_validate_checksum(struct wm_softc *sc)
   12078 {
   12079 	uint16_t checksum;
   12080 	uint16_t eeprom_data;
   12081 #ifdef WM_DEBUG
   12082 	uint16_t csum_wordaddr, valid_checksum;
   12083 #endif
   12084 	int i;
   12085 
   12086 	checksum = 0;
   12087 
   12088 	/* Don't check for I211 */
   12089 	if (sc->sc_type == WM_T_I211)
   12090 		return 0;
   12091 
   12092 #ifdef WM_DEBUG
   12093 	if (sc->sc_type == WM_T_PCH_LPT) {
   12094 		csum_wordaddr = NVM_OFF_COMPAT;
   12095 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12096 	} else {
   12097 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12098 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12099 	}
   12100 
   12101 	/* Dump EEPROM image for debug */
   12102 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12103 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12104 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12105 		/* XXX PCH_SPT? */
   12106 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12107 		if ((eeprom_data & valid_checksum) == 0) {
   12108 			DPRINTF(WM_DEBUG_NVM,
   12109 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12110 				device_xname(sc->sc_dev), eeprom_data,
   12111 				    valid_checksum));
   12112 		}
   12113 	}
   12114 
   12115 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12116 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12117 		for (i = 0; i < NVM_SIZE; i++) {
   12118 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12119 				printf("XXXX ");
   12120 			else
   12121 				printf("%04hx ", eeprom_data);
   12122 			if (i % 8 == 7)
   12123 				printf("\n");
   12124 		}
   12125 	}
   12126 
   12127 #endif /* WM_DEBUG */
   12128 
   12129 	for (i = 0; i < NVM_SIZE; i++) {
   12130 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12131 			return 1;
   12132 		checksum += eeprom_data;
   12133 	}
   12134 
   12135 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12136 #ifdef WM_DEBUG
   12137 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12138 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12139 #endif
   12140 	}
   12141 
   12142 	return 0;
   12143 }
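
/*
 * The rule above in one line: the low 16 bits of the sum of words
 * 0..NVM_SIZE-1 must equal NVM_CHECKSUM (0xBABA on these parts), i.e.
 * the vendor stores a correction word so that
 *
 *	(uint16_t)(word[0] + word[1] + ... + word[63]) == NVM_CHECKSUM
 *
 * Note that a mismatch is only reported under WM_DEBUG, not treated as
 * fatal.
 */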
   12144 
   12145 static void
   12146 wm_nvm_version_invm(struct wm_softc *sc)
   12147 {
   12148 	uint32_t dword;
   12149 
   12150 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and instead just use word 61 as the
	 * documentation describes.  Perhaps it's not perfect, though...
   12154 	 *
   12155 	 * Example:
   12156 	 *
   12157 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12158 	 */
   12159 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12160 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12161 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12162 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12163 }
   12164 
   12165 static void
   12166 wm_nvm_version(struct wm_softc *sc)
   12167 {
   12168 	uint16_t major, minor, build, patch;
   12169 	uint16_t uid0, uid1;
   12170 	uint16_t nvm_data;
   12171 	uint16_t off;
   12172 	bool check_version = false;
   12173 	bool check_optionrom = false;
   12174 	bool have_build = false;
   12175 	bool have_uid = true;
   12176 
   12177 	/*
   12178 	 * Version format:
   12179 	 *
   12180 	 * XYYZ
   12181 	 * X0YZ
   12182 	 * X0YY
   12183 	 *
   12184 	 * Example:
   12185 	 *
   12186 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12187 	 *	82571	0x50a6	5.10.6?
   12188 	 *	82572	0x506a	5.6.10?
   12189 	 *	82572EI	0x5069	5.6.9?
   12190 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12191 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12193 	 */
   12194 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12195 	switch (sc->sc_type) {
   12196 	case WM_T_82571:
   12197 	case WM_T_82572:
   12198 	case WM_T_82574:
   12199 	case WM_T_82583:
   12200 		check_version = true;
   12201 		check_optionrom = true;
   12202 		have_build = true;
   12203 		break;
   12204 	case WM_T_82575:
   12205 	case WM_T_82576:
   12206 	case WM_T_82580:
   12207 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12208 			check_version = true;
   12209 		break;
   12210 	case WM_T_I211:
   12211 		wm_nvm_version_invm(sc);
   12212 		have_uid = false;
   12213 		goto printver;
   12214 	case WM_T_I210:
   12215 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12216 			wm_nvm_version_invm(sc);
   12217 			have_uid = false;
   12218 			goto printver;
   12219 		}
   12220 		/* FALLTHROUGH */
   12221 	case WM_T_I350:
   12222 	case WM_T_I354:
   12223 		check_version = true;
   12224 		check_optionrom = true;
   12225 		break;
   12226 	default:
   12227 		return;
   12228 	}
   12229 	if (check_version) {
   12230 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12231 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12232 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12233 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12234 			build = nvm_data & NVM_BUILD_MASK;
   12235 			have_build = true;
   12236 		} else
   12237 			minor = nvm_data & 0x00ff;
   12238 
   12239 		/* Decimal */
   12240 		minor = (minor / 16) * 10 + (minor % 16);
   12241 		sc->sc_nvm_ver_major = major;
   12242 		sc->sc_nvm_ver_minor = minor;
   12243 
   12244 printver:
   12245 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12246 		    sc->sc_nvm_ver_minor);
   12247 		if (have_build) {
   12248 			sc->sc_nvm_ver_build = build;
   12249 			aprint_verbose(".%d", build);
   12250 		}
   12251 	}
   12252 	if (check_optionrom) {
   12253 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12254 		/* Option ROM Version */
   12255 		if ((off != 0x0000) && (off != 0xffff)) {
   12256 			off += NVM_COMBO_VER_OFF;
   12257 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12258 			wm_nvm_read(sc, off, 1, &uid0);
   12259 			if ((uid0 != 0) && (uid0 != 0xffff)
   12260 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12261 				/* 16bits */
   12262 				major = uid0 >> 8;
   12263 				build = (uid0 << 8) | (uid1 >> 8);
   12264 				patch = uid1 & 0x00ff;
   12265 				aprint_verbose(", option ROM Version %d.%d.%d",
   12266 				    major, build, patch);
   12267 			}
   12268 		}
   12269 	}
   12270 
   12271 	if (have_uid) {
   12272 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12273 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12274 	}
   12275 }
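
/*
 * Worked example of the decoding above, for an 82571 with version word
 * 0x50a2 (assuming the usual 4/8/4-bit major/minor/build field split):
 * major = 0x5, raw minor = 0x0a, build = 0x2.  The minor field is
 * BCD-like, hence the conversion
 *
 *	minor = (0x0a / 16) * 10 + (0x0a % 16) = 10	=> version 5.10.2
 */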
   12276 
   12277 /*
   12278  * wm_nvm_read:
   12279  *
   12280  *	Read data from the serial EEPROM.
   12281  */
   12282 static int
   12283 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12284 {
   12285 	int rv;
   12286 
   12287 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12288 		device_xname(sc->sc_dev), __func__));
   12289 
   12290 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12291 		return 1;
   12292 
   12293 	if (wm_nvm_acquire(sc))
   12294 		return 1;
   12295 
   12296 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12297 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12298 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12299 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12300 	else if (sc->sc_type == WM_T_PCH_SPT)
   12301 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12302 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12303 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12304 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12305 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12306 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12307 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12308 	else
   12309 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12310 
   12311 	wm_nvm_release(sc);
   12312 	return rv;
   12313 }
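
/*
 * Typical use of wm_nvm_read() (a sketch): fetch the three words of the
 * primary MAC address and check the result, since the read fails when
 * the EEPROM is marked invalid or the semaphore cannot be taken.
 *
 *	uint16_t myea[ETHER_ADDR_LEN / 2];
 *
 *	if (wm_nvm_read(sc, NVM_OFF_MACADDR, __arraycount(myea), myea))
 *		aprint_error_dev(sc->sc_dev, "unable to read MAC address\n");
 */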
   12314 
/*
 * Hardware semaphores.
 * Very complex...
 */
   12319 
   12320 static int
   12321 wm_get_null(struct wm_softc *sc)
   12322 {
   12323 
   12324 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12325 		device_xname(sc->sc_dev), __func__));
   12326 	return 0;
   12327 }
   12328 
   12329 static void
   12330 wm_put_null(struct wm_softc *sc)
   12331 {
   12332 
   12333 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12334 		device_xname(sc->sc_dev), __func__));
   12335 	return;
   12336 }
   12337 
   12338 /*
   12339  * Get hardware semaphore.
   12340  * Same as e1000_get_hw_semaphore_generic()
   12341  */
   12342 static int
   12343 wm_get_swsm_semaphore(struct wm_softc *sc)
   12344 {
   12345 	int32_t timeout;
   12346 	uint32_t swsm;
   12347 
   12348 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12349 		device_xname(sc->sc_dev), __func__));
   12350 	KASSERT(sc->sc_nvm_wordsize > 0);
   12351 
   12352 	/* Get the SW semaphore. */
   12353 	timeout = sc->sc_nvm_wordsize + 1;
   12354 	while (timeout) {
   12355 		swsm = CSR_READ(sc, WMREG_SWSM);
   12356 
   12357 		if ((swsm & SWSM_SMBI) == 0)
   12358 			break;
   12359 
   12360 		delay(50);
   12361 		timeout--;
   12362 	}
   12363 
   12364 	if (timeout == 0) {
   12365 		aprint_error_dev(sc->sc_dev,
   12366 		    "could not acquire SWSM SMBI\n");
   12367 		return 1;
   12368 	}
   12369 
   12370 	/* Get the FW semaphore. */
   12371 	timeout = sc->sc_nvm_wordsize + 1;
   12372 	while (timeout) {
   12373 		swsm = CSR_READ(sc, WMREG_SWSM);
   12374 		swsm |= SWSM_SWESMBI;
   12375 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12376 		/* If we managed to set the bit we got the semaphore. */
   12377 		swsm = CSR_READ(sc, WMREG_SWSM);
   12378 		if (swsm & SWSM_SWESMBI)
   12379 			break;
   12380 
   12381 		delay(50);
   12382 		timeout--;
   12383 	}
   12384 
   12385 	if (timeout == 0) {
   12386 		aprint_error_dev(sc->sc_dev,
   12387 		    "could not acquire SWSM SWESMBI\n");
   12388 		/* Release semaphores */
   12389 		wm_put_swsm_semaphore(sc);
   12390 		return 1;
   12391 	}
   12392 	return 0;
   12393 }
   12394 
   12395 /*
   12396  * Put hardware semaphore.
   12397  * Same as e1000_put_hw_semaphore_generic()
   12398  */
   12399 static void
   12400 wm_put_swsm_semaphore(struct wm_softc *sc)
   12401 {
   12402 	uint32_t swsm;
   12403 
   12404 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12405 		device_xname(sc->sc_dev), __func__));
   12406 
   12407 	swsm = CSR_READ(sc, WMREG_SWSM);
   12408 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12409 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12410 }
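
/*
 * The SWSM protocol used by the two functions above, in brief: SMBI
 * arbitrates among software agents and is obtained by reading it back
 * as 0; SWESMBI arbitrates between software and firmware and is
 * obtained by writing it and reading it back as 1.  Both are dropped
 * with a single write:
 *
 *	swsm = CSR_READ(sc, WMREG_SWSM);
 *	CSR_WRITE(sc, WMREG_SWSM, swsm & ~(SWSM_SMBI | SWSM_SWESMBI));
 */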
   12411 
   12412 /*
   12413  * Get SW/FW semaphore.
   12414  * Same as e1000_acquire_swfw_sync_82575().
   12415  */
   12416 static int
   12417 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12418 {
   12419 	uint32_t swfw_sync;
   12420 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12421 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12422 	int timeout = 200;
   12423 
   12424 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12425 		device_xname(sc->sc_dev), __func__));
   12426 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12427 
   12428 	for (timeout = 0; timeout < 200; timeout++) {
   12429 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12430 			if (wm_get_swsm_semaphore(sc)) {
   12431 				aprint_error_dev(sc->sc_dev,
   12432 				    "%s: failed to get semaphore\n",
   12433 				    __func__);
   12434 				return 1;
   12435 			}
   12436 		}
   12437 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12438 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12439 			swfw_sync |= swmask;
   12440 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12441 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12442 				wm_put_swsm_semaphore(sc);
   12443 			return 0;
   12444 		}
   12445 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12446 			wm_put_swsm_semaphore(sc);
   12447 		delay(5000);
   12448 	}
   12449 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12450 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12451 	return 1;
   12452 }
   12453 
   12454 static void
   12455 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12456 {
   12457 	uint32_t swfw_sync;
   12458 
   12459 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12460 		device_xname(sc->sc_dev), __func__));
   12461 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12462 
   12463 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12464 		while (wm_get_swsm_semaphore(sc) != 0)
   12465 			continue;
   12466 	}
   12467 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12468 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12469 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12470 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12471 		wm_put_swsm_semaphore(sc);
   12472 }
   12473 
   12474 static int
   12475 wm_get_phy_82575(struct wm_softc *sc)
   12476 {
   12477 
   12478 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12479 		device_xname(sc->sc_dev), __func__));
   12480 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12481 }
   12482 
   12483 static void
   12484 wm_put_phy_82575(struct wm_softc *sc)
   12485 {
   12486 
   12487 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12488 		device_xname(sc->sc_dev), __func__));
   12489 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12490 }
   12491 
   12492 static int
   12493 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12494 {
   12495 	uint32_t ext_ctrl;
   12496 	int timeout = 200;
   12497 
   12498 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12499 		device_xname(sc->sc_dev), __func__));
   12500 
   12501 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12502 	for (timeout = 0; timeout < 200; timeout++) {
   12503 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12504 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12505 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12506 
   12507 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12508 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12509 			return 0;
   12510 		delay(5000);
   12511 	}
   12512 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12513 	    device_xname(sc->sc_dev), ext_ctrl);
   12514 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12515 	return 1;
   12516 }
   12517 
   12518 static void
   12519 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12520 {
   12521 	uint32_t ext_ctrl;
   12522 
   12523 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12524 		device_xname(sc->sc_dev), __func__));
   12525 
   12526 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12527 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12528 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12529 
   12530 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12531 }
   12532 
   12533 static int
   12534 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12535 {
   12536 	uint32_t ext_ctrl;
   12537 	int timeout;
   12538 
   12539 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12540 		device_xname(sc->sc_dev), __func__));
   12541 	mutex_enter(sc->sc_ich_phymtx);
   12542 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12543 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12544 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12545 			break;
   12546 		delay(1000);
   12547 	}
   12548 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12549 		printf("%s: SW has already locked the resource\n",
   12550 		    device_xname(sc->sc_dev));
   12551 		goto out;
   12552 	}
   12553 
   12554 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12555 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12556 	for (timeout = 0; timeout < 1000; timeout++) {
   12557 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12558 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12559 			break;
   12560 		delay(1000);
   12561 	}
   12562 	if (timeout >= 1000) {
   12563 		printf("%s: failed to acquire semaphore\n",
   12564 		    device_xname(sc->sc_dev));
   12565 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12566 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12567 		goto out;
   12568 	}
   12569 	return 0;
   12570 
   12571 out:
   12572 	mutex_exit(sc->sc_ich_phymtx);
   12573 	return 1;
   12574 }
   12575 
   12576 static void
   12577 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12578 {
   12579 	uint32_t ext_ctrl;
   12580 
   12581 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12582 		device_xname(sc->sc_dev), __func__));
   12583 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12584 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12585 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12586 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12587 	} else {
   12588 		printf("%s: Semaphore unexpectedly released\n",
   12589 		    device_xname(sc->sc_dev));
   12590 	}
   12591 
   12592 	mutex_exit(sc->sc_ich_phymtx);
   12593 }
   12594 
   12595 static int
   12596 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12597 {
   12598 
   12599 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12600 		device_xname(sc->sc_dev), __func__));
   12601 	mutex_enter(sc->sc_ich_nvmmtx);
   12602 
   12603 	return 0;
   12604 }
   12605 
   12606 static void
   12607 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12608 {
   12609 
   12610 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12611 		device_xname(sc->sc_dev), __func__));
   12612 	mutex_exit(sc->sc_ich_nvmmtx);
   12613 }
   12614 
   12615 static int
   12616 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12617 {
   12618 	int i = 0;
   12619 	uint32_t reg;
   12620 
   12621 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12622 		device_xname(sc->sc_dev), __func__));
   12623 
   12624 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12625 	do {
   12626 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12627 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12628 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12629 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12630 			break;
   12631 		delay(2*1000);
   12632 		i++;
   12633 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12634 
   12635 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12636 		wm_put_hw_semaphore_82573(sc);
   12637 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12638 		    device_xname(sc->sc_dev));
   12639 		return -1;
   12640 	}
   12641 
   12642 	return 0;
   12643 }
   12644 
   12645 static void
   12646 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12647 {
   12648 	uint32_t reg;
   12649 
   12650 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12651 		device_xname(sc->sc_dev), __func__));
   12652 
   12653 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12654 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12655 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12656 }
   12657 
   12658 /*
   12659  * Management mode and power management related subroutines.
   12660  * BMC, AMT, suspend/resume and EEE.
   12661  */
   12662 
   12663 #ifdef WM_WOL
   12664 static int
   12665 wm_check_mng_mode(struct wm_softc *sc)
   12666 {
   12667 	int rv;
   12668 
   12669 	switch (sc->sc_type) {
   12670 	case WM_T_ICH8:
   12671 	case WM_T_ICH9:
   12672 	case WM_T_ICH10:
   12673 	case WM_T_PCH:
   12674 	case WM_T_PCH2:
   12675 	case WM_T_PCH_LPT:
   12676 	case WM_T_PCH_SPT:
   12677 		rv = wm_check_mng_mode_ich8lan(sc);
   12678 		break;
   12679 	case WM_T_82574:
   12680 	case WM_T_82583:
   12681 		rv = wm_check_mng_mode_82574(sc);
   12682 		break;
   12683 	case WM_T_82571:
   12684 	case WM_T_82572:
   12685 	case WM_T_82573:
   12686 	case WM_T_80003:
   12687 		rv = wm_check_mng_mode_generic(sc);
   12688 		break;
   12689 	default:
		/* nothing to do */
   12691 		rv = 0;
   12692 		break;
   12693 	}
   12694 
   12695 	return rv;
   12696 }
   12697 
   12698 static int
   12699 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12700 {
   12701 	uint32_t fwsm;
   12702 
   12703 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12704 
   12705 	if (((fwsm & FWSM_FW_VALID) != 0)
   12706 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12707 		return 1;
   12708 
   12709 	return 0;
   12710 }
   12711 
   12712 static int
   12713 wm_check_mng_mode_82574(struct wm_softc *sc)
   12714 {
   12715 	uint16_t data;
   12716 
   12717 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12718 
   12719 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12720 		return 1;
   12721 
   12722 	return 0;
   12723 }
   12724 
   12725 static int
   12726 wm_check_mng_mode_generic(struct wm_softc *sc)
   12727 {
   12728 	uint32_t fwsm;
   12729 
   12730 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12731 
   12732 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12733 		return 1;
   12734 
   12735 	return 0;
   12736 }
   12737 #endif /* WM_WOL */
   12738 
   12739 static int
   12740 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12741 {
   12742 	uint32_t manc, fwsm, factps;
   12743 
   12744 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12745 		return 0;
   12746 
   12747 	manc = CSR_READ(sc, WMREG_MANC);
   12748 
   12749 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12750 		device_xname(sc->sc_dev), manc));
   12751 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12752 		return 0;
   12753 
   12754 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12755 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12756 		factps = CSR_READ(sc, WMREG_FACTPS);
   12757 		if (((factps & FACTPS_MNGCG) == 0)
   12758 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12759 			return 1;
   12760 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
   12761 		uint16_t data;
   12762 
   12763 		factps = CSR_READ(sc, WMREG_FACTPS);
   12764 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12765 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12766 			device_xname(sc->sc_dev), factps, data));
   12767 		if (((factps & FACTPS_MNGCG) == 0)
   12768 		    && ((data & NVM_CFG2_MNGM_MASK)
   12769 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12770 			return 1;
   12771 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12772 	    && ((manc & MANC_ASF_EN) == 0))
   12773 		return 1;
   12774 
   12775 	return 0;
   12776 }
   12777 
   12778 static bool
   12779 wm_phy_resetisblocked(struct wm_softc *sc)
   12780 {
   12781 	bool blocked = false;
   12782 	uint32_t reg;
   12783 	int i = 0;
   12784 
   12785 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12786 		device_xname(sc->sc_dev), __func__));
   12787 
   12788 	switch (sc->sc_type) {
   12789 	case WM_T_ICH8:
   12790 	case WM_T_ICH9:
   12791 	case WM_T_ICH10:
   12792 	case WM_T_PCH:
   12793 	case WM_T_PCH2:
   12794 	case WM_T_PCH_LPT:
   12795 	case WM_T_PCH_SPT:
   12796 		do {
   12797 			reg = CSR_READ(sc, WMREG_FWSM);
   12798 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12799 				blocked = true;
   12800 				delay(10*1000);
   12801 				continue;
   12802 			}
   12803 			blocked = false;
		} while (blocked && (i++ < 30));
		return blocked;
   12807 	case WM_T_82571:
   12808 	case WM_T_82572:
   12809 	case WM_T_82573:
   12810 	case WM_T_82574:
   12811 	case WM_T_82583:
   12812 	case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
   12819 	default:
   12820 		/* no problem */
   12821 		break;
   12822 	}
   12823 
   12824 	return false;
   12825 }
   12826 
   12827 static void
   12828 wm_get_hw_control(struct wm_softc *sc)
   12829 {
   12830 	uint32_t reg;
   12831 
   12832 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12833 		device_xname(sc->sc_dev), __func__));
   12834 
   12835 	if (sc->sc_type == WM_T_82573) {
   12836 		reg = CSR_READ(sc, WMREG_SWSM);
   12837 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12838 	} else if (sc->sc_type >= WM_T_82571) {
   12839 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12840 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12841 	}
   12842 }
   12843 
   12844 static void
   12845 wm_release_hw_control(struct wm_softc *sc)
   12846 {
   12847 	uint32_t reg;
   12848 
   12849 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12850 		device_xname(sc->sc_dev), __func__));
   12851 
   12852 	if (sc->sc_type == WM_T_82573) {
   12853 		reg = CSR_READ(sc, WMREG_SWSM);
   12854 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12855 	} else if (sc->sc_type >= WM_T_82571) {
   12856 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12857 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12858 	}
   12859 }
   12860 
   12861 static void
   12862 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12863 {
   12864 	uint32_t reg;
   12865 
   12866 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12867 		device_xname(sc->sc_dev), __func__));
   12868 
   12869 	if (sc->sc_type < WM_T_PCH2)
   12870 		return;
   12871 
   12872 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12873 
   12874 	if (gate)
   12875 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12876 	else
   12877 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12878 
   12879 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12880 }
   12881 
   12882 static void
   12883 wm_smbustopci(struct wm_softc *sc)
   12884 {
   12885 	uint32_t fwsm, reg;
   12886 	int rv = 0;
   12887 
   12888 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12889 		device_xname(sc->sc_dev), __func__));
   12890 
   12891 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12892 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12893 
   12894 	/* Disable ULP */
   12895 	wm_ulp_disable(sc);
   12896 
   12897 	/* Acquire PHY semaphore */
   12898 	sc->phy.acquire(sc);
   12899 
   12900 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12901 	switch (sc->sc_type) {
   12902 	case WM_T_PCH_LPT:
   12903 	case WM_T_PCH_SPT:
   12904 		if (wm_phy_is_accessible_pchlan(sc))
   12905 			break;
   12906 
   12907 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12908 		reg |= CTRL_EXT_FORCE_SMBUS;
   12909 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12910 #if 0
   12911 		/* XXX Isn't this required??? */
   12912 		CSR_WRITE_FLUSH(sc);
   12913 #endif
   12914 		delay(50 * 1000);
   12915 		/* FALLTHROUGH */
   12916 	case WM_T_PCH2:
   12917 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12918 			break;
   12919 		/* FALLTHROUGH */
   12920 	case WM_T_PCH:
   12921 		if (sc->sc_type == WM_T_PCH)
   12922 			if ((fwsm & FWSM_FW_VALID) != 0)
   12923 				break;
   12924 
   12925 		if (wm_phy_resetisblocked(sc) == true) {
   12926 			printf("XXX reset is blocked(3)\n");
   12927 			break;
   12928 		}
   12929 
   12930 		wm_toggle_lanphypc_pch_lpt(sc);
   12931 
   12932 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12933 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12934 				break;
   12935 
   12936 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12937 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12938 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12939 
   12940 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12941 				break;
   12942 			rv = -1;
   12943 		}
   12944 		break;
   12945 	default:
   12946 		break;
   12947 	}
   12948 
   12949 	/* Release semaphore */
   12950 	sc->phy.release(sc);
   12951 
   12952 	if (rv == 0) {
   12953 		if (wm_phy_resetisblocked(sc)) {
   12954 			printf("XXX reset is blocked(4)\n");
   12955 			goto out;
   12956 		}
   12957 		wm_reset_phy(sc);
   12958 		if (wm_phy_resetisblocked(sc))
   12959 			printf("XXX reset is blocked(4)\n");
   12960 	}
   12961 
   12962 out:
   12963 	/*
   12964 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12965 	 */
   12966 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12967 		delay(10*1000);
   12968 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12969 	}
   12970 }
   12971 
   12972 static void
   12973 wm_init_manageability(struct wm_softc *sc)
   12974 {
   12975 
   12976 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12977 		device_xname(sc->sc_dev), __func__));
   12978 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12979 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12980 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12981 
   12982 		/* Disable hardware interception of ARP */
   12983 		manc &= ~MANC_ARP_EN;
   12984 
   12985 		/* Enable receiving management packets to the host */
   12986 		if (sc->sc_type >= WM_T_82571) {
   12987 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12989 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12990 		}
   12991 
   12992 		CSR_WRITE(sc, WMREG_MANC, manc);
   12993 	}
   12994 }
   12995 
   12996 static void
   12997 wm_release_manageability(struct wm_softc *sc)
   12998 {
   12999 
   13000 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13001 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13002 
   13003 		manc |= MANC_ARP_EN;
   13004 		if (sc->sc_type >= WM_T_82571)
   13005 			manc &= ~MANC_EN_MNG2HOST;
   13006 
   13007 		CSR_WRITE(sc, WMREG_MANC, manc);
   13008 	}
   13009 }
   13010 
   13011 static void
   13012 wm_get_wakeup(struct wm_softc *sc)
   13013 {
   13014 
   13015 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13016 	switch (sc->sc_type) {
   13017 	case WM_T_82573:
   13018 	case WM_T_82583:
   13019 		sc->sc_flags |= WM_F_HAS_AMT;
   13020 		/* FALLTHROUGH */
   13021 	case WM_T_80003:
   13022 	case WM_T_82575:
   13023 	case WM_T_82576:
   13024 	case WM_T_82580:
   13025 	case WM_T_I350:
   13026 	case WM_T_I354:
   13027 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13028 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13029 		/* FALLTHROUGH */
   13030 	case WM_T_82541:
   13031 	case WM_T_82541_2:
   13032 	case WM_T_82547:
   13033 	case WM_T_82547_2:
   13034 	case WM_T_82571:
   13035 	case WM_T_82572:
   13036 	case WM_T_82574:
   13037 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13038 		break;
   13039 	case WM_T_ICH8:
   13040 	case WM_T_ICH9:
   13041 	case WM_T_ICH10:
   13042 	case WM_T_PCH:
   13043 	case WM_T_PCH2:
   13044 	case WM_T_PCH_LPT:
   13045 	case WM_T_PCH_SPT:
   13046 		sc->sc_flags |= WM_F_HAS_AMT;
   13047 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13048 		break;
   13049 	default:
   13050 		break;
   13051 	}
   13052 
   13053 	/* 1: HAS_MANAGE */
   13054 	if (wm_enable_mng_pass_thru(sc) != 0)
   13055 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13056 
   13057 #ifdef WM_DEBUG
   13058 	printf("\n");
   13059 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   13060 		printf("HAS_AMT,");
   13061 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   13062 		printf("ARC_SUBSYS_VALID,");
   13063 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   13064 		printf("ASF_FIRMWARE_PRES,");
   13065 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   13066 		printf("HAS_MANAGE,");
   13067 	printf("\n");
   13068 #endif
	/*
	 * Note that the WOL flags are set after the EEPROM-related
	 * reset code has run.
	 */
   13073 }
   13074 
/*
 * Unconfigure Ultra Low Power mode.
 * Only for PCH_LPT and newer; all I217 and some I218 variants are
 * excluded (see the device ID checks below).
 */
   13079 static void
   13080 wm_ulp_disable(struct wm_softc *sc)
   13081 {
   13082 	uint32_t reg;
   13083 	int i = 0;
   13084 
   13085 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13086 		device_xname(sc->sc_dev), __func__));
   13087 	/* Exclude old devices */
   13088 	if ((sc->sc_type < WM_T_PCH_LPT)
   13089 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13090 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13091 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13092 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13093 		return;
   13094 
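	/*
	 * If management firmware (ME) is running, ask it to undo ULP
	 * mode; otherwise de-configure the PHY and MAC by hand below.
	 */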
   13095 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13096 		/* Request ME un-configure ULP mode in the PHY */
   13097 		reg = CSR_READ(sc, WMREG_H2ME);
   13098 		reg &= ~H2ME_ULP;
   13099 		reg |= H2ME_ENFORCE_SETTINGS;
   13100 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13101 
   13102 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13103 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13104 			if (i++ == 30) {
				printf("%s: %s timed out\n",
				    device_xname(sc->sc_dev), __func__);
   13106 				return;
   13107 			}
   13108 			delay(10 * 1000);
   13109 		}
   13110 		reg = CSR_READ(sc, WMREG_H2ME);
   13111 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13112 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13113 
   13114 		return;
   13115 	}
   13116 
   13117 	/* Acquire semaphore */
   13118 	sc->phy.acquire(sc);
   13119 
   13120 	/* Toggle LANPHYPC */
   13121 	wm_toggle_lanphypc_pch_lpt(sc);
   13122 
   13123 	/* Unforce SMBus mode in PHY */
   13124 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13125 	if (reg == 0x0000 || reg == 0xffff) {
   13126 		uint32_t reg2;
   13127 
   13128 		printf("%s: Force SMBus first.\n", __func__);
   13129 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13130 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13131 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13132 		delay(50 * 1000);
   13133 
   13134 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13135 	}
   13136 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13137 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13138 
   13139 	/* Unforce SMBus mode in MAC */
   13140 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13141 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13142 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13143 
   13144 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13145 	reg |= HV_PM_CTRL_K1_ENA;
   13146 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13147 
   13148 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13149 	reg &= ~(I218_ULP_CONFIG1_IND
   13150 	    | I218_ULP_CONFIG1_STICKY_ULP
   13151 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13152 	    | I218_ULP_CONFIG1_WOL_HOST
   13153 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13154 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13155 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13156 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13157 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13158 	reg |= I218_ULP_CONFIG1_START;
   13159 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13160 
   13161 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13162 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13163 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13164 
   13165 	/* Release semaphore */
   13166 	sc->phy.release(sc);
   13167 	wm_gmii_reset(sc);
   13168 	delay(50 * 1000);
   13169 }
   13170 
   13171 /* WOL in the newer chipset interfaces (pchlan) */
   13172 static void
   13173 wm_enable_phy_wakeup(struct wm_softc *sc)
   13174 {
   13175 #if 0
   13176 	uint16_t preg;
   13177 
   13178 	/* Copy MAC RARs to PHY RARs */
   13179 
   13180 	/* Copy MAC MTA to PHY MTA */
   13181 
   13182 	/* Configure PHY Rx Control register */
   13183 
   13184 	/* Enable PHY wakeup in MAC register */
   13185 
   13186 	/* Configure and enable PHY wakeup in PHY registers */
   13187 
   13188 	/* Activate PHY wakeup */
   13189 
   13190 	/* XXX */
   13191 #endif
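	/*
	 * XXX Not implemented.  Until the steps outlined above are
	 * filled in, wm_enable_wakeup() does not actually arm wakeup
	 * on PCH and newer parts.
	 */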
   13192 }
   13193 
/*
 * Power-down workaround on D3: make sure the IGP3 PHY's voltage
 * regulator actually shuts down, retrying once with a PHY reset
 * if the read-back does not confirm shutdown mode.
 */
   13195 static void
   13196 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13197 {
   13198 	uint32_t reg;
   13199 	int i;
   13200 
   13201 	for (i = 0; i < 2; i++) {
   13202 		/* Disable link */
   13203 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13204 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13205 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13206 
   13207 		/*
   13208 		 * Call gig speed drop workaround on Gig disable before
   13209 		 * accessing any PHY registers
   13210 		 */
   13211 		if (sc->sc_type == WM_T_ICH8)
   13212 			wm_gig_downshift_workaround_ich8lan(sc);
   13213 
   13214 		/* Write VR power-down enable */
   13215 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13216 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13217 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13218 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13219 
   13220 		/* Read it back and test */
   13221 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13222 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13223 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13224 			break;
   13225 
   13226 		/* Issue PHY reset and repeat at most one more time */
   13227 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13228 	}
   13229 }
   13230 
   13231 static void
   13232 wm_enable_wakeup(struct wm_softc *sc)
   13233 {
   13234 	uint32_t reg, pmreg;
   13235 	pcireg_t pmode;
   13236 
   13237 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13238 		device_xname(sc->sc_dev), __func__));
   13239 
   13240 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13241 		&pmreg, NULL) == 0)
   13242 		return;
   13243 
   13244 	/* Advertise the wakeup capability */
   13245 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13246 	    | CTRL_SWDPIN(3));
   13247 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13248 
   13249 	/* ICH workaround */
   13250 	switch (sc->sc_type) {
   13251 	case WM_T_ICH8:
   13252 	case WM_T_ICH9:
   13253 	case WM_T_ICH10:
   13254 	case WM_T_PCH:
   13255 	case WM_T_PCH2:
   13256 	case WM_T_PCH_LPT:
   13257 	case WM_T_PCH_SPT:
   13258 		/* Disable gig during WOL */
   13259 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13260 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13261 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13262 		if (sc->sc_type == WM_T_PCH)
   13263 			wm_gmii_reset(sc);
   13264 
		/* Power down workaround (PHY page 768, reg 25) */
   13266 		if (sc->sc_phytype == WMPHY_82577) {
   13267 			struct mii_softc *child;
   13268 
   13269 			/* Assume that the PHY is copper */
   13270 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13271 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13272 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13273 				    (768 << 5) | 25, 0x0444); /* magic num */
   13274 		}
   13275 		break;
   13276 	default:
   13277 		break;
   13278 	}
   13279 
   13280 	/* Keep the laser running on fiber adapters */
   13281 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13282 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13283 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13284 		reg |= CTRL_EXT_SWDPIN(3);
   13285 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13286 	}
   13287 
   13288 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13289 #if 0	/* for the multicast packet */
   13290 	reg |= WUFC_MC;
   13291 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13292 #endif
   13293 
   13294 	if (sc->sc_type >= WM_T_PCH)
   13295 		wm_enable_phy_wakeup(sc);
   13296 	else {
   13297 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13298 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13299 	}
   13300 
	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
	    || (sc->sc_type == WM_T_PCH2))
	    && (sc->sc_phytype == WMPHY_IGP_3))
		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13306 
   13307 	/* Request PME */
   13308 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13309 #if 0
   13310 	/* Disable WOL */
   13311 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13312 #else
   13313 	/* For WOL */
   13314 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13315 #endif
   13316 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13317 }
   13318 
/* LPLU (Low Power Link Up) */
   13320 
   13321 static void
   13322 wm_lplu_d0_disable(struct wm_softc *sc)
   13323 {
   13324 	uint32_t reg;
   13325 
   13326 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13327 		device_xname(sc->sc_dev), __func__));
   13328 
   13329 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13330 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13331 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13332 }
   13333 
   13334 static void
   13335 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13336 {
   13337 	uint32_t reg;
   13338 
   13339 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13340 		device_xname(sc->sc_dev), __func__));
   13341 
   13342 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13343 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13344 	reg |= HV_OEM_BITS_ANEGNOW;
   13345 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13346 }
   13347 
/* EEE (Energy Efficient Ethernet) */
   13349 
   13350 static void
   13351 wm_set_eee_i350(struct wm_softc *sc)
   13352 {
   13353 	uint32_t ipcnfg, eeer;
   13354 
   13355 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13356 	eeer = CSR_READ(sc, WMREG_EEER);
   13357 
   13358 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13359 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13360 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13361 		    | EEER_LPI_FC);
   13362 	} else {
   13363 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13364 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13365 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13366 		    | EEER_LPI_FC);
   13367 	}
   13368 
   13369 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13370 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13371 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13372 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13373 }
   13374 
/*
 * Workarounds (mainly PHY related).
 * Normally, PHY-specific workarounds live in the PHY drivers.
 */
   13379 
   13380 /* Work-around for 82566 Kumeran PCS lock loss */
   13381 static void
   13382 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13383 {
   13384 #if 0
   13385 	int miistatus, active, i;
   13386 	int reg;
   13387 
   13388 	miistatus = sc->sc_mii.mii_media_status;
   13389 
   13390 	/* If the link is not up, do nothing */
   13391 	if ((miistatus & IFM_ACTIVE) == 0)
   13392 		return;
   13393 
   13394 	active = sc->sc_mii.mii_media_active;
   13395 
   13396 	/* Nothing to do if the link is other than 1Gbps */
   13397 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13398 		return;
   13399 
   13400 	for (i = 0; i < 10; i++) {
   13401 		/* read twice */
   13402 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13403 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13404 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13405 			goto out;	/* GOOD! */
   13406 
   13407 		/* Reset the PHY */
   13408 		wm_gmii_reset(sc);
   13409 		delay(5*1000);
   13410 	}
   13411 
   13412 	/* Disable GigE link negotiation */
   13413 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13414 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13415 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13416 
   13417 	/*
   13418 	 * Call gig speed drop workaround on Gig disable before accessing
   13419 	 * any PHY registers.
   13420 	 */
   13421 	wm_gig_downshift_workaround_ich8lan(sc);
   13422 
   13423 out:
   13424 	return;
   13425 #endif
   13426 }
   13427 
/*
 * WOL from S5 stops working: toggle the Kumeran near-end loopback
 * bit to work around a gigabit downshift problem on IGP3 PHYs.
 */
   13429 static void
   13430 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13431 {
   13432 	uint16_t kmrn_reg;
   13433 
   13434 	/* Only for igp3 */
   13435 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13436 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13437 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13438 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13439 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13440 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13441 	}
   13442 }
   13443 
/*
 * Workarounds for the PCH family of PHYs.
 * XXX Should these be moved to a new PHY driver?
 */
   13448 static void
   13449 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13450 {
   13451 
   13452 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13453 		device_xname(sc->sc_dev), __func__));
   13454 	KASSERT(sc->sc_type == WM_T_PCH);
   13455 
   13456 	if (sc->sc_phytype == WMPHY_82577)
   13457 		wm_set_mdio_slow_mode_hv(sc);
   13458 
   13459 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13460 
   13461 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   13462 
   13463 	/* 82578 */
   13464 	if (sc->sc_phytype == WMPHY_82578) {
   13465 		struct mii_softc *child;
   13466 
   13467 		/*
   13468 		 * Return registers to default by doing a soft reset then
   13469 		 * writing 0x3140 to the control register
   13470 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13471 		 */
   13472 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13473 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13474 			PHY_RESET(child);
   13475 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13476 			    0x3140);
   13477 		}
   13478 	}
   13479 
   13480 	/* Select page 0 */
   13481 	sc->phy.acquire(sc);
   13482 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13483 	sc->phy.release(sc);
   13484 
   13485 	/*
   13486 	 * Configure the K1 Si workaround during phy reset assuming there is
   13487 	 * link so that it disables K1 if link is in 1Gbps.
   13488 	 */
   13489 	wm_k1_gig_workaround_hv(sc, 1);
   13490 }
   13491 
   13492 static void
   13493 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13494 {
   13495 
   13496 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13497 		device_xname(sc->sc_dev), __func__));
   13498 	KASSERT(sc->sc_type == WM_T_PCH2);
   13499 
   13500 	wm_set_mdio_slow_mode_hv(sc);
   13501 }
   13502 
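/*
 * K1 workaround for 82577/82578: disable K1 while the link is up
 * and restore the NVM-configured setting while it is down.  The
 * KMRN diagnostic writes below are link stall fixes.
 */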
   13503 static int
   13504 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13505 {
   13506 	int k1_enable = sc->sc_nvm_k1_enabled;
   13507 
   13508 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13509 		device_xname(sc->sc_dev), __func__));
   13510 
   13511 	if (sc->phy.acquire(sc) != 0)
   13512 		return -1;
   13513 
   13514 	if (link) {
   13515 		k1_enable = 0;
   13516 
   13517 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
   13519 	} else {
   13520 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
   13522 	}
   13523 
   13524 	wm_configure_k1_ich8lan(sc, k1_enable);
   13525 	sc->phy.release(sc);
   13526 
   13527 	return 0;
   13528 }
   13529 
   13530 static void
   13531 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13532 {
   13533 	uint32_t reg;
   13534 
   13535 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13536 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13537 	    reg | HV_KMRN_MDIO_SLOW);
   13538 }
   13539 
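/*
 * Configure the K1 power state.  K1 is a Kumeran power-saving
 * state entered while the link is idle.  Assumes the PHY/NVM
 * semaphore is already held (locked register accessors are used).
 */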
   13540 static void
   13541 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13542 {
   13543 	uint32_t ctrl, ctrl_ext, tmp;
   13544 	uint16_t kmrn_reg;
   13545 
   13546 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13547 
   13548 	if (k1_enable)
   13549 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13550 	else
   13551 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13552 
   13553 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13554 
   13555 	delay(20);
   13556 
   13557 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13558 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13559 
   13560 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13561 	tmp |= CTRL_FRCSPD;
   13562 
   13563 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13564 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13565 	CSR_WRITE_FLUSH(sc);
   13566 	delay(20);
   13567 
   13568 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13569 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13570 	CSR_WRITE_FLUSH(sc);
   13571 	delay(20);
   13572 }
   13573 
/* Special case: the 82575 needs to be initialized by hand ... */
   13575 static void
   13576 wm_reset_init_script_82575(struct wm_softc *sc)
   13577 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
   13582 
   13583 	/* SerDes configuration via SERDESCTRL */
   13584 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13585 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13586 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13587 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13588 
   13589 	/* CCM configuration via CCMCTL register */
   13590 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13591 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13592 
   13593 	/* PCIe lanes configuration */
   13594 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13595 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13596 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13597 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13598 
   13599 	/* PCIe PLL Configuration */
   13600 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13601 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13602 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13603 }
   13604 
   13605 static void
   13606 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13607 {
   13608 	uint32_t reg;
   13609 	uint16_t nvmword;
   13610 	int rv;
   13611 
   13612 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13613 		return;
   13614 
   13615 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13616 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13617 	if (rv != 0) {
   13618 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13619 		    __func__);
   13620 		return;
   13621 	}
   13622 
   13623 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13624 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13625 		reg |= MDICNFG_DEST;
   13626 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13627 		reg |= MDICNFG_COM_MDIO;
   13628 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13629 }
   13630 
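/* A PHY ID register reading as all zeros or all ones is invalid. */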
   13631 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13632 
   13633 static bool
   13634 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13635 {
   13636 	int i;
   13637 	uint32_t reg;
   13638 	uint16_t id1, id2;
   13639 
   13640 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13641 		device_xname(sc->sc_dev), __func__));
   13642 	id1 = id2 = 0xffff;
   13643 	for (i = 0; i < 2; i++) {
   13644 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13645 		if (MII_INVALIDID(id1))
   13646 			continue;
   13647 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13648 		if (MII_INVALIDID(id2))
   13649 			continue;
   13650 		break;
   13651 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   13655 
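	/*
	 * In case the PHY needs to be in MDIO slow mode, set slow mode
	 * and try reading the PHY ID again.
	 */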
   13656 	if (sc->sc_type < WM_T_PCH_LPT) {
   13657 		sc->phy.release(sc);
   13658 		wm_set_mdio_slow_mode_hv(sc);
   13659 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13660 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13661 		sc->phy.acquire(sc);
   13662 	}
   13663 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13664 		printf("XXX return with false\n");
   13665 		return false;
   13666 	}
   13667 out:
   13668 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13669 		/* Only unforce SMBus if ME is not active */
   13670 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13671 			/* Unforce SMBus mode in PHY */
   13672 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13673 			    CV_SMB_CTRL);
   13674 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13675 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13676 			    CV_SMB_CTRL, reg);
   13677 
   13678 			/* Unforce SMBus mode in MAC */
   13679 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13680 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13681 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13682 		}
   13683 	}
   13684 	return true;
   13685 }
   13686 
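/*
 * Toggle the LANPHYPC pin value to power-cycle the PHY.  This is
 * used to bring back an otherwise inaccessible PHY, e.g. one left
 * stuck in SMBus mode.
 */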
   13687 static void
   13688 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13689 {
   13690 	uint32_t reg;
   13691 	int i;
   13692 
   13693 	/* Set PHY Config Counter to 50msec */
   13694 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13695 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13696 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13697 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13698 
   13699 	/* Toggle LANPHYPC */
   13700 	reg = CSR_READ(sc, WMREG_CTRL);
   13701 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13702 	reg &= ~CTRL_LANPHYPC_VALUE;
   13703 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13704 	CSR_WRITE_FLUSH(sc);
   13705 	delay(1000);
   13706 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13707 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13708 	CSR_WRITE_FLUSH(sc);
   13709 
   13710 	if (sc->sc_type < WM_T_PCH_LPT)
   13711 		delay(50 * 1000);
   13712 	else {
   13713 		i = 20;
   13714 
   13715 		do {
   13716 			delay(5 * 1000);
   13717 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13718 		    && i--);
   13719 
   13720 		delay(30 * 1000);
   13721 	}
   13722 }
   13723 
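/*
 * Set the Latency Tolerance Reporting (LTR) value on PCH_LPT and
 * newer: report how long the device can tolerate being kept from
 * memory, derived from the Rx buffer size and the link speed, and
 * clamped to the maximum the platform says it can absorb.
 */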
   13724 static int
   13725 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13726 {
   13727 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13728 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13729 	uint32_t rxa;
   13730 	uint16_t scale = 0, lat_enc = 0;
   13731 	int64_t lat_ns, value;
   13732 
   13733 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13734 		device_xname(sc->sc_dev), __func__));
   13735 
   13736 	if (link) {
   13737 		pcireg_t preg;
   13738 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13739 
   13740 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13741 
   13742 		/*
   13743 		 * Determine the maximum latency tolerated by the device.
   13744 		 *
   13745 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13746 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13747 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13748 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13749 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13750 		 */
   13751 		lat_ns = ((int64_t)rxa * 1024 -
   13752 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13753 		if (lat_ns < 0)
   13754 			lat_ns = 0;
   13755 		else {
   13756 			uint32_t status;
   13757 			uint16_t speed;
   13758 
   13759 			status = CSR_READ(sc, WMREG_STATUS);
   13760 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13761 			case STATUS_SPEED_10:
   13762 				speed = 10;
   13763 				break;
   13764 			case STATUS_SPEED_100:
   13765 				speed = 100;
   13766 				break;
   13767 			case STATUS_SPEED_1000:
   13768 				speed = 1000;
   13769 				break;
   13770 			default:
   13771 				printf("%s: Unknown speed (status = %08x)\n",
   13772 				    device_xname(sc->sc_dev), status);
   13773 				return -1;
   13774 			}
   13775 			lat_ns /= speed;
   13776 		}
   13777 		value = lat_ns;
   13778 
   13779 		while (value > LTRV_VALUE) {
			scale++;
   13781 			value = howmany(value, __BIT(5));
   13782 		}
   13783 		if (scale > LTRV_SCALE_MAX) {
   13784 			printf("%s: Invalid LTR latency scale %d\n",
   13785 			    device_xname(sc->sc_dev), scale);
   13786 			return -1;
   13787 		}
   13788 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13789 
   13790 		/* Determine the maximum latency tolerated by the platform */
   13791 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13792 		    WM_PCI_LTR_CAP_LPT);
   13793 		max_snoop = preg & 0xffff;
   13794 		max_nosnoop = preg >> 16;
   13795 
   13796 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13797 
		if (lat_enc > max_ltr_enc)
			lat_enc = max_ltr_enc;
   13801 	}
   13802 	/* Snoop and No-Snoop latencies the same */
   13803 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13804 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13805 
   13806 	return 0;
   13807 }
   13808 
/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock: if the internal PHY's PLL fails to lock,
 * reset the PHY and cycle the device through D3hot with a
 * workaround value in the autoload register until it locks.
 */
   13813 static void
   13814 wm_pll_workaround_i210(struct wm_softc *sc)
   13815 {
   13816 	uint32_t mdicnfg, wuc;
   13817 	uint32_t reg;
   13818 	pcireg_t pcireg;
   13819 	uint32_t pmreg;
   13820 	uint16_t nvmword, tmp_nvmword;
   13821 	int phyval;
   13822 	bool wa_done = false;
   13823 	int i;
   13824 
   13825 	/* Save WUC and MDICNFG registers */
   13826 	wuc = CSR_READ(sc, WMREG_WUC);
   13827 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13828 
   13829 	reg = mdicnfg & ~MDICNFG_DEST;
   13830 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13831 
   13832 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13833 		nvmword = INVM_DEFAULT_AL;
   13834 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13835 
   13836 	/* Get Power Management cap offset */
   13837 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13838 		&pmreg, NULL) == 0)
   13839 		return;
   13840 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13841 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13842 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13843 
		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
			break; /* OK */
   13847 
   13848 		wa_done = true;
   13849 		/* Directly reset the internal PHY */
   13850 		reg = CSR_READ(sc, WMREG_CTRL);
   13851 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13852 
   13853 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13854 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13855 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13856 
   13857 		CSR_WRITE(sc, WMREG_WUC, 0);
   13858 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13859 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13860 
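		/*
		 * Bounce the device through D3hot and back to D0 so the
		 * autoload runs again with the PLL workaround value
		 * written to EEARBC above.
		 */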
   13861 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13862 		    pmreg + PCI_PMCSR);
   13863 		pcireg |= PCI_PMCSR_STATE_D3;
   13864 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13865 		    pmreg + PCI_PMCSR, pcireg);
   13866 		delay(1000);
   13867 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13868 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13869 		    pmreg + PCI_PMCSR, pcireg);
   13870 
   13871 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13872 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13873 
   13874 		/* Restore WUC register */
   13875 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13876 	}
   13877 
   13878 	/* Restore MDICNFG setting */
   13879 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13880 	if (wa_done)
   13881 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13882 }
   13883