/*	$NetBSD: if_wm.c,v 1.513 2017/06/26 04:03:34 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.513 2017/06/26 04:03:34 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

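/*
 * Illustrative sketch (not part of the driver): with NET_MPSAFE defined,
 * CALLOUT_FLAGS makes the driver's callouts MP-safe; otherwise they run
 * under the kernel lock.  A typical use, assuming a softc "sc", would be:
 */
#if 0
	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
	callout_setfunc(&sc->sc_tick_ch, wm_tick, sc);
#endif
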
/*
 * Maximum number of interrupts this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

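/*
 * Illustrative arithmetic (not part of the driver): the ring macros above
 * rely on WM_NTXDESC(txq) being a power of two, so advancing an index is
 * a mask rather than a modulo.  With 4096 descriptors,
 * WM_NEXTTX(txq, 4095) == ((4095 + 1) & 4095) == 0, i.e. the index wraps
 * back to the start of the ring.
 */
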
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

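/*
 * Illustrative arithmetic (not part of the driver): assuming a ~9k jumbo
 * frame (9018 bytes on the wire), howmany(9018, MCLBYTES) ==
 * howmany(9018, 2048) == 5 buffers per packet, so 256 descriptors hold
 * about 256 / 5 == 51 in-flight jumbo packets, matching the "room for 50"
 * in the comment above.
 */
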
typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t     sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */

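/*
 * Illustrative expansion (not part of the driver): declaring a member
 * with WM_Q_EVCNT_DEFINE(txq, txdw) emits roughly
 *
 *	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
 *	struct evcnt txq_ev_txdw;
 *
 * (the string literal is not macro-expanded; it only sizes the buffer at
 * 18 bytes, comfortably larger than names like "txq00txdw"), and
 * WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 0, xname) then snprintf()s
 * "txq00txdw" into that buffer and attaches the counter under that name.
 */
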
struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index into the MSI-X table */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookies.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix uses sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix uses sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index into the MSI-X table */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

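/*
 * Illustrative sketch (not part of the driver): rxq_tailp always points
 * at the m_next slot of the last mbuf in the chain (or at rxq_head when
 * the chain is empty), so appending is O(1) with no list walk:
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head = NULL, tailp = &rxq_head */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head = m1, tailp = &m1->m_next */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next = m2, tailp = &m2->m_next */
#endif
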
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

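/*
 * Illustrative sketch (not part of the driver): PCI(e) writes are posted,
 * so code that must complete a register write before a delay reads STATUS
 * back to flush it.  A reset sequence, for example, might look like:
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);		/* force the posted write out */
	delay(10000);			/* then wait for the reset */
#endif
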
#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

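/*
 * Illustrative arithmetic (not part of the driver): descriptor base
 * addresses are programmed as two 32-bit halves.  For a DMA address of
 * 0x123456000, WM_CDTXADDR_LO() yields 0x23456000 and WM_CDTXADDR_HI()
 * yields 0x1; with a 32-bit bus_addr_t the high half is always 0.
 */
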
/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * Match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds live in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
   1333 	  WM_T_82580,		WMP_F_SERDES },
   1334 
   1335 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1336 	  "82580 gigabit Ethernet (SGMII)",
   1337 	  WM_T_82580,		WMP_F_COPPER },
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1339 	  "82580 dual-1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 
   1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1343 	  "82580 quad-1000BaseX Ethernet",
   1344 	  WM_T_82580,		WMP_F_FIBER },
   1345 
   1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1347 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1348 	  WM_T_82580,		WMP_F_COPPER },
   1349 
   1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1351 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1352 	  WM_T_82580,		WMP_F_SERDES },
   1353 
   1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1355 	  "DH89XXCC 1000BASE-KX Ethernet",
   1356 	  WM_T_82580,		WMP_F_SERDES },
   1357 
   1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1359 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1360 	  WM_T_82580,		WMP_F_SERDES },
   1361 
   1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1363 	  "I350 Gigabit Network Connection",
   1364 	  WM_T_I350,		WMP_F_COPPER },
   1365 
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1367 	  "I350 Gigabit Fiber Network Connection",
   1368 	  WM_T_I350,		WMP_F_FIBER },
   1369 
   1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1371 	  "I350 Gigabit Backplane Connection",
   1372 	  WM_T_I350,		WMP_F_SERDES },
   1373 
   1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1375 	  "I350 Quad Port Gigabit Ethernet",
   1376 	  WM_T_I350,		WMP_F_SERDES },
   1377 
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1379 	  "I350 Gigabit Connection",
   1380 	  WM_T_I350,		WMP_F_COPPER },
   1381 
   1382 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1383 	  "I354 Gigabit Ethernet (KX)",
   1384 	  WM_T_I354,		WMP_F_SERDES },
   1385 
   1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1387 	  "I354 Gigabit Ethernet (SGMII)",
   1388 	  WM_T_I354,		WMP_F_COPPER },
   1389 
   1390 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1391 	  "I354 Gigabit Ethernet (2.5G)",
   1392 	  WM_T_I354,		WMP_F_COPPER },
   1393 
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1395 	  "I210-T1 Ethernet Server Adapter",
   1396 	  WM_T_I210,		WMP_F_COPPER },
   1397 
   1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1399 	  "I210 Ethernet (Copper OEM)",
   1400 	  WM_T_I210,		WMP_F_COPPER },
   1401 
   1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1403 	  "I210 Ethernet (Copper IT)",
   1404 	  WM_T_I210,		WMP_F_COPPER },
   1405 
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1407 	  "I210 Ethernet (FLASH less)",
   1408 	  WM_T_I210,		WMP_F_COPPER },
   1409 
   1410 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1411 	  "I210 Gigabit Ethernet (Fiber)",
   1412 	  WM_T_I210,		WMP_F_FIBER },
   1413 
   1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1415 	  "I210 Gigabit Ethernet (SERDES)",
   1416 	  WM_T_I210,		WMP_F_SERDES },
   1417 
   1418 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1419 	  "I210 Gigabit Ethernet (FLASH less)",
   1420 	  WM_T_I210,		WMP_F_SERDES },
   1421 
   1422 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1423 	  "I210 Gigabit Ethernet (SGMII)",
   1424 	  WM_T_I210,		WMP_F_COPPER },
   1425 
   1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1427 	  "I211 Ethernet (COPPER)",
   1428 	  WM_T_I211,		WMP_F_COPPER },
   1429 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1430 	  "I217 V Ethernet Connection",
   1431 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1433 	  "I217 LM Ethernet Connection",
   1434 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1435 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1436 	  "I218 V Ethernet Connection",
   1437 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1438 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1439 	  "I218 V Ethernet Connection",
   1440 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1441 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1442 	  "I218 V Ethernet Connection",
   1443 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1444 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1445 	  "I218 LM Ethernet Connection",
   1446 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1447 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1448 	  "I218 LM Ethernet Connection",
   1449 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1450 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1451 	  "I218 LM Ethernet Connection",
   1452 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1453 #if 0
   1454 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1455 	  "I219 V Ethernet Connection",
   1456 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1457 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1458 	  "I219 V Ethernet Connection",
   1459 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1460 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1461 	  "I219 V Ethernet Connection",
   1462 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1463 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1464 	  "I219 V Ethernet Connection",
   1465 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1466 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1467 	  "I219 LM Ethernet Connection",
   1468 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1469 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1470 	  "I219 LM Ethernet Connection",
   1471 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1472 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1473 	  "I219 LM Ethernet Connection",
   1474 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1475 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1476 	  "I219 LM Ethernet Connection",
   1477 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1478 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1479 	  "I219 LM Ethernet Connection",
   1480 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1481 #endif
   1482 	{ 0,			0,
   1483 	  NULL,
   1484 	  0,			0 },
   1485 };
   1486 
   1487 /*
   1488  * Register read/write functions.
   1489  * Other than CSR_{READ|WRITE}().
   1490  */
   1491 
   1492 #if 0 /* Not currently used */
   1493 static inline uint32_t
   1494 wm_io_read(struct wm_softc *sc, int reg)
   1495 {
   1496 
   1497 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1498 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1499 }
   1500 #endif
   1501 
   1502 static inline void
   1503 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1504 {
   1505 
   1506 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1507 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1508 }
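
        /*
         * A note on the two helpers above: both use the chip's indirect
         * I/O access window.  The target register offset is first written
         * at I/O offset 0 (the address window) and the payload is then
         * read or written at I/O offset 4 (the data window), which the
         * hardware applies to the selected register.
         */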
   1509 
   1510 static inline void
   1511 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1512     uint32_t data)
   1513 {
   1514 	uint32_t regval;
   1515 	int i;
   1516 
   1517 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1518 
   1519 	CSR_WRITE(sc, reg, regval);
   1520 
   1521 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1522 		delay(5);
   1523 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1524 			break;
   1525 	}
   1526 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1527 		aprint_error("%s: WARNING:"
   1528 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1529 		    device_xname(sc->sc_dev), reg);
   1530 	}
   1531 }
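
        /*
         * An illustrative reading of the helper above: the payload byte
         * is placed in the SCTL_CTL_DATA_MASK bits and the target 8-bit
         * controller register's offset above SCTL_CTL_ADDR_SHIFT; the
         * register is then polled (5us per iteration, at most
         * SCTL_CTL_POLL_TIMEOUT times) until the hardware sets
         * SCTL_CTL_READY to acknowledge the write.
         */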
   1532 
   1533 static inline void
   1534 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1535 {
   1536 	wa->wa_low = htole32(v & 0xffffffffU);
   1537 	if (sizeof(bus_addr_t) == 8)
   1538 		wa->wa_high = htole32((uint64_t) v >> 32);
   1539 	else
   1540 		wa->wa_high = 0;
   1541 }
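
        /*
         * For example, with a 64-bit bus_addr_t, v = 0x123456789 is
         * stored as wa_low = htole32(0x23456789) and wa_high =
         * htole32(0x1); with a 32-bit bus_addr_t the high word is
         * always zero.
         */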
   1542 
   1543 /*
   1544  * Descriptor sync/init functions.
   1545  */
   1546 static inline void
   1547 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1548 {
   1549 	struct wm_softc *sc = txq->txq_sc;
   1550 
   1551 	/* If it will wrap around, sync to the end of the ring. */
   1552 	if ((start + num) > WM_NTXDESC(txq)) {
   1553 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1554 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1555 		    (WM_NTXDESC(txq) - start), ops);
   1556 		num -= (WM_NTXDESC(txq) - start);
   1557 		start = 0;
   1558 	}
   1559 
   1560 	/* Now sync whatever is left. */
   1561 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1562 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1563 }
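
        /*
         * Worked example: with WM_NTXDESC(txq) == 256, start == 250 and
         * num == 10, the first bus_dmamap_sync() call above covers
         * descriptors 250-255 and the second covers descriptors 0-3.
         */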
   1564 
   1565 static inline void
   1566 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1567 {
   1568 	struct wm_softc *sc = rxq->rxq_sc;
   1569 
   1570 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1571 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1572 }
   1573 
   1574 static inline void
   1575 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1576 {
   1577 	struct wm_softc *sc = rxq->rxq_sc;
   1578 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1579 	struct mbuf *m = rxs->rxs_mbuf;
   1580 
   1581 	/*
   1582 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1583 	 * so that the payload after the Ethernet header is aligned
   1584 	 * to a 4-byte boundary.
    1585 	 *
   1586 	 * XXX BRAINDAMAGE ALERT!
   1587 	 * The stupid chip uses the same size for every buffer, which
   1588 	 * is set in the Receive Control register.  We are using the 2K
   1589 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1590 	 * reason, we can't "scoot" packets longer than the standard
   1591 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1592 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1593 	 * the upper layer copy the headers.
   1594 	 */
   1595 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1596 
   1597 	if (sc->sc_type == WM_T_82574) {
   1598 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1599 		rxd->erx_data.erxd_addr =
   1600 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1601 		rxd->erx_data.erxd_dd = 0;
   1602 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1603 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1604 
   1605 		rxd->nqrx_data.nrxd_paddr =
   1606 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1607 		/* Currently, split header is not supported. */
   1608 		rxd->nqrx_data.nrxd_haddr = 0;
   1609 	} else {
   1610 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1611 
   1612 		wm_set_dma_addr(&rxd->wrx_addr,
   1613 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1614 		rxd->wrx_len = 0;
   1615 		rxd->wrx_cksum = 0;
   1616 		rxd->wrx_status = 0;
   1617 		rxd->wrx_errors = 0;
   1618 		rxd->wrx_special = 0;
   1619 	}
   1620 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1621 
   1622 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1623 }
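
        /*
         * Note that wm_init_rxdesc() writes one of three descriptor
         * layouts: the 82574's extended descriptor, the "new queue"
         * (82575 and later) descriptor, or the original wiseman
         * descriptor.  In each case the closing CSR_WRITE() of the
         * receive descriptor tail (RDT) register is what hands the
         * freshly initialized descriptor back to the hardware.
         */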
   1624 
   1625 /*
   1626  * Device driver interface functions and commonly used functions.
   1627  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1628  */
   1629 
   1630 /* Lookup supported device table */
   1631 static const struct wm_product *
   1632 wm_lookup(const struct pci_attach_args *pa)
   1633 {
   1634 	const struct wm_product *wmp;
   1635 
   1636 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1637 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1638 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1639 			return wmp;
   1640 	}
   1641 	return NULL;
   1642 }
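
        /*
         * The scan above relies on the all-zero sentinel that terminates
         * wm_products: the loop stops at the first entry whose wmp_name
         * is NULL, so unmatched devices fall through and return NULL.
         */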
   1643 
   1644 /* The match function (ca_match) */
   1645 static int
   1646 wm_match(device_t parent, cfdata_t cf, void *aux)
   1647 {
   1648 	struct pci_attach_args *pa = aux;
   1649 
   1650 	if (wm_lookup(pa) != NULL)
   1651 		return 1;
   1652 
   1653 	return 0;
   1654 }
   1655 
   1656 /* The attach function (ca_attach) */
   1657 static void
   1658 wm_attach(device_t parent, device_t self, void *aux)
   1659 {
   1660 	struct wm_softc *sc = device_private(self);
   1661 	struct pci_attach_args *pa = aux;
   1662 	prop_dictionary_t dict;
   1663 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1664 	pci_chipset_tag_t pc = pa->pa_pc;
   1665 	int counts[PCI_INTR_TYPE_SIZE];
   1666 	pci_intr_type_t max_type;
   1667 	const char *eetype, *xname;
   1668 	bus_space_tag_t memt;
   1669 	bus_space_handle_t memh;
   1670 	bus_size_t memsize;
   1671 	int memh_valid;
   1672 	int i, error;
   1673 	const struct wm_product *wmp;
   1674 	prop_data_t ea;
   1675 	prop_number_t pn;
   1676 	uint8_t enaddr[ETHER_ADDR_LEN];
   1677 	char buf[256];
   1678 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1679 	pcireg_t preg, memtype;
   1680 	uint16_t eeprom_data, apme_mask;
   1681 	bool force_clear_smbi;
   1682 	uint32_t link_mode;
   1683 	uint32_t reg;
   1684 
   1685 	sc->sc_dev = self;
   1686 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1687 	sc->sc_core_stopping = false;
   1688 
   1689 	wmp = wm_lookup(pa);
   1690 #ifdef DIAGNOSTIC
   1691 	if (wmp == NULL) {
   1692 		printf("\n");
   1693 		panic("wm_attach: impossible");
   1694 	}
   1695 #endif
   1696 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1697 
   1698 	sc->sc_pc = pa->pa_pc;
   1699 	sc->sc_pcitag = pa->pa_tag;
   1700 
   1701 	if (pci_dma64_available(pa))
   1702 		sc->sc_dmat = pa->pa_dmat64;
   1703 	else
   1704 		sc->sc_dmat = pa->pa_dmat;
   1705 
   1706 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1707 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1708 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1709 
   1710 	sc->sc_type = wmp->wmp_type;
   1711 
   1712 	/* Set default function pointers */
   1713 	sc->phy.acquire = wm_get_null;
   1714 	sc->phy.release = wm_put_null;
   1715 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1716 
   1717 	if (sc->sc_type < WM_T_82543) {
   1718 		if (sc->sc_rev < 2) {
   1719 			aprint_error_dev(sc->sc_dev,
   1720 			    "i82542 must be at least rev. 2\n");
   1721 			return;
   1722 		}
   1723 		if (sc->sc_rev < 3)
   1724 			sc->sc_type = WM_T_82542_2_0;
   1725 	}
   1726 
   1727 	/*
   1728 	 * Disable MSI for Errata:
   1729 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1730 	 *
   1731 	 *  82544: Errata 25
   1732 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1733 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1734 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1735 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1736 	 *
   1737 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1738 	 *
   1739 	 *  82571 & 82572: Errata 63
   1740 	 */
   1741 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1742 	    || (sc->sc_type == WM_T_82572))
   1743 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1744 
   1745 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1746 	    || (sc->sc_type == WM_T_82580)
   1747 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1748 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1749 		sc->sc_flags |= WM_F_NEWQUEUE;
   1750 
   1751 	/* Set device properties (mactype) */
   1752 	dict = device_properties(sc->sc_dev);
   1753 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1754 
   1755 	/*
    1756 	 * Map the device.  All devices support memory-mapped access,
   1757 	 * and it is really required for normal operation.
   1758 	 */
   1759 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1760 	switch (memtype) {
   1761 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1762 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1763 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1764 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1765 		break;
   1766 	default:
   1767 		memh_valid = 0;
   1768 		break;
   1769 	}
   1770 
   1771 	if (memh_valid) {
   1772 		sc->sc_st = memt;
   1773 		sc->sc_sh = memh;
   1774 		sc->sc_ss = memsize;
   1775 	} else {
   1776 		aprint_error_dev(sc->sc_dev,
   1777 		    "unable to map device registers\n");
   1778 		return;
   1779 	}
   1780 
   1781 	/*
   1782 	 * In addition, i82544 and later support I/O mapped indirect
   1783 	 * register access.  It is not desirable (nor supported in
   1784 	 * this driver) to use it for normal operation, though it is
   1785 	 * required to work around bugs in some chip versions.
   1786 	 */
   1787 	if (sc->sc_type >= WM_T_82544) {
   1788 		/* First we have to find the I/O BAR. */
   1789 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1790 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1791 			if (memtype == PCI_MAPREG_TYPE_IO)
   1792 				break;
   1793 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1794 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1795 				i += 4;	/* skip high bits, too */
   1796 		}
   1797 		if (i < PCI_MAPREG_END) {
   1798 			/*
    1799 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1800 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1801 			 * That's no problem because the newer chips don't
    1802 			 * have this bug.
    1803 			 *
    1804 			 * The i8254x apparently doesn't respond when the
    1805 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1806 			 * been configured.
   1807 			 */
   1808 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1809 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1810 				aprint_error_dev(sc->sc_dev,
   1811 				    "WARNING: I/O BAR at zero.\n");
   1812 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1813 					0, &sc->sc_iot, &sc->sc_ioh,
   1814 					NULL, &sc->sc_ios) == 0) {
   1815 				sc->sc_flags |= WM_F_IOH_VALID;
   1816 			} else {
   1817 				aprint_error_dev(sc->sc_dev,
   1818 				    "WARNING: unable to map I/O space\n");
   1819 			}
   1820 		}
   1822 	}
   1823 
   1824 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1825 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1826 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1827 	if (sc->sc_type < WM_T_82542_2_1)
   1828 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1829 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1830 
   1831 	/* power up chip */
   1832 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1833 	    NULL)) && error != EOPNOTSUPP) {
   1834 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1835 		return;
   1836 	}
   1837 
   1838 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1839 
   1840 	/* Allocation settings */
   1841 	max_type = PCI_INTR_TYPE_MSIX;
   1842 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1843 	counts[PCI_INTR_TYPE_MSI] = 1;
   1844 	counts[PCI_INTR_TYPE_INTX] = 1;
   1845 	/* overridden by disable flags */
   1846 	if (wm_disable_msi != 0) {
   1847 		counts[PCI_INTR_TYPE_MSI] = 0;
   1848 		if (wm_disable_msix != 0) {
   1849 			max_type = PCI_INTR_TYPE_INTX;
   1850 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1851 		}
   1852 	} else if (wm_disable_msix != 0) {
   1853 		max_type = PCI_INTR_TYPE_MSI;
   1854 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1855 	}
   1856 
   1857 alloc_retry:
   1858 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1859 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1860 		return;
   1861 	}
   1862 
   1863 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1864 		error = wm_setup_msix(sc);
   1865 		if (error) {
   1866 			pci_intr_release(pc, sc->sc_intrs,
   1867 			    counts[PCI_INTR_TYPE_MSIX]);
   1868 
   1869 			/* Setup for MSI: Disable MSI-X */
   1870 			max_type = PCI_INTR_TYPE_MSI;
   1871 			counts[PCI_INTR_TYPE_MSI] = 1;
   1872 			counts[PCI_INTR_TYPE_INTX] = 1;
   1873 			goto alloc_retry;
   1874 		}
    1875 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1876 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1877 		error = wm_setup_legacy(sc);
   1878 		if (error) {
   1879 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1880 			    counts[PCI_INTR_TYPE_MSI]);
   1881 
   1882 			/* The next try is for INTx: Disable MSI */
   1883 			max_type = PCI_INTR_TYPE_INTX;
   1884 			counts[PCI_INTR_TYPE_INTX] = 1;
   1885 			goto alloc_retry;
   1886 		}
   1887 	} else {
   1888 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1889 		error = wm_setup_legacy(sc);
   1890 		if (error) {
   1891 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1892 			    counts[PCI_INTR_TYPE_INTX]);
   1893 			return;
   1894 		}
   1895 	}
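
        	/*
        	 * To summarize the fallback ladder above: MSI-X is tried
        	 * first with one vector per queue plus one for link status;
        	 * on failure the vectors are released and a single MSI is
        	 * tried; failing that, a shared INTx line is used.  The MSI
        	 * and INTx paths call wm_adjust_qnum(sc, 0) because they
        	 * cannot drive multiqueue.
        	 */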
   1896 
   1897 	/*
   1898 	 * Check the function ID (unit number of the chip).
   1899 	 */
   1900 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
    1901 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
   1902 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1903 	    || (sc->sc_type == WM_T_82580)
   1904 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1905 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1906 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1907 	else
   1908 		sc->sc_funcid = 0;
   1909 
   1910 	/*
   1911 	 * Determine a few things about the bus we're connected to.
   1912 	 */
   1913 	if (sc->sc_type < WM_T_82543) {
   1914 		/* We don't really know the bus characteristics here. */
   1915 		sc->sc_bus_speed = 33;
   1916 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1917 		/*
    1918 		 * CSA (Communication Streaming Architecture) is about as fast
    1919 		 * as a 32-bit 66MHz PCI bus.
   1920 		 */
   1921 		sc->sc_flags |= WM_F_CSA;
   1922 		sc->sc_bus_speed = 66;
   1923 		aprint_verbose_dev(sc->sc_dev,
   1924 		    "Communication Streaming Architecture\n");
   1925 		if (sc->sc_type == WM_T_82547) {
   1926 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1927 			callout_setfunc(&sc->sc_txfifo_ch,
   1928 					wm_82547_txfifo_stall, sc);
   1929 			aprint_verbose_dev(sc->sc_dev,
   1930 			    "using 82547 Tx FIFO stall work-around\n");
   1931 		}
   1932 	} else if (sc->sc_type >= WM_T_82571) {
   1933 		sc->sc_flags |= WM_F_PCIE;
   1934 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1935 		    && (sc->sc_type != WM_T_ICH10)
   1936 		    && (sc->sc_type != WM_T_PCH)
   1937 		    && (sc->sc_type != WM_T_PCH2)
   1938 		    && (sc->sc_type != WM_T_PCH_LPT)
   1939 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1940 			/* ICH* and PCH* have no PCIe capability registers */
   1941 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1942 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1943 				NULL) == 0)
   1944 				aprint_error_dev(sc->sc_dev,
   1945 				    "unable to find PCIe capability\n");
   1946 		}
   1947 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1948 	} else {
   1949 		reg = CSR_READ(sc, WMREG_STATUS);
   1950 		if (reg & STATUS_BUS64)
   1951 			sc->sc_flags |= WM_F_BUS64;
   1952 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1953 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1954 
   1955 			sc->sc_flags |= WM_F_PCIX;
   1956 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1957 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIX capability\n");
   1960 			else if (sc->sc_type != WM_T_82545_3 &&
   1961 				 sc->sc_type != WM_T_82546_3) {
   1962 				/*
   1963 				 * Work around a problem caused by the BIOS
   1964 				 * setting the max memory read byte count
   1965 				 * incorrectly.
   1966 				 */
   1967 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1968 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1969 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1970 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1971 
   1972 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1973 				    PCIX_CMD_BYTECNT_SHIFT;
   1974 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1975 				    PCIX_STATUS_MAXB_SHIFT;
   1976 				if (bytecnt > maxb) {
   1977 					aprint_verbose_dev(sc->sc_dev,
   1978 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1979 					    512 << bytecnt, 512 << maxb);
   1980 					pcix_cmd = (pcix_cmd &
   1981 					    ~PCIX_CMD_BYTECNT_MASK) |
   1982 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1983 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1984 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1985 					    pcix_cmd);
   1986 				}
   1987 			}
   1988 		}
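        		/*
        		 * A worked example of the MMRBC fixup above: both
        		 * fields encode a byte count as 512 << n, so if the
        		 * BIOS left the command register at 4096 bytes
        		 * (n == 3) while the status register allows only
        		 * 2048 (n == 2), the command field is rewritten to
        		 * 2048.
        		 */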
   1989 		/*
   1990 		 * The quad port adapter is special; it has a PCIX-PCIX
   1991 		 * bridge on the board, and can run the secondary bus at
   1992 		 * a higher speed.
   1993 		 */
   1994 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1995 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1996 								      : 66;
   1997 		} else if (sc->sc_flags & WM_F_PCIX) {
   1998 			switch (reg & STATUS_PCIXSPD_MASK) {
   1999 			case STATUS_PCIXSPD_50_66:
   2000 				sc->sc_bus_speed = 66;
   2001 				break;
   2002 			case STATUS_PCIXSPD_66_100:
   2003 				sc->sc_bus_speed = 100;
   2004 				break;
   2005 			case STATUS_PCIXSPD_100_133:
   2006 				sc->sc_bus_speed = 133;
   2007 				break;
   2008 			default:
   2009 				aprint_error_dev(sc->sc_dev,
   2010 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2011 				    reg & STATUS_PCIXSPD_MASK);
   2012 				sc->sc_bus_speed = 66;
   2013 				break;
   2014 			}
   2015 		} else
   2016 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2017 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2018 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2019 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2020 	}
   2021 
   2022 	/* clear interesting stat counters */
   2023 	CSR_READ(sc, WMREG_COLC);
   2024 	CSR_READ(sc, WMREG_RXERRC);
   2025 
   2026 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2027 	    || (sc->sc_type >= WM_T_ICH8))
   2028 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2029 	if (sc->sc_type >= WM_T_ICH8)
   2030 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2031 
    2032 	/* Set up NVM parameters and PHY/NVM semaphore functions */
   2033 	switch (sc->sc_type) {
   2034 	case WM_T_82542_2_0:
   2035 	case WM_T_82542_2_1:
   2036 	case WM_T_82543:
   2037 	case WM_T_82544:
   2038 		/* Microwire */
   2039 		sc->sc_nvm_wordsize = 64;
   2040 		sc->sc_nvm_addrbits = 6;
   2041 		break;
   2042 	case WM_T_82540:
   2043 	case WM_T_82545:
   2044 	case WM_T_82545_3:
   2045 	case WM_T_82546:
   2046 	case WM_T_82546_3:
   2047 		/* Microwire */
   2048 		reg = CSR_READ(sc, WMREG_EECD);
   2049 		if (reg & EECD_EE_SIZE) {
   2050 			sc->sc_nvm_wordsize = 256;
   2051 			sc->sc_nvm_addrbits = 8;
   2052 		} else {
   2053 			sc->sc_nvm_wordsize = 64;
   2054 			sc->sc_nvm_addrbits = 6;
   2055 		}
   2056 		sc->sc_flags |= WM_F_LOCK_EECD;
   2057 		break;
   2058 	case WM_T_82541:
   2059 	case WM_T_82541_2:
   2060 	case WM_T_82547:
   2061 	case WM_T_82547_2:
   2062 		sc->sc_flags |= WM_F_LOCK_EECD;
   2063 		reg = CSR_READ(sc, WMREG_EECD);
   2064 		if (reg & EECD_EE_TYPE) {
   2065 			/* SPI */
   2066 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2067 			wm_nvm_set_addrbits_size_eecd(sc);
   2068 		} else {
   2069 			/* Microwire */
   2070 			if ((reg & EECD_EE_ABITS) != 0) {
   2071 				sc->sc_nvm_wordsize = 256;
   2072 				sc->sc_nvm_addrbits = 8;
   2073 			} else {
   2074 				sc->sc_nvm_wordsize = 64;
   2075 				sc->sc_nvm_addrbits = 6;
   2076 			}
   2077 		}
   2078 		break;
   2079 	case WM_T_82571:
   2080 	case WM_T_82572:
   2081 		/* SPI */
   2082 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2083 		wm_nvm_set_addrbits_size_eecd(sc);
   2084 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   2085 		sc->phy.acquire = wm_get_swsm_semaphore;
   2086 		sc->phy.release = wm_put_swsm_semaphore;
   2087 		break;
   2088 	case WM_T_82573:
   2089 	case WM_T_82574:
   2090 	case WM_T_82583:
   2091 		if (sc->sc_type == WM_T_82573) {
   2092 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2093 			sc->phy.acquire = wm_get_swsm_semaphore;
   2094 			sc->phy.release = wm_put_swsm_semaphore;
   2095 		} else {
   2096 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2097 			/* Both PHY and NVM use the same semaphore. */
   2098 			sc->phy.acquire
   2099 			    = wm_get_swfwhw_semaphore;
   2100 			sc->phy.release
   2101 			    = wm_put_swfwhw_semaphore;
   2102 		}
   2103 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2104 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2105 			sc->sc_nvm_wordsize = 2048;
   2106 		} else {
   2107 			/* SPI */
   2108 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2109 			wm_nvm_set_addrbits_size_eecd(sc);
   2110 		}
   2111 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2112 		break;
   2113 	case WM_T_82575:
   2114 	case WM_T_82576:
   2115 	case WM_T_82580:
   2116 	case WM_T_I350:
   2117 	case WM_T_I354:
   2118 	case WM_T_80003:
   2119 		/* SPI */
   2120 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2121 		wm_nvm_set_addrbits_size_eecd(sc);
   2122 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2123 		    | WM_F_LOCK_SWSM;
   2124 		sc->phy.acquire = wm_get_phy_82575;
   2125 		sc->phy.release = wm_put_phy_82575;
   2126 		break;
   2127 	case WM_T_ICH8:
   2128 	case WM_T_ICH9:
   2129 	case WM_T_ICH10:
   2130 	case WM_T_PCH:
   2131 	case WM_T_PCH2:
   2132 	case WM_T_PCH_LPT:
   2133 		/* FLASH */
   2134 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2135 		sc->sc_nvm_wordsize = 2048;
   2136 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2137 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2138 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2139 			aprint_error_dev(sc->sc_dev,
   2140 			    "can't map FLASH registers\n");
   2141 			goto out;
   2142 		}
   2143 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2144 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2145 		    ICH_FLASH_SECTOR_SIZE;
   2146 		sc->sc_ich8_flash_bank_size =
   2147 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2148 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2149 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2150 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
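        		/*
        		 * A reading of the math above: GFPREG supplies the
        		 * first and last flash sectors (ICH_FLASH_SECTOR_SIZE
        		 * bytes each) of the NVM region; the difference is
        		 * converted to bytes and then divided by four, i.e. by
        		 * two banks times two bytes per word, leaving the
        		 * per-bank size in 16-bit words.
        		 */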
   2151 		sc->sc_flashreg_offset = 0;
   2152 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2153 		sc->phy.release = wm_put_swflag_ich8lan;
   2154 		break;
   2155 	case WM_T_PCH_SPT:
   2156 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2157 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2158 		sc->sc_flasht = sc->sc_st;
   2159 		sc->sc_flashh = sc->sc_sh;
   2160 		sc->sc_ich8_flash_base = 0;
   2161 		sc->sc_nvm_wordsize =
   2162 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2163 			* NVM_SIZE_MULTIPLIER;
    2164 		/* It is a size in bytes; we want words */
   2165 		sc->sc_nvm_wordsize /= 2;
   2166 		/* assume 2 banks */
   2167 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2168 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2169 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2170 		sc->phy.release = wm_put_swflag_ich8lan;
   2171 		break;
   2172 	case WM_T_I210:
   2173 	case WM_T_I211:
   2174 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2175 			wm_nvm_set_addrbits_size_eecd(sc);
   2176 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2177 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2178 		} else {
   2179 			sc->sc_nvm_wordsize = INVM_SIZE;
   2180 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2181 		}
   2182 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2183 		sc->phy.acquire = wm_get_phy_82575;
   2184 		sc->phy.release = wm_put_phy_82575;
   2185 		break;
   2186 	default:
   2187 		break;
   2188 	}
   2189 
   2190 	/* Reset the chip to a known state. */
   2191 	wm_reset(sc);
   2192 
   2193 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 		reg = CSR_READ(sc, WMREG_SWSM2);
   2198 		if ((reg & SWSM2_LOCK) == 0) {
   2199 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2200 			force_clear_smbi = true;
   2201 		} else
   2202 			force_clear_smbi = false;
   2203 		break;
   2204 	case WM_T_82573:
   2205 	case WM_T_82574:
   2206 	case WM_T_82583:
   2207 		force_clear_smbi = true;
   2208 		break;
   2209 	default:
   2210 		force_clear_smbi = false;
   2211 		break;
   2212 	}
   2213 	if (force_clear_smbi) {
   2214 		reg = CSR_READ(sc, WMREG_SWSM);
   2215 		if ((reg & SWSM_SMBI) != 0)
   2216 			aprint_error_dev(sc->sc_dev,
   2217 			    "Please update the Bootagent\n");
   2218 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2219 	}
   2220 
   2221 	/*
    2222 	 * Defer printing the EEPROM type until after verifying the checksum.
   2223 	 * This allows the EEPROM type to be printed correctly in the case
   2224 	 * that no EEPROM is attached.
   2225 	 */
   2226 	/*
   2227 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2228 	 * this for later, so we can fail future reads from the EEPROM.
   2229 	 */
   2230 	if (wm_nvm_validate_checksum(sc)) {
   2231 		/*
   2232 		 * Read twice again because some PCI-e parts fail the
    2233 		 * first check due to the link being in a sleep state.
   2234 		 */
   2235 		if (wm_nvm_validate_checksum(sc))
   2236 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2237 	}
   2238 
   2239 	/* Set device properties (macflags) */
   2240 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2241 
   2242 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2243 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2244 	else {
   2245 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2246 		    sc->sc_nvm_wordsize);
   2247 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2248 			aprint_verbose("iNVM");
   2249 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2250 			aprint_verbose("FLASH(HW)");
   2251 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2252 			aprint_verbose("FLASH");
   2253 		else {
   2254 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2255 				eetype = "SPI";
   2256 			else
   2257 				eetype = "MicroWire";
   2258 			aprint_verbose("(%d address bits) %s EEPROM",
   2259 			    sc->sc_nvm_addrbits, eetype);
   2260 		}
   2261 	}
   2262 	wm_nvm_version(sc);
   2263 	aprint_verbose("\n");
   2264 
   2265 	/* Check for I21[01] PLL workaround */
   2266 	if (sc->sc_type == WM_T_I210)
   2267 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2268 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2269 		/* NVM image release 3.25 has a workaround */
   2270 		if ((sc->sc_nvm_ver_major < 3)
   2271 		    || ((sc->sc_nvm_ver_major == 3)
   2272 			&& (sc->sc_nvm_ver_minor < 25))) {
   2273 			aprint_verbose_dev(sc->sc_dev,
   2274 			    "ROM image version %d.%d is older than 3.25\n",
   2275 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2276 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2277 		}
   2278 	}
   2279 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2280 		wm_pll_workaround_i210(sc);
   2281 
   2282 	wm_get_wakeup(sc);
   2283 
   2284 	/* Non-AMT based hardware can now take control from firmware */
   2285 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2286 		wm_get_hw_control(sc);
   2287 
   2288 	/*
   2289 	 * Read the Ethernet address from the EEPROM, if not first found
   2290 	 * in device properties.
   2291 	 */
   2292 	ea = prop_dictionary_get(dict, "mac-address");
   2293 	if (ea != NULL) {
   2294 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2295 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2296 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2297 	} else {
   2298 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2299 			aprint_error_dev(sc->sc_dev,
   2300 			    "unable to read Ethernet address\n");
   2301 			goto out;
   2302 		}
   2303 	}
   2304 
   2305 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2306 	    ether_sprintf(enaddr));
   2307 
   2308 	/*
   2309 	 * Read the config info from the EEPROM, and set up various
   2310 	 * bits in the control registers based on their contents.
   2311 	 */
   2312 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2313 	if (pn != NULL) {
   2314 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2315 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2316 	} else {
   2317 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2318 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2319 			goto out;
   2320 		}
   2321 	}
   2322 
   2323 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2324 	if (pn != NULL) {
   2325 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2326 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2327 	} else {
   2328 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2329 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2330 			goto out;
   2331 		}
   2332 	}
   2333 
   2334 	/* check for WM_F_WOL */
   2335 	switch (sc->sc_type) {
   2336 	case WM_T_82542_2_0:
   2337 	case WM_T_82542_2_1:
   2338 	case WM_T_82543:
   2339 		/* dummy? */
   2340 		eeprom_data = 0;
   2341 		apme_mask = NVM_CFG3_APME;
   2342 		break;
   2343 	case WM_T_82544:
   2344 		apme_mask = NVM_CFG2_82544_APM_EN;
   2345 		eeprom_data = cfg2;
   2346 		break;
   2347 	case WM_T_82546:
   2348 	case WM_T_82546_3:
   2349 	case WM_T_82571:
   2350 	case WM_T_82572:
   2351 	case WM_T_82573:
   2352 	case WM_T_82574:
   2353 	case WM_T_82583:
   2354 	case WM_T_80003:
   2355 	default:
   2356 		apme_mask = NVM_CFG3_APME;
   2357 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2358 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2359 		break;
   2360 	case WM_T_82575:
   2361 	case WM_T_82576:
   2362 	case WM_T_82580:
   2363 	case WM_T_I350:
   2364 	case WM_T_I354: /* XXX ok? */
   2365 	case WM_T_ICH8:
   2366 	case WM_T_ICH9:
   2367 	case WM_T_ICH10:
   2368 	case WM_T_PCH:
   2369 	case WM_T_PCH2:
   2370 	case WM_T_PCH_LPT:
   2371 	case WM_T_PCH_SPT:
   2372 		/* XXX The funcid should be checked on some devices */
   2373 		apme_mask = WUC_APME;
   2374 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2375 		break;
   2376 	}
   2377 
   2378 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2379 	if ((eeprom_data & apme_mask) != 0)
   2380 		sc->sc_flags |= WM_F_WOL;
   2381 
   2382 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2383 		/* Check NVM for autonegotiation */
   2384 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2385 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2386 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2387 		}
   2388 	}
   2389 
   2390 	/*
    2391 	 * XXX need special handling for some multiple-port cards
    2392 	 * to disable a particular port.
   2393 	 */
   2394 
   2395 	if (sc->sc_type >= WM_T_82544) {
   2396 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2397 		if (pn != NULL) {
   2398 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2399 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2400 		} else {
   2401 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2402 				aprint_error_dev(sc->sc_dev,
   2403 				    "unable to read SWDPIN\n");
   2404 				goto out;
   2405 			}
   2406 		}
   2407 	}
   2408 
   2409 	if (cfg1 & NVM_CFG1_ILOS)
   2410 		sc->sc_ctrl |= CTRL_ILOS;
   2411 
   2412 	/*
   2413 	 * XXX
    2414 	 * This code isn't correct because pins 2 and 3 are located
    2415 	 * in different positions on newer chips. Check all datasheets.
    2416 	 *
    2417 	 * Until this problem is resolved, only do this for chips <= 82580.
   2418 	 */
   2419 	if (sc->sc_type <= WM_T_82580) {
   2420 		if (sc->sc_type >= WM_T_82544) {
   2421 			sc->sc_ctrl |=
   2422 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2423 			    CTRL_SWDPIO_SHIFT;
   2424 			sc->sc_ctrl |=
   2425 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2426 			    CTRL_SWDPINS_SHIFT;
   2427 		} else {
   2428 			sc->sc_ctrl |=
   2429 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2430 			    CTRL_SWDPIO_SHIFT;
   2431 		}
   2432 	}
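
        	/*
        	 * A note on the shifts above: the NVM SWDPIN word packs the
        	 * software-definable pin I/O-direction bits and initial pin
        	 * values into separate nibbles; each is masked to 4 bits and
        	 * relocated into the CTRL_SWDPIO/CTRL_SWDPINS fields of the
        	 * CTRL register.
        	 */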
   2433 
   2434 	/* XXX For other than 82580? */
   2435 	if (sc->sc_type == WM_T_82580) {
   2436 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2437 		if (nvmword & __BIT(13))
   2438 			sc->sc_ctrl |= CTRL_ILOS;
   2439 	}
   2440 
   2441 #if 0
   2442 	if (sc->sc_type >= WM_T_82544) {
   2443 		if (cfg1 & NVM_CFG1_IPS0)
   2444 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2445 		if (cfg1 & NVM_CFG1_IPS1)
   2446 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2447 		sc->sc_ctrl_ext |=
   2448 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2449 		    CTRL_EXT_SWDPIO_SHIFT;
   2450 		sc->sc_ctrl_ext |=
   2451 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2452 		    CTRL_EXT_SWDPINS_SHIFT;
   2453 	} else {
   2454 		sc->sc_ctrl_ext |=
   2455 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2456 		    CTRL_EXT_SWDPIO_SHIFT;
   2457 	}
   2458 #endif
   2459 
   2460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2461 #if 0
   2462 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2463 #endif
   2464 
   2465 	if (sc->sc_type == WM_T_PCH) {
   2466 		uint16_t val;
   2467 
   2468 		/* Save the NVM K1 bit setting */
   2469 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2470 
   2471 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2472 			sc->sc_nvm_k1_enabled = 1;
   2473 		else
   2474 			sc->sc_nvm_k1_enabled = 0;
   2475 	}
   2476 
   2477 	/*
    2478 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2479 	 * media structures accordingly.
   2480 	 */
   2481 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2482 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2483 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2484 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2485 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2486 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2487 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2488 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2489 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2490 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2491 	    || (sc->sc_type == WM_T_I211)) {
   2492 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2493 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2494 		switch (link_mode) {
   2495 		case CTRL_EXT_LINK_MODE_1000KX:
   2496 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2497 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2498 			break;
   2499 		case CTRL_EXT_LINK_MODE_SGMII:
   2500 			if (wm_sgmii_uses_mdio(sc)) {
   2501 				aprint_verbose_dev(sc->sc_dev,
   2502 				    "SGMII(MDIO)\n");
   2503 				sc->sc_flags |= WM_F_SGMII;
   2504 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2505 				break;
   2506 			}
   2507 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2508 			/*FALLTHROUGH*/
   2509 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2510 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2511 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2512 				if (link_mode
   2513 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2514 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2515 					sc->sc_flags |= WM_F_SGMII;
   2516 				} else {
   2517 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2518 					aprint_verbose_dev(sc->sc_dev,
   2519 					    "SERDES\n");
   2520 				}
   2521 				break;
   2522 			}
   2523 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2524 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2525 
   2526 			/* Change current link mode setting */
   2527 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2528 			switch (sc->sc_mediatype) {
   2529 			case WM_MEDIATYPE_COPPER:
   2530 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2531 				break;
   2532 			case WM_MEDIATYPE_SERDES:
   2533 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2534 				break;
   2535 			default:
   2536 				break;
   2537 			}
   2538 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2539 			break;
   2540 		case CTRL_EXT_LINK_MODE_GMII:
   2541 		default:
   2542 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2543 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2544 			break;
   2545 		}
   2546 
    2547 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2548 			reg |= CTRL_EXT_I2C_ENA;
    2549 		else
    2550 			reg &= ~CTRL_EXT_I2C_ENA;
   2552 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2553 
   2554 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2555 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2556 		else
   2557 			wm_tbi_mediainit(sc);
   2558 	} else if (sc->sc_type < WM_T_82543 ||
   2559 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2560 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2561 			aprint_error_dev(sc->sc_dev,
   2562 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2563 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2564 		}
   2565 		wm_tbi_mediainit(sc);
   2566 	} else {
   2567 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2568 			aprint_error_dev(sc->sc_dev,
   2569 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2570 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2571 		}
   2572 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2573 	}
   2574 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2575 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2576 
   2577 	ifp = &sc->sc_ethercom.ec_if;
   2578 	xname = device_xname(sc->sc_dev);
   2579 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2580 	ifp->if_softc = sc;
   2581 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2582 #ifdef WM_MPSAFE
   2583 	ifp->if_extflags = IFEF_START_MPSAFE;
   2584 #endif
   2585 	ifp->if_ioctl = wm_ioctl;
   2586 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2587 		ifp->if_start = wm_nq_start;
   2588 		/*
   2589 		 * When the number of CPUs is one and the controller can use
    2590 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2591 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2592 		 * the other for link status changes.
   2593 		 * In this situation, wm_nq_transmit() is disadvantageous
   2594 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2595 		 */
   2596 		if (wm_is_using_multiqueue(sc))
   2597 			ifp->if_transmit = wm_nq_transmit;
   2598 	} else {
   2599 		ifp->if_start = wm_start;
   2600 		/*
    2601 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2602 		 */
   2603 		if (wm_is_using_multiqueue(sc))
   2604 			ifp->if_transmit = wm_transmit;
   2605 	}
   2606 	ifp->if_watchdog = wm_watchdog;
   2607 	ifp->if_init = wm_init;
   2608 	ifp->if_stop = wm_stop;
   2609 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2610 	IFQ_SET_READY(&ifp->if_snd);
   2611 
   2612 	/* Check for jumbo frame */
   2613 	switch (sc->sc_type) {
   2614 	case WM_T_82573:
   2615 		/* XXX limited to 9234 if ASPM is disabled */
   2616 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2617 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2618 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2619 		break;
   2620 	case WM_T_82571:
   2621 	case WM_T_82572:
   2622 	case WM_T_82574:
   2623 	case WM_T_82575:
   2624 	case WM_T_82576:
   2625 	case WM_T_82580:
   2626 	case WM_T_I350:
    2627 	case WM_T_I354: /* XXX ok? */
   2628 	case WM_T_I210:
   2629 	case WM_T_I211:
   2630 	case WM_T_80003:
   2631 	case WM_T_ICH9:
   2632 	case WM_T_ICH10:
   2633 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2634 	case WM_T_PCH_LPT:
   2635 	case WM_T_PCH_SPT:
   2636 		/* XXX limited to 9234 */
   2637 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2638 		break;
   2639 	case WM_T_PCH:
   2640 		/* XXX limited to 4096 */
   2641 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2642 		break;
   2643 	case WM_T_82542_2_0:
   2644 	case WM_T_82542_2_1:
   2645 	case WM_T_82583:
   2646 	case WM_T_ICH8:
   2647 		/* No support for jumbo frame */
   2648 		break;
   2649 	default:
   2650 		/* ETHER_MAX_LEN_JUMBO */
   2651 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2652 		break;
   2653 	}
   2654 
    2655 	/* If we're an i82543 or greater, we can support VLANs. */
   2656 	if (sc->sc_type >= WM_T_82543)
   2657 		sc->sc_ethercom.ec_capabilities |=
   2658 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2659 
   2660 	/*
    2661 	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
    2662 	 * on the i82543 and later.
   2663 	 */
   2664 	if (sc->sc_type >= WM_T_82543) {
   2665 		ifp->if_capabilities |=
   2666 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2667 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2668 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2669 		    IFCAP_CSUM_TCPv6_Tx |
   2670 		    IFCAP_CSUM_UDPv6_Tx;
   2671 	}
   2672 
   2673 	/*
   2674 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2675 	 *
   2676 	 *	82541GI (8086:1076) ... no
   2677 	 *	82572EI (8086:10b9) ... yes
   2678 	 */
   2679 	if (sc->sc_type >= WM_T_82571) {
   2680 		ifp->if_capabilities |=
   2681 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2682 	}
   2683 
   2684 	/*
    2685 	 * If we're an i82544 or greater (except the i82547), we can do
   2686 	 * TCP segmentation offload.
   2687 	 */
   2688 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2689 		ifp->if_capabilities |= IFCAP_TSOv4;
   2690 	}
   2691 
   2692 	if (sc->sc_type >= WM_T_82571) {
   2693 		ifp->if_capabilities |= IFCAP_TSOv6;
   2694 	}
   2695 
   2696 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2697 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2698 
   2699 #ifdef WM_MPSAFE
   2700 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2701 #else
   2702 	sc->sc_core_lock = NULL;
   2703 #endif
   2704 
   2705 	/* Attach the interface. */
   2706 	if_initialize(ifp);
   2707 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2708 	ether_ifattach(ifp, enaddr);
   2709 	if_register(ifp);
   2710 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2711 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2712 			  RND_FLAG_DEFAULT);
   2713 
   2714 #ifdef WM_EVENT_COUNTERS
   2715 	/* Attach event counters. */
   2716 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2717 	    NULL, xname, "linkintr");
   2718 
   2719 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2720 	    NULL, xname, "tx_xoff");
   2721 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2722 	    NULL, xname, "tx_xon");
   2723 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2724 	    NULL, xname, "rx_xoff");
   2725 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2726 	    NULL, xname, "rx_xon");
   2727 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2728 	    NULL, xname, "rx_macctl");
   2729 #endif /* WM_EVENT_COUNTERS */
   2730 
   2731 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2732 		pmf_class_network_register(self, ifp);
   2733 	else
   2734 		aprint_error_dev(self, "couldn't establish power handler\n");
   2735 
   2736 	sc->sc_flags |= WM_F_ATTACHED;
   2737  out:
   2738 	return;
   2739 }
   2740 
   2741 /* The detach function (ca_detach) */
   2742 static int
   2743 wm_detach(device_t self, int flags __unused)
   2744 {
   2745 	struct wm_softc *sc = device_private(self);
   2746 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2747 	int i;
   2748 
   2749 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2750 		return 0;
   2751 
   2752 	/* Stop the interface. Callouts are stopped in it. */
   2753 	wm_stop(ifp, 1);
   2754 
   2755 	pmf_device_deregister(self);
   2756 
   2757 #ifdef WM_EVENT_COUNTERS
   2758 	evcnt_detach(&sc->sc_ev_linkintr);
   2759 
   2760 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2761 	evcnt_detach(&sc->sc_ev_tx_xon);
   2762 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2763 	evcnt_detach(&sc->sc_ev_rx_xon);
   2764 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2765 #endif /* WM_EVENT_COUNTERS */
   2766 
   2767 	/* Tell the firmware about the release */
   2768 	WM_CORE_LOCK(sc);
   2769 	wm_release_manageability(sc);
   2770 	wm_release_hw_control(sc);
   2771 	wm_enable_wakeup(sc);
   2772 	WM_CORE_UNLOCK(sc);
   2773 
   2774 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2775 
   2776 	/* Delete all remaining media. */
   2777 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2778 
   2779 	ether_ifdetach(ifp);
   2780 	if_detach(ifp);
   2781 	if_percpuq_destroy(sc->sc_ipq);
   2782 
   2783 	/* Unload RX dmamaps and free mbufs */
   2784 	for (i = 0; i < sc->sc_nqueues; i++) {
   2785 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2786 		mutex_enter(rxq->rxq_lock);
   2787 		wm_rxdrain(rxq);
   2788 		mutex_exit(rxq->rxq_lock);
   2789 	}
    2790 	/* rxq_lock must be released before wm_free_txrx_queues() destroys it */
   2791 
   2792 	/* Disestablish the interrupt handler */
   2793 	for (i = 0; i < sc->sc_nintrs; i++) {
   2794 		if (sc->sc_ihs[i] != NULL) {
   2795 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2796 			sc->sc_ihs[i] = NULL;
   2797 		}
   2798 	}
   2799 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2800 
   2801 	wm_free_txrx_queues(sc);
   2802 
   2803 	/* Unmap the registers */
   2804 	if (sc->sc_ss) {
   2805 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2806 		sc->sc_ss = 0;
   2807 	}
   2808 	if (sc->sc_ios) {
   2809 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2810 		sc->sc_ios = 0;
   2811 	}
   2812 	if (sc->sc_flashs) {
   2813 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2814 		sc->sc_flashs = 0;
   2815 	}
   2816 
   2817 	if (sc->sc_core_lock)
   2818 		mutex_obj_free(sc->sc_core_lock);
   2819 	if (sc->sc_ich_phymtx)
   2820 		mutex_obj_free(sc->sc_ich_phymtx);
   2821 	if (sc->sc_ich_nvmmtx)
   2822 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2823 
   2824 	return 0;
   2825 }
   2826 
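         /*
          * wm_suspend / wm_resume:
          *
          *	Power-management hooks.  On suspend, release manageability
          *	and hardware control and arm the wakeup logic; on resume,
          *	re-initialize manageability.
          */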
   2827 static bool
   2828 wm_suspend(device_t self, const pmf_qual_t *qual)
   2829 {
   2830 	struct wm_softc *sc = device_private(self);
   2831 
   2832 	wm_release_manageability(sc);
   2833 	wm_release_hw_control(sc);
   2834 	wm_enable_wakeup(sc);
   2835 
   2836 	return true;
   2837 }
   2838 
   2839 static bool
   2840 wm_resume(device_t self, const pmf_qual_t *qual)
   2841 {
   2842 	struct wm_softc *sc = device_private(self);
   2843 
   2844 	wm_init_manageability(sc);
   2845 
   2846 	return true;
   2847 }
   2848 
   2849 /*
   2850  * wm_watchdog:		[ifnet interface function]
   2851  *
   2852  *	Watchdog timer handler.
   2853  */
   2854 static void
   2855 wm_watchdog(struct ifnet *ifp)
   2856 {
   2857 	int qid;
   2858 	struct wm_softc *sc = ifp->if_softc;
   2859 
   2860 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2861 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2862 
   2863 		wm_watchdog_txq(ifp, txq);
   2864 	}
   2865 
   2866 	/* Reset the interface. */
   2867 	(void) wm_init(ifp);
   2868 
   2869 	/*
    2870 	 * Some upper-layer processing (e.g. ALTQ, or a single-CPU system)
    2871 	 * still calls ifp->if_start() directly.
   2872 	 */
   2873 	/* Try to get more packets going. */
   2874 	ifp->if_start(ifp);
   2875 }
   2876 
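         /*
          * wm_watchdog_txq:
          *
          *	Per-queue watchdog helper: sweep up completed transmit jobs
          *	and log a device timeout if descriptors are still outstanding.
          */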
   2877 static void
   2878 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2879 {
   2880 	struct wm_softc *sc = ifp->if_softc;
   2881 
   2882 	/*
   2883 	 * Since we're using delayed interrupts, sweep up
   2884 	 * before we report an error.
   2885 	 */
   2886 	mutex_enter(txq->txq_lock);
   2887 	wm_txeof(sc, txq);
   2888 	mutex_exit(txq->txq_lock);
   2889 
   2890 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2891 #ifdef WM_DEBUG
   2892 		int i, j;
   2893 		struct wm_txsoft *txs;
   2894 #endif
   2895 		log(LOG_ERR,
   2896 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2897 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2898 		    txq->txq_next);
   2899 		ifp->if_oerrors++;
   2900 #ifdef WM_DEBUG
    2901 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2902 		    i = WM_NEXTTXS(txq, i)) {
    2903 			txs = &txq->txq_soft[i];
    2904 			printf("txs %d tx %d -> %d\n",
    2905 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2906 			for (j = txs->txs_firstdesc; ;
    2907 			    j = WM_NEXTTX(txq, j)) {
    2908 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2909 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2910 				printf("\t %#08x%08x\n",
    2911 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2912 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2913 				if (j == txs->txs_lastdesc)
    2914 					break;
    2915 			}
    2916 		}
   2917 #endif
   2918 	}
   2919 }
   2920 
   2921 /*
   2922  * wm_tick:
   2923  *
   2924  *	One second timer, used to check link status, sweep up
   2925  *	completed transmit jobs, etc.
   2926  */
   2927 static void
   2928 wm_tick(void *arg)
   2929 {
   2930 	struct wm_softc *sc = arg;
   2931 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2932 #ifndef WM_MPSAFE
   2933 	int s = splnet();
   2934 #endif
   2935 
   2936 	WM_CORE_LOCK(sc);
   2937 
   2938 	if (sc->sc_core_stopping)
   2939 		goto out;
   2940 
   2941 	if (sc->sc_type >= WM_T_82542_2_1) {
   2942 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2943 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2944 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2945 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2946 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2947 	}
   2948 
   2949 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2950 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2951 	    + CSR_READ(sc, WMREG_CRCERRS)
   2952 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2953 	    + CSR_READ(sc, WMREG_SYMERRC)
   2954 	    + CSR_READ(sc, WMREG_RXERRC)
   2955 	    + CSR_READ(sc, WMREG_SEC)
   2956 	    + CSR_READ(sc, WMREG_CEXTERR)
   2957 	    + CSR_READ(sc, WMREG_RLEC);
   2958 	/*
    2959 	 * WMREG_RNBC is incremented when there are no available buffers in
    2960 	 * host memory. It is not the number of dropped packets, because the
    2961 	 * ethernet controller can still receive packets in that case as long
    2962 	 * as there is space in the PHY's FIFO.
    2963 	 *
    2964 	 * To track WMREG_RNBC, use a dedicated EVCNT instead of folding it
    2965 	 * into if_iqdrops.
   2966 	 */
   2967 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   2968 
   2969 	if (sc->sc_flags & WM_F_HAS_MII)
   2970 		mii_tick(&sc->sc_mii);
   2971 	else if ((sc->sc_type >= WM_T_82575)
   2972 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2973 		wm_serdes_tick(sc);
   2974 	else
   2975 		wm_tbi_tick(sc);
   2976 
   2977 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2978 out:
   2979 	WM_CORE_UNLOCK(sc);
   2980 #ifndef WM_MPSAFE
   2981 	splx(s);
   2982 #endif
   2983 }
   2984 
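         /*
          * wm_ifflags_cb:
          *
          *	Callback for interface flag changes.  Returns ENETRESET when
          *	the change cannot be applied on the fly, so that the caller
          *	re-initializes the interface.
          */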
   2985 static int
   2986 wm_ifflags_cb(struct ethercom *ec)
   2987 {
   2988 	struct ifnet *ifp = &ec->ec_if;
   2989 	struct wm_softc *sc = ifp->if_softc;
   2990 	int rc = 0;
   2991 
   2992 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2993 		device_xname(sc->sc_dev), __func__));
   2994 
   2995 	WM_CORE_LOCK(sc);
   2996 
   2997 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2998 	sc->sc_if_flags = ifp->if_flags;
   2999 
   3000 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3001 		rc = ENETRESET;
   3002 		goto out;
   3003 	}
   3004 
   3005 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3006 		wm_set_filter(sc);
   3007 
   3008 	wm_set_vlan(sc);
   3009 
   3010 out:
   3011 	WM_CORE_UNLOCK(sc);
   3012 
   3013 	return rc;
   3014 }
   3015 
   3016 /*
   3017  * wm_ioctl:		[ifnet interface function]
   3018  *
   3019  *	Handle control requests from the operator.
   3020  */
   3021 static int
   3022 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3023 {
   3024 	struct wm_softc *sc = ifp->if_softc;
   3025 	struct ifreq *ifr = (struct ifreq *) data;
   3026 	struct ifaddr *ifa = (struct ifaddr *)data;
   3027 	struct sockaddr_dl *sdl;
   3028 	int s, error;
   3029 
   3030 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3031 		device_xname(sc->sc_dev), __func__));
   3032 
   3033 #ifndef WM_MPSAFE
   3034 	s = splnet();
   3035 #endif
   3036 	switch (cmd) {
   3037 	case SIOCSIFMEDIA:
   3038 	case SIOCGIFMEDIA:
   3039 		WM_CORE_LOCK(sc);
   3040 		/* Flow control requires full-duplex mode. */
   3041 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3042 		    (ifr->ifr_media & IFM_FDX) == 0)
   3043 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3044 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3045 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3046 				/* We can do both TXPAUSE and RXPAUSE. */
   3047 				ifr->ifr_media |=
   3048 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3049 			}
   3050 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3051 		}
   3052 		WM_CORE_UNLOCK(sc);
   3053 #ifdef WM_MPSAFE
   3054 		s = splnet();
   3055 #endif
   3056 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3057 #ifdef WM_MPSAFE
   3058 		splx(s);
   3059 #endif
   3060 		break;
   3061 	case SIOCINITIFADDR:
   3062 		WM_CORE_LOCK(sc);
   3063 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3064 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3065 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3066 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3067 			/* unicast address is first multicast entry */
   3068 			wm_set_filter(sc);
   3069 			error = 0;
   3070 			WM_CORE_UNLOCK(sc);
   3071 			break;
   3072 		}
   3073 		WM_CORE_UNLOCK(sc);
   3074 		/*FALLTHROUGH*/
   3075 	default:
   3076 #ifdef WM_MPSAFE
   3077 		s = splnet();
   3078 #endif
    3079 		/* ether_ioctl() may call wm_start(), so call it unlocked */
   3080 		error = ether_ioctl(ifp, cmd, data);
   3081 #ifdef WM_MPSAFE
   3082 		splx(s);
   3083 #endif
   3084 		if (error != ENETRESET)
   3085 			break;
   3086 
   3087 		error = 0;
   3088 
   3089 		if (cmd == SIOCSIFCAP) {
   3090 			error = (*ifp->if_init)(ifp);
   3091 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   3092 			;
   3093 		else if (ifp->if_flags & IFF_RUNNING) {
   3094 			/*
   3095 			 * Multicast list has changed; set the hardware filter
   3096 			 * accordingly.
   3097 			 */
   3098 			WM_CORE_LOCK(sc);
   3099 			wm_set_filter(sc);
   3100 			WM_CORE_UNLOCK(sc);
   3101 		}
   3102 		break;
   3103 	}
   3104 
   3105 #ifndef WM_MPSAFE
   3106 	splx(s);
   3107 #endif
   3108 	return error;
   3109 }
   3110 
   3111 /* MAC address related */
   3112 
   3113 /*
    3114  * Get the offset of the MAC address and return it.
    3115  * If an error occurs, return offset 0.
   3116  */
   3117 static uint16_t
   3118 wm_check_alt_mac_addr(struct wm_softc *sc)
   3119 {
   3120 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3121 	uint16_t offset = NVM_OFF_MACADDR;
   3122 
   3123 	/* Try to read alternative MAC address pointer */
   3124 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3125 		return 0;
   3126 
    3127 	/* Check whether the pointer is valid. */
   3128 	if ((offset == 0x0000) || (offset == 0xffff))
   3129 		return 0;
   3130 
   3131 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3132 	/*
    3133 	 * Check whether the alternative MAC address is valid.  Some cards
    3134 	 * have a non-0xffff pointer but don't actually use an alternative
    3135 	 * MAC address.
    3136 	 *
    3137 	 * A valid unicast address must have the multicast bit clear.
   3138 	 */
   3139 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3140 		if (((myea[0] & 0xff) & 0x01) == 0)
   3141 			return offset; /* Found */
   3142 
   3143 	/* Not found */
   3144 	return 0;
   3145 }
   3146 
   3147 static int
   3148 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3149 {
   3150 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3151 	uint16_t offset = NVM_OFF_MACADDR;
   3152 	int do_invert = 0;
   3153 
   3154 	switch (sc->sc_type) {
   3155 	case WM_T_82580:
   3156 	case WM_T_I350:
   3157 	case WM_T_I354:
   3158 		/* EEPROM Top Level Partitioning */
   3159 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3160 		break;
   3161 	case WM_T_82571:
   3162 	case WM_T_82575:
   3163 	case WM_T_82576:
   3164 	case WM_T_80003:
   3165 	case WM_T_I210:
   3166 	case WM_T_I211:
   3167 		offset = wm_check_alt_mac_addr(sc);
   3168 		if (offset == 0)
   3169 			if ((sc->sc_funcid & 0x01) == 1)
   3170 				do_invert = 1;
   3171 		break;
   3172 	default:
   3173 		if ((sc->sc_funcid & 0x01) == 1)
   3174 			do_invert = 1;
   3175 		break;
   3176 	}
   3177 
   3178 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3179 		goto bad;
   3180 
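         	/* The NVM stores the address as three little-endian 16-bit words. */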
   3181 	enaddr[0] = myea[0] & 0xff;
   3182 	enaddr[1] = myea[0] >> 8;
   3183 	enaddr[2] = myea[1] & 0xff;
   3184 	enaddr[3] = myea[1] >> 8;
   3185 	enaddr[4] = myea[2] & 0xff;
   3186 	enaddr[5] = myea[2] >> 8;
   3187 
   3188 	/*
   3189 	 * Toggle the LSB of the MAC address on the second port
   3190 	 * of some dual port cards.
   3191 	 */
   3192 	if (do_invert != 0)
   3193 		enaddr[5] ^= 1;
   3194 
   3195 	return 0;
   3196 
   3197  bad:
   3198 	return -1;
   3199 }
   3200 
   3201 /*
   3202  * wm_set_ral:
   3203  *
    3204  *	Set an entry in the receive address list.
   3205  */
   3206 static void
   3207 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3208 {
   3209 	uint32_t ral_lo, ral_hi;
   3210 
   3211 	if (enaddr != NULL) {
   3212 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3213 		    (enaddr[3] << 24);
   3214 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3215 		ral_hi |= RAL_AV;
   3216 	} else {
   3217 		ral_lo = 0;
   3218 		ral_hi = 0;
   3219 	}
   3220 
   3221 	if (sc->sc_type >= WM_T_82544) {
   3222 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3223 		    ral_lo);
   3224 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3225 		    ral_hi);
   3226 	} else {
   3227 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3228 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3229 	}
   3230 }
   3231 
   3232 /*
   3233  * wm_mchash:
   3234  *
   3235  *	Compute the hash of the multicast address for the 4096-bit
   3236  *	multicast filter.
   3237  */
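         /*
          * The hash is built from the top bytes of the address: enaddr[4] and
          * enaddr[5] are combined according to sc_mchash_type.  For example,
          * with sc_mchash_type == 0 on a non-ICH/PCH chip this yields
          * hash = ((enaddr[4] >> 4) | (enaddr[5] << 4)) & 0xfff.
          */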
   3238 static uint32_t
   3239 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3240 {
   3241 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3242 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3243 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3244 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3245 	uint32_t hash;
   3246 
   3247 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3248 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3249 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3250 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3251 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3252 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3253 		return (hash & 0x3ff);
   3254 	}
   3255 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3256 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3257 
   3258 	return (hash & 0xfff);
   3259 }
   3260 
   3261 /*
   3262  * wm_set_filter:
   3263  *
   3264  *	Set up the receive filter.
   3265  */
   3266 static void
   3267 wm_set_filter(struct wm_softc *sc)
   3268 {
   3269 	struct ethercom *ec = &sc->sc_ethercom;
   3270 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3271 	struct ether_multi *enm;
   3272 	struct ether_multistep step;
   3273 	bus_addr_t mta_reg;
   3274 	uint32_t hash, reg, bit;
   3275 	int i, size, ralmax;
   3276 
   3277 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3278 		device_xname(sc->sc_dev), __func__));
   3279 
   3280 	if (sc->sc_type >= WM_T_82544)
   3281 		mta_reg = WMREG_CORDOVA_MTA;
   3282 	else
   3283 		mta_reg = WMREG_MTA;
   3284 
   3285 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3286 
   3287 	if (ifp->if_flags & IFF_BROADCAST)
   3288 		sc->sc_rctl |= RCTL_BAM;
   3289 	if (ifp->if_flags & IFF_PROMISC) {
   3290 		sc->sc_rctl |= RCTL_UPE;
   3291 		goto allmulti;
   3292 	}
   3293 
   3294 	/*
   3295 	 * Set the station address in the first RAL slot, and
   3296 	 * clear the remaining slots.
   3297 	 */
   3298 	if (sc->sc_type == WM_T_ICH8)
    3299 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3300 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3301 	    || (sc->sc_type == WM_T_PCH))
   3302 		size = WM_RAL_TABSIZE_ICH8;
   3303 	else if (sc->sc_type == WM_T_PCH2)
   3304 		size = WM_RAL_TABSIZE_PCH2;
    3305 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3306 		size = WM_RAL_TABSIZE_PCH_LPT;
   3307 	else if (sc->sc_type == WM_T_82575)
   3308 		size = WM_RAL_TABSIZE_82575;
   3309 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3310 		size = WM_RAL_TABSIZE_82576;
   3311 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3312 		size = WM_RAL_TABSIZE_I350;
   3313 	else
   3314 		size = WM_RAL_TABSIZE;
   3315 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3316 
   3317 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3318 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3319 		switch (i) {
   3320 		case 0:
   3321 			/* We can use all entries */
   3322 			ralmax = size;
   3323 			break;
   3324 		case 1:
   3325 			/* Only RAR[0] */
   3326 			ralmax = 1;
   3327 			break;
   3328 		default:
   3329 			/* available SHRA + RAR[0] */
   3330 			ralmax = i + 1;
   3331 		}
   3332 	} else
   3333 		ralmax = size;
   3334 	for (i = 1; i < size; i++) {
   3335 		if (i < ralmax)
   3336 			wm_set_ral(sc, NULL, i);
   3337 	}
   3338 
   3339 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3340 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3341 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3342 	    || (sc->sc_type == WM_T_PCH_SPT))
   3343 		size = WM_ICH8_MC_TABSIZE;
   3344 	else
   3345 		size = WM_MC_TABSIZE;
   3346 	/* Clear out the multicast table. */
   3347 	for (i = 0; i < size; i++)
   3348 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3349 
   3350 	ETHER_LOCK(ec);
   3351 	ETHER_FIRST_MULTI(step, ec, enm);
   3352 	while (enm != NULL) {
   3353 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3354 			ETHER_UNLOCK(ec);
   3355 			/*
   3356 			 * We must listen to a range of multicast addresses.
   3357 			 * For now, just accept all multicasts, rather than
   3358 			 * trying to set only those filter bits needed to match
   3359 			 * the range.  (At this time, the only use of address
   3360 			 * ranges is for IP multicast routing, for which the
   3361 			 * range is big enough to require all bits set.)
   3362 			 */
   3363 			goto allmulti;
   3364 		}
   3365 
   3366 		hash = wm_mchash(sc, enm->enm_addrlo);
   3367 
   3368 		reg = (hash >> 5);
   3369 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3370 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3371 		    || (sc->sc_type == WM_T_PCH2)
   3372 		    || (sc->sc_type == WM_T_PCH_LPT)
   3373 		    || (sc->sc_type == WM_T_PCH_SPT))
   3374 			reg &= 0x1f;
   3375 		else
   3376 			reg &= 0x7f;
   3377 		bit = hash & 0x1f;
   3378 
   3379 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3380 		hash |= 1U << bit;
   3381 
   3382 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3383 			/*
   3384 			 * 82544 Errata 9: Certain register cannot be written
   3385 			 * with particular alignments in PCI-X bus operation
   3386 			 * (FCAH, MTA and VFTA).
   3387 			 */
   3388 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3389 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3390 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3391 		} else
   3392 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3393 
   3394 		ETHER_NEXT_MULTI(step, enm);
   3395 	}
   3396 	ETHER_UNLOCK(ec);
   3397 
   3398 	ifp->if_flags &= ~IFF_ALLMULTI;
   3399 	goto setit;
   3400 
   3401  allmulti:
   3402 	ifp->if_flags |= IFF_ALLMULTI;
   3403 	sc->sc_rctl |= RCTL_MPE;
   3404 
   3405  setit:
   3406 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3407 }
   3408 
   3409 /* Reset and init related */
   3410 
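         /* Sync the CTRL_VME (VLAN enable) bit with the stack's VLAN attach state. */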
   3411 static void
   3412 wm_set_vlan(struct wm_softc *sc)
   3413 {
   3414 
   3415 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3416 		device_xname(sc->sc_dev), __func__));
   3417 
   3418 	/* Deal with VLAN enables. */
   3419 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3420 		sc->sc_ctrl |= CTRL_VME;
   3421 	else
   3422 		sc->sc_ctrl &= ~CTRL_VME;
   3423 
   3424 	/* Write the control registers. */
   3425 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3426 }
   3427 
   3428 static void
   3429 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3430 {
   3431 	uint32_t gcr;
   3432 	pcireg_t ctrl2;
   3433 
   3434 	gcr = CSR_READ(sc, WMREG_GCR);
   3435 
    3436 	/* Only take action if the timeout value is at its default of 0 */
   3437 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3438 		goto out;
   3439 
   3440 	if ((gcr & GCR_CAP_VER2) == 0) {
   3441 		gcr |= GCR_CMPL_TMOUT_10MS;
   3442 		goto out;
   3443 	}
   3444 
   3445 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3446 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3447 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3448 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3449 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3450 
   3451 out:
   3452 	/* Disable completion timeout resend */
   3453 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3454 
   3455 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3456 }
   3457 
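         /*
          * wm_get_auto_rd_done:
          *
          *	Wait up to 10ms for the EEPROM auto-read (EECD_EE_AUTORD) to
          *	complete on chips that report it.
          */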
   3458 void
   3459 wm_get_auto_rd_done(struct wm_softc *sc)
   3460 {
   3461 	int i;
   3462 
   3463 	/* wait for eeprom to reload */
   3464 	switch (sc->sc_type) {
   3465 	case WM_T_82571:
   3466 	case WM_T_82572:
   3467 	case WM_T_82573:
   3468 	case WM_T_82574:
   3469 	case WM_T_82583:
   3470 	case WM_T_82575:
   3471 	case WM_T_82576:
   3472 	case WM_T_82580:
   3473 	case WM_T_I350:
   3474 	case WM_T_I354:
   3475 	case WM_T_I210:
   3476 	case WM_T_I211:
   3477 	case WM_T_80003:
   3478 	case WM_T_ICH8:
   3479 	case WM_T_ICH9:
   3480 		for (i = 0; i < 10; i++) {
   3481 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3482 				break;
   3483 			delay(1000);
   3484 		}
   3485 		if (i == 10) {
   3486 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3487 			    "complete\n", device_xname(sc->sc_dev));
   3488 		}
   3489 		break;
   3490 	default:
   3491 		break;
   3492 	}
   3493 }
   3494 
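         /*
          * wm_lan_init_done:
          *
          *	Poll STATUS_LAN_INIT_DONE on ICH10 and the PCH variants,
          *	then clear the bit.
          */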
   3495 void
   3496 wm_lan_init_done(struct wm_softc *sc)
   3497 {
   3498 	uint32_t reg = 0;
   3499 	int i;
   3500 
   3501 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3502 		device_xname(sc->sc_dev), __func__));
   3503 
   3504 	/* Wait for eeprom to reload */
   3505 	switch (sc->sc_type) {
   3506 	case WM_T_ICH10:
   3507 	case WM_T_PCH:
   3508 	case WM_T_PCH2:
   3509 	case WM_T_PCH_LPT:
   3510 	case WM_T_PCH_SPT:
   3511 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3512 			reg = CSR_READ(sc, WMREG_STATUS);
   3513 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3514 				break;
   3515 			delay(100);
   3516 		}
   3517 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3518 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3519 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3520 		}
   3521 		break;
   3522 	default:
   3523 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3524 		    __func__);
   3525 		break;
   3526 	}
   3527 
   3528 	reg &= ~STATUS_LAN_INIT_DONE;
   3529 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3530 }
   3531 
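         /*
          * wm_get_cfg_done:
          *
          *	Wait until the hardware has finished loading its configuration
          *	after reset; the wait mechanism differs per chip family.
          */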
   3532 void
   3533 wm_get_cfg_done(struct wm_softc *sc)
   3534 {
   3535 	int mask;
   3536 	uint32_t reg;
   3537 	int i;
   3538 
   3539 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3540 		device_xname(sc->sc_dev), __func__));
   3541 
   3542 	/* Wait for eeprom to reload */
   3543 	switch (sc->sc_type) {
   3544 	case WM_T_82542_2_0:
   3545 	case WM_T_82542_2_1:
   3546 		/* null */
   3547 		break;
   3548 	case WM_T_82543:
   3549 	case WM_T_82544:
   3550 	case WM_T_82540:
   3551 	case WM_T_82545:
   3552 	case WM_T_82545_3:
   3553 	case WM_T_82546:
   3554 	case WM_T_82546_3:
   3555 	case WM_T_82541:
   3556 	case WM_T_82541_2:
   3557 	case WM_T_82547:
   3558 	case WM_T_82547_2:
   3559 	case WM_T_82573:
   3560 	case WM_T_82574:
   3561 	case WM_T_82583:
   3562 		/* generic */
   3563 		delay(10*1000);
   3564 		break;
   3565 	case WM_T_80003:
   3566 	case WM_T_82571:
   3567 	case WM_T_82572:
   3568 	case WM_T_82575:
   3569 	case WM_T_82576:
   3570 	case WM_T_82580:
   3571 	case WM_T_I350:
   3572 	case WM_T_I354:
   3573 	case WM_T_I210:
   3574 	case WM_T_I211:
   3575 		if (sc->sc_type == WM_T_82571) {
   3576 			/* Only 82571 shares port 0 */
   3577 			mask = EEMNGCTL_CFGDONE_0;
   3578 		} else
   3579 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3580 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3581 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3582 				break;
   3583 			delay(1000);
   3584 		}
   3585 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3586 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3587 				device_xname(sc->sc_dev), __func__));
   3588 		}
   3589 		break;
   3590 	case WM_T_ICH8:
   3591 	case WM_T_ICH9:
   3592 	case WM_T_ICH10:
   3593 	case WM_T_PCH:
   3594 	case WM_T_PCH2:
   3595 	case WM_T_PCH_LPT:
   3596 	case WM_T_PCH_SPT:
   3597 		delay(10*1000);
   3598 		if (sc->sc_type >= WM_T_ICH10)
   3599 			wm_lan_init_done(sc);
   3600 		else
   3601 			wm_get_auto_rd_done(sc);
   3602 
   3603 		reg = CSR_READ(sc, WMREG_STATUS);
   3604 		if ((reg & STATUS_PHYRA) != 0)
   3605 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3606 		break;
   3607 	default:
   3608 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3609 		    __func__);
   3610 		break;
   3611 	}
   3612 }
   3613 
   3614 /* Init hardware bits */
   3615 void
   3616 wm_initialize_hardware_bits(struct wm_softc *sc)
   3617 {
   3618 	uint32_t tarc0, tarc1, reg;
   3619 
   3620 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3621 		device_xname(sc->sc_dev), __func__));
   3622 
   3623 	/* For 82571 variant, 80003 and ICHs */
   3624 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3625 	    || (sc->sc_type >= WM_T_80003)) {
   3626 
   3627 		/* Transmit Descriptor Control 0 */
   3628 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3629 		reg |= TXDCTL_COUNT_DESC;
   3630 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3631 
   3632 		/* Transmit Descriptor Control 1 */
   3633 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3634 		reg |= TXDCTL_COUNT_DESC;
   3635 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3636 
   3637 		/* TARC0 */
   3638 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3639 		switch (sc->sc_type) {
   3640 		case WM_T_82571:
   3641 		case WM_T_82572:
   3642 		case WM_T_82573:
   3643 		case WM_T_82574:
   3644 		case WM_T_82583:
   3645 		case WM_T_80003:
   3646 			/* Clear bits 30..27 */
   3647 			tarc0 &= ~__BITS(30, 27);
   3648 			break;
   3649 		default:
   3650 			break;
   3651 		}
   3652 
   3653 		switch (sc->sc_type) {
   3654 		case WM_T_82571:
   3655 		case WM_T_82572:
   3656 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3657 
   3658 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3659 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3660 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3661 			/* 8257[12] Errata No.7 */
    3662 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3663 
   3664 			/* TARC1 bit 28 */
   3665 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3666 				tarc1 &= ~__BIT(28);
   3667 			else
   3668 				tarc1 |= __BIT(28);
   3669 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3670 
   3671 			/*
   3672 			 * 8257[12] Errata No.13
    3673 			 * Disable Dynamic Clock Gating.
   3674 			 */
   3675 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3676 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3677 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3678 			break;
   3679 		case WM_T_82573:
   3680 		case WM_T_82574:
   3681 		case WM_T_82583:
   3682 			if ((sc->sc_type == WM_T_82574)
   3683 			    || (sc->sc_type == WM_T_82583))
   3684 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3685 
   3686 			/* Extended Device Control */
   3687 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3688 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3689 			reg |= __BIT(22);	/* Set bit 22 */
   3690 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3691 
   3692 			/* Device Control */
   3693 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3694 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3695 
   3696 			/* PCIe Control Register */
   3697 			/*
   3698 			 * 82573 Errata (unknown).
   3699 			 *
   3700 			 * 82574 Errata 25 and 82583 Errata 12
   3701 			 * "Dropped Rx Packets":
    3702 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3703 			 */
   3704 			reg = CSR_READ(sc, WMREG_GCR);
   3705 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3706 			CSR_WRITE(sc, WMREG_GCR, reg);
   3707 
   3708 			if ((sc->sc_type == WM_T_82574)
   3709 			    || (sc->sc_type == WM_T_82583)) {
   3710 				/*
   3711 				 * Document says this bit must be set for
   3712 				 * proper operation.
   3713 				 */
   3714 				reg = CSR_READ(sc, WMREG_GCR);
   3715 				reg |= __BIT(22);
   3716 				CSR_WRITE(sc, WMREG_GCR, reg);
   3717 
   3718 				/*
    3719 				 * Apply a workaround for the hardware errata
    3720 				 * documented in the errata docs.  It fixes an
    3721 				 * issue where error-prone or unreliable PCIe
    3722 				 * completions occur, particularly with ASPM
    3723 				 * enabled.  Without the fix, the issue can
    3724 				 * cause Tx timeouts.
   3725 				 */
   3726 				reg = CSR_READ(sc, WMREG_GCR2);
   3727 				reg |= __BIT(0);
   3728 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3729 			}
   3730 			break;
   3731 		case WM_T_80003:
   3732 			/* TARC0 */
   3733 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3734 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3735 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3736 
   3737 			/* TARC1 bit 28 */
   3738 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3739 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3740 				tarc1 &= ~__BIT(28);
   3741 			else
   3742 				tarc1 |= __BIT(28);
   3743 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3744 			break;
   3745 		case WM_T_ICH8:
   3746 		case WM_T_ICH9:
   3747 		case WM_T_ICH10:
   3748 		case WM_T_PCH:
   3749 		case WM_T_PCH2:
   3750 		case WM_T_PCH_LPT:
   3751 		case WM_T_PCH_SPT:
   3752 			/* TARC0 */
   3753 			if ((sc->sc_type == WM_T_ICH8)
   3754 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3755 				/* Set TARC0 bits 29 and 28 */
   3756 				tarc0 |= __BITS(29, 28);
   3757 			}
   3758 			/* Set TARC0 bits 23,24,26,27 */
   3759 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3760 
   3761 			/* CTRL_EXT */
   3762 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3763 			reg |= __BIT(22);	/* Set bit 22 */
   3764 			/*
   3765 			 * Enable PHY low-power state when MAC is at D3
   3766 			 * w/o WoL
   3767 			 */
   3768 			if (sc->sc_type >= WM_T_PCH)
   3769 				reg |= CTRL_EXT_PHYPDEN;
   3770 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3771 
   3772 			/* TARC1 */
   3773 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3774 			/* bit 28 */
   3775 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3776 				tarc1 &= ~__BIT(28);
   3777 			else
   3778 				tarc1 |= __BIT(28);
   3779 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3780 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3781 
   3782 			/* Device Status */
   3783 			if (sc->sc_type == WM_T_ICH8) {
   3784 				reg = CSR_READ(sc, WMREG_STATUS);
   3785 				reg &= ~__BIT(31);
   3786 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3787 
   3788 			}
   3789 
   3790 			/* IOSFPC */
   3791 			if (sc->sc_type == WM_T_PCH_SPT) {
   3792 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3793 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3794 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3795 			}
   3796 			/*
   3797 			 * Work-around descriptor data corruption issue during
   3798 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3799 			 * capability.
   3800 			 */
   3801 			reg = CSR_READ(sc, WMREG_RFCTL);
   3802 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3803 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3804 			break;
   3805 		default:
   3806 			break;
   3807 		}
   3808 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3809 
   3810 		switch (sc->sc_type) {
   3811 		/*
   3812 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   3813 		 * Avoid RSS Hash Value bug.
   3814 		 */
   3815 		case WM_T_82571:
   3816 		case WM_T_82572:
   3817 		case WM_T_82573:
   3818 		case WM_T_80003:
   3819 		case WM_T_ICH8:
   3820 			reg = CSR_READ(sc, WMREG_RFCTL);
    3821 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3822 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3823 			break;
   3824 		case WM_T_82574:
    3825 			/* Use extended Rx descriptors. */
   3826 			reg = CSR_READ(sc, WMREG_RFCTL);
   3827 			reg |= WMREG_RFCTL_EXSTEN;
   3828 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3829 			break;
   3830 		default:
   3831 			break;
   3832 		}
   3833 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   3834 		/*
   3835 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   3836 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   3837 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   3838 		 * Correctly by the Device"
   3839 		 *
   3840 		 * I354(C2000) Errata AVR53:
   3841 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   3842 		 * Hang"
   3843 		 */
   3844 		reg = CSR_READ(sc, WMREG_RFCTL);
   3845 		reg |= WMREG_RFCTL_IPV6EXDIS;
   3846 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   3847 	}
   3848 }
   3849 
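         /*
          * wm_rxpbs_adjust_82580:
          *
          *	Translate an RXPBS register value through the 82580 lookup
          *	table; out-of-range values map to 0.
          */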
   3850 static uint32_t
   3851 wm_rxpbs_adjust_82580(uint32_t val)
   3852 {
   3853 	uint32_t rv = 0;
   3854 
   3855 	if (val < __arraycount(wm_82580_rxpbs_table))
   3856 		rv = wm_82580_rxpbs_table[val];
   3857 
   3858 	return rv;
   3859 }
   3860 
   3861 /*
   3862  * wm_reset_phy:
   3863  *
   3864  *	generic PHY reset function.
   3865  *	Same as e1000_phy_hw_reset_generic()
   3866  */
   3867 static void
   3868 wm_reset_phy(struct wm_softc *sc)
   3869 {
   3870 	uint32_t reg;
   3871 
   3872 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3873 		device_xname(sc->sc_dev), __func__));
   3874 	if (wm_phy_resetisblocked(sc))
   3875 		return;
   3876 
   3877 	sc->phy.acquire(sc);
   3878 
   3879 	reg = CSR_READ(sc, WMREG_CTRL);
   3880 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   3881 	CSR_WRITE_FLUSH(sc);
   3882 
   3883 	delay(sc->phy.reset_delay_us);
   3884 
   3885 	CSR_WRITE(sc, WMREG_CTRL, reg);
   3886 	CSR_WRITE_FLUSH(sc);
   3887 
   3888 	delay(150);
   3889 
   3890 	sc->phy.release(sc);
   3891 
   3892 	wm_get_cfg_done(sc);
   3893 }
   3894 
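         /*
          * wm_flush_desc_rings:
          *
          *	Pre-reset descriptor ring flush: disable the MULR fix, then,
          *	if the chipset reports a pending flush request, push one dummy
          *	TX descriptor through ring 0 and briefly toggle the RX ring so
          *	any in-flight DMA completes before the reset.
          */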
   3895 static void
   3896 wm_flush_desc_rings(struct wm_softc *sc)
   3897 {
   3898 	pcireg_t preg;
   3899 	uint32_t reg;
   3900 	int nexttx;
   3901 
   3902 	/* First, disable MULR fix in FEXTNVM11 */
   3903 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   3904 	reg |= FEXTNVM11_DIS_MULRFIX;
   3905 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   3906 
   3907 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3908 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   3909 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
   3910 		struct wm_txqueue *txq;
   3911 		wiseman_txdesc_t *txd;
   3912 
   3913 		/* TX */
   3914 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   3915 		    device_xname(sc->sc_dev), preg, reg);
   3916 		reg = CSR_READ(sc, WMREG_TCTL);
   3917 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   3918 
   3919 		txq = &sc->sc_queue[0].wmq_txq;
   3920 		nexttx = txq->txq_next;
   3921 		txd = &txq->txq_descs[nexttx];
   3922 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    3923 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   3924 		txd->wtx_fields.wtxu_status = 0;
   3925 		txd->wtx_fields.wtxu_options = 0;
   3926 		txd->wtx_fields.wtxu_vlan = 0;
   3927 
   3928 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3929 			BUS_SPACE_BARRIER_WRITE);
   3930 
   3931 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   3932 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   3933 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   3934 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   3935 		delay(250);
   3936 	}
   3937 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   3938 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
   3939 		uint32_t rctl;
   3940 
   3941 		/* RX */
   3942 		printf("%s: Need RX flush (reg = %08x)\n",
   3943 		    device_xname(sc->sc_dev), preg);
   3944 		rctl = CSR_READ(sc, WMREG_RCTL);
   3945 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3946 		CSR_WRITE_FLUSH(sc);
   3947 		delay(150);
   3948 
   3949 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
   3950 		/* zero the lower 14 bits (prefetch and host thresholds) */
   3951 		reg &= 0xffffc000;
   3952 		/*
   3953 		 * update thresholds: prefetch threshold to 31, host threshold
   3954 		 * to 1 and make sure the granularity is "descriptors" and not
   3955 		 * "cache lines"
   3956 		 */
   3957 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   3958 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   3959 
   3960 		/*
   3961 		 * momentarily enable the RX ring for the changes to take
   3962 		 * effect
   3963 		 */
   3964 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   3965 		CSR_WRITE_FLUSH(sc);
   3966 		delay(150);
   3967 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   3968 	}
   3969 }
   3970 
   3971 /*
   3972  * wm_reset:
   3973  *
   3974  *	Reset the i82542 chip.
   3975  */
   3976 static void
   3977 wm_reset(struct wm_softc *sc)
   3978 {
   3979 	int phy_reset = 0;
   3980 	int i, error = 0;
   3981 	uint32_t reg;
   3982 
   3983 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3984 		device_xname(sc->sc_dev), __func__));
   3985 	KASSERT(sc->sc_type != 0);
   3986 
   3987 	/*
   3988 	 * Allocate on-chip memory according to the MTU size.
   3989 	 * The Packet Buffer Allocation register must be written
   3990 	 * before the chip is reset.
   3991 	 */
   3992 	switch (sc->sc_type) {
   3993 	case WM_T_82547:
   3994 	case WM_T_82547_2:
   3995 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3996 		    PBA_22K : PBA_30K;
   3997 		for (i = 0; i < sc->sc_nqueues; i++) {
   3998 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3999 			txq->txq_fifo_head = 0;
   4000 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4001 			txq->txq_fifo_size =
   4002 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4003 			txq->txq_fifo_stall = 0;
   4004 		}
   4005 		break;
   4006 	case WM_T_82571:
   4007 	case WM_T_82572:
   4008 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
   4009 	case WM_T_80003:
   4010 		sc->sc_pba = PBA_32K;
   4011 		break;
   4012 	case WM_T_82573:
   4013 		sc->sc_pba = PBA_12K;
   4014 		break;
   4015 	case WM_T_82574:
   4016 	case WM_T_82583:
   4017 		sc->sc_pba = PBA_20K;
   4018 		break;
   4019 	case WM_T_82576:
   4020 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4021 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4022 		break;
   4023 	case WM_T_82580:
   4024 	case WM_T_I350:
   4025 	case WM_T_I354:
   4026 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4027 		break;
   4028 	case WM_T_I210:
   4029 	case WM_T_I211:
   4030 		sc->sc_pba = PBA_34K;
   4031 		break;
   4032 	case WM_T_ICH8:
   4033 		/* Workaround for a bit corruption issue in FIFO memory */
   4034 		sc->sc_pba = PBA_8K;
   4035 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4036 		break;
   4037 	case WM_T_ICH9:
   4038 	case WM_T_ICH10:
   4039 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4040 		    PBA_14K : PBA_10K;
   4041 		break;
   4042 	case WM_T_PCH:
   4043 	case WM_T_PCH2:
   4044 	case WM_T_PCH_LPT:
   4045 	case WM_T_PCH_SPT:
   4046 		sc->sc_pba = PBA_26K;
   4047 		break;
   4048 	default:
   4049 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4050 		    PBA_40K : PBA_48K;
   4051 		break;
   4052 	}
   4053 	/*
    4054 	 * Only old or non-multiqueue devices have the PBA register.
   4055 	 * XXX Need special handling for 82575.
   4056 	 */
   4057 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4058 	    || (sc->sc_type == WM_T_82575))
   4059 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4060 
   4061 	/* Prevent the PCI-E bus from sticking */
   4062 	if (sc->sc_flags & WM_F_PCIE) {
   4063 		int timeout = 800;
   4064 
   4065 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4066 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4067 
   4068 		while (timeout--) {
   4069 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4070 			    == 0)
   4071 				break;
   4072 			delay(100);
   4073 		}
   4074 		if (timeout == 0)
   4075 			device_printf(sc->sc_dev,
   4076 			    "failed to disable busmastering\n");
   4077 	}
   4078 
   4079 	/* Set the completion timeout for interface */
   4080 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4081 	    || (sc->sc_type == WM_T_82580)
   4082 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4083 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4084 		wm_set_pcie_completion_timeout(sc);
   4085 
   4086 	/* Clear interrupt */
   4087 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4088 	if (wm_is_using_msix(sc)) {
   4089 		if (sc->sc_type != WM_T_82574) {
   4090 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4091 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4092 		} else {
   4093 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4094 		}
   4095 	}
   4096 
   4097 	/* Stop the transmit and receive processes. */
   4098 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4099 	sc->sc_rctl &= ~RCTL_EN;
   4100 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4101 	CSR_WRITE_FLUSH(sc);
   4102 
   4103 	/* XXX set_tbi_sbp_82543() */
   4104 
   4105 	delay(10*1000);
   4106 
   4107 	/* Must acquire the MDIO ownership before MAC reset */
   4108 	switch (sc->sc_type) {
   4109 	case WM_T_82573:
   4110 	case WM_T_82574:
   4111 	case WM_T_82583:
   4112 		error = wm_get_hw_semaphore_82573(sc);
   4113 		break;
   4114 	default:
   4115 		break;
   4116 	}
   4117 
   4118 	/*
   4119 	 * 82541 Errata 29? & 82547 Errata 28?
   4120 	 * See also the description about PHY_RST bit in CTRL register
   4121 	 * in 8254x_GBe_SDM.pdf.
   4122 	 */
   4123 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4124 		CSR_WRITE(sc, WMREG_CTRL,
   4125 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4126 		CSR_WRITE_FLUSH(sc);
   4127 		delay(5000);
   4128 	}
   4129 
   4130 	switch (sc->sc_type) {
   4131 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4132 	case WM_T_82541:
   4133 	case WM_T_82541_2:
   4134 	case WM_T_82547:
   4135 	case WM_T_82547_2:
   4136 		/*
   4137 		 * On some chipsets, a reset through a memory-mapped write
   4138 		 * cycle can cause the chip to reset before completing the
   4139 		 * write cycle.  This causes major headache that can be
   4140 		 * avoided by issuing the reset via indirect register writes
   4141 		 * through I/O space.
   4142 		 *
   4143 		 * So, if we successfully mapped the I/O BAR at attach time,
   4144 		 * use that.  Otherwise, try our luck with a memory-mapped
   4145 		 * reset.
   4146 		 */
   4147 		if (sc->sc_flags & WM_F_IOH_VALID)
   4148 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4149 		else
   4150 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4151 		break;
   4152 	case WM_T_82545_3:
   4153 	case WM_T_82546_3:
   4154 		/* Use the shadow control register on these chips. */
   4155 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4156 		break;
   4157 	case WM_T_80003:
   4158 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4159 		sc->phy.acquire(sc);
   4160 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4161 		sc->phy.release(sc);
   4162 		break;
   4163 	case WM_T_ICH8:
   4164 	case WM_T_ICH9:
   4165 	case WM_T_ICH10:
   4166 	case WM_T_PCH:
   4167 	case WM_T_PCH2:
   4168 	case WM_T_PCH_LPT:
   4169 	case WM_T_PCH_SPT:
   4170 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4171 		if (wm_phy_resetisblocked(sc) == false) {
   4172 			/*
   4173 			 * Gate automatic PHY configuration by hardware on
   4174 			 * non-managed 82579
   4175 			 */
   4176 			if ((sc->sc_type == WM_T_PCH2)
   4177 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4178 				== 0))
   4179 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4180 
   4181 			reg |= CTRL_PHY_RESET;
   4182 			phy_reset = 1;
   4183 		} else
   4184 			printf("XXX reset is blocked!!!\n");
   4185 		sc->phy.acquire(sc);
   4186 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4187 		/* Don't insert a completion barrier during reset */
   4188 		delay(20*1000);
   4189 		mutex_exit(sc->sc_ich_phymtx);
   4190 		break;
   4191 	case WM_T_82580:
   4192 	case WM_T_I350:
   4193 	case WM_T_I354:
   4194 	case WM_T_I210:
   4195 	case WM_T_I211:
   4196 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4197 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4198 			CSR_WRITE_FLUSH(sc);
   4199 		delay(5000);
   4200 		break;
   4201 	case WM_T_82542_2_0:
   4202 	case WM_T_82542_2_1:
   4203 	case WM_T_82543:
   4204 	case WM_T_82540:
   4205 	case WM_T_82545:
   4206 	case WM_T_82546:
   4207 	case WM_T_82571:
   4208 	case WM_T_82572:
   4209 	case WM_T_82573:
   4210 	case WM_T_82574:
   4211 	case WM_T_82575:
   4212 	case WM_T_82576:
   4213 	case WM_T_82583:
   4214 	default:
   4215 		/* Everything else can safely use the documented method. */
   4216 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4217 		break;
   4218 	}
   4219 
   4220 	/* Must release the MDIO ownership after MAC reset */
   4221 	switch (sc->sc_type) {
   4222 	case WM_T_82573:
   4223 	case WM_T_82574:
   4224 	case WM_T_82583:
   4225 		if (error == 0)
   4226 			wm_put_hw_semaphore_82573(sc);
   4227 		break;
   4228 	default:
   4229 		break;
   4230 	}
   4231 
   4232 	if (phy_reset != 0)
   4233 		wm_get_cfg_done(sc);
   4234 
   4235 	/* reload EEPROM */
   4236 	switch (sc->sc_type) {
   4237 	case WM_T_82542_2_0:
   4238 	case WM_T_82542_2_1:
   4239 	case WM_T_82543:
   4240 	case WM_T_82544:
   4241 		delay(10);
   4242 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4243 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4244 		CSR_WRITE_FLUSH(sc);
   4245 		delay(2000);
   4246 		break;
   4247 	case WM_T_82540:
   4248 	case WM_T_82545:
   4249 	case WM_T_82545_3:
   4250 	case WM_T_82546:
   4251 	case WM_T_82546_3:
   4252 		delay(5*1000);
   4253 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4254 		break;
   4255 	case WM_T_82541:
   4256 	case WM_T_82541_2:
   4257 	case WM_T_82547:
   4258 	case WM_T_82547_2:
   4259 		delay(20000);
   4260 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4261 		break;
   4262 	case WM_T_82571:
   4263 	case WM_T_82572:
   4264 	case WM_T_82573:
   4265 	case WM_T_82574:
   4266 	case WM_T_82583:
   4267 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4268 			delay(10);
   4269 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4270 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4271 			CSR_WRITE_FLUSH(sc);
   4272 		}
   4273 		/* check EECD_EE_AUTORD */
   4274 		wm_get_auto_rd_done(sc);
   4275 		/*
   4276 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
   4277 		 * is set.
   4278 		 */
   4279 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4280 		    || (sc->sc_type == WM_T_82583))
   4281 			delay(25*1000);
   4282 		break;
   4283 	case WM_T_82575:
   4284 	case WM_T_82576:
   4285 	case WM_T_82580:
   4286 	case WM_T_I350:
   4287 	case WM_T_I354:
   4288 	case WM_T_I210:
   4289 	case WM_T_I211:
   4290 	case WM_T_80003:
   4291 		/* check EECD_EE_AUTORD */
   4292 		wm_get_auto_rd_done(sc);
   4293 		break;
   4294 	case WM_T_ICH8:
   4295 	case WM_T_ICH9:
   4296 	case WM_T_ICH10:
   4297 	case WM_T_PCH:
   4298 	case WM_T_PCH2:
   4299 	case WM_T_PCH_LPT:
   4300 	case WM_T_PCH_SPT:
   4301 		break;
   4302 	default:
   4303 		panic("%s: unknown type\n", __func__);
   4304 	}
   4305 
   4306 	/* Check whether EEPROM is present or not */
   4307 	switch (sc->sc_type) {
   4308 	case WM_T_82575:
   4309 	case WM_T_82576:
   4310 	case WM_T_82580:
   4311 	case WM_T_I350:
   4312 	case WM_T_I354:
   4313 	case WM_T_ICH8:
   4314 	case WM_T_ICH9:
   4315 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4316 			/* Not found */
   4317 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4318 			if (sc->sc_type == WM_T_82575)
   4319 				wm_reset_init_script_82575(sc);
   4320 		}
   4321 		break;
   4322 	default:
   4323 		break;
   4324 	}
   4325 
   4326 	if ((sc->sc_type == WM_T_82580)
   4327 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4328 		/* clear global device reset status bit */
   4329 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4330 	}
   4331 
   4332 	/* Clear any pending interrupt events. */
   4333 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4334 	reg = CSR_READ(sc, WMREG_ICR);
   4335 	if (wm_is_using_msix(sc)) {
   4336 		if (sc->sc_type != WM_T_82574) {
   4337 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4338 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4339 		} else
   4340 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4341 	}
   4342 
   4343 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4344 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4345 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4346 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4347 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4348 		reg |= KABGTXD_BGSQLBIAS;
   4349 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4350 	}
   4351 
   4352 	/* reload sc_ctrl */
   4353 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4354 
   4355 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4356 		wm_set_eee_i350(sc);
   4357 
   4358 	/* Clear the host wakeup bit after lcd reset */
   4359 	if (sc->sc_type >= WM_T_PCH) {
   4360 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4361 		    BM_PORT_GEN_CFG);
   4362 		reg &= ~BM_WUC_HOST_WU_BIT;
   4363 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   4364 		    BM_PORT_GEN_CFG, reg);
   4365 	}
   4366 
   4367 	/*
   4368 	 * For PCH, this write will make sure that any noise will be detected
   4369 	 * as a CRC error and be dropped rather than show up as a bad packet
   4370 	 * to the DMA engine
   4371 	 */
   4372 	if (sc->sc_type == WM_T_PCH)
   4373 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4374 
   4375 	if (sc->sc_type >= WM_T_82544)
   4376 		CSR_WRITE(sc, WMREG_WUC, 0);
   4377 
   4378 	wm_reset_mdicnfg_82580(sc);
   4379 
   4380 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4381 		wm_pll_workaround_i210(sc);
   4382 }
   4383 
   4384 /*
   4385  * wm_add_rxbuf:
   4386  *
    4387  *	Add a receive buffer to the indicated descriptor.
   4388  */
   4389 static int
   4390 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4391 {
   4392 	struct wm_softc *sc = rxq->rxq_sc;
   4393 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4394 	struct mbuf *m;
   4395 	int error;
   4396 
   4397 	KASSERT(mutex_owned(rxq->rxq_lock));
   4398 
   4399 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4400 	if (m == NULL)
   4401 		return ENOBUFS;
   4402 
   4403 	MCLGET(m, M_DONTWAIT);
   4404 	if ((m->m_flags & M_EXT) == 0) {
   4405 		m_freem(m);
   4406 		return ENOBUFS;
   4407 	}
   4408 
   4409 	if (rxs->rxs_mbuf != NULL)
   4410 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4411 
   4412 	rxs->rxs_mbuf = m;
   4413 
   4414 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4415 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4416 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4417 	if (error) {
   4418 		/* XXX XXX XXX */
   4419 		aprint_error_dev(sc->sc_dev,
   4420 		    "unable to load rx DMA map %d, error = %d\n",
   4421 		    idx, error);
   4422 		panic("wm_add_rxbuf");
   4423 	}
   4424 
   4425 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4426 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4427 
   4428 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4429 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4430 			wm_init_rxdesc(rxq, idx);
   4431 	} else
   4432 		wm_init_rxdesc(rxq, idx);
   4433 
   4434 	return 0;
   4435 }
   4436 
   4437 /*
   4438  * wm_rxdrain:
   4439  *
   4440  *	Drain the receive queue.
   4441  */
   4442 static void
   4443 wm_rxdrain(struct wm_rxqueue *rxq)
   4444 {
   4445 	struct wm_softc *sc = rxq->rxq_sc;
   4446 	struct wm_rxsoft *rxs;
   4447 	int i;
   4448 
   4449 	KASSERT(mutex_owned(rxq->rxq_lock));
   4450 
   4451 	for (i = 0; i < WM_NRXDESC; i++) {
   4452 		rxs = &rxq->rxq_soft[i];
   4453 		if (rxs->rxs_mbuf != NULL) {
   4454 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4455 			m_freem(rxs->rxs_mbuf);
   4456 			rxs->rxs_mbuf = NULL;
   4457 		}
   4458 	}
   4459 }
   4460 
   4461 
   4462 /*
    4463  * XXX Copied from FreeBSD's sys/net/rss_config.c
   4464  */
   4465 /*
   4466  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4467  * effectiveness may be limited by algorithm choice and available entropy
   4468  * during the boot.
   4469  *
   4470  * XXXRW: And that we don't randomize it yet!
   4471  *
   4472  * This is the default Microsoft RSS specification key which is also
   4473  * the Chelsio T5 firmware default key.
   4474  */
   4475 #define RSS_KEYSIZE 40
   4476 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4477 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4478 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4479 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4480 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4481 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4482 };
   4483 
   4484 /*
    4485  * Caller must pass an array of size sizeof(wm_rss_key).
    4486  *
    4487  * XXX
    4488  * As if_ixgbe may also want this function, it should not be an
    4489  * if_wm-specific function.
   4490  */
   4491 static void
   4492 wm_rss_getkey(uint8_t *key)
   4493 {
   4494 
   4495 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4496 }
   4497 
   4498 /*
   4499  * Setup registers for RSS.
   4500  *
    4501  * XXX VMDq is not yet supported
   4502  */
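         /*
          * The redirection table (RETA) is filled round-robin so entry i
          * steers traffic to queue (i % sc_nqueues); the 40-byte RSS key is
          * then loaded and hashing is enabled for the IPv4/IPv6 TCP and UDP
          * field combinations.
          */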
   4503 static void
   4504 wm_init_rss(struct wm_softc *sc)
   4505 {
   4506 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4507 	int i;
   4508 
   4509 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4510 
   4511 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4512 		int qid, reta_ent;
   4513 
   4514 		qid  = i % sc->sc_nqueues;
    4515 		switch (sc->sc_type) {
   4516 		case WM_T_82574:
   4517 			reta_ent = __SHIFTIN(qid,
   4518 			    RETA_ENT_QINDEX_MASK_82574);
   4519 			break;
   4520 		case WM_T_82575:
   4521 			reta_ent = __SHIFTIN(qid,
   4522 			    RETA_ENT_QINDEX1_MASK_82575);
   4523 			break;
   4524 		default:
   4525 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4526 			break;
   4527 		}
   4528 
   4529 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4530 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4531 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4532 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4533 	}
   4534 
   4535 	wm_rss_getkey((uint8_t *)rss_key);
   4536 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4537 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4538 
   4539 	if (sc->sc_type == WM_T_82574)
   4540 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4541 	else
   4542 		mrqc = MRQC_ENABLE_RSS_MQ;
   4543 
   4544 	/*
   4545 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4546 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4547 	 */
   4548 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4549 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4550 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4551 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4552 
   4553 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4554 }
   4555 
   4556 /*
    4557  * Adjust the TX and RX queue numbers which the system actually uses.
    4558  *
    4559  * The numbers are affected by the parameters below.
    4560  *     - The number of hardware queues
   4561  *     - The number of MSI-X vectors (= "nvectors" argument)
   4562  *     - ncpu
   4563  */
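         /*
          * For example, an 82576 (16 hardware queue pairs) given 5 MSI-X
          * vectors on an 8-CPU machine ends up with min(16, 5 - 1, 8) = 4
          * queues, leaving one vector for non-queue (link) interrupts.
          */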
   4564 static void
   4565 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4566 {
   4567 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4568 
   4569 	if (nvectors < 2) {
   4570 		sc->sc_nqueues = 1;
   4571 		return;
   4572 	}
   4573 
    4574 	switch (sc->sc_type) {
   4575 	case WM_T_82572:
   4576 		hw_ntxqueues = 2;
   4577 		hw_nrxqueues = 2;
   4578 		break;
   4579 	case WM_T_82574:
   4580 		hw_ntxqueues = 2;
   4581 		hw_nrxqueues = 2;
   4582 		break;
   4583 	case WM_T_82575:
   4584 		hw_ntxqueues = 4;
   4585 		hw_nrxqueues = 4;
   4586 		break;
   4587 	case WM_T_82576:
   4588 		hw_ntxqueues = 16;
   4589 		hw_nrxqueues = 16;
   4590 		break;
   4591 	case WM_T_82580:
   4592 	case WM_T_I350:
   4593 	case WM_T_I354:
   4594 		hw_ntxqueues = 8;
   4595 		hw_nrxqueues = 8;
   4596 		break;
   4597 	case WM_T_I210:
   4598 		hw_ntxqueues = 4;
   4599 		hw_nrxqueues = 4;
   4600 		break;
   4601 	case WM_T_I211:
   4602 		hw_ntxqueues = 2;
   4603 		hw_nrxqueues = 2;
   4604 		break;
    4605 		/*
    4606 		 * As the following Ethernet controllers do not support
    4607 		 * MSI-X, this driver does not use multiqueue on them:
    4608 		 *     - WM_T_80003
    4609 		 *     - WM_T_ICH8
    4610 		 *     - WM_T_ICH9
    4611 		 *     - WM_T_ICH10
    4612 		 *     - WM_T_PCH
    4613 		 *     - WM_T_PCH2
    4614 		 *     - WM_T_PCH_LPT
    4615 		 */
   4616 	default:
   4617 		hw_ntxqueues = 1;
   4618 		hw_nrxqueues = 1;
   4619 		break;
   4620 	}
   4621 
   4622 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4623 
    4624 	/*
    4625 	 * As more queues than MSI-X vectors cannot improve scaling, we
    4626 	 * limit the number of queues actually used.
    4627 	 */
   4628 	if (nvectors < hw_nqueues + 1) {
   4629 		sc->sc_nqueues = nvectors - 1;
   4630 	} else {
   4631 		sc->sc_nqueues = hw_nqueues;
   4632 	}
   4633 
    4634 	/*
    4635 	 * As more queues than CPUs cannot improve scaling, we limit
    4636 	 * the number of queues actually used.
    4637 	 */
   4638 	if (ncpu < sc->sc_nqueues)
   4639 		sc->sc_nqueues = ncpu;
   4640 }
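
/*
 * Worked example: an 82576 (16 hardware queues) attached with
 * nvectors == 5 and ncpu == 8 gives hw_nqueues == 16; the vector
 * limit clamps sc_nqueues to nvectors - 1 == 4 (one vector is
 * reserved for the link interrupt), and the ncpu limit does not
 * clamp it further.
 */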
   4641 
   4642 static inline bool
   4643 wm_is_using_msix(struct wm_softc *sc)
   4644 {
   4645 
   4646 	return (sc->sc_nintrs > 1);
   4647 }
   4648 
   4649 static inline bool
   4650 wm_is_using_multiqueue(struct wm_softc *sc)
   4651 {
   4652 
   4653 	return (sc->sc_nqueues > 1);
   4654 }
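
/*
 * Note that the two predicates above differ: wm_setup_msix() sets
 * sc_nintrs to sc_nqueues + 1, so a single-queue MSI-X configuration
 * satisfies wm_is_using_msix() but not wm_is_using_multiqueue().
 */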
   4655 
   4656 static int
   4657 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4658 {
   4659 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4660 	wmq->wmq_id = qidx;
   4661 	wmq->wmq_intr_idx = intr_idx;
   4662 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4663 #ifdef WM_MPSAFE
   4664 	    | SOFTINT_MPSAFE
   4665 #endif
   4666 	    , wm_handle_queue, wmq);
   4667 	if (wmq->wmq_si != NULL)
   4668 		return 0;
   4669 
   4670 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4671 	    wmq->wmq_id);
   4672 
   4673 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4674 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4675 	return ENOMEM;
   4676 }
   4677 
   4678 /*
   4679  * Both single interrupt MSI and INTx can use this function.
   4680  */
   4681 static int
   4682 wm_setup_legacy(struct wm_softc *sc)
   4683 {
   4684 	pci_chipset_tag_t pc = sc->sc_pc;
   4685 	const char *intrstr = NULL;
   4686 	char intrbuf[PCI_INTRSTR_LEN];
   4687 	int error;
   4688 
   4689 	error = wm_alloc_txrx_queues(sc);
   4690 	if (error) {
   4691 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4692 		    error);
   4693 		return ENOMEM;
   4694 	}
   4695 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4696 	    sizeof(intrbuf));
   4697 #ifdef WM_MPSAFE
   4698 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4699 #endif
   4700 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4701 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4702 	if (sc->sc_ihs[0] == NULL) {
   4703 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4704 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4705 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4706 		return ENOMEM;
   4707 	}
   4708 
   4709 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4710 	sc->sc_nintrs = 1;
   4711 
   4712 	return wm_softint_establish(sc, 0, 0);
   4713 }
   4714 
   4715 static int
   4716 wm_setup_msix(struct wm_softc *sc)
   4717 {
   4718 	void *vih;
   4719 	kcpuset_t *affinity;
   4720 	int qidx, error, intr_idx, txrx_established;
   4721 	pci_chipset_tag_t pc = sc->sc_pc;
   4722 	const char *intrstr = NULL;
   4723 	char intrbuf[PCI_INTRSTR_LEN];
   4724 	char intr_xname[INTRDEVNAMEBUF];
   4725 
   4726 	if (sc->sc_nqueues < ncpu) {
    4727 		/*
    4728 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4729 		 * interrupts starts from CPU#1.
    4730 		 */
   4731 		sc->sc_affinity_offset = 1;
   4732 	} else {
    4733 		/*
    4734 		 * In this case, this device uses all CPUs, so we unify the
    4735 		 * affinity cpu_index with the MSI-X vector number for readability.
    4736 		 */
   4737 		sc->sc_affinity_offset = 0;
   4738 	}
   4739 
   4740 	error = wm_alloc_txrx_queues(sc);
   4741 	if (error) {
   4742 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4743 		    error);
   4744 		return ENOMEM;
   4745 	}
   4746 
   4747 	kcpuset_create(&affinity, false);
   4748 	intr_idx = 0;
   4749 
   4750 	/*
   4751 	 * TX and RX
   4752 	 */
   4753 	txrx_established = 0;
   4754 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4755 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4756 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4757 
   4758 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4759 		    sizeof(intrbuf));
   4760 #ifdef WM_MPSAFE
   4761 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4762 		    PCI_INTR_MPSAFE, true);
   4763 #endif
   4764 		memset(intr_xname, 0, sizeof(intr_xname));
   4765 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4766 		    device_xname(sc->sc_dev), qidx);
   4767 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4768 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4769 		if (vih == NULL) {
   4770 			aprint_error_dev(sc->sc_dev,
   4771 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4772 			    intrstr ? " at " : "",
   4773 			    intrstr ? intrstr : "");
   4774 
   4775 			goto fail;
   4776 		}
   4777 		kcpuset_zero(affinity);
   4778 		/* Round-robin affinity */
   4779 		kcpuset_set(affinity, affinity_to);
   4780 		error = interrupt_distribute(vih, affinity, NULL);
   4781 		if (error == 0) {
   4782 			aprint_normal_dev(sc->sc_dev,
   4783 			    "for TX and RX interrupting at %s affinity to %u\n",
   4784 			    intrstr, affinity_to);
   4785 		} else {
   4786 			aprint_normal_dev(sc->sc_dev,
   4787 			    "for TX and RX interrupting at %s\n", intrstr);
   4788 		}
   4789 		sc->sc_ihs[intr_idx] = vih;
   4790 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   4791 			goto fail;
   4792 		txrx_established++;
   4793 		intr_idx++;
   4794 	}
   4795 
   4796 	/*
   4797 	 * LINK
   4798 	 */
   4799 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4800 	    sizeof(intrbuf));
   4801 #ifdef WM_MPSAFE
   4802 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4803 #endif
   4804 	memset(intr_xname, 0, sizeof(intr_xname));
   4805 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4806 	    device_xname(sc->sc_dev));
   4807 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4808 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4809 	if (vih == NULL) {
   4810 		aprint_error_dev(sc->sc_dev,
   4811 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4812 		    intrstr ? " at " : "",
   4813 		    intrstr ? intrstr : "");
   4814 
   4815 		goto fail;
   4816 	}
    4817 	/* Keep the default affinity for the LINK interrupt. */
   4818 	aprint_normal_dev(sc->sc_dev,
   4819 	    "for LINK interrupting at %s\n", intrstr);
   4820 	sc->sc_ihs[intr_idx] = vih;
   4821 	sc->sc_link_intr_idx = intr_idx;
   4822 
   4823 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4824 	kcpuset_destroy(affinity);
   4825 	return 0;
   4826 
   4827  fail:
   4828 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4829 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4830 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4831 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4832 	}
   4833 
   4834 	kcpuset_destroy(affinity);
   4835 	return ENOMEM;
   4836 }
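
/*
 * The resulting MSI-X layout is: vectors 0 .. sc_nqueues - 1 handle
 * the per-queue TX/RX interrupts (bound round-robin to CPUs starting
 * at sc_affinity_offset), vector sc_nqueues handles the link
 * interrupt, and sc_nintrs == sc_nqueues + 1.
 */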
   4837 
   4838 static void
   4839 wm_turnon(struct wm_softc *sc)
   4840 {
   4841 	int i;
   4842 
   4843 	KASSERT(WM_CORE_LOCKED(sc));
   4844 
    4845 	/*
    4846 	 * Must unset the stopping flags in ascending order.
    4847 	 */
    4848 	for (i = 0; i < sc->sc_nqueues; i++) {
   4849 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4850 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4851 
   4852 		mutex_enter(txq->txq_lock);
   4853 		txq->txq_stopping = false;
   4854 		mutex_exit(txq->txq_lock);
   4855 
   4856 		mutex_enter(rxq->rxq_lock);
   4857 		rxq->rxq_stopping = false;
   4858 		mutex_exit(rxq->rxq_lock);
   4859 	}
   4860 
   4861 	sc->sc_core_stopping = false;
   4862 }
   4863 
   4864 static void
   4865 wm_turnoff(struct wm_softc *sc)
   4866 {
   4867 	int i;
   4868 
   4869 	KASSERT(WM_CORE_LOCKED(sc));
   4870 
   4871 	sc->sc_core_stopping = true;
   4872 
    4873 	/*
    4874 	 * Must set the stopping flags in ascending order.
    4875 	 */
    4876 	for (i = 0; i < sc->sc_nqueues; i++) {
   4877 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4878 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4879 
   4880 		mutex_enter(rxq->rxq_lock);
   4881 		rxq->rxq_stopping = true;
   4882 		mutex_exit(rxq->rxq_lock);
   4883 
   4884 		mutex_enter(txq->txq_lock);
   4885 		txq->txq_stopping = true;
   4886 		mutex_exit(txq->txq_lock);
   4887 	}
   4888 }
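
/*
 * Note that wm_turnoff() is the exact mirror of wm_turnon(): it
 * raises sc_core_stopping first and then sets the per-queue flags
 * RX before TX, while wm_turnon() clears TX, then RX, and clears
 * sc_core_stopping last.
 */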
   4889 
    4890 /*
    4891  * Write the interrupt interval value to the ITR or EITR register.
    4892  */
   4893 static void
   4894 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   4895 {
   4896 
   4897 	if (!wmq->wmq_set_itr)
   4898 		return;
   4899 
   4900 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4901 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   4902 
    4903 		/*
    4904 		 * The 82575 doesn't have the CNT_INGR field,
    4905 		 * so overwrite the counter field in software.
    4906 		 */
   4907 		if (sc->sc_type == WM_T_82575)
   4908 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   4909 		else
   4910 			eitr |= EITR_CNT_INGR;
   4911 
   4912 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   4913 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
    4914 		/*
    4915 		 * The 82574 has both ITR and EITR. Set EITR when we use
    4916 		 * the multiqueue function with MSI-X.
    4917 		 */
   4918 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   4919 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   4920 	} else {
   4921 		KASSERT(wmq->wmq_id == 0);
   4922 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   4923 	}
   4924 
   4925 	wmq->wmq_set_itr = false;
   4926 }
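
/*
 * Summary of the register selection above: NEWQUEUE controllers use
 * the per-vector EITR registers, the 82574 uses its EITR registers
 * only in MSI-X mode, and everything else shares the single ITR
 * register, which is why the legacy path asserts wmq_id == 0.
 */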
   4927 
    4928 /*
    4929  * TODO
    4930  * The dynamic ITR calculation below is almost the same as Linux's igb;
    4931  * however, it does not fit wm(4), so AIM stays disabled until we find
    4932  * an appropriate ITR calculation.
    4933  */
    4934 /*
    4935  * Calculate the interrupt interval value to be written to the register
    4936  * by wm_itrs_writereg(). This function does not write the ITR/EITR register.
    4937  */
   4938 static void
   4939 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   4940 {
   4941 #ifdef NOTYET
   4942 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   4943 	struct wm_txqueue *txq = &wmq->wmq_txq;
   4944 	uint32_t avg_size = 0;
   4945 	uint32_t new_itr;
   4946 
   4947 	if (rxq->rxq_packets)
    4948 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   4949 	if (txq->txq_packets)
   4950 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   4951 
   4952 	if (avg_size == 0) {
   4953 		new_itr = 450; /* restore default value */
   4954 		goto out;
   4955 	}
   4956 
   4957 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   4958 	avg_size += 24;
   4959 
   4960 	/* Don't starve jumbo frames */
   4961 	avg_size = min(avg_size, 3000);
   4962 
   4963 	/* Give a little boost to mid-size frames */
   4964 	if ((avg_size > 300) && (avg_size < 1200))
   4965 		new_itr = avg_size / 3;
   4966 	else
   4967 		new_itr = avg_size / 2;
   4968 
   4969 out:
    4970 	/*
    4971 	 * The usage of the 82574 and 82575 EITR is different from other NEWQUEUE
    4972 	 * controllers. See the sc->sc_itr_init setting in wm_init_locked().
    4973 	 */
   4974 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   4975 		new_itr *= 4;
   4976 
   4977 	if (new_itr != wmq->wmq_itr) {
   4978 		wmq->wmq_itr = new_itr;
   4979 		wmq->wmq_set_itr = true;
   4980 	} else
   4981 		wmq->wmq_set_itr = false;
   4982 
   4983 	rxq->rxq_packets = 0;
   4984 	rxq->rxq_bytes = 0;
   4985 	txq->txq_packets = 0;
   4986 	txq->txq_bytes = 0;
   4987 #endif
   4988 }
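
/*
 * Worked example of the disabled heuristic above: an average packet
 * of 1000 bytes becomes 1024 after the 24-byte CRC/preamble/gap
 * adjustment; that falls in the mid-size band (300 < size < 1200),
 * so new_itr = 1024 / 3 = 341, scaled by 4 to 1364 on everything
 * except the 82575.
 */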
   4989 
   4990 /*
   4991  * wm_init:		[ifnet interface function]
   4992  *
   4993  *	Initialize the interface.
   4994  */
   4995 static int
   4996 wm_init(struct ifnet *ifp)
   4997 {
   4998 	struct wm_softc *sc = ifp->if_softc;
   4999 	int ret;
   5000 
   5001 	WM_CORE_LOCK(sc);
   5002 	ret = wm_init_locked(ifp);
   5003 	WM_CORE_UNLOCK(sc);
   5004 
   5005 	return ret;
   5006 }
   5007 
   5008 static int
   5009 wm_init_locked(struct ifnet *ifp)
   5010 {
   5011 	struct wm_softc *sc = ifp->if_softc;
   5012 	int i, j, trynum, error = 0;
   5013 	uint32_t reg;
   5014 
   5015 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5016 		device_xname(sc->sc_dev), __func__));
   5017 	KASSERT(WM_CORE_LOCKED(sc));
   5018 
    5019 	/*
    5020 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5021 	 * There is a small but measurable benefit to avoiding the adjustment
    5022 	 * of the descriptor so that the headers are aligned, for normal mtu,
    5023 	 * on such platforms.  One possibility is that the DMA itself is
    5024 	 * slightly more efficient if the front of the entire packet (instead
    5025 	 * of the front of the headers) is aligned.
    5026 	 *
    5027 	 * Note we must always set align_tweak to 0 if we are using
    5028 	 * jumbo frames.
    5029 	 */
   5030 #ifdef __NO_STRICT_ALIGNMENT
   5031 	sc->sc_align_tweak = 0;
   5032 #else
   5033 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5034 		sc->sc_align_tweak = 0;
   5035 	else
   5036 		sc->sc_align_tweak = 2;
   5037 #endif /* __NO_STRICT_ALIGNMENT */
   5038 
   5039 	/* Cancel any pending I/O. */
   5040 	wm_stop_locked(ifp, 0);
   5041 
   5042 	/* update statistics before reset */
   5043 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5044 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5045 
   5046 	/* PCH_SPT hardware workaround */
   5047 	if (sc->sc_type == WM_T_PCH_SPT)
   5048 		wm_flush_desc_rings(sc);
   5049 
   5050 	/* Reset the chip to a known state. */
   5051 	wm_reset(sc);
   5052 
   5053 	/* AMT based hardware can now take control from firmware */
   5054 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5055 		wm_get_hw_control(sc);
   5056 
   5057 	/* Init hardware bits */
   5058 	wm_initialize_hardware_bits(sc);
   5059 
   5060 	/* Reset the PHY. */
   5061 	if (sc->sc_flags & WM_F_HAS_MII)
   5062 		wm_gmii_reset(sc);
   5063 
   5064 	/* Calculate (E)ITR value */
   5065 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
    5066 		/*
    5067 		 * For NEWQUEUE's EITR (except for the 82575).
    5068 		 * The 82575's EITR should be set to the same throttling value as
    5069 		 * the other old controllers' ITR because the interrupt/sec
    5070 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
    5071 		 *
    5072 		 * The 82574's EITR should be set to the same throttling value as ITR.
    5073 		 *
    5074 		 * For N interrupts/sec, set this value to 1,000,000 / N, in
    5075 		 * contrast to the ITR throttling value.
    5076 		 */
   5077 		sc->sc_itr_init = 450;
   5078 	} else if (sc->sc_type >= WM_T_82543) {
   5079 		/*
   5080 		 * Set up the interrupt throttling register (units of 256ns)
   5081 		 * Note that a footnote in Intel's documentation says this
   5082 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5083 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5084 		 * that that is also true for the 1024ns units of the other
   5085 		 * interrupt-related timer registers -- so, really, we ought
   5086 		 * to divide this value by 4 when the link speed is low.
   5087 		 *
   5088 		 * XXX implement this division at link speed change!
   5089 		 */
   5090 
   5091 		/*
   5092 		 * For N interrupts/sec, set this value to:
   5093 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5094 		 * absolute and packet timer values to this value
   5095 		 * divided by 4 to get "simple timer" behavior.
   5096 		 */
   5097 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5098 	}
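
	/*
	 * Worked arithmetic for the defaults above: an EITR value of 450
	 * gives roughly 1,000,000 / 450 = ~2222 interrupts/sec, and an
	 * ITR value of 1500 (256 ns units) gives
	 * 1,000,000,000 / (1500 * 256) = ~2604 interrupts/sec, matching
	 * the "2604 ints/sec" note above.
	 */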
   5099 
   5100 	error = wm_init_txrx_queues(sc);
   5101 	if (error)
   5102 		goto out;
   5103 
   5104 	/*
   5105 	 * Clear out the VLAN table -- we don't use it (yet).
   5106 	 */
   5107 	CSR_WRITE(sc, WMREG_VET, 0);
   5108 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5109 		trynum = 10; /* Due to hw errata */
   5110 	else
   5111 		trynum = 1;
   5112 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5113 		for (j = 0; j < trynum; j++)
   5114 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5115 
   5116 	/*
   5117 	 * Set up flow-control parameters.
   5118 	 *
   5119 	 * XXX Values could probably stand some tuning.
   5120 	 */
   5121 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5122 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5123 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5124 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5125 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5126 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5127 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5128 	}
   5129 
   5130 	sc->sc_fcrtl = FCRTL_DFLT;
   5131 	if (sc->sc_type < WM_T_82543) {
   5132 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5133 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5134 	} else {
   5135 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5136 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5137 	}
   5138 
   5139 	if (sc->sc_type == WM_T_80003)
   5140 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5141 	else
   5142 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5143 
   5144 	/* Writes the control register. */
   5145 	wm_set_vlan(sc);
   5146 
   5147 	if (sc->sc_flags & WM_F_HAS_MII) {
   5148 		int val;
   5149 
   5150 		switch (sc->sc_type) {
   5151 		case WM_T_80003:
   5152 		case WM_T_ICH8:
   5153 		case WM_T_ICH9:
   5154 		case WM_T_ICH10:
   5155 		case WM_T_PCH:
   5156 		case WM_T_PCH2:
   5157 		case WM_T_PCH_LPT:
   5158 		case WM_T_PCH_SPT:
    5159 			/*
    5160 			 * Set the MAC to wait the maximum time between each
    5161 			 * iteration and increase the max iterations when
    5162 			 * polling the PHY; this fixes erroneous timeouts at
    5163 			 * 10Mbps.
    5164 			 */
   5165 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5166 			    0xFFFF);
   5167 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   5168 			val |= 0x3F;
   5169 			wm_kmrn_writereg(sc,
   5170 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   5171 			break;
   5172 		default:
   5173 			break;
   5174 		}
   5175 
   5176 		if (sc->sc_type == WM_T_80003) {
   5177 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   5178 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   5179 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   5180 
   5181 			/* Bypass RX and TX FIFO's */
   5182 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5183 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5184 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5185 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5186 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5187 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5188 		}
   5189 	}
   5190 #if 0
   5191 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5192 #endif
   5193 
   5194 	/* Set up checksum offload parameters. */
   5195 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5196 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5197 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5198 		reg |= RXCSUM_IPOFL;
   5199 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5200 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5201 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5202 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5203 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5204 
   5205 	/* Set registers about MSI-X */
   5206 	if (wm_is_using_msix(sc)) {
   5207 		uint32_t ivar;
   5208 		struct wm_queue *wmq;
   5209 		int qid, qintr_idx;
   5210 
   5211 		if (sc->sc_type == WM_T_82575) {
   5212 			/* Interrupt control */
   5213 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5214 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5215 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5216 
   5217 			/* TX and RX */
   5218 			for (i = 0; i < sc->sc_nqueues; i++) {
   5219 				wmq = &sc->sc_queue[i];
   5220 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5221 				    EITR_TX_QUEUE(wmq->wmq_id)
   5222 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5223 			}
   5224 			/* Link status */
   5225 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5226 			    EITR_OTHER);
   5227 		} else if (sc->sc_type == WM_T_82574) {
   5228 			/* Interrupt control */
   5229 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5230 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5231 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5232 
    5233 			/*
    5234 			 * Work around an issue with spurious interrupts
    5235 			 * in MSI-X mode.
    5236 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
    5237 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
    5238 			 */
   5239 			reg = CSR_READ(sc, WMREG_RFCTL);
   5240 			reg |= WMREG_RFCTL_ACKDIS;
   5241 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5242 
   5243 			ivar = 0;
   5244 			/* TX and RX */
   5245 			for (i = 0; i < sc->sc_nqueues; i++) {
   5246 				wmq = &sc->sc_queue[i];
   5247 				qid = wmq->wmq_id;
   5248 				qintr_idx = wmq->wmq_intr_idx;
   5249 
   5250 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5251 				    IVAR_TX_MASK_Q_82574(qid));
   5252 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5253 				    IVAR_RX_MASK_Q_82574(qid));
   5254 			}
   5255 			/* Link status */
   5256 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5257 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5258 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5259 		} else {
   5260 			/* Interrupt control */
   5261 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5262 			    | GPIE_EIAME | GPIE_PBA);
   5263 
   5264 			switch (sc->sc_type) {
   5265 			case WM_T_82580:
   5266 			case WM_T_I350:
   5267 			case WM_T_I354:
   5268 			case WM_T_I210:
   5269 			case WM_T_I211:
   5270 				/* TX and RX */
   5271 				for (i = 0; i < sc->sc_nqueues; i++) {
   5272 					wmq = &sc->sc_queue[i];
   5273 					qid = wmq->wmq_id;
   5274 					qintr_idx = wmq->wmq_intr_idx;
   5275 
   5276 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5277 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5278 					ivar |= __SHIFTIN((qintr_idx
   5279 						| IVAR_VALID),
   5280 					    IVAR_TX_MASK_Q(qid));
   5281 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5282 					ivar |= __SHIFTIN((qintr_idx
   5283 						| IVAR_VALID),
   5284 					    IVAR_RX_MASK_Q(qid));
   5285 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5286 				}
   5287 				break;
   5288 			case WM_T_82576:
   5289 				/* TX and RX */
   5290 				for (i = 0; i < sc->sc_nqueues; i++) {
   5291 					wmq = &sc->sc_queue[i];
   5292 					qid = wmq->wmq_id;
   5293 					qintr_idx = wmq->wmq_intr_idx;
   5294 
   5295 					ivar = CSR_READ(sc,
   5296 					    WMREG_IVAR_Q_82576(qid));
   5297 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5298 					ivar |= __SHIFTIN((qintr_idx
   5299 						| IVAR_VALID),
   5300 					    IVAR_TX_MASK_Q_82576(qid));
   5301 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5302 					ivar |= __SHIFTIN((qintr_idx
   5303 						| IVAR_VALID),
   5304 					    IVAR_RX_MASK_Q_82576(qid));
   5305 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5306 					    ivar);
   5307 				}
   5308 				break;
   5309 			default:
   5310 				break;
   5311 			}
   5312 
   5313 			/* Link status */
   5314 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5315 			    IVAR_MISC_OTHER);
   5316 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5317 		}
   5318 
   5319 		if (wm_is_using_multiqueue(sc)) {
   5320 			wm_init_rss(sc);
   5321 
    5322 			/*
    5323 			 * NOTE: Receive Full-Packet Checksum Offload is
    5324 			 * mutually exclusive with Multiqueue. However, this
    5325 			 * is not the same as TCP/IP checksum offload, which
    5326 			 * still works.
    5327 			 */
   5328 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5329 			reg |= RXCSUM_PCSD;
   5330 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5331 		}
   5332 	}
   5333 
   5334 	/* Set up the interrupt registers. */
   5335 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5336 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5337 	    ICR_RXO | ICR_RXT0;
   5338 	if (wm_is_using_msix(sc)) {
   5339 		uint32_t mask;
   5340 		struct wm_queue *wmq;
   5341 
   5342 		switch (sc->sc_type) {
   5343 		case WM_T_82574:
   5344 			mask = 0;
   5345 			for (i = 0; i < sc->sc_nqueues; i++) {
   5346 				wmq = &sc->sc_queue[i];
   5347 				mask |= ICR_TXQ(wmq->wmq_id);
   5348 				mask |= ICR_RXQ(wmq->wmq_id);
   5349 			}
   5350 			mask |= ICR_OTHER;
   5351 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5352 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5353 			break;
   5354 		default:
   5355 			if (sc->sc_type == WM_T_82575) {
   5356 				mask = 0;
   5357 				for (i = 0; i < sc->sc_nqueues; i++) {
   5358 					wmq = &sc->sc_queue[i];
   5359 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5360 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5361 				}
   5362 				mask |= EITR_OTHER;
   5363 			} else {
   5364 				mask = 0;
   5365 				for (i = 0; i < sc->sc_nqueues; i++) {
   5366 					wmq = &sc->sc_queue[i];
   5367 					mask |= 1 << wmq->wmq_intr_idx;
   5368 				}
   5369 				mask |= 1 << sc->sc_link_intr_idx;
   5370 			}
   5371 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5372 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5373 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5374 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5375 			break;
   5376 		}
   5377 	} else
   5378 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5379 
   5380 	/* Set up the inter-packet gap. */
   5381 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5382 
   5383 	if (sc->sc_type >= WM_T_82543) {
   5384 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5385 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5386 			wm_itrs_writereg(sc, wmq);
   5387 		}
    5388 		/*
    5389 		 * Link interrupts occur much less frequently than
    5390 		 * TX and RX interrupts, so we don't tune the
    5391 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5392 		 * FreeBSD's if_igb does.
    5393 		 */
   5394 	}
   5395 
   5396 	/* Set the VLAN ethernetype. */
   5397 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5398 
    5399 	/*
    5400 	 * Set up the transmit control register; we start out with
    5401 	 * a collision distance suitable for FDX, but update it when
    5402 	 * we resolve the media type.
    5403 	 */
   5404 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5405 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5406 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5407 	if (sc->sc_type >= WM_T_82571)
   5408 		sc->sc_tctl |= TCTL_MULR;
   5409 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5410 
   5411 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5412 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5413 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5414 	}
   5415 
   5416 	if (sc->sc_type == WM_T_80003) {
   5417 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5418 		reg &= ~TCTL_EXT_GCEX_MASK;
   5419 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5420 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5421 	}
   5422 
   5423 	/* Set the media. */
   5424 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5425 		goto out;
   5426 
   5427 	/* Configure for OS presence */
   5428 	wm_init_manageability(sc);
   5429 
   5430 	/*
   5431 	 * Set up the receive control register; we actually program
   5432 	 * the register when we set the receive filter.  Use multicast
   5433 	 * address offset type 0.
   5434 	 *
   5435 	 * Only the i82544 has the ability to strip the incoming
   5436 	 * CRC, so we don't enable that feature.
   5437 	 */
   5438 	sc->sc_mchash_type = 0;
   5439 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5440 	    | RCTL_MO(sc->sc_mchash_type);
   5441 
    5442 	/*
    5443 	 * The 82574 uses the one-buffer extended Rx descriptor.
    5444 	 */
   5445 	if (sc->sc_type == WM_T_82574)
   5446 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5447 
    5448 	/*
    5449 	 * The I350 has a bug where it always strips the CRC whether asked
    5450 	 * to or not, so ask for the stripped CRC here and cope in rxeof.
    5451 	 */
   5452 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5453 	    || (sc->sc_type == WM_T_I210))
   5454 		sc->sc_rctl |= RCTL_SECRC;
   5455 
   5456 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5457 	    && (ifp->if_mtu > ETHERMTU)) {
   5458 		sc->sc_rctl |= RCTL_LPE;
   5459 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5460 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5461 	}
   5462 
   5463 	if (MCLBYTES == 2048) {
   5464 		sc->sc_rctl |= RCTL_2k;
   5465 	} else {
   5466 		if (sc->sc_type >= WM_T_82543) {
   5467 			switch (MCLBYTES) {
   5468 			case 4096:
   5469 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5470 				break;
   5471 			case 8192:
   5472 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5473 				break;
   5474 			case 16384:
   5475 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5476 				break;
   5477 			default:
   5478 				panic("wm_init: MCLBYTES %d unsupported",
   5479 				    MCLBYTES);
   5480 				break;
   5481 			}
   5482 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5483 	}
   5484 
   5485 	/* Set the receive filter. */
   5486 	wm_set_filter(sc);
   5487 
   5488 	/* Enable ECC */
   5489 	switch (sc->sc_type) {
   5490 	case WM_T_82571:
   5491 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5492 		reg |= PBA_ECC_CORR_EN;
   5493 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5494 		break;
   5495 	case WM_T_PCH_LPT:
   5496 	case WM_T_PCH_SPT:
   5497 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5498 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5499 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5500 
   5501 		sc->sc_ctrl |= CTRL_MEHE;
   5502 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5503 		break;
   5504 	default:
   5505 		break;
   5506 	}
   5507 
   5508 	/* On 575 and later set RDT only if RX enabled */
   5509 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5510 		int qidx;
   5511 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5512 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5513 			for (i = 0; i < WM_NRXDESC; i++) {
   5514 				mutex_enter(rxq->rxq_lock);
   5515 				wm_init_rxdesc(rxq, i);
   5516 				mutex_exit(rxq->rxq_lock);
   5517 
   5518 			}
   5519 		}
   5520 	}
   5521 
   5522 	wm_turnon(sc);
   5523 
   5524 	/* Start the one second link check clock. */
   5525 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5526 
   5527 	/* ...all done! */
   5528 	ifp->if_flags |= IFF_RUNNING;
   5529 	ifp->if_flags &= ~IFF_OACTIVE;
   5530 
   5531  out:
   5532 	sc->sc_if_flags = ifp->if_flags;
   5533 	if (error)
   5534 		log(LOG_ERR, "%s: interface not running\n",
   5535 		    device_xname(sc->sc_dev));
   5536 	return error;
   5537 }
   5538 
   5539 /*
   5540  * wm_stop:		[ifnet interface function]
   5541  *
   5542  *	Stop transmission on the interface.
   5543  */
   5544 static void
   5545 wm_stop(struct ifnet *ifp, int disable)
   5546 {
   5547 	struct wm_softc *sc = ifp->if_softc;
   5548 
   5549 	WM_CORE_LOCK(sc);
   5550 	wm_stop_locked(ifp, disable);
   5551 	WM_CORE_UNLOCK(sc);
   5552 }
   5553 
   5554 static void
   5555 wm_stop_locked(struct ifnet *ifp, int disable)
   5556 {
   5557 	struct wm_softc *sc = ifp->if_softc;
   5558 	struct wm_txsoft *txs;
   5559 	int i, qidx;
   5560 
   5561 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5562 		device_xname(sc->sc_dev), __func__));
   5563 	KASSERT(WM_CORE_LOCKED(sc));
   5564 
   5565 	wm_turnoff(sc);
   5566 
   5567 	/* Stop the one second clock. */
   5568 	callout_stop(&sc->sc_tick_ch);
   5569 
   5570 	/* Stop the 82547 Tx FIFO stall check timer. */
   5571 	if (sc->sc_type == WM_T_82547)
   5572 		callout_stop(&sc->sc_txfifo_ch);
   5573 
   5574 	if (sc->sc_flags & WM_F_HAS_MII) {
   5575 		/* Down the MII. */
   5576 		mii_down(&sc->sc_mii);
   5577 	} else {
   5578 #if 0
   5579 		/* Should we clear PHY's status properly? */
   5580 		wm_reset(sc);
   5581 #endif
   5582 	}
   5583 
   5584 	/* Stop the transmit and receive processes. */
   5585 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5586 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5587 	sc->sc_rctl &= ~RCTL_EN;
   5588 
   5589 	/*
   5590 	 * Clear the interrupt mask to ensure the device cannot assert its
   5591 	 * interrupt line.
   5592 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5593 	 * service any currently pending or shared interrupt.
   5594 	 */
   5595 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5596 	sc->sc_icr = 0;
   5597 	if (wm_is_using_msix(sc)) {
   5598 		if (sc->sc_type != WM_T_82574) {
   5599 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5600 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5601 		} else
   5602 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5603 	}
   5604 
   5605 	/* Release any queued transmit buffers. */
   5606 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5607 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5608 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5609 		mutex_enter(txq->txq_lock);
   5610 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5611 			txs = &txq->txq_soft[i];
   5612 			if (txs->txs_mbuf != NULL) {
   5613 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5614 				m_freem(txs->txs_mbuf);
   5615 				txs->txs_mbuf = NULL;
   5616 			}
   5617 		}
   5618 		mutex_exit(txq->txq_lock);
   5619 	}
   5620 
   5621 	/* Mark the interface as down and cancel the watchdog timer. */
   5622 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5623 	ifp->if_timer = 0;
   5624 
   5625 	if (disable) {
   5626 		for (i = 0; i < sc->sc_nqueues; i++) {
   5627 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5628 			mutex_enter(rxq->rxq_lock);
   5629 			wm_rxdrain(rxq);
   5630 			mutex_exit(rxq->rxq_lock);
   5631 		}
   5632 	}
   5633 
   5634 #if 0 /* notyet */
   5635 	if (sc->sc_type >= WM_T_82544)
   5636 		CSR_WRITE(sc, WMREG_WUC, 0);
   5637 #endif
   5638 }
   5639 
   5640 static void
   5641 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5642 {
   5643 	struct mbuf *m;
   5644 	int i;
   5645 
   5646 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5647 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5648 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5649 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5650 		    m->m_data, m->m_len, m->m_flags);
   5651 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5652 	    i, i == 1 ? "" : "s");
   5653 }
   5654 
   5655 /*
   5656  * wm_82547_txfifo_stall:
   5657  *
   5658  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5659  *	reset the FIFO pointers, and restart packet transmission.
   5660  */
   5661 static void
   5662 wm_82547_txfifo_stall(void *arg)
   5663 {
   5664 	struct wm_softc *sc = arg;
   5665 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5666 
   5667 	mutex_enter(txq->txq_lock);
   5668 
   5669 	if (txq->txq_stopping)
   5670 		goto out;
   5671 
   5672 	if (txq->txq_fifo_stall) {
   5673 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5674 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5675 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5676 			/*
   5677 			 * Packets have drained.  Stop transmitter, reset
   5678 			 * FIFO pointers, restart transmitter, and kick
   5679 			 * the packet queue.
   5680 			 */
   5681 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5682 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5683 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5684 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5685 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5686 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5687 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5688 			CSR_WRITE_FLUSH(sc);
   5689 
   5690 			txq->txq_fifo_head = 0;
   5691 			txq->txq_fifo_stall = 0;
   5692 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5693 		} else {
   5694 			/*
   5695 			 * Still waiting for packets to drain; try again in
   5696 			 * another tick.
   5697 			 */
   5698 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5699 		}
   5700 	}
   5701 
   5702 out:
   5703 	mutex_exit(txq->txq_lock);
   5704 }
   5705 
   5706 /*
   5707  * wm_82547_txfifo_bugchk:
   5708  *
   5709  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5710  *	prevent enqueueing a packet that would wrap around the end
    5711  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5712  *
   5713  *	We do this by checking the amount of space before the end
   5714  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5715  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5716  *	the internal FIFO pointers to the beginning, and restart
   5717  *	transmission on the interface.
   5718  */
   5719 #define	WM_FIFO_HDR		0x10
   5720 #define	WM_82547_PAD_LEN	0x3e0
   5721 static int
   5722 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5723 {
   5724 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5725 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5726 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5727 
   5728 	/* Just return if already stalled. */
   5729 	if (txq->txq_fifo_stall)
   5730 		return 1;
   5731 
   5732 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5733 		/* Stall only occurs in half-duplex mode. */
   5734 		goto send_packet;
   5735 	}
   5736 
   5737 	if (len >= WM_82547_PAD_LEN + space) {
   5738 		txq->txq_fifo_stall = 1;
   5739 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5740 		return 1;
   5741 	}
   5742 
   5743  send_packet:
   5744 	txq->txq_fifo_head += len;
   5745 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5746 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5747 
   5748 	return 0;
   5749 }
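
/*
 * Worked example: a 1500-byte packet needs
 * len = roundup(1500 + WM_FIFO_HDR, WM_FIFO_HDR) = 1520 bytes of
 * FIFO space; in half-duplex mode the queue stalls once
 * space <= len - WM_82547_PAD_LEN, i.e. once no more than
 * 1520 - 0x3e0 = 528 bytes remain before the end of the FIFO.
 */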
   5750 
   5751 static int
   5752 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5753 {
   5754 	int error;
   5755 
   5756 	/*
   5757 	 * Allocate the control data structures, and create and load the
   5758 	 * DMA map for it.
   5759 	 *
   5760 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5761 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5762 	 * both sets within the same 4G segment.
   5763 	 */
   5764 	if (sc->sc_type < WM_T_82544)
   5765 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5766 	else
   5767 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5768 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5769 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5770 	else
   5771 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5772 
   5773 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5774 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5775 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5776 		aprint_error_dev(sc->sc_dev,
   5777 		    "unable to allocate TX control data, error = %d\n",
   5778 		    error);
   5779 		goto fail_0;
   5780 	}
   5781 
   5782 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5783 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5784 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5785 		aprint_error_dev(sc->sc_dev,
   5786 		    "unable to map TX control data, error = %d\n", error);
   5787 		goto fail_1;
   5788 	}
   5789 
   5790 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5791 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5792 		aprint_error_dev(sc->sc_dev,
   5793 		    "unable to create TX control data DMA map, error = %d\n",
   5794 		    error);
   5795 		goto fail_2;
   5796 	}
   5797 
   5798 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5799 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5800 		aprint_error_dev(sc->sc_dev,
   5801 		    "unable to load TX control data DMA map, error = %d\n",
   5802 		    error);
   5803 		goto fail_3;
   5804 	}
   5805 
   5806 	return 0;
   5807 
   5808  fail_3:
   5809 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5810  fail_2:
   5811 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5812 	    WM_TXDESCS_SIZE(txq));
   5813  fail_1:
   5814 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5815  fail_0:
   5816 	return error;
   5817 }
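
/*
 * The "same 4G segment" note above is enforced by the
 * bus_dmamem_alloc() calls: passing 0x100000000 as the boundary
 * argument guarantees the allocation never crosses a 4GB address
 * boundary, for both the TX and RX descriptor rings.
 */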
   5818 
   5819 static void
   5820 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5821 {
   5822 
   5823 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5824 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5825 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5826 	    WM_TXDESCS_SIZE(txq));
   5827 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5828 }
   5829 
   5830 static int
   5831 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5832 {
   5833 	int error;
   5834 	size_t rxq_descs_size;
   5835 
   5836 	/*
   5837 	 * Allocate the control data structures, and create and load the
   5838 	 * DMA map for it.
   5839 	 *
   5840 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5841 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5842 	 * both sets within the same 4G segment.
   5843 	 */
   5844 	rxq->rxq_ndesc = WM_NRXDESC;
   5845 	if (sc->sc_type == WM_T_82574)
   5846 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   5847 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5848 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   5849 	else
   5850 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   5851 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   5852 
   5853 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   5854 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5855 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5856 		aprint_error_dev(sc->sc_dev,
   5857 		    "unable to allocate RX control data, error = %d\n",
   5858 		    error);
   5859 		goto fail_0;
   5860 	}
   5861 
   5862 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5863 		    rxq->rxq_desc_rseg, rxq_descs_size,
   5864 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5865 		aprint_error_dev(sc->sc_dev,
   5866 		    "unable to map RX control data, error = %d\n", error);
   5867 		goto fail_1;
   5868 	}
   5869 
   5870 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   5871 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5872 		aprint_error_dev(sc->sc_dev,
   5873 		    "unable to create RX control data DMA map, error = %d\n",
   5874 		    error);
   5875 		goto fail_2;
   5876 	}
   5877 
   5878 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5879 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   5880 		aprint_error_dev(sc->sc_dev,
   5881 		    "unable to load RX control data DMA map, error = %d\n",
   5882 		    error);
   5883 		goto fail_3;
   5884 	}
   5885 
   5886 	return 0;
   5887 
   5888  fail_3:
   5889 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5890  fail_2:
   5891 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5892 	    rxq_descs_size);
   5893  fail_1:
   5894 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5895  fail_0:
   5896 	return error;
   5897 }
   5898 
   5899 static void
   5900 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5901 {
   5902 
   5903 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5904 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5905 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   5906 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   5907 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5908 }
   5909 
   5910 
   5911 static int
   5912 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5913 {
   5914 	int i, error;
   5915 
   5916 	/* Create the transmit buffer DMA maps. */
   5917 	WM_TXQUEUELEN(txq) =
   5918 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5919 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5920 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5921 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5922 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5923 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5924 			aprint_error_dev(sc->sc_dev,
   5925 			    "unable to create Tx DMA map %d, error = %d\n",
   5926 			    i, error);
   5927 			goto fail;
   5928 		}
   5929 	}
   5930 
   5931 	return 0;
   5932 
   5933  fail:
   5934 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5935 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5936 			bus_dmamap_destroy(sc->sc_dmat,
   5937 			    txq->txq_soft[i].txs_dmamap);
   5938 	}
   5939 	return error;
   5940 }
   5941 
   5942 static void
   5943 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5944 {
   5945 	int i;
   5946 
   5947 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5948 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5949 			bus_dmamap_destroy(sc->sc_dmat,
   5950 			    txq->txq_soft[i].txs_dmamap);
   5951 	}
   5952 }
   5953 
   5954 static int
   5955 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5956 {
   5957 	int i, error;
   5958 
   5959 	/* Create the receive buffer DMA maps. */
   5960 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5961 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5962 			    MCLBYTES, 0, 0,
   5963 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5964 			aprint_error_dev(sc->sc_dev,
   5965 			    "unable to create Rx DMA map %d error = %d\n",
   5966 			    i, error);
   5967 			goto fail;
   5968 		}
   5969 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5970 	}
   5971 
   5972 	return 0;
   5973 
   5974  fail:
   5975 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5976 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5977 			bus_dmamap_destroy(sc->sc_dmat,
   5978 			    rxq->rxq_soft[i].rxs_dmamap);
   5979 	}
   5980 	return error;
   5981 }
   5982 
   5983 static void
   5984 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5985 {
   5986 	int i;
   5987 
   5988 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   5989 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5990 			bus_dmamap_destroy(sc->sc_dmat,
   5991 			    rxq->rxq_soft[i].rxs_dmamap);
   5992 	}
   5993 }
   5994 
    5995 /*
    5996  * wm_alloc_txrx_queues:
    5997  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
    5998  */
   5999 static int
   6000 wm_alloc_txrx_queues(struct wm_softc *sc)
   6001 {
   6002 	int i, error, tx_done, rx_done;
   6003 
   6004 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6005 	    KM_SLEEP);
   6006 	if (sc->sc_queue == NULL) {
   6007 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
   6008 		error = ENOMEM;
   6009 		goto fail_0;
   6010 	}
   6011 
   6012 	/*
   6013 	 * For transmission
   6014 	 */
   6015 	error = 0;
   6016 	tx_done = 0;
   6017 	for (i = 0; i < sc->sc_nqueues; i++) {
   6018 #ifdef WM_EVENT_COUNTERS
   6019 		int j;
   6020 		const char *xname;
   6021 #endif
   6022 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6023 		txq->txq_sc = sc;
   6024 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6025 
   6026 		error = wm_alloc_tx_descs(sc, txq);
   6027 		if (error)
   6028 			break;
   6029 		error = wm_alloc_tx_buffer(sc, txq);
   6030 		if (error) {
   6031 			wm_free_tx_descs(sc, txq);
   6032 			break;
   6033 		}
   6034 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6035 		if (txq->txq_interq == NULL) {
   6036 			wm_free_tx_descs(sc, txq);
   6037 			wm_free_tx_buffer(sc, txq);
   6038 			error = ENOMEM;
   6039 			break;
   6040 		}
   6041 
   6042 #ifdef WM_EVENT_COUNTERS
   6043 		xname = device_xname(sc->sc_dev);
   6044 
   6045 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6046 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6047 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6048 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6049 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6050 
   6051 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6052 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6053 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6054 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6055 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6056 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6057 
   6058 		for (j = 0; j < WM_NTXSEGS; j++) {
   6059 			snprintf(txq->txq_txseg_evcnt_names[j],
   6060 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6061 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6062 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6063 		}
   6064 
   6065 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6066 
   6067 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6068 #endif /* WM_EVENT_COUNTERS */
   6069 
   6070 		tx_done++;
   6071 	}
   6072 	if (error)
   6073 		goto fail_1;
   6074 
    6075 	/*
    6076 	 * For receive
    6077 	 */
   6078 	error = 0;
   6079 	rx_done = 0;
   6080 	for (i = 0; i < sc->sc_nqueues; i++) {
   6081 #ifdef WM_EVENT_COUNTERS
   6082 		const char *xname;
   6083 #endif
   6084 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6085 		rxq->rxq_sc = sc;
   6086 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6087 
   6088 		error = wm_alloc_rx_descs(sc, rxq);
   6089 		if (error)
   6090 			break;
   6091 
   6092 		error = wm_alloc_rx_buffer(sc, rxq);
   6093 		if (error) {
   6094 			wm_free_rx_descs(sc, rxq);
   6095 			break;
   6096 		}
   6097 
   6098 #ifdef WM_EVENT_COUNTERS
   6099 		xname = device_xname(sc->sc_dev);
   6100 
   6101 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6102 
   6103 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6104 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6105 #endif /* WM_EVENT_COUNTERS */
   6106 
   6107 		rx_done++;
   6108 	}
   6109 	if (error)
   6110 		goto fail_2;
   6111 
   6112 	return 0;
   6113 
   6114  fail_2:
   6115 	for (i = 0; i < rx_done; i++) {
   6116 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6117 		wm_free_rx_buffer(sc, rxq);
   6118 		wm_free_rx_descs(sc, rxq);
   6119 		if (rxq->rxq_lock)
   6120 			mutex_obj_free(rxq->rxq_lock);
   6121 	}
   6122  fail_1:
   6123 	for (i = 0; i < tx_done; i++) {
   6124 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6125 		pcq_destroy(txq->txq_interq);
   6126 		wm_free_tx_buffer(sc, txq);
   6127 		wm_free_tx_descs(sc, txq);
   6128 		if (txq->txq_lock)
   6129 			mutex_obj_free(txq->txq_lock);
   6130 	}
   6131 
   6132 	kmem_free(sc->sc_queue,
   6133 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6134  fail_0:
   6135 	return error;
   6136 }
   6137 
    6138 /*
    6139  * wm_free_txrx_queues:
    6140  *	Free {tx,rx} descriptors and {tx,rx} buffers.
    6141  */
   6142 static void
   6143 wm_free_txrx_queues(struct wm_softc *sc)
   6144 {
   6145 	int i;
   6146 
   6147 	for (i = 0; i < sc->sc_nqueues; i++) {
   6148 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6149 
   6150 #ifdef WM_EVENT_COUNTERS
   6151 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6152 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6153 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6154 #endif /* WM_EVENT_COUNTERS */
   6155 
   6156 		wm_free_rx_buffer(sc, rxq);
   6157 		wm_free_rx_descs(sc, rxq);
   6158 		if (rxq->rxq_lock)
   6159 			mutex_obj_free(rxq->rxq_lock);
   6160 	}
   6161 
   6162 	for (i = 0; i < sc->sc_nqueues; i++) {
   6163 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6164 		struct mbuf *m;
   6165 #ifdef WM_EVENT_COUNTERS
   6166 		int j;
   6167 
   6168 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6169 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6170 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6171 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6172 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6173 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6174 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6175 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6176 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6177 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6178 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6179 
   6180 		for (j = 0; j < WM_NTXSEGS; j++)
   6181 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6182 
   6183 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6184 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6185 #endif /* WM_EVENT_COUNTERS */
   6186 
   6187 		/* drain txq_interq */
   6188 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6189 			m_freem(m);
   6190 		pcq_destroy(txq->txq_interq);
   6191 
   6192 		wm_free_tx_buffer(sc, txq);
   6193 		wm_free_tx_descs(sc, txq);
   6194 		if (txq->txq_lock)
   6195 			mutex_obj_free(txq->txq_lock);
   6196 	}
   6197 
   6198 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6199 }
   6200 
   6201 static void
   6202 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6203 {
   6204 
   6205 	KASSERT(mutex_owned(txq->txq_lock));
   6206 
   6207 	/* Initialize the transmit descriptor ring. */
   6208 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6209 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6210 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6211 	txq->txq_free = WM_NTXDESC(txq);
   6212 	txq->txq_next = 0;
   6213 }
   6214 
   6215 static void
   6216 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6217     struct wm_txqueue *txq)
   6218 {
   6219 
   6220 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6221 		device_xname(sc->sc_dev), __func__));
   6222 	KASSERT(mutex_owned(txq->txq_lock));
   6223 
   6224 	if (sc->sc_type < WM_T_82543) {
   6225 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6226 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6227 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6228 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6229 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6230 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6231 	} else {
   6232 		int qid = wmq->wmq_id;
   6233 
   6234 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6235 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6236 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6237 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6238 
   6239 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
    6240 			/*
    6241 			 * Don't write TDT before TCTL.EN is set.
    6242 			 * See the documentation.
    6243 			 */
   6244 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6245 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6246 			    | TXDCTL_WTHRESH(0));
   6247 		else {
   6248 			/* XXX should update with AIM? */
   6249 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6250 			if (sc->sc_type >= WM_T_82540) {
				/* Should be the same value as TIDV. */
   6252 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6253 			}
   6254 
   6255 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6256 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6257 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6258 		}
   6259 	}
   6260 }
   6261 
   6262 static void
   6263 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6264 {
   6265 	int i;
   6266 
   6267 	KASSERT(mutex_owned(txq->txq_lock));
   6268 
   6269 	/* Initialize the transmit job descriptors. */
   6270 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6271 		txq->txq_soft[i].txs_mbuf = NULL;
   6272 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6273 	txq->txq_snext = 0;
   6274 	txq->txq_sdirty = 0;
   6275 }
   6276 
   6277 static void
   6278 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6279     struct wm_txqueue *txq)
   6280 {
   6281 
   6282 	KASSERT(mutex_owned(txq->txq_lock));
   6283 
   6284 	/*
   6285 	 * Set up some register offsets that are different between
   6286 	 * the i82542 and the i82543 and later chips.
   6287 	 */
   6288 	if (sc->sc_type < WM_T_82543)
   6289 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6290 	else
   6291 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6292 
   6293 	wm_init_tx_descs(sc, txq);
   6294 	wm_init_tx_regs(sc, wmq, txq);
   6295 	wm_init_tx_buffer(sc, txq);
   6296 }
   6297 
   6298 static void
   6299 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6300     struct wm_rxqueue *rxq)
   6301 {
   6302 
   6303 	KASSERT(mutex_owned(rxq->rxq_lock));
   6304 
   6305 	/*
   6306 	 * Initialize the receive descriptor and receive job
   6307 	 * descriptor rings.
   6308 	 */
   6309 	if (sc->sc_type < WM_T_82543) {
   6310 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6311 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6312 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6313 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6314 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6315 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
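		/*
		 * 28 is the Rx interrupt delay in internal timer units
		 * (1.024 us); RDTR_FPD flushes a partially filled
		 * descriptor when the delay timer expires.
		 */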
   6316 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6317 
   6318 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6319 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6320 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6321 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6322 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6323 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6324 	} else {
   6325 		int qid = wmq->wmq_id;
   6326 
   6327 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6328 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
		CSR_WRITE(sc, WMREG_RDLEN(qid),
		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6330 
   6331 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
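			/*
			 * SRRCTL's BSIZEPKT field counts the buffer size
			 * in 1KB units (1 << SRRCTL_BSIZEPKT_SHIFT), so
			 * MCLBYTES must be a multiple of 1KB here.
			 */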
			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
				panic("%s: MCLBYTES %d unsupported for "
				    "82575 or higher\n", __func__, MCLBYTES);

			/*
			 * Currently, only SRRCTL_DESCTYPE_ADV_ONEBUF is
			 * supported.
			 */
			CSR_WRITE(sc, WMREG_SRRCTL(qid),
			    SRRCTL_DESCTYPE_ADV_ONEBUF
			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6338 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6339 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6340 			    | RXDCTL_WTHRESH(1));
   6341 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6342 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6343 		} else {
   6344 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6345 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6346 			/* XXX should update with AIM? */
   6347 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
			/* MUST be the same delay value as RDTR */
   6349 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6350 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6351 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6352 		}
   6353 	}
   6354 }
   6355 
   6356 static int
   6357 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6358 {
   6359 	struct wm_rxsoft *rxs;
   6360 	int error, i;
   6361 
   6362 	KASSERT(mutex_owned(rxq->rxq_lock));
   6363 
   6364 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6365 		rxs = &rxq->rxq_soft[i];
   6366 		if (rxs->rxs_mbuf == NULL) {
   6367 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6368 				log(LOG_ERR, "%s: unable to allocate or map "
   6369 				    "rx buffer %d, error = %d\n",
   6370 				    device_xname(sc->sc_dev), i, error);
   6371 				/*
   6372 				 * XXX Should attempt to run with fewer receive
   6373 				 * XXX buffers instead of just failing.
   6374 				 */
   6375 				wm_rxdrain(rxq);
   6376 				return ENOMEM;
   6377 			}
   6378 		} else {
   6379 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6380 				wm_init_rxdesc(rxq, i);
			/*
			 * For 82575 and newer devices, the RX descriptors
			 * must be initialized after RCTL.EN is set in
			 * wm_set_filter().
			 */
   6386 		}
   6387 	}
   6388 	rxq->rxq_ptr = 0;
   6389 	rxq->rxq_discard = 0;
   6390 	WM_RXCHAIN_RESET(rxq);
   6391 
   6392 	return 0;
   6393 }
   6394 
   6395 static int
   6396 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6397     struct wm_rxqueue *rxq)
   6398 {
   6399 
   6400 	KASSERT(mutex_owned(rxq->rxq_lock));
   6401 
   6402 	/*
   6403 	 * Set up some register offsets that are different between
   6404 	 * the i82542 and the i82543 and later chips.
   6405 	 */
   6406 	if (sc->sc_type < WM_T_82543)
   6407 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6408 	else
   6409 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6410 
   6411 	wm_init_rx_regs(sc, wmq, rxq);
   6412 	return wm_init_rx_buffer(sc, rxq);
   6413 }
   6414 
   6415 /*
 * wm_init_txrx_queues:
   6417  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6418  */
   6419 static int
   6420 wm_init_txrx_queues(struct wm_softc *sc)
   6421 {
   6422 	int i, error = 0;
   6423 
   6424 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6425 		device_xname(sc->sc_dev), __func__));
   6426 
   6427 	for (i = 0; i < sc->sc_nqueues; i++) {
   6428 		struct wm_queue *wmq = &sc->sc_queue[i];
   6429 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6430 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6431 
		/*
		 * TODO
		 * Currently, a constant value is used instead of AIM
		 * (adaptive interrupt moderation).  Furthermore, for
		 * multiqueue, which uses polling mode, the interrupt
		 * interval is set lower than the default value.
		 * More tuning, and eventually AIM, are required.
		 */
   6439 		if (wm_is_using_multiqueue(sc))
   6440 			wmq->wmq_itr = 50;
   6441 		else
   6442 			wmq->wmq_itr = sc->sc_itr_init;
   6443 		wmq->wmq_set_itr = true;
   6444 
   6445 		mutex_enter(txq->txq_lock);
   6446 		wm_init_tx_queue(sc, wmq, txq);
   6447 		mutex_exit(txq->txq_lock);
   6448 
   6449 		mutex_enter(rxq->rxq_lock);
   6450 		error = wm_init_rx_queue(sc, wmq, rxq);
   6451 		mutex_exit(rxq->rxq_lock);
   6452 		if (error)
   6453 			break;
   6454 	}
   6455 
   6456 	return error;
   6457 }
   6458 
   6459 /*
   6460  * wm_tx_offload:
   6461  *
   6462  *	Set up TCP/IP checksumming parameters for the
   6463  *	specified packet.
   6464  */
   6465 static int
   6466 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6467     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6468 {
   6469 	struct mbuf *m0 = txs->txs_mbuf;
   6470 	struct livengood_tcpip_ctxdesc *t;
   6471 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6472 	uint32_t ipcse;
   6473 	struct ether_header *eh;
   6474 	int offset, iphl;
   6475 	uint8_t fields;
   6476 
   6477 	/*
   6478 	 * XXX It would be nice if the mbuf pkthdr had offset
   6479 	 * fields for the protocol headers.
   6480 	 */
   6481 
   6482 	eh = mtod(m0, struct ether_header *);
   6483 	switch (htons(eh->ether_type)) {
   6484 	case ETHERTYPE_IP:
   6485 	case ETHERTYPE_IPV6:
   6486 		offset = ETHER_HDR_LEN;
   6487 		break;
   6488 
   6489 	case ETHERTYPE_VLAN:
   6490 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6491 		break;
   6492 
   6493 	default:
   6494 		/*
   6495 		 * Don't support this protocol or encapsulation.
   6496 		 */
   6497 		*fieldsp = 0;
   6498 		*cmdp = 0;
   6499 		return 0;
   6500 	}
   6501 
   6502 	if ((m0->m_pkthdr.csum_flags &
   6503 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6504 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6505 	} else {
   6506 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6507 	}
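	/*
	 * IPCSE is the offset of the last byte covered by the IP
	 * checksum, inclusive, hence the "- 1".
	 */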
   6508 	ipcse = offset + iphl - 1;
   6509 
   6510 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6511 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6512 	seg = 0;
   6513 	fields = 0;
   6514 
   6515 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6516 		int hlen = offset + iphl;
   6517 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6518 
   6519 		if (__predict_false(m0->m_len <
   6520 				    (hlen + sizeof(struct tcphdr)))) {
   6521 			/*
   6522 			 * TCP/IP headers are not in the first mbuf; we need
   6523 			 * to do this the slow and painful way.  Let's just
   6524 			 * hope this doesn't happen very often.
   6525 			 */
   6526 			struct tcphdr th;
   6527 
   6528 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6529 
   6530 			m_copydata(m0, hlen, sizeof(th), &th);
   6531 			if (v4) {
   6532 				struct ip ip;
   6533 
   6534 				m_copydata(m0, offset, sizeof(ip), &ip);
   6535 				ip.ip_len = 0;
   6536 				m_copyback(m0,
   6537 				    offset + offsetof(struct ip, ip_len),
   6538 				    sizeof(ip.ip_len), &ip.ip_len);
   6539 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6540 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6541 			} else {
   6542 				struct ip6_hdr ip6;
   6543 
   6544 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6545 				ip6.ip6_plen = 0;
   6546 				m_copyback(m0,
   6547 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6548 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6549 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6550 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6551 			}
   6552 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6553 			    sizeof(th.th_sum), &th.th_sum);
   6554 
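			/* th_off is the TCP header length in 32-bit words. */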
   6555 			hlen += th.th_off << 2;
   6556 		} else {
   6557 			/*
   6558 			 * TCP/IP headers are in the first mbuf; we can do
   6559 			 * this the easy way.
   6560 			 */
   6561 			struct tcphdr *th;
   6562 
   6563 			if (v4) {
   6564 				struct ip *ip =
   6565 				    (void *)(mtod(m0, char *) + offset);
   6566 				th = (void *)(mtod(m0, char *) + hlen);
   6567 
   6568 				ip->ip_len = 0;
   6569 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6570 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6571 			} else {
   6572 				struct ip6_hdr *ip6 =
   6573 				    (void *)(mtod(m0, char *) + offset);
   6574 				th = (void *)(mtod(m0, char *) + hlen);
   6575 
   6576 				ip6->ip6_plen = 0;
   6577 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6578 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6579 			}
   6580 			hlen += th->th_off << 2;
   6581 		}
   6582 
   6583 		if (v4) {
   6584 			WM_Q_EVCNT_INCR(txq, txtso);
   6585 			cmdlen |= WTX_TCPIP_CMD_IP;
   6586 		} else {
   6587 			WM_Q_EVCNT_INCR(txq, txtso6);
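			/* IPv6 has no header checksum; clear IPCSE. */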
   6588 			ipcse = 0;
   6589 		}
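		/*
		 * For TSO, the context descriptor carries the TCP payload
		 * length (the frame length minus all headers), the total
		 * header length and the MSS.
		 */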
   6590 		cmd |= WTX_TCPIP_CMD_TSE;
   6591 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6592 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6593 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6594 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6595 	}
   6596 
   6597 	/*
   6598 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6599 	 * offload feature, if we load the context descriptor, we
   6600 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6601 	 */
   6602 
   6603 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6604 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6605 	    WTX_TCPIP_IPCSE(ipcse);
   6606 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6607 		WM_Q_EVCNT_INCR(txq, txipsum);
   6608 		fields |= WTX_IXSM;
   6609 	}
   6610 
   6611 	offset += iphl;
   6612 
   6613 	if (m0->m_pkthdr.csum_flags &
   6614 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6615 		WM_Q_EVCNT_INCR(txq, txtusum);
   6616 		fields |= WTX_TXSM;
   6617 		tucs = WTX_TCPIP_TUCSS(offset) |
   6618 		    WTX_TCPIP_TUCSO(offset +
   6619 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6620 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6621 	} else if ((m0->m_pkthdr.csum_flags &
   6622 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6623 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6624 		fields |= WTX_TXSM;
   6625 		tucs = WTX_TCPIP_TUCSS(offset) |
   6626 		    WTX_TCPIP_TUCSO(offset +
   6627 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6628 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6629 	} else {
   6630 		/* Just initialize it to a valid TCP context. */
   6631 		tucs = WTX_TCPIP_TUCSS(offset) |
   6632 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6633 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6634 	}
   6635 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: there, a context descriptor must be written
	 * for every packet when two descriptor queues are used.
	 * Writing one for every packet adds some overhead, but it is
	 * harmless otherwise.
	 */
   6643 	/* Fill in the context descriptor. */
   6644 	t = (struct livengood_tcpip_ctxdesc *)
   6645 	    &txq->txq_descs[txq->txq_next];
   6646 	t->tcpip_ipcs = htole32(ipcs);
   6647 	t->tcpip_tucs = htole32(tucs);
   6648 	t->tcpip_cmdlen = htole32(cmdlen);
   6649 	t->tcpip_seg = htole32(seg);
   6650 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6651 
   6652 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6653 	txs->txs_ndesc++;
   6654 
   6655 	*cmdp = cmd;
   6656 	*fieldsp = fields;
   6657 
   6658 	return 0;
   6659 }
   6660 
   6661 static inline int
   6662 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6663 {
   6664 	struct wm_softc *sc = ifp->if_softc;
   6665 	u_int cpuid = cpu_index(curcpu());
   6666 
	/*
	 * Currently, a simple distribution strategy based on the
	 * sending CPU is used.
	 * TODO:
	 * distribute by flowid (the RSS hash value).
	 */
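	/*
	 * Adding ncpu keeps the dividend non-negative even when
	 * cpuid < sc_affinity_offset.
	 */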
	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6673 }
   6674 
   6675 /*
   6676  * wm_start:		[ifnet interface function]
   6677  *
   6678  *	Start packet transmission on the interface.
   6679  */
   6680 static void
   6681 wm_start(struct ifnet *ifp)
   6682 {
   6683 	struct wm_softc *sc = ifp->if_softc;
   6684 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6685 
   6686 #ifdef WM_MPSAFE
   6687 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6688 #endif
   6689 	/*
   6690 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6691 	 */
   6692 
   6693 	mutex_enter(txq->txq_lock);
   6694 	if (!txq->txq_stopping)
   6695 		wm_start_locked(ifp);
   6696 	mutex_exit(txq->txq_lock);
   6697 }
   6698 
   6699 static void
   6700 wm_start_locked(struct ifnet *ifp)
   6701 {
   6702 	struct wm_softc *sc = ifp->if_softc;
   6703 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6704 
   6705 	wm_send_common_locked(ifp, txq, false);
   6706 }
   6707 
   6708 static int
   6709 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6710 {
   6711 	int qid;
   6712 	struct wm_softc *sc = ifp->if_softc;
   6713 	struct wm_txqueue *txq;
   6714 
   6715 	qid = wm_select_txqueue(ifp, m);
   6716 	txq = &sc->sc_queue[qid].wmq_txq;
   6717 
   6718 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6719 		m_freem(m);
   6720 		WM_Q_EVCNT_INCR(txq, txdrop);
   6721 		return ENOBUFS;
   6722 	}
   6723 
   6724 	/*
   6725 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   6726 	 */
   6727 	ifp->if_obytes += m->m_pkthdr.len;
   6728 	if (m->m_flags & M_MCAST)
   6729 		ifp->if_omcasts++;
   6730 
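	/*
	 * If mutex_tryenter() fails, the lock holder will dequeue the
	 * packet just put on txq_interq via wm_deferred_start_locked(),
	 * so it is not stranded.
	 */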
   6731 	if (mutex_tryenter(txq->txq_lock)) {
   6732 		if (!txq->txq_stopping)
   6733 			wm_transmit_locked(ifp, txq);
   6734 		mutex_exit(txq->txq_lock);
   6735 	}
   6736 
   6737 	return 0;
   6738 }
   6739 
   6740 static void
   6741 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6742 {
   6743 
   6744 	wm_send_common_locked(ifp, txq, true);
   6745 }
   6746 
   6747 static void
   6748 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6749     bool is_transmit)
   6750 {
   6751 	struct wm_softc *sc = ifp->if_softc;
   6752 	struct mbuf *m0;
   6753 	struct m_tag *mtag;
   6754 	struct wm_txsoft *txs;
   6755 	bus_dmamap_t dmamap;
   6756 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6757 	bus_addr_t curaddr;
   6758 	bus_size_t seglen, curlen;
   6759 	uint32_t cksumcmd;
   6760 	uint8_t cksumfields;
   6761 
   6762 	KASSERT(mutex_owned(txq->txq_lock));
   6763 
   6764 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   6765 		return;
   6766 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   6767 		return;
   6768 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6769 		return;
   6770 
   6771 	/* Remember the previous number of free descriptors. */
   6772 	ofree = txq->txq_free;
   6773 
   6774 	/*
   6775 	 * Loop through the send queue, setting up transmit descriptors
   6776 	 * until we drain the queue, or use up all available transmit
   6777 	 * descriptors.
   6778 	 */
   6779 	for (;;) {
   6780 		m0 = NULL;
   6781 
   6782 		/* Get a work queue entry. */
   6783 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6784 			wm_txeof(sc, txq);
   6785 			if (txq->txq_sfree == 0) {
   6786 				DPRINTF(WM_DEBUG_TX,
   6787 				    ("%s: TX: no free job descriptors\n",
   6788 					device_xname(sc->sc_dev)));
   6789 				WM_Q_EVCNT_INCR(txq, txsstall);
   6790 				break;
   6791 			}
   6792 		}
   6793 
   6794 		/* Grab a packet off the queue. */
   6795 		if (is_transmit)
   6796 			m0 = pcq_get(txq->txq_interq);
   6797 		else
   6798 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6799 		if (m0 == NULL)
   6800 			break;
   6801 
   6802 		DPRINTF(WM_DEBUG_TX,
   6803 		    ("%s: TX: have packet to transmit: %p\n",
   6804 		    device_xname(sc->sc_dev), m0));
   6805 
   6806 		txs = &txq->txq_soft[txq->txq_snext];
   6807 		dmamap = txs->txs_dmamap;
   6808 
   6809 		use_tso = (m0->m_pkthdr.csum_flags &
   6810 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6811 
   6812 		/*
   6813 		 * So says the Linux driver:
   6814 		 * The controller does a simple calculation to make sure
   6815 		 * there is enough room in the FIFO before initiating the
   6816 		 * DMA for each buffer.  The calc is:
   6817 		 *	4 = ceil(buffer len / MSS)
   6818 		 * To make sure we don't overrun the FIFO, adjust the max
   6819 		 * buffer len if the MSS drops.
   6820 		 */
   6821 		dmamap->dm_maxsegsz =
   6822 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6823 		    ? m0->m_pkthdr.segsz << 2
   6824 		    : WTX_MAX_LEN;
   6825 
   6826 		/*
   6827 		 * Load the DMA map.  If this fails, the packet either
   6828 		 * didn't fit in the allotted number of segments, or we
   6829 		 * were short on resources.  For the too-many-segments
   6830 		 * case, we simply report an error and drop the packet,
   6831 		 * since we can't sanely copy a jumbo packet to a single
   6832 		 * buffer.
   6833 		 */
   6834 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6835 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6836 		if (error) {
   6837 			if (error == EFBIG) {
   6838 				WM_Q_EVCNT_INCR(txq, txdrop);
   6839 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6840 				    "DMA segments, dropping...\n",
   6841 				    device_xname(sc->sc_dev));
   6842 				wm_dump_mbuf_chain(sc, m0);
   6843 				m_freem(m0);
   6844 				continue;
   6845 			}
			/* Short on resources, just stop for now. */
   6847 			DPRINTF(WM_DEBUG_TX,
   6848 			    ("%s: TX: dmamap load failed: %d\n",
   6849 			    device_xname(sc->sc_dev), error));
   6850 			break;
   6851 		}
   6852 
   6853 		segs_needed = dmamap->dm_nsegs;
   6854 		if (use_tso) {
   6855 			/* For sentinel descriptor; see below. */
   6856 			segs_needed++;
   6857 		}
   6858 
   6859 		/*
   6860 		 * Ensure we have enough descriptors free to describe
   6861 		 * the packet.  Note, we always reserve one descriptor
   6862 		 * at the end of the ring due to the semantics of the
   6863 		 * TDT register, plus one more in the event we need
   6864 		 * to load offload context.
   6865 		 */
   6866 		if (segs_needed > txq->txq_free - 2) {
   6867 			/*
   6868 			 * Not enough free descriptors to transmit this
   6869 			 * packet.  We haven't committed anything yet,
   6870 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   6872 			 * layer that there are no more slots left.
   6873 			 */
   6874 			DPRINTF(WM_DEBUG_TX,
   6875 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6876 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6877 			    segs_needed, txq->txq_free - 1));
   6878 			if (!is_transmit)
   6879 				ifp->if_flags |= IFF_OACTIVE;
   6880 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6881 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6882 			WM_Q_EVCNT_INCR(txq, txdstall);
   6883 			break;
   6884 		}
   6885 
   6886 		/*
   6887 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6888 		 * once we know we can transmit the packet, since we
   6889 		 * do some internal FIFO space accounting here.
   6890 		 */
   6891 		if (sc->sc_type == WM_T_82547 &&
   6892 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6893 			DPRINTF(WM_DEBUG_TX,
   6894 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6895 			    device_xname(sc->sc_dev)));
   6896 			if (!is_transmit)
   6897 				ifp->if_flags |= IFF_OACTIVE;
   6898 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6899 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6900 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6901 			break;
   6902 		}
   6903 
   6904 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6905 
   6906 		DPRINTF(WM_DEBUG_TX,
   6907 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6908 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6909 
   6910 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6911 
   6912 		/*
   6913 		 * Store a pointer to the packet so that we can free it
   6914 		 * later.
   6915 		 *
   6916 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
   6918 		 * incremented by 1 if we do checksum offload (a descriptor
   6919 		 * is used to set the checksum context).
   6920 		 */
   6921 		txs->txs_mbuf = m0;
   6922 		txs->txs_firstdesc = txq->txq_next;
   6923 		txs->txs_ndesc = segs_needed;
   6924 
   6925 		/* Set up offload parameters for this packet. */
   6926 		if (m0->m_pkthdr.csum_flags &
   6927 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6928 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6929 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6930 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   6931 					  &cksumfields) != 0) {
   6932 				/* Error message already displayed. */
   6933 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6934 				continue;
   6935 			}
   6936 		} else {
   6937 			cksumcmd = 0;
   6938 			cksumfields = 0;
   6939 		}
   6940 
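		/*
		 * Enable the Tx interrupt delay (IDE) and FCS insertion
		 * (IFCS) for the data descriptors.
		 */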
   6941 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6942 
   6943 		/* Sync the DMA map. */
   6944 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6945 		    BUS_DMASYNC_PREWRITE);
   6946 
   6947 		/* Initialize the transmit descriptor. */
   6948 		for (nexttx = txq->txq_next, seg = 0;
   6949 		     seg < dmamap->dm_nsegs; seg++) {
   6950 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6951 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6952 			     seglen != 0;
   6953 			     curaddr += curlen, seglen -= curlen,
   6954 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6955 				curlen = seglen;
   6956 
   6957 				/*
   6958 				 * So says the Linux driver:
   6959 				 * Work around for premature descriptor
   6960 				 * write-backs in TSO mode.  Append a
   6961 				 * 4-byte sentinel descriptor.
   6962 				 */
   6963 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6964 				    curlen > 8)
   6965 					curlen -= 4;
   6966 
   6967 				wm_set_dma_addr(
   6968 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6969 				txq->txq_descs[nexttx].wtx_cmdlen
   6970 				    = htole32(cksumcmd | curlen);
   6971 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6972 				    = 0;
   6973 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6974 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   6976 				lasttx = nexttx;
   6977 
   6978 				DPRINTF(WM_DEBUG_TX,
   6979 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6980 				     "len %#04zx\n",
   6981 				    device_xname(sc->sc_dev), nexttx,
   6982 				    (uint64_t)curaddr, curlen));
   6983 			}
   6984 		}
   6985 
   6986 		KASSERT(lasttx != -1);
   6987 
   6988 		/*
   6989 		 * Set up the command byte on the last descriptor of
   6990 		 * the packet.  If we're in the interrupt delay window,
   6991 		 * delay the interrupt.
   6992 		 */
   6993 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6994 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6995 
   6996 		/*
   6997 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6998 		 * up the descriptor to encapsulate the packet for us.
   6999 		 *
   7000 		 * This is only valid on the last descriptor of the packet.
   7001 		 */
   7002 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7003 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7004 			    htole32(WTX_CMD_VLE);
   7005 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7006 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7007 		}
   7008 
   7009 		txs->txs_lastdesc = lasttx;
   7010 
   7011 		DPRINTF(WM_DEBUG_TX,
   7012 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7013 		    device_xname(sc->sc_dev),
   7014 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7015 
   7016 		/* Sync the descriptors we're using. */
   7017 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7018 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7019 
   7020 		/* Give the packet to the chip. */
   7021 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7022 
   7023 		DPRINTF(WM_DEBUG_TX,
   7024 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7025 
   7026 		DPRINTF(WM_DEBUG_TX,
   7027 		    ("%s: TX: finished transmitting packet, job %d\n",
   7028 		    device_xname(sc->sc_dev), txq->txq_snext));
   7029 
   7030 		/* Advance the tx pointer. */
   7031 		txq->txq_free -= txs->txs_ndesc;
   7032 		txq->txq_next = nexttx;
   7033 
   7034 		txq->txq_sfree--;
   7035 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7036 
   7037 		/* Pass the packet to any BPF listeners. */
   7038 		bpf_mtap(ifp, m0);
   7039 	}
   7040 
   7041 	if (m0 != NULL) {
   7042 		if (!is_transmit)
   7043 			ifp->if_flags |= IFF_OACTIVE;
   7044 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7045 		WM_Q_EVCNT_INCR(txq, txdrop);
   7046 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7047 			__func__));
   7048 		m_freem(m0);
   7049 	}
   7050 
   7051 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7052 		/* No more slots; notify upper layer. */
   7053 		if (!is_transmit)
   7054 			ifp->if_flags |= IFF_OACTIVE;
   7055 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7056 	}
   7057 
   7058 	if (txq->txq_free != ofree) {
   7059 		/* Set a watchdog timer in case the chip flakes out. */
   7060 		ifp->if_timer = 5;
   7061 	}
   7062 }
   7063 
   7064 /*
   7065  * wm_nq_tx_offload:
   7066  *
   7067  *	Set up TCP/IP checksumming parameters for the
   7068  *	specified packet, for NEWQUEUE devices
   7069  */
   7070 static int
   7071 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7072     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7073 {
   7074 	struct mbuf *m0 = txs->txs_mbuf;
   7075 	struct m_tag *mtag;
   7076 	uint32_t vl_len, mssidx, cmdc;
   7077 	struct ether_header *eh;
   7078 	int offset, iphl;
   7079 
   7080 	/*
   7081 	 * XXX It would be nice if the mbuf pkthdr had offset
   7082 	 * fields for the protocol headers.
   7083 	 */
   7084 	*cmdlenp = 0;
   7085 	*fieldsp = 0;
   7086 
   7087 	eh = mtod(m0, struct ether_header *);
   7088 	switch (htons(eh->ether_type)) {
   7089 	case ETHERTYPE_IP:
   7090 	case ETHERTYPE_IPV6:
   7091 		offset = ETHER_HDR_LEN;
   7092 		break;
   7093 
   7094 	case ETHERTYPE_VLAN:
   7095 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7096 		break;
   7097 
   7098 	default:
   7099 		/* Don't support this protocol or encapsulation. */
   7100 		*do_csum = false;
   7101 		return 0;
   7102 	}
   7103 	*do_csum = true;
   7104 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7105 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7106 
   7107 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7108 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7109 
   7110 	if ((m0->m_pkthdr.csum_flags &
   7111 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7112 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7113 	} else {
   7114 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7115 	}
   7116 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7117 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7118 
   7119 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7120 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7121 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7122 		*cmdlenp |= NQTX_CMD_VLE;
   7123 	}
   7124 
   7125 	mssidx = 0;
   7126 
   7127 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7128 		int hlen = offset + iphl;
   7129 		int tcp_hlen;
   7130 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7131 
   7132 		if (__predict_false(m0->m_len <
   7133 				    (hlen + sizeof(struct tcphdr)))) {
   7134 			/*
   7135 			 * TCP/IP headers are not in the first mbuf; we need
   7136 			 * to do this the slow and painful way.  Let's just
   7137 			 * hope this doesn't happen very often.
   7138 			 */
   7139 			struct tcphdr th;
   7140 
   7141 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7142 
   7143 			m_copydata(m0, hlen, sizeof(th), &th);
   7144 			if (v4) {
   7145 				struct ip ip;
   7146 
   7147 				m_copydata(m0, offset, sizeof(ip), &ip);
   7148 				ip.ip_len = 0;
   7149 				m_copyback(m0,
   7150 				    offset + offsetof(struct ip, ip_len),
   7151 				    sizeof(ip.ip_len), &ip.ip_len);
   7152 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7153 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7154 			} else {
   7155 				struct ip6_hdr ip6;
   7156 
   7157 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7158 				ip6.ip6_plen = 0;
   7159 				m_copyback(m0,
   7160 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7161 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7162 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7163 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7164 			}
   7165 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7166 			    sizeof(th.th_sum), &th.th_sum);
   7167 
   7168 			tcp_hlen = th.th_off << 2;
   7169 		} else {
   7170 			/*
   7171 			 * TCP/IP headers are in the first mbuf; we can do
   7172 			 * this the easy way.
   7173 			 */
   7174 			struct tcphdr *th;
   7175 
   7176 			if (v4) {
   7177 				struct ip *ip =
   7178 				    (void *)(mtod(m0, char *) + offset);
   7179 				th = (void *)(mtod(m0, char *) + hlen);
   7180 
   7181 				ip->ip_len = 0;
   7182 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7183 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7184 			} else {
   7185 				struct ip6_hdr *ip6 =
   7186 				    (void *)(mtod(m0, char *) + offset);
   7187 				th = (void *)(mtod(m0, char *) + hlen);
   7188 
   7189 				ip6->ip6_plen = 0;
   7190 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7191 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7192 			}
   7193 			tcp_hlen = th->th_off << 2;
   7194 		}
   7195 		hlen += tcp_hlen;
   7196 		*cmdlenp |= NQTX_CMD_TSE;
   7197 
   7198 		if (v4) {
   7199 			WM_Q_EVCNT_INCR(txq, txtso);
   7200 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7201 		} else {
   7202 			WM_Q_EVCNT_INCR(txq, txtso6);
   7203 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7204 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
		    & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7207 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7208 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7209 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7210 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7211 	} else {
   7212 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7213 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7214 	}
   7215 
   7216 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7217 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7218 		cmdc |= NQTXC_CMD_IP4;
   7219 	}
   7220 
   7221 	if (m0->m_pkthdr.csum_flags &
   7222 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7223 		WM_Q_EVCNT_INCR(txq, txtusum);
   7224 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7225 			cmdc |= NQTXC_CMD_TCP;
   7226 		} else {
   7227 			cmdc |= NQTXC_CMD_UDP;
   7228 		}
   7229 		cmdc |= NQTXC_CMD_IP4;
   7230 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7231 	}
   7232 	if (m0->m_pkthdr.csum_flags &
   7233 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7234 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7235 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7236 			cmdc |= NQTXC_CMD_TCP;
   7237 		} else {
   7238 			cmdc |= NQTXC_CMD_UDP;
   7239 		}
   7240 		cmdc |= NQTXC_CMD_IP6;
   7241 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7242 	}
   7243 
	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
	 * I354, I210 and I211; writing once per Tx queue is enough for
	 * these controllers.
	 * Writing one for every packet adds some overhead, but it is
	 * harmless otherwise.
	 */
   7252 	/* Fill in the context descriptor. */
   7253 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7254 	    htole32(vl_len);
   7255 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7256 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7257 	    htole32(cmdc);
   7258 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7259 	    htole32(mssidx);
   7260 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7261 	DPRINTF(WM_DEBUG_TX,
   7262 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7263 	    txq->txq_next, 0, vl_len));
   7264 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7265 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7266 	txs->txs_ndesc++;
   7267 	return 0;
   7268 }
   7269 
   7270 /*
   7271  * wm_nq_start:		[ifnet interface function]
   7272  *
   7273  *	Start packet transmission on the interface for NEWQUEUE devices
   7274  */
   7275 static void
   7276 wm_nq_start(struct ifnet *ifp)
   7277 {
   7278 	struct wm_softc *sc = ifp->if_softc;
   7279 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7280 
   7281 #ifdef WM_MPSAFE
   7282 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7283 #endif
   7284 	/*
   7285 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7286 	 */
   7287 
   7288 	mutex_enter(txq->txq_lock);
   7289 	if (!txq->txq_stopping)
   7290 		wm_nq_start_locked(ifp);
   7291 	mutex_exit(txq->txq_lock);
   7292 }
   7293 
   7294 static void
   7295 wm_nq_start_locked(struct ifnet *ifp)
   7296 {
   7297 	struct wm_softc *sc = ifp->if_softc;
   7298 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7299 
   7300 	wm_nq_send_common_locked(ifp, txq, false);
   7301 }
   7302 
   7303 static int
   7304 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7305 {
   7306 	int qid;
   7307 	struct wm_softc *sc = ifp->if_softc;
   7308 	struct wm_txqueue *txq;
   7309 
   7310 	qid = wm_select_txqueue(ifp, m);
   7311 	txq = &sc->sc_queue[qid].wmq_txq;
   7312 
   7313 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7314 		m_freem(m);
   7315 		WM_Q_EVCNT_INCR(txq, txdrop);
   7316 		return ENOBUFS;
   7317 	}
   7318 
   7319 	/*
   7320 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7321 	 */
   7322 	ifp->if_obytes += m->m_pkthdr.len;
   7323 	if (m->m_flags & M_MCAST)
   7324 		ifp->if_omcasts++;
   7325 
	/*
	 * There are two situations in which this mutex_tryenter() can
	 * fail at run time:
	 *     (1) contention with the interrupt handler
	 *         (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In both cases, the last packet enqueued to txq->txq_interq is
	 * eventually dequeued by wm_deferred_start_locked(), so it does
	 * not get stuck.
	 */
   7336 	if (mutex_tryenter(txq->txq_lock)) {
   7337 		if (!txq->txq_stopping)
   7338 			wm_nq_transmit_locked(ifp, txq);
   7339 		mutex_exit(txq->txq_lock);
   7340 	}
   7341 
   7342 	return 0;
   7343 }
   7344 
   7345 static void
   7346 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7347 {
   7348 
   7349 	wm_nq_send_common_locked(ifp, txq, true);
   7350 }
   7351 
   7352 static void
   7353 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7354     bool is_transmit)
   7355 {
   7356 	struct wm_softc *sc = ifp->if_softc;
   7357 	struct mbuf *m0;
   7358 	struct m_tag *mtag;
   7359 	struct wm_txsoft *txs;
   7360 	bus_dmamap_t dmamap;
   7361 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7362 	bool do_csum, sent;
   7363 
   7364 	KASSERT(mutex_owned(txq->txq_lock));
   7365 
   7366 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7367 		return;
   7368 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7369 		return;
   7370 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7371 		return;
   7372 
   7373 	sent = false;
   7374 
   7375 	/*
   7376 	 * Loop through the send queue, setting up transmit descriptors
   7377 	 * until we drain the queue, or use up all available transmit
   7378 	 * descriptors.
   7379 	 */
   7380 	for (;;) {
   7381 		m0 = NULL;
   7382 
   7383 		/* Get a work queue entry. */
   7384 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7385 			wm_txeof(sc, txq);
   7386 			if (txq->txq_sfree == 0) {
   7387 				DPRINTF(WM_DEBUG_TX,
   7388 				    ("%s: TX: no free job descriptors\n",
   7389 					device_xname(sc->sc_dev)));
   7390 				WM_Q_EVCNT_INCR(txq, txsstall);
   7391 				break;
   7392 			}
   7393 		}
   7394 
   7395 		/* Grab a packet off the queue. */
   7396 		if (is_transmit)
   7397 			m0 = pcq_get(txq->txq_interq);
   7398 		else
   7399 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7400 		if (m0 == NULL)
   7401 			break;
   7402 
   7403 		DPRINTF(WM_DEBUG_TX,
   7404 		    ("%s: TX: have packet to transmit: %p\n",
   7405 		    device_xname(sc->sc_dev), m0));
   7406 
   7407 		txs = &txq->txq_soft[txq->txq_snext];
   7408 		dmamap = txs->txs_dmamap;
   7409 
   7410 		/*
   7411 		 * Load the DMA map.  If this fails, the packet either
   7412 		 * didn't fit in the allotted number of segments, or we
   7413 		 * were short on resources.  For the too-many-segments
   7414 		 * case, we simply report an error and drop the packet,
   7415 		 * since we can't sanely copy a jumbo packet to a single
   7416 		 * buffer.
   7417 		 */
   7418 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7419 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7420 		if (error) {
   7421 			if (error == EFBIG) {
   7422 				WM_Q_EVCNT_INCR(txq, txdrop);
   7423 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7424 				    "DMA segments, dropping...\n",
   7425 				    device_xname(sc->sc_dev));
   7426 				wm_dump_mbuf_chain(sc, m0);
   7427 				m_freem(m0);
   7428 				continue;
   7429 			}
   7430 			/* Short on resources, just stop for now. */
   7431 			DPRINTF(WM_DEBUG_TX,
   7432 			    ("%s: TX: dmamap load failed: %d\n",
   7433 			    device_xname(sc->sc_dev), error));
   7434 			break;
   7435 		}
   7436 
   7437 		segs_needed = dmamap->dm_nsegs;
   7438 
   7439 		/*
   7440 		 * Ensure we have enough descriptors free to describe
   7441 		 * the packet.  Note, we always reserve one descriptor
   7442 		 * at the end of the ring due to the semantics of the
   7443 		 * TDT register, plus one more in the event we need
   7444 		 * to load offload context.
   7445 		 */
   7446 		if (segs_needed > txq->txq_free - 2) {
   7447 			/*
   7448 			 * Not enough free descriptors to transmit this
   7449 			 * packet.  We haven't committed anything yet,
   7450 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
   7452 			 * layer that there are no more slots left.
   7453 			 */
   7454 			DPRINTF(WM_DEBUG_TX,
   7455 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7456 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7457 			    segs_needed, txq->txq_free - 1));
   7458 			if (!is_transmit)
   7459 				ifp->if_flags |= IFF_OACTIVE;
   7460 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7461 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7462 			WM_Q_EVCNT_INCR(txq, txdstall);
   7463 			break;
   7464 		}
   7465 
   7466 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7467 
   7468 		DPRINTF(WM_DEBUG_TX,
   7469 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7470 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7471 
   7472 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7473 
   7474 		/*
   7475 		 * Store a pointer to the packet so that we can free it
   7476 		 * later.
   7477 		 *
   7478 		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
   7480 		 * incremented by 1 if we do checksum offload (a descriptor
   7481 		 * is used to set the checksum context).
   7482 		 */
   7483 		txs->txs_mbuf = m0;
   7484 		txs->txs_firstdesc = txq->txq_next;
   7485 		txs->txs_ndesc = segs_needed;
   7486 
   7487 		/* Set up offload parameters for this packet. */
   7488 		uint32_t cmdlen, fields, dcmdlen;
   7489 		if (m0->m_pkthdr.csum_flags &
   7490 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7491 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7492 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7493 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7494 			    &do_csum) != 0) {
   7495 				/* Error message already displayed. */
   7496 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7497 				continue;
   7498 			}
   7499 		} else {
   7500 			do_csum = false;
   7501 			cmdlen = 0;
   7502 			fields = 0;
   7503 		}
   7504 
   7505 		/* Sync the DMA map. */
   7506 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7507 		    BUS_DMASYNC_PREWRITE);
   7508 
   7509 		/* Initialize the first transmit descriptor. */
   7510 		nexttx = txq->txq_next;
   7511 		if (!do_csum) {
			/* Set up a legacy descriptor. */
   7513 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7514 			    dmamap->dm_segs[0].ds_addr);
   7515 			txq->txq_descs[nexttx].wtx_cmdlen =
   7516 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7517 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7518 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7519 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7520 			    NULL) {
   7521 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7522 				    htole32(WTX_CMD_VLE);
   7523 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7524 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7525 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
   7527 			}
   7528 			dcmdlen = 0;
   7529 		} else {
			/* Set up an advanced data descriptor. */
   7531 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7532 			    htole64(dmamap->dm_segs[0].ds_addr);
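			/*
			 * The segment length goes in the low bits of the
			 * cmdlen field, so the command bits must not
			 * overlap it.
			 */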
   7533 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7534 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7536 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7537 			    htole32(fields);
   7538 			DPRINTF(WM_DEBUG_TX,
   7539 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7540 			    device_xname(sc->sc_dev), nexttx,
   7541 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7542 			DPRINTF(WM_DEBUG_TX,
   7543 			    ("\t 0x%08x%08x\n", fields,
   7544 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7545 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7546 		}
   7547 
   7548 		lasttx = nexttx;
   7549 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors.  The legacy and advanced
		 * formats are the same from here on.
		 */
   7554 		for (seg = 1; seg < dmamap->dm_nsegs;
   7555 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7556 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7557 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7558 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7559 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7560 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7561 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7562 			lasttx = nexttx;
   7563 
   7564 			DPRINTF(WM_DEBUG_TX,
   7565 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7566 			     "len %#04zx\n",
   7567 			    device_xname(sc->sc_dev), nexttx,
   7568 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7569 			    dmamap->dm_segs[seg].ds_len));
   7570 		}
   7571 
   7572 		KASSERT(lasttx != -1);
   7573 
   7574 		/*
   7575 		 * Set up the command byte on the last descriptor of
   7576 		 * the packet.  If we're in the interrupt delay window,
   7577 		 * delay the interrupt.
   7578 		 */
   7579 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7580 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7581 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7582 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7583 
   7584 		txs->txs_lastdesc = lasttx;
   7585 
   7586 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7587 		    device_xname(sc->sc_dev),
   7588 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7589 
   7590 		/* Sync the descriptors we're using. */
   7591 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7592 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7593 
   7594 		/* Give the packet to the chip. */
   7595 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7596 		sent = true;
   7597 
   7598 		DPRINTF(WM_DEBUG_TX,
   7599 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7600 
   7601 		DPRINTF(WM_DEBUG_TX,
   7602 		    ("%s: TX: finished transmitting packet, job %d\n",
   7603 		    device_xname(sc->sc_dev), txq->txq_snext));
   7604 
   7605 		/* Advance the tx pointer. */
   7606 		txq->txq_free -= txs->txs_ndesc;
   7607 		txq->txq_next = nexttx;
   7608 
   7609 		txq->txq_sfree--;
   7610 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7611 
   7612 		/* Pass the packet to any BPF listeners. */
   7613 		bpf_mtap(ifp, m0);
   7614 	}
   7615 
   7616 	if (m0 != NULL) {
   7617 		if (!is_transmit)
   7618 			ifp->if_flags |= IFF_OACTIVE;
   7619 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7620 		WM_Q_EVCNT_INCR(txq, txdrop);
   7621 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7622 			__func__));
   7623 		m_freem(m0);
   7624 	}
   7625 
   7626 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7627 		/* No more slots; notify upper layer. */
   7628 		if (!is_transmit)
   7629 			ifp->if_flags |= IFF_OACTIVE;
   7630 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7631 	}
   7632 
   7633 	if (sent) {
   7634 		/* Set a watchdog timer in case the chip flakes out. */
   7635 		ifp->if_timer = 5;
   7636 	}
   7637 }
   7638 
   7639 static void
   7640 wm_deferred_start_locked(struct wm_txqueue *txq)
   7641 {
   7642 	struct wm_softc *sc = txq->txq_sc;
   7643 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7644 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7645 	int qid = wmq->wmq_id;
   7646 
   7647 	KASSERT(mutex_owned(txq->txq_lock));
   7648 
	/*
	 * The caller holds txq_lock and releases it after we return;
	 * don't drop the lock here, or the caller's mutex_exit() would
	 * unlock an already-unlocked mutex.
	 */
	if (txq->txq_stopping)
		return;
   7653 
   7654 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX needed for ALTQ or single-CPU systems */
   7656 		if (qid == 0)
   7657 			wm_nq_start_locked(ifp);
   7658 		wm_nq_transmit_locked(ifp, txq);
   7659 	} else {
		/* XXX needed for ALTQ or single-CPU systems */
   7661 		if (qid == 0)
   7662 			wm_start_locked(ifp);
   7663 		wm_transmit_locked(ifp, txq);
   7664 	}
   7665 }
   7666 
   7667 /* Interrupt */
   7668 
   7669 /*
   7670  * wm_txeof:
   7671  *
   7672  *	Helper; handle transmit interrupts.
   7673  */
   7674 static int
   7675 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7676 {
   7677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7678 	struct wm_txsoft *txs;
   7679 	bool processed = false;
   7680 	int count = 0;
   7681 	int i;
   7682 	uint8_t status;
   7683 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7684 
   7685 	KASSERT(mutex_owned(txq->txq_lock));
   7686 
   7687 	if (txq->txq_stopping)
   7688 		return 0;
   7689 
   7690 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   7692 	if (wmq->wmq_id == 0)
   7693 		ifp->if_flags &= ~IFF_OACTIVE;
   7694 
   7695 	/*
   7696 	 * Go through the Tx list and free mbufs for those
   7697 	 * frames which have been transmitted.
   7698 	 */
   7699 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7700 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7701 		txs = &txq->txq_soft[i];
   7702 
   7703 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7704 			device_xname(sc->sc_dev), i));
   7705 
   7706 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7707 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7708 
   7709 		status =
   7710 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
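		/*
		 * The descriptor-done (DD) bit is not set yet, so this
		 * job is still in flight; stop scanning.
		 */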
   7711 		if ((status & WTX_ST_DD) == 0) {
   7712 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7713 			    BUS_DMASYNC_PREREAD);
   7714 			break;
   7715 		}
   7716 
   7717 		processed = true;
   7718 		count++;
   7719 		DPRINTF(WM_DEBUG_TX,
   7720 		    ("%s: TX: job %d done: descs %d..%d\n",
   7721 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7722 		    txs->txs_lastdesc));
   7723 
   7724 		/*
   7725 		 * XXX We should probably be using the statistics
   7726 		 * XXX registers, but I don't know if they exist
   7727 		 * XXX on chips before the i82544.
   7728 		 */
   7729 
   7730 #ifdef WM_EVENT_COUNTERS
   7731 		if (status & WTX_ST_TU)
   7732 			WM_Q_EVCNT_INCR(txq, tu);
   7733 #endif /* WM_EVENT_COUNTERS */
   7734 
   7735 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7736 			ifp->if_oerrors++;
   7737 			if (status & WTX_ST_LC)
   7738 				log(LOG_WARNING, "%s: late collision\n",
   7739 				    device_xname(sc->sc_dev));
   7740 			else if (status & WTX_ST_EC) {
   7741 				ifp->if_collisions += 16;
   7742 				log(LOG_WARNING, "%s: excessive collisions\n",
   7743 				    device_xname(sc->sc_dev));
   7744 			}
   7745 		} else
   7746 			ifp->if_opackets++;
   7747 
   7748 		txq->txq_packets++;
   7749 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   7750 
   7751 		txq->txq_free += txs->txs_ndesc;
   7752 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7753 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7754 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7755 		m_freem(txs->txs_mbuf);
   7756 		txs->txs_mbuf = NULL;
   7757 	}
   7758 
   7759 	/* Update the dirty transmit buffer pointer. */
   7760 	txq->txq_sdirty = i;
   7761 	DPRINTF(WM_DEBUG_TX,
   7762 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7763 
   7764 	if (count != 0)
   7765 		rnd_add_uint32(&sc->rnd_source, count);
   7766 
   7767 	/*
   7768 	 * If there are no more pending transmissions, cancel the watchdog
   7769 	 * timer.
   7770 	 */
   7771 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7772 		ifp->if_timer = 0;
   7773 
   7774 	return processed;
   7775 }
   7776 
   7777 static inline uint32_t
   7778 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   7779 {
   7780 	struct wm_softc *sc = rxq->rxq_sc;
   7781 
   7782 	if (sc->sc_type == WM_T_82574)
   7783 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7784 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7785 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7786 	else
   7787 		return rxq->rxq_descs[idx].wrx_status;
   7788 }
   7789 
   7790 static inline uint32_t
   7791 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   7792 {
   7793 	struct wm_softc *sc = rxq->rxq_sc;
   7794 
   7795 	if (sc->sc_type == WM_T_82574)
   7796 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   7797 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7798 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   7799 	else
   7800 		return rxq->rxq_descs[idx].wrx_errors;
   7801 }
   7802 
   7803 static inline uint16_t
   7804 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   7805 {
   7806 	struct wm_softc *sc = rxq->rxq_sc;
   7807 
   7808 	if (sc->sc_type == WM_T_82574)
   7809 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   7810 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7811 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   7812 	else
   7813 		return rxq->rxq_descs[idx].wrx_special;
   7814 }
   7815 
   7816 static inline int
   7817 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   7818 {
   7819 	struct wm_softc *sc = rxq->rxq_sc;
   7820 
   7821 	if (sc->sc_type == WM_T_82574)
   7822 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   7823 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7824 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   7825 	else
   7826 		return rxq->rxq_descs[idx].wrx_len;
   7827 }
   7828 
   7829 #ifdef WM_DEBUG
   7830 static inline uint32_t
   7831 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   7832 {
   7833 	struct wm_softc *sc = rxq->rxq_sc;
   7834 
   7835 	if (sc->sc_type == WM_T_82574)
   7836 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   7837 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7838 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   7839 	else
   7840 		return 0;
   7841 }
   7842 
   7843 static inline uint8_t
   7844 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   7845 {
   7846 	struct wm_softc *sc = rxq->rxq_sc;
   7847 
   7848 	if (sc->sc_type == WM_T_82574)
   7849 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   7850 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7851 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   7852 	else
   7853 		return 0;
   7854 }
   7855 #endif /* WM_DEBUG */
   7856 
   7857 static inline bool
   7858 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   7859     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7860 {
   7861 
   7862 	if (sc->sc_type == WM_T_82574)
   7863 		return (status & ext_bit) != 0;
   7864 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7865 		return (status & nq_bit) != 0;
   7866 	else
   7867 		return (status & legacy_bit) != 0;
   7868 }
   7869 
   7870 static inline bool
   7871 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   7872     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   7873 {
   7874 
   7875 	if (sc->sc_type == WM_T_82574)
   7876 		return (error & ext_bit) != 0;
   7877 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7878 		return (error & nq_bit) != 0;
   7879 	else
   7880 		return (error & legacy_bit) != 0;
   7881 }
   7882 
   7883 static inline bool
   7884 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   7885 {
   7886 
   7887 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7888 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   7889 		return true;
   7890 	else
   7891 		return false;
   7892 }
   7893 
   7894 static inline bool
   7895 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   7896 {
   7897 	struct wm_softc *sc = rxq->rxq_sc;
   7898 
   7899 	/* XXXX missing error bit for newqueue? */
   7900 	if (wm_rxdesc_is_set_error(sc, errors,
   7901 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   7902 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   7903 		NQRXC_ERROR_RXE)) {
   7904 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   7905 			log(LOG_WARNING, "%s: symbol error\n",
   7906 			    device_xname(sc->sc_dev));
   7907 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   7908 			log(LOG_WARNING, "%s: receive sequence error\n",
   7909 			    device_xname(sc->sc_dev));
   7910 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   7911 			log(LOG_WARNING, "%s: CRC error\n",
   7912 			    device_xname(sc->sc_dev));
   7913 		return true;
   7914 	}
   7915 
   7916 	return false;
   7917 }
   7918 
   7919 static inline bool
   7920 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   7921 {
   7922 	struct wm_softc *sc = rxq->rxq_sc;
   7923 
   7924 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   7925 		NQRXC_STATUS_DD)) {
   7926 		/* We have processed all of the receive descriptors. */
   7927 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   7928 		return false;
   7929 	}
   7930 
   7931 	return true;
   7932 }
   7933 
   7934 static inline bool
   7935 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   7936     struct mbuf *m)
   7937 {
   7938 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   7939 
   7940 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   7941 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   7942 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   7943 	}
   7944 
   7945 	return true;
   7946 }
   7947 
   7948 static inline void
   7949 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   7950     uint32_t errors, struct mbuf *m)
   7951 {
   7952 	struct wm_softc *sc = rxq->rxq_sc;
   7953 
   7954 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   7955 		if (wm_rxdesc_is_set_status(sc, status,
   7956 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   7957 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   7958 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7959 			if (wm_rxdesc_is_set_error(sc, errors,
   7960 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   7961 				m->m_pkthdr.csum_flags |=
   7962 					M_CSUM_IPv4_BAD;
   7963 		}
   7964 		if (wm_rxdesc_is_set_status(sc, status,
   7965 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   7966 			/*
   7967 			 * Note: we don't know if this was TCP or UDP,
   7968 			 * so we just set both bits, and expect the
   7969 			 * upper layers to deal.
   7970 			 */
   7971 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   7972 			m->m_pkthdr.csum_flags |=
   7973 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7974 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7975 			if (wm_rxdesc_is_set_error(sc, errors,
   7976 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   7977 				m->m_pkthdr.csum_flags |=
   7978 					M_CSUM_TCP_UDP_BAD;
   7979 		}
   7980 	}
   7981 }
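
/*
 * Editor's note: on receive, an asserted M_CSUM_* flag means the
 * hardware already verified that checksum; the *_BAD variants mark a
 * checksum that was verified and failed, so the upper layers can drop
 * the packet without recomputing it.  If the IXSM ("ignore checksum
 * indication") status bit is set, no flags are asserted at all.  Since
 * the descriptor does not distinguish TCP from UDP (or v4 from v6),
 * wm_rxdesc_ensure_checksum() sets all four L4 flags and lets the
 * protocol input routine pick the one that matches the packet.
 */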
   7982 
   7983 /*
   7984  * wm_rxeof:
   7985  *
   7986  *	Helper; handle receive interrupts.
   7987  */
   7988 static void
   7989 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   7990 {
   7991 	struct wm_softc *sc = rxq->rxq_sc;
   7992 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7993 	struct wm_rxsoft *rxs;
   7994 	struct mbuf *m;
   7995 	int i, len;
   7996 	int count = 0;
   7997 	uint32_t status, errors;
   7998 	uint16_t vlantag;
   7999 
   8000 	KASSERT(mutex_owned(rxq->rxq_lock));
   8001 
   8002 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8003 		if (limit-- == 0) {
   8004 			rxq->rxq_ptr = i;
   8005 			break;
   8006 		}
   8007 
   8008 		rxs = &rxq->rxq_soft[i];
   8009 
   8010 		DPRINTF(WM_DEBUG_RX,
   8011 		    ("%s: RX: checking descriptor %d\n",
   8012 		    device_xname(sc->sc_dev), i));
    8013 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   8014 
   8015 		status = wm_rxdesc_get_status(rxq, i);
   8016 		errors = wm_rxdesc_get_errors(rxq, i);
   8017 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8018 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8019 #ifdef WM_DEBUG
   8020 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8021 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8022 #endif
   8023 
   8024 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8025 			/*
    8026 			 * Update the receive pointer while holding
    8027 			 * rxq_lock, so it stays consistent with the counters.
   8028 			 */
   8029 			rxq->rxq_ptr = i;
   8030 			break;
   8031 		}
   8032 
   8033 		count++;
   8034 		if (__predict_false(rxq->rxq_discard)) {
   8035 			DPRINTF(WM_DEBUG_RX,
   8036 			    ("%s: RX: discarding contents of descriptor %d\n",
   8037 			    device_xname(sc->sc_dev), i));
   8038 			wm_init_rxdesc(rxq, i);
   8039 			if (wm_rxdesc_is_eop(rxq, status)) {
   8040 				/* Reset our state. */
   8041 				DPRINTF(WM_DEBUG_RX,
   8042 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8043 				    device_xname(sc->sc_dev)));
   8044 				rxq->rxq_discard = 0;
   8045 			}
   8046 			continue;
   8047 		}
   8048 
   8049 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8050 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8051 
   8052 		m = rxs->rxs_mbuf;
   8053 
   8054 		/*
   8055 		 * Add a new receive buffer to the ring, unless of
   8056 		 * course the length is zero. Treat the latter as a
   8057 		 * failed mapping.
   8058 		 */
   8059 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8060 			/*
   8061 			 * Failed, throw away what we've done so
   8062 			 * far, and discard the rest of the packet.
   8063 			 */
   8064 			ifp->if_ierrors++;
   8065 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8066 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8067 			wm_init_rxdesc(rxq, i);
   8068 			if (!wm_rxdesc_is_eop(rxq, status))
   8069 				rxq->rxq_discard = 1;
   8070 			if (rxq->rxq_head != NULL)
   8071 				m_freem(rxq->rxq_head);
   8072 			WM_RXCHAIN_RESET(rxq);
   8073 			DPRINTF(WM_DEBUG_RX,
   8074 			    ("%s: RX: Rx buffer allocation failed, "
   8075 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8076 			    rxq->rxq_discard ? " (discard)" : ""));
   8077 			continue;
   8078 		}
   8079 
   8080 		m->m_len = len;
   8081 		rxq->rxq_len += len;
   8082 		DPRINTF(WM_DEBUG_RX,
   8083 		    ("%s: RX: buffer at %p len %d\n",
   8084 		    device_xname(sc->sc_dev), m->m_data, len));
   8085 
   8086 		/* If this is not the end of the packet, keep looking. */
   8087 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8088 			WM_RXCHAIN_LINK(rxq, m);
   8089 			DPRINTF(WM_DEBUG_RX,
   8090 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8091 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8092 			continue;
   8093 		}
   8094 
    8095 		/*
    8096 		 * Okay, we have the entire packet now.  The chip is
    8097 		 * configured to include the FCS except on the I350,
    8098 		 * I354 and I21[01] (not all chips can be configured
    8099 		 * to strip it), so we need to trim it here.
    8100 		 * We may also need to shorten the previous mbuf in the
    8101 		 * chain if the final mbuf is shorter than the FCS.
    8102 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8103 		 * register is always set on the I350, so we don't trim.
    8104 		 */
   8105 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8106 		    && (sc->sc_type != WM_T_I210)
   8107 		    && (sc->sc_type != WM_T_I211)) {
   8108 			if (m->m_len < ETHER_CRC_LEN) {
   8109 				rxq->rxq_tail->m_len
   8110 				    -= (ETHER_CRC_LEN - m->m_len);
   8111 				m->m_len = 0;
   8112 			} else
   8113 				m->m_len -= ETHER_CRC_LEN;
   8114 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8115 		} else
   8116 			len = rxq->rxq_len;
   8117 
   8118 		WM_RXCHAIN_LINK(rxq, m);
   8119 
   8120 		*rxq->rxq_tailp = NULL;
   8121 		m = rxq->rxq_head;
   8122 
   8123 		WM_RXCHAIN_RESET(rxq);
   8124 
   8125 		DPRINTF(WM_DEBUG_RX,
   8126 		    ("%s: RX: have entire packet, len -> %d\n",
   8127 		    device_xname(sc->sc_dev), len));
   8128 
   8129 		/* If an error occurred, update stats and drop the packet. */
   8130 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8131 			m_freem(m);
   8132 			continue;
   8133 		}
   8134 
   8135 		/* No errors.  Receive the packet. */
   8136 		m_set_rcvif(m, ifp);
   8137 		m->m_pkthdr.len = len;
   8138 		/*
   8139 		 * TODO
    8140 		 * we should save the rsshash and rsstype in this mbuf.
   8141 		 */
   8142 		DPRINTF(WM_DEBUG_RX,
   8143 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8144 			device_xname(sc->sc_dev), rsstype, rsshash));
   8145 
   8146 		/*
   8147 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8148 		 * for us.  Associate the tag with the packet.
   8149 		 */
   8150 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8151 			continue;
   8152 
   8153 		/* Set up checksum info for this packet. */
   8154 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8155 		/*
    8156 		 * Update the receive pointer while holding rxq_lock, so it
    8157 		 * stays consistent with the counters.
   8158 		 */
   8159 		rxq->rxq_ptr = i;
   8160 		rxq->rxq_packets++;
   8161 		rxq->rxq_bytes += len;
   8162 		mutex_exit(rxq->rxq_lock);
   8163 
   8164 		/* Pass it on. */
   8165 		if_percpuq_enqueue(sc->sc_ipq, m);
   8166 
   8167 		mutex_enter(rxq->rxq_lock);
   8168 
   8169 		if (rxq->rxq_stopping)
   8170 			break;
   8171 	}
   8172 
   8173 	if (count != 0)
   8174 		rnd_add_uint32(&sc->rnd_source, count);
   8175 
   8176 	DPRINTF(WM_DEBUG_RX,
   8177 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8178 }
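
/*
 * Editor's note: the 'limit' argument bounds how many descriptors one
 * call of wm_rxeof() may process: the legacy interrupt path passes
 * UINT_MAX, the MSI-X interrupt path passes sc_rx_intr_process_limit,
 * and the deferred softint path (wm_handle_queue()) passes
 * sc_rx_process_limit, so a busy ring is drained incrementally rather
 * than monopolizing one interrupt context.
 */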
   8179 
   8180 /*
   8181  * wm_linkintr_gmii:
   8182  *
   8183  *	Helper; handle link interrupts for GMII.
   8184  */
   8185 static void
   8186 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8187 {
   8188 
   8189 	KASSERT(WM_CORE_LOCKED(sc));
   8190 
   8191 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8192 		__func__));
   8193 
   8194 	if (icr & ICR_LSC) {
   8195 		uint32_t reg;
   8196 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8197 
   8198 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8199 			wm_gig_downshift_workaround_ich8lan(sc);
   8200 
   8201 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8202 			device_xname(sc->sc_dev)));
   8203 		mii_pollstat(&sc->sc_mii);
   8204 		if (sc->sc_type == WM_T_82543) {
   8205 			int miistatus, active;
   8206 
   8207 			/*
   8208 			 * With 82543, we need to force speed and
   8209 			 * duplex on the MAC equal to what the PHY
   8210 			 * speed and duplex configuration is.
   8211 			 */
   8212 			miistatus = sc->sc_mii.mii_media_status;
   8213 
   8214 			if (miistatus & IFM_ACTIVE) {
   8215 				active = sc->sc_mii.mii_media_active;
   8216 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8217 				switch (IFM_SUBTYPE(active)) {
   8218 				case IFM_10_T:
   8219 					sc->sc_ctrl |= CTRL_SPEED_10;
   8220 					break;
   8221 				case IFM_100_TX:
   8222 					sc->sc_ctrl |= CTRL_SPEED_100;
   8223 					break;
   8224 				case IFM_1000_T:
   8225 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8226 					break;
   8227 				default:
   8228 					/*
   8229 					 * fiber?
    8230 					 * Should not enter here.
   8231 					 */
   8232 					printf("unknown media (%x)\n", active);
   8233 					break;
   8234 				}
   8235 				if (active & IFM_FDX)
   8236 					sc->sc_ctrl |= CTRL_FD;
   8237 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8238 			}
   8239 		} else if ((sc->sc_type == WM_T_ICH8)
   8240 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8241 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8242 		} else if (sc->sc_type == WM_T_PCH) {
   8243 			wm_k1_gig_workaround_hv(sc,
   8244 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8245 		}
   8246 
   8247 		if ((sc->sc_phytype == WMPHY_82578)
   8248 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8249 			== IFM_1000_T)) {
   8250 
   8251 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8252 				delay(200*1000); /* XXX too big */
   8253 
   8254 				/* Link stall fix for link up */
   8255 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8256 				    HV_MUX_DATA_CTRL,
   8257 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8258 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8259 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8260 				    HV_MUX_DATA_CTRL,
   8261 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8262 			}
   8263 		}
   8264 		/*
   8265 		 * I217 Packet Loss issue:
   8266 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8267 		 * on power up.
   8268 		 * Set the Beacon Duration for I217 to 8 usec
   8269 		 */
   8270 		if ((sc->sc_type == WM_T_PCH_LPT)
   8271 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8272 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8273 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8274 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8275 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8276 		}
   8277 
   8278 		/* XXX Work-around I218 hang issue */
   8279 		/* e1000_k1_workaround_lpt_lp() */
   8280 
   8281 		if ((sc->sc_type == WM_T_PCH_LPT)
   8282 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8283 			/*
   8284 			 * Set platform power management values for Latency
   8285 			 * Tolerance Reporting (LTR)
   8286 			 */
   8287 			wm_platform_pm_pch_lpt(sc,
   8288 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8289 				    != 0));
   8290 		}
   8291 
   8292 		/* FEXTNVM6 K1-off workaround */
   8293 		if (sc->sc_type == WM_T_PCH_SPT) {
   8294 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8295 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8296 			    & FEXTNVM6_K1_OFF_ENABLE)
   8297 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8298 			else
   8299 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8300 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8301 		}
   8302 	} else if (icr & ICR_RXSEQ) {
    8303 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8304 			device_xname(sc->sc_dev)));
   8305 	}
   8306 }
   8307 
   8308 /*
   8309  * wm_linkintr_tbi:
   8310  *
   8311  *	Helper; handle link interrupts for TBI mode.
   8312  */
   8313 static void
   8314 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8315 {
   8316 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8317 	uint32_t status;
   8318 
   8319 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8320 		__func__));
   8321 
   8322 	status = CSR_READ(sc, WMREG_STATUS);
   8323 	if (icr & ICR_LSC) {
   8324 		if (status & STATUS_LU) {
   8325 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8326 			    device_xname(sc->sc_dev),
   8327 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8328 			/*
   8329 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8330 			 * so we should update sc->sc_ctrl
   8331 			 */
   8332 
   8333 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8334 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8335 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8336 			if (status & STATUS_FD)
   8337 				sc->sc_tctl |=
   8338 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8339 			else
   8340 				sc->sc_tctl |=
   8341 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8342 			if (sc->sc_ctrl & CTRL_TFCE)
   8343 				sc->sc_fcrtl |= FCRTL_XONE;
   8344 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8345 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8346 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8347 				      sc->sc_fcrtl);
   8348 			sc->sc_tbi_linkup = 1;
   8349 			if_link_state_change(ifp, LINK_STATE_UP);
   8350 		} else {
   8351 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8352 			    device_xname(sc->sc_dev)));
   8353 			sc->sc_tbi_linkup = 0;
   8354 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8355 		}
   8356 		/* Update LED */
   8357 		wm_tbi_serdes_set_linkled(sc);
   8358 	} else if (icr & ICR_RXSEQ) {
   8359 		DPRINTF(WM_DEBUG_LINK,
   8360 		    ("%s: LINK: Receive sequence error\n",
   8361 		    device_xname(sc->sc_dev)));
   8362 	}
   8363 }
   8364 
   8365 /*
   8366  * wm_linkintr_serdes:
   8367  *
    8368  *	Helper; handle link interrupts for SERDES mode.
   8369  */
   8370 static void
   8371 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8372 {
   8373 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8374 	struct mii_data *mii = &sc->sc_mii;
   8375 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8376 	uint32_t pcs_adv, pcs_lpab, reg;
   8377 
   8378 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8379 		__func__));
   8380 
   8381 	if (icr & ICR_LSC) {
   8382 		/* Check PCS */
   8383 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8384 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8385 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8386 				device_xname(sc->sc_dev)));
   8387 			mii->mii_media_status |= IFM_ACTIVE;
   8388 			sc->sc_tbi_linkup = 1;
   8389 			if_link_state_change(ifp, LINK_STATE_UP);
   8390 		} else {
   8391 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8392 				device_xname(sc->sc_dev)));
   8393 			mii->mii_media_status |= IFM_NONE;
   8394 			sc->sc_tbi_linkup = 0;
   8395 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8396 			wm_tbi_serdes_set_linkled(sc);
   8397 			return;
   8398 		}
   8399 		mii->mii_media_active |= IFM_1000_SX;
   8400 		if ((reg & PCS_LSTS_FDX) != 0)
   8401 			mii->mii_media_active |= IFM_FDX;
   8402 		else
   8403 			mii->mii_media_active |= IFM_HDX;
   8404 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8405 			/* Check flow */
   8406 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8407 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8408 				DPRINTF(WM_DEBUG_LINK,
   8409 				    ("XXX LINKOK but not ACOMP\n"));
   8410 				return;
   8411 			}
   8412 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8413 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8414 			DPRINTF(WM_DEBUG_LINK,
   8415 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8416 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8417 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8418 				mii->mii_media_active |= IFM_FLOW
   8419 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8420 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8421 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8422 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8423 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8424 				mii->mii_media_active |= IFM_FLOW
   8425 				    | IFM_ETH_TXPAUSE;
   8426 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8427 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8428 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8429 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8430 				mii->mii_media_active |= IFM_FLOW
   8431 				    | IFM_ETH_RXPAUSE;
   8432 		}
   8433 		/* Update LED */
   8434 		wm_tbi_serdes_set_linkled(sc);
   8435 	} else {
   8436 		DPRINTF(WM_DEBUG_LINK,
   8437 		    ("%s: LINK: Receive sequence error\n",
   8438 		    device_xname(sc->sc_dev)));
   8439 	}
   8440 }
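
/*
 * Editor's note: the pause-bit resolution in wm_linkintr_serdes()
 * follows the standard IEEE 802.3 Annex 28B rules:
 *
 *	local SYM, partner SYM			-> TX and RX pause
 *	local ASYM only, partner SYM+ASYM	-> TX pause only
 *	local SYM+ASYM, partner ASYM only	-> RX pause only
 *	anything else				-> no pause
 */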
   8441 
   8442 /*
   8443  * wm_linkintr:
   8444  *
   8445  *	Helper; handle link interrupts.
   8446  */
   8447 static void
   8448 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8449 {
   8450 
   8451 	KASSERT(WM_CORE_LOCKED(sc));
   8452 
   8453 	if (sc->sc_flags & WM_F_HAS_MII)
   8454 		wm_linkintr_gmii(sc, icr);
   8455 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8456 	    && (sc->sc_type >= WM_T_82575))
   8457 		wm_linkintr_serdes(sc, icr);
   8458 	else
   8459 		wm_linkintr_tbi(sc, icr);
   8460 }
   8461 
   8462 /*
   8463  * wm_intr_legacy:
   8464  *
   8465  *	Interrupt service routine for INTx and MSI.
   8466  */
   8467 static int
   8468 wm_intr_legacy(void *arg)
   8469 {
   8470 	struct wm_softc *sc = arg;
   8471 	struct wm_queue *wmq = &sc->sc_queue[0];
   8472 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8473 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8474 	uint32_t icr, rndval = 0;
   8475 	int handled = 0;
   8476 
   8477 	while (1 /* CONSTCOND */) {
   8478 		icr = CSR_READ(sc, WMREG_ICR);
   8479 		if ((icr & sc->sc_icr) == 0)
   8480 			break;
   8481 		if (handled == 0) {
   8482 			DPRINTF(WM_DEBUG_TX,
   8483 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8484 		}
   8485 		if (rndval == 0)
   8486 			rndval = icr;
   8487 
   8488 		mutex_enter(rxq->rxq_lock);
   8489 
   8490 		if (rxq->rxq_stopping) {
   8491 			mutex_exit(rxq->rxq_lock);
   8492 			break;
   8493 		}
   8494 
   8495 		handled = 1;
   8496 
   8497 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8498 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8499 			DPRINTF(WM_DEBUG_RX,
   8500 			    ("%s: RX: got Rx intr 0x%08x\n",
   8501 			    device_xname(sc->sc_dev),
   8502 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8503 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8504 		}
   8505 #endif
   8506 		wm_rxeof(rxq, UINT_MAX);
   8507 
   8508 		mutex_exit(rxq->rxq_lock);
   8509 		mutex_enter(txq->txq_lock);
   8510 
   8511 		if (txq->txq_stopping) {
   8512 			mutex_exit(txq->txq_lock);
   8513 			break;
   8514 		}
   8515 
   8516 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8517 		if (icr & ICR_TXDW) {
   8518 			DPRINTF(WM_DEBUG_TX,
   8519 			    ("%s: TX: got TXDW interrupt\n",
   8520 			    device_xname(sc->sc_dev)));
   8521 			WM_Q_EVCNT_INCR(txq, txdw);
   8522 		}
   8523 #endif
   8524 		wm_txeof(sc, txq);
   8525 
   8526 		mutex_exit(txq->txq_lock);
   8527 		WM_CORE_LOCK(sc);
   8528 
   8529 		if (sc->sc_core_stopping) {
   8530 			WM_CORE_UNLOCK(sc);
   8531 			break;
   8532 		}
   8533 
   8534 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8535 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8536 			wm_linkintr(sc, icr);
   8537 		}
   8538 
   8539 		WM_CORE_UNLOCK(sc);
   8540 
   8541 		if (icr & ICR_RXO) {
   8542 #if defined(WM_DEBUG)
   8543 			log(LOG_WARNING, "%s: Receive overrun\n",
   8544 			    device_xname(sc->sc_dev));
   8545 #endif /* defined(WM_DEBUG) */
   8546 		}
   8547 	}
   8548 
   8549 	rnd_add_uint32(&sc->rnd_source, rndval);
   8550 
   8551 	if (handled) {
   8552 		/* Try to get more packets going. */
   8553 		softint_schedule(wmq->wmq_si);
   8554 	}
   8555 
   8556 	return handled;
   8557 }
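
/*
 * Editor's note: reading ICR clears the asserted cause bits on this
 * family, so the loop above simply re-reads ICR until no enabled cause
 * remains.  The first non-zero ICR snapshot is kept in rndval and fed
 * to the entropy pool once, after the loop.
 */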
   8558 
   8559 static inline void
   8560 wm_txrxintr_disable(struct wm_queue *wmq)
   8561 {
   8562 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8563 
   8564 	if (sc->sc_type == WM_T_82574)
   8565 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8566 	else if (sc->sc_type == WM_T_82575)
   8567 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8568 	else
   8569 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8570 }
   8571 
   8572 static inline void
   8573 wm_txrxintr_enable(struct wm_queue *wmq)
   8574 {
   8575 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8576 
   8577 	wm_itrs_calculate(sc, wmq);
   8578 
   8579 	if (sc->sc_type == WM_T_82574)
   8580 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8581 	else if (sc->sc_type == WM_T_82575)
   8582 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8583 	else
   8584 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8585 }
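
/*
 * Editor's note: per-queue interrupt masking differs by chip, as the
 * two helpers above show: the 82574 masks its per-queue causes through
 * the legacy IMS/IMC registers (ICR_TXQ/ICR_RXQ bits), the 82575
 * encodes queue numbers into EIMS/EIMC via EITR_TX_QUEUE/EITR_RX_QUEUE,
 * and newer MSI-X capable chips use one EIMS/EIMC bit per interrupt
 * vector (wmq_intr_idx).
 */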
   8586 
   8587 static int
   8588 wm_txrxintr_msix(void *arg)
   8589 {
   8590 	struct wm_queue *wmq = arg;
   8591 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8592 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8593 	struct wm_softc *sc = txq->txq_sc;
   8594 	u_int limit = sc->sc_rx_intr_process_limit;
   8595 
   8596 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8597 
   8598 	DPRINTF(WM_DEBUG_TX,
   8599 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8600 
   8601 	wm_txrxintr_disable(wmq);
   8602 
   8603 	mutex_enter(txq->txq_lock);
   8604 
   8605 	if (txq->txq_stopping) {
   8606 		mutex_exit(txq->txq_lock);
   8607 		return 0;
   8608 	}
   8609 
   8610 	WM_Q_EVCNT_INCR(txq, txdw);
   8611 	wm_txeof(sc, txq);
    8612 	/* wm_deferred_start() is done in wm_handle_queue(). */
   8613 	mutex_exit(txq->txq_lock);
   8614 
   8615 	DPRINTF(WM_DEBUG_RX,
   8616 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8617 	mutex_enter(rxq->rxq_lock);
   8618 
   8619 	if (rxq->rxq_stopping) {
   8620 		mutex_exit(rxq->rxq_lock);
   8621 		return 0;
   8622 	}
   8623 
   8624 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8625 	wm_rxeof(rxq, limit);
   8626 	mutex_exit(rxq->rxq_lock);
   8627 
   8628 	wm_itrs_writereg(sc, wmq);
   8629 
   8630 	softint_schedule(wmq->wmq_si);
   8631 
   8632 	return 1;
   8633 }
   8634 
   8635 static void
   8636 wm_handle_queue(void *arg)
   8637 {
   8638 	struct wm_queue *wmq = arg;
   8639 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8640 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8641 	struct wm_softc *sc = txq->txq_sc;
   8642 	u_int limit = sc->sc_rx_process_limit;
   8643 
   8644 	mutex_enter(txq->txq_lock);
   8645 	if (txq->txq_stopping) {
   8646 		mutex_exit(txq->txq_lock);
   8647 		return;
   8648 	}
   8649 	wm_txeof(sc, txq);
   8650 	wm_deferred_start_locked(txq);
   8651 	mutex_exit(txq->txq_lock);
   8652 
   8653 	mutex_enter(rxq->rxq_lock);
   8654 	if (rxq->rxq_stopping) {
   8655 		mutex_exit(rxq->rxq_lock);
   8656 		return;
   8657 	}
   8658 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8659 	wm_rxeof(rxq, limit);
   8660 	mutex_exit(rxq->rxq_lock);
   8661 
   8662 	wm_txrxintr_enable(wmq);
   8663 }
   8664 
   8665 /*
   8666  * wm_linkintr_msix:
   8667  *
   8668  *	Interrupt service routine for link status change for MSI-X.
   8669  */
   8670 static int
   8671 wm_linkintr_msix(void *arg)
   8672 {
   8673 	struct wm_softc *sc = arg;
   8674 	uint32_t reg;
   8675 
   8676 	DPRINTF(WM_DEBUG_LINK,
   8677 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8678 
   8679 	reg = CSR_READ(sc, WMREG_ICR);
   8680 	WM_CORE_LOCK(sc);
   8681 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8682 		goto out;
   8683 
   8684 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8685 	wm_linkintr(sc, ICR_LSC);
   8686 
   8687 out:
   8688 	WM_CORE_UNLOCK(sc);
   8689 
   8690 	if (sc->sc_type == WM_T_82574)
   8691 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8692 	else if (sc->sc_type == WM_T_82575)
   8693 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8694 	else
   8695 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8696 
   8697 	return 1;
   8698 }
   8699 
   8700 /*
   8701  * Media related.
   8702  * GMII, SGMII, TBI (and SERDES)
   8703  */
   8704 
   8705 /* Common */
   8706 
   8707 /*
   8708  * wm_tbi_serdes_set_linkled:
   8709  *
   8710  *	Update the link LED on TBI and SERDES devices.
   8711  */
   8712 static void
   8713 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   8714 {
   8715 
   8716 	if (sc->sc_tbi_linkup)
   8717 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   8718 	else
   8719 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   8720 
   8721 	/* 82540 or newer devices are active low */
   8722 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   8723 
   8724 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8725 }
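
/*
 * Editor's note: the XOR above implements the polarity flip: on 82540
 * and newer parts the link LED pin is active low, so "link up" is
 * signalled by driving SWDPIN(0) to 0 rather than 1.
 */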
   8726 
   8727 /* GMII related */
   8728 
   8729 /*
   8730  * wm_gmii_reset:
   8731  *
   8732  *	Reset the PHY.
   8733  */
   8734 static void
   8735 wm_gmii_reset(struct wm_softc *sc)
   8736 {
   8737 	uint32_t reg;
   8738 	int rv;
   8739 
   8740 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   8741 		device_xname(sc->sc_dev), __func__));
   8742 
   8743 	rv = sc->phy.acquire(sc);
   8744 	if (rv != 0) {
   8745 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8746 		    __func__);
   8747 		return;
   8748 	}
   8749 
   8750 	switch (sc->sc_type) {
   8751 	case WM_T_82542_2_0:
   8752 	case WM_T_82542_2_1:
   8753 		/* null */
   8754 		break;
   8755 	case WM_T_82543:
   8756 		/*
   8757 		 * With 82543, we need to force speed and duplex on the MAC
   8758 		 * equal to what the PHY speed and duplex configuration is.
   8759 		 * In addition, we need to perform a hardware reset on the PHY
   8760 		 * to take it out of reset.
   8761 		 */
   8762 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8763 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8764 
   8765 		/* The PHY reset pin is active-low. */
   8766 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   8767 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   8768 		    CTRL_EXT_SWDPIN(4));
   8769 		reg |= CTRL_EXT_SWDPIO(4);
   8770 
   8771 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   8772 		CSR_WRITE_FLUSH(sc);
   8773 		delay(10*1000);
   8774 
   8775 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   8776 		CSR_WRITE_FLUSH(sc);
   8777 		delay(150);
   8778 #if 0
   8779 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   8780 #endif
   8781 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   8782 		break;
   8783 	case WM_T_82544:	/* reset 10000us */
   8784 	case WM_T_82540:
   8785 	case WM_T_82545:
   8786 	case WM_T_82545_3:
   8787 	case WM_T_82546:
   8788 	case WM_T_82546_3:
   8789 	case WM_T_82541:
   8790 	case WM_T_82541_2:
   8791 	case WM_T_82547:
   8792 	case WM_T_82547_2:
   8793 	case WM_T_82571:	/* reset 100us */
   8794 	case WM_T_82572:
   8795 	case WM_T_82573:
   8796 	case WM_T_82574:
   8797 	case WM_T_82575:
   8798 	case WM_T_82576:
   8799 	case WM_T_82580:
   8800 	case WM_T_I350:
   8801 	case WM_T_I354:
   8802 	case WM_T_I210:
   8803 	case WM_T_I211:
   8804 	case WM_T_82583:
   8805 	case WM_T_80003:
   8806 		/* generic reset */
   8807 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8808 		CSR_WRITE_FLUSH(sc);
   8809 		delay(20000);
   8810 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8811 		CSR_WRITE_FLUSH(sc);
   8812 		delay(20000);
   8813 
   8814 		if ((sc->sc_type == WM_T_82541)
   8815 		    || (sc->sc_type == WM_T_82541_2)
   8816 		    || (sc->sc_type == WM_T_82547)
   8817 		    || (sc->sc_type == WM_T_82547_2)) {
    8818 			/* Workarounds for IGP are done in igp_reset() */
   8819 			/* XXX add code to set LED after phy reset */
   8820 		}
   8821 		break;
   8822 	case WM_T_ICH8:
   8823 	case WM_T_ICH9:
   8824 	case WM_T_ICH10:
   8825 	case WM_T_PCH:
   8826 	case WM_T_PCH2:
   8827 	case WM_T_PCH_LPT:
   8828 	case WM_T_PCH_SPT:
   8829 		/* generic reset */
   8830 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8831 		CSR_WRITE_FLUSH(sc);
   8832 		delay(100);
   8833 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8834 		CSR_WRITE_FLUSH(sc);
   8835 		delay(150);
   8836 		break;
   8837 	default:
   8838 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8839 		    __func__);
   8840 		break;
   8841 	}
   8842 
   8843 	sc->phy.release(sc);
   8844 
   8845 	/* get_cfg_done */
   8846 	wm_get_cfg_done(sc);
   8847 
   8848 	/* extra setup */
   8849 	switch (sc->sc_type) {
   8850 	case WM_T_82542_2_0:
   8851 	case WM_T_82542_2_1:
   8852 	case WM_T_82543:
   8853 	case WM_T_82544:
   8854 	case WM_T_82540:
   8855 	case WM_T_82545:
   8856 	case WM_T_82545_3:
   8857 	case WM_T_82546:
   8858 	case WM_T_82546_3:
   8859 	case WM_T_82541_2:
   8860 	case WM_T_82547_2:
   8861 	case WM_T_82571:
   8862 	case WM_T_82572:
   8863 	case WM_T_82573:
   8864 	case WM_T_82575:
   8865 	case WM_T_82576:
   8866 	case WM_T_82580:
   8867 	case WM_T_I350:
   8868 	case WM_T_I354:
   8869 	case WM_T_I210:
   8870 	case WM_T_I211:
   8871 	case WM_T_80003:
   8872 		/* null */
   8873 		break;
   8874 	case WM_T_82574:
   8875 	case WM_T_82583:
   8876 		wm_lplu_d0_disable(sc);
   8877 		break;
   8878 	case WM_T_82541:
   8879 	case WM_T_82547:
   8880 		/* XXX Configure actively LED after PHY reset */
   8881 		break;
   8882 	case WM_T_ICH8:
   8883 	case WM_T_ICH9:
   8884 	case WM_T_ICH10:
   8885 	case WM_T_PCH:
   8886 	case WM_T_PCH2:
   8887 	case WM_T_PCH_LPT:
   8888 	case WM_T_PCH_SPT:
    8889 		/* Allow time for h/w to get to a quiescent state after reset */
   8890 		delay(10*1000);
   8891 
   8892 		if (sc->sc_type == WM_T_PCH)
   8893 			wm_hv_phy_workaround_ich8lan(sc);
   8894 
   8895 		if (sc->sc_type == WM_T_PCH2)
   8896 			wm_lv_phy_workaround_ich8lan(sc);
   8897 
   8898 		/* Clear the host wakeup bit after lcd reset */
   8899 		if (sc->sc_type >= WM_T_PCH) {
   8900 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   8901 			    BM_PORT_GEN_CFG);
   8902 			reg &= ~BM_WUC_HOST_WU_BIT;
   8903 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   8904 			    BM_PORT_GEN_CFG, reg);
   8905 		}
   8906 
   8907 		/*
    8908 		 * XXX Configure the LCD with the extended configuration region
   8909 		 * in NVM
   8910 		 */
   8911 
   8912 		/* Disable D0 LPLU. */
   8913 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8914 			wm_lplu_d0_disable_pch(sc);
   8915 		else
   8916 			wm_lplu_d0_disable(sc);	/* ICH* */
   8917 		break;
   8918 	default:
   8919 		panic("%s: unknown type\n", __func__);
   8920 		break;
   8921 	}
   8922 }
   8923 
    8924 /*
    8925  * Set up sc_phytype and mii_{read|write}reg.
    8926  *
    8927  *  To identify the PHY type, the correct read/write functions must be
    8928  * selected, and selecting them requires the PCI ID or MAC type, since
    8929  * the PHY registers cannot be accessed yet.
    8930  *
    8931  *  On the first call of this function, the PHY ID is not known yet, so
    8932  * check the PCI ID or MAC type. The list of PCI IDs may not be perfect,
    8933  * so the result might be incorrect.
    8934  *
    8935  *  On the second call, the PHY OUI and model are used to identify the
    8936  * PHY type. The result might still not be perfect because some entries
    8937  * are missing from the comparison, but it should beat the first call.
    8938  *
    8939  *  If the newly detected result differs from the previous assumption,
    8940  * a diagnostic message is printed.
    8941  */
   8942 static void
   8943 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   8944     uint16_t phy_model)
   8945 {
   8946 	device_t dev = sc->sc_dev;
   8947 	struct mii_data *mii = &sc->sc_mii;
   8948 	uint16_t new_phytype = WMPHY_UNKNOWN;
   8949 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   8950 	mii_readreg_t new_readreg;
   8951 	mii_writereg_t new_writereg;
   8952 
   8953 	if (mii->mii_readreg == NULL) {
   8954 		/*
   8955 		 *  This is the first call of this function. For ICH and PCH
   8956 		 * variants, it's difficult to determine the PHY access method
   8957 		 * by sc_type, so use the PCI product ID for some devices.
   8958 		 */
   8959 
   8960 		switch (sc->sc_pcidevid) {
   8961 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   8962 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   8963 			/* 82577 */
   8964 			new_phytype = WMPHY_82577;
   8965 			break;
   8966 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   8967 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   8968 			/* 82578 */
   8969 			new_phytype = WMPHY_82578;
   8970 			break;
   8971 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8972 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8973 			/* 82579 */
   8974 			new_phytype = WMPHY_82579;
   8975 			break;
   8976 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8977 		case PCI_PRODUCT_INTEL_82801I_BM:
   8978 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   8979 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8980 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8981 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8982 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8983 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8984 			/* ICH8, 9, 10 with 82567 */
   8985 			new_phytype = WMPHY_BM;
   8986 			break;
   8987 		default:
   8988 			break;
   8989 		}
   8990 	} else {
   8991 		/* It's not the first call. Use PHY OUI and model */
   8992 		switch (phy_oui) {
   8993 		case MII_OUI_ATHEROS: /* XXX ??? */
   8994 			switch (phy_model) {
   8995 			case 0x0004: /* XXX */
   8996 				new_phytype = WMPHY_82578;
   8997 				break;
   8998 			default:
   8999 				break;
   9000 			}
   9001 			break;
   9002 		case MII_OUI_xxMARVELL:
   9003 			switch (phy_model) {
   9004 			case MII_MODEL_xxMARVELL_I210:
   9005 				new_phytype = WMPHY_I210;
   9006 				break;
   9007 			case MII_MODEL_xxMARVELL_E1011:
   9008 			case MII_MODEL_xxMARVELL_E1000_3:
   9009 			case MII_MODEL_xxMARVELL_E1000_5:
   9010 			case MII_MODEL_xxMARVELL_E1112:
   9011 				new_phytype = WMPHY_M88;
   9012 				break;
   9013 			case MII_MODEL_xxMARVELL_E1149:
   9014 				new_phytype = WMPHY_BM;
   9015 				break;
   9016 			case MII_MODEL_xxMARVELL_E1111:
   9017 			case MII_MODEL_xxMARVELL_I347:
   9018 			case MII_MODEL_xxMARVELL_E1512:
   9019 			case MII_MODEL_xxMARVELL_E1340M:
   9020 			case MII_MODEL_xxMARVELL_E1543:
   9021 				new_phytype = WMPHY_M88;
   9022 				break;
   9023 			case MII_MODEL_xxMARVELL_I82563:
   9024 				new_phytype = WMPHY_GG82563;
   9025 				break;
   9026 			default:
   9027 				break;
   9028 			}
   9029 			break;
   9030 		case MII_OUI_INTEL:
   9031 			switch (phy_model) {
   9032 			case MII_MODEL_INTEL_I82577:
   9033 				new_phytype = WMPHY_82577;
   9034 				break;
   9035 			case MII_MODEL_INTEL_I82579:
   9036 				new_phytype = WMPHY_82579;
   9037 				break;
   9038 			case MII_MODEL_INTEL_I217:
   9039 				new_phytype = WMPHY_I217;
   9040 				break;
   9041 			case MII_MODEL_INTEL_I82580:
   9042 			case MII_MODEL_INTEL_I350:
   9043 				new_phytype = WMPHY_82580;
   9044 				break;
   9045 			default:
   9046 				break;
   9047 			}
   9048 			break;
   9049 		case MII_OUI_yyINTEL:
   9050 			switch (phy_model) {
   9051 			case MII_MODEL_yyINTEL_I82562G:
   9052 			case MII_MODEL_yyINTEL_I82562EM:
   9053 			case MII_MODEL_yyINTEL_I82562ET:
   9054 				new_phytype = WMPHY_IFE;
   9055 				break;
   9056 			case MII_MODEL_yyINTEL_IGP01E1000:
   9057 				new_phytype = WMPHY_IGP;
   9058 				break;
   9059 			case MII_MODEL_yyINTEL_I82566:
   9060 				new_phytype = WMPHY_IGP_3;
   9061 				break;
   9062 			default:
   9063 				break;
   9064 			}
   9065 			break;
   9066 		default:
   9067 			break;
   9068 		}
   9069 		if (new_phytype == WMPHY_UNKNOWN)
   9070 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9071 			    __func__);
   9072 
   9073 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9074 		    && (sc->sc_phytype != new_phytype)) {
    9075 			aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9076 			    "was incorrect. PHY type from PHY ID = %u\n",
   9077 			    sc->sc_phytype, new_phytype);
   9078 		}
   9079 	}
   9080 
   9081 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9082 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9083 		/* SGMII */
   9084 		new_readreg = wm_sgmii_readreg;
   9085 		new_writereg = wm_sgmii_writereg;
    9086 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9087 		/* BM2 (phyaddr == 1) */
   9088 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9089 		    && (new_phytype != WMPHY_BM)
   9090 		    && (new_phytype != WMPHY_UNKNOWN))
   9091 			doubt_phytype = new_phytype;
   9092 		new_phytype = WMPHY_BM;
   9093 		new_readreg = wm_gmii_bm_readreg;
   9094 		new_writereg = wm_gmii_bm_writereg;
   9095 	} else if (sc->sc_type >= WM_T_PCH) {
   9096 		/* All PCH* use _hv_ */
   9097 		new_readreg = wm_gmii_hv_readreg;
   9098 		new_writereg = wm_gmii_hv_writereg;
   9099 	} else if (sc->sc_type >= WM_T_ICH8) {
   9100 		/* non-82567 ICH8, 9 and 10 */
   9101 		new_readreg = wm_gmii_i82544_readreg;
   9102 		new_writereg = wm_gmii_i82544_writereg;
   9103 	} else if (sc->sc_type >= WM_T_80003) {
   9104 		/* 80003 */
   9105 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9106 		    && (new_phytype != WMPHY_GG82563)
   9107 		    && (new_phytype != WMPHY_UNKNOWN))
   9108 			doubt_phytype = new_phytype;
   9109 		new_phytype = WMPHY_GG82563;
   9110 		new_readreg = wm_gmii_i80003_readreg;
   9111 		new_writereg = wm_gmii_i80003_writereg;
   9112 	} else if (sc->sc_type >= WM_T_I210) {
   9113 		/* I210 and I211 */
   9114 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9115 		    && (new_phytype != WMPHY_I210)
   9116 		    && (new_phytype != WMPHY_UNKNOWN))
   9117 			doubt_phytype = new_phytype;
   9118 		new_phytype = WMPHY_I210;
   9119 		new_readreg = wm_gmii_gs40g_readreg;
   9120 		new_writereg = wm_gmii_gs40g_writereg;
   9121 	} else if (sc->sc_type >= WM_T_82580) {
   9122 		/* 82580, I350 and I354 */
   9123 		new_readreg = wm_gmii_82580_readreg;
   9124 		new_writereg = wm_gmii_82580_writereg;
   9125 	} else if (sc->sc_type >= WM_T_82544) {
    9126 		/* 82544, 82540, 8254[56], 8254[17] and 8257[123] */
   9127 		new_readreg = wm_gmii_i82544_readreg;
   9128 		new_writereg = wm_gmii_i82544_writereg;
   9129 	} else {
   9130 		new_readreg = wm_gmii_i82543_readreg;
   9131 		new_writereg = wm_gmii_i82543_writereg;
   9132 	}
   9133 
   9134 	if (new_phytype == WMPHY_BM) {
   9135 		/* All BM use _bm_ */
   9136 		new_readreg = wm_gmii_bm_readreg;
   9137 		new_writereg = wm_gmii_bm_writereg;
   9138 	}
   9139 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9140 		/* All PCH* use _hv_ */
   9141 		new_readreg = wm_gmii_hv_readreg;
   9142 		new_writereg = wm_gmii_hv_writereg;
   9143 	}
   9144 
   9145 	/* Diag output */
   9146 	if (doubt_phytype != WMPHY_UNKNOWN)
   9147 		aprint_error_dev(dev, "Assumed new PHY type was "
   9148 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9149 		    new_phytype);
   9150 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9151 	    && (sc->sc_phytype != new_phytype))
    9152 		aprint_error_dev(dev, "Previously assumed PHY type (%u) "
    9153 		    "was incorrect. New PHY type = %u\n",
   9154 		    sc->sc_phytype, new_phytype);
   9155 
   9156 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9157 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9158 
   9159 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9160 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9161 		    "function was incorrect.\n");
   9162 
   9163 	/* Update now */
   9164 	sc->sc_phytype = new_phytype;
   9165 	mii->mii_readreg = new_readreg;
   9166 	mii->mii_writereg = new_writereg;
   9167 }
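
/*
 * Editor's note: a summary of the access-function selection performed
 * above (derived from the code; the BM and PCH overrides at the end
 * take precedence):
 *
 *	SGMII without MDIO	-> wm_sgmii_{read,write}reg
 *	82574/82583 (BM2)	-> wm_gmii_bm_{read,write}reg
 *	PCH .. PCH_SPT		-> wm_gmii_hv_{read,write}reg
 *	ICH8/9/10 (non-82567)	-> wm_gmii_i82544_{read,write}reg
 *	80003			-> wm_gmii_i80003_{read,write}reg
 *	I210/I211		-> wm_gmii_gs40g_{read,write}reg
 *	82580/I350/I354		-> wm_gmii_82580_{read,write}reg
 *	82544 .. 8257x		-> wm_gmii_i82544_{read,write}reg
 *	82543 and older		-> wm_gmii_i82543_{read,write}reg (bit-bang)
 */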
   9168 
   9169 /*
   9170  * wm_get_phy_id_82575:
   9171  *
   9172  * Return PHY ID. Return -1 if it failed.
   9173  */
   9174 static int
   9175 wm_get_phy_id_82575(struct wm_softc *sc)
   9176 {
   9177 	uint32_t reg;
   9178 	int phyid = -1;
   9179 
   9180 	/* XXX */
   9181 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9182 		return -1;
   9183 
   9184 	if (wm_sgmii_uses_mdio(sc)) {
   9185 		switch (sc->sc_type) {
   9186 		case WM_T_82575:
   9187 		case WM_T_82576:
   9188 			reg = CSR_READ(sc, WMREG_MDIC);
   9189 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9190 			break;
   9191 		case WM_T_82580:
   9192 		case WM_T_I350:
   9193 		case WM_T_I354:
   9194 		case WM_T_I210:
   9195 		case WM_T_I211:
   9196 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9197 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9198 			break;
   9199 		default:
   9200 			return -1;
   9201 		}
   9202 	}
   9203 
   9204 	return phyid;
   9205 }
   9206 
   9207 
   9208 /*
   9209  * wm_gmii_mediainit:
   9210  *
   9211  *	Initialize media for use on 1000BASE-T devices.
   9212  */
   9213 static void
   9214 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9215 {
   9216 	device_t dev = sc->sc_dev;
   9217 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9218 	struct mii_data *mii = &sc->sc_mii;
   9219 	uint32_t reg;
   9220 
   9221 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9222 		device_xname(sc->sc_dev), __func__));
   9223 
   9224 	/* We have GMII. */
   9225 	sc->sc_flags |= WM_F_HAS_MII;
   9226 
   9227 	if (sc->sc_type == WM_T_80003)
    9228 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9229 	else
   9230 		sc->sc_tipg = TIPG_1000T_DFLT;
   9231 
   9232 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9233 	if ((sc->sc_type == WM_T_82580)
   9234 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9235 	    || (sc->sc_type == WM_T_I211)) {
   9236 		reg = CSR_READ(sc, WMREG_PHPM);
   9237 		reg &= ~PHPM_GO_LINK_D;
   9238 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9239 	}
   9240 
   9241 	/*
   9242 	 * Let the chip set speed/duplex on its own based on
   9243 	 * signals from the PHY.
   9244 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9245 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9246 	 */
   9247 	sc->sc_ctrl |= CTRL_SLU;
   9248 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9249 
   9250 	/* Initialize our media structures and probe the GMII. */
   9251 	mii->mii_ifp = ifp;
   9252 
   9253 	/*
    9254 	 * The first call of wm_gmii_setup_phytype. The result might be
   9255 	 * incorrect.
   9256 	 */
   9257 	wm_gmii_setup_phytype(sc, 0, 0);
   9258 
   9259 	mii->mii_statchg = wm_gmii_statchg;
   9260 
   9261 	/* get PHY control from SMBus to PCIe */
   9262 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9263 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9264 		wm_smbustopci(sc);
   9265 
   9266 	wm_gmii_reset(sc);
   9267 
   9268 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9269 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9270 	    wm_gmii_mediastatus);
   9271 
   9272 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9273 	    || (sc->sc_type == WM_T_82580)
   9274 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9275 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9276 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9277 			/* Attach only one port */
   9278 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9279 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9280 		} else {
   9281 			int i, id;
   9282 			uint32_t ctrl_ext;
   9283 
   9284 			id = wm_get_phy_id_82575(sc);
   9285 			if (id != -1) {
   9286 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9287 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9288 			}
   9289 			if ((id == -1)
   9290 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9291 				/* Power on sgmii phy if it is disabled */
   9292 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9293 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   9294 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   9295 				CSR_WRITE_FLUSH(sc);
   9296 				delay(300*1000); /* XXX too long */
   9297 
    9298 				/* PHY addresses from 1 to 7 */
   9299 				for (i = 1; i < 8; i++)
   9300 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9301 					    0xffffffff, i, MII_OFFSET_ANY,
   9302 					    MIIF_DOPAUSE);
   9303 
   9304 				/* restore previous sfp cage power state */
   9305 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9306 			}
   9307 		}
   9308 	} else {
   9309 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9310 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9311 	}
   9312 
   9313 	/*
   9314 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   9315 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   9316 	 */
   9317 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9318 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9319 		wm_set_mdio_slow_mode_hv(sc);
   9320 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9321 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9322 	}
   9323 
   9324 	/*
   9325 	 * (For ICH8 variants)
   9326 	 * If PHY detection failed, use BM's r/w function and retry.
   9327 	 */
   9328 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   9329 		/* if failed, retry with *_bm_* */
   9330 		aprint_verbose_dev(dev, "Assumed PHY access function "
   9331 		    "(type = %d) might be incorrect. Use BM and retry.\n",
   9332 		    sc->sc_phytype);
   9333 		sc->sc_phytype = WMPHY_BM;
   9334 		mii->mii_readreg = wm_gmii_bm_readreg;
   9335 		mii->mii_writereg = wm_gmii_bm_writereg;
   9336 
   9337 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9338 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9339 	}
   9340 
   9341 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9342 		/* No PHY was found */
   9343 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9344 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9345 		sc->sc_phytype = WMPHY_NONE;
   9346 	} else {
   9347 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9348 
   9349 		/*
    9350 		 * PHY found! Check the PHY type again via the second call
    9351 		 * of wm_gmii_setup_phytype.
   9352 		 */
   9353 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9354 		    child->mii_mpd_model);
   9355 
   9356 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9357 	}
   9358 }
   9359 
   9360 /*
   9361  * wm_gmii_mediachange:	[ifmedia interface function]
   9362  *
   9363  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9364  */
   9365 static int
   9366 wm_gmii_mediachange(struct ifnet *ifp)
   9367 {
   9368 	struct wm_softc *sc = ifp->if_softc;
   9369 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9370 	int rc;
   9371 
   9372 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9373 		device_xname(sc->sc_dev), __func__));
   9374 	if ((ifp->if_flags & IFF_UP) == 0)
   9375 		return 0;
   9376 
   9377 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9378 	sc->sc_ctrl |= CTRL_SLU;
   9379 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9380 	    || (sc->sc_type > WM_T_82543)) {
   9381 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9382 	} else {
   9383 		sc->sc_ctrl &= ~CTRL_ASDE;
   9384 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9385 		if (ife->ifm_media & IFM_FDX)
   9386 			sc->sc_ctrl |= CTRL_FD;
   9387 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9388 		case IFM_10_T:
   9389 			sc->sc_ctrl |= CTRL_SPEED_10;
   9390 			break;
   9391 		case IFM_100_TX:
   9392 			sc->sc_ctrl |= CTRL_SPEED_100;
   9393 			break;
   9394 		case IFM_1000_T:
   9395 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9396 			break;
   9397 		default:
   9398 			panic("wm_gmii_mediachange: bad media 0x%x",
   9399 			    ife->ifm_media);
   9400 		}
   9401 	}
   9402 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9403 	if (sc->sc_type <= WM_T_82543)
   9404 		wm_gmii_reset(sc);
   9405 
   9406 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9407 		return 0;
   9408 	return rc;
   9409 }
   9410 
   9411 /*
   9412  * wm_gmii_mediastatus:	[ifmedia interface function]
   9413  *
   9414  *	Get the current interface media status on a 1000BASE-T device.
   9415  */
   9416 static void
   9417 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9418 {
   9419 	struct wm_softc *sc = ifp->if_softc;
   9420 
   9421 	ether_mediastatus(ifp, ifmr);
   9422 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9423 	    | sc->sc_flowflags;
   9424 }
   9425 
   9426 #define	MDI_IO		CTRL_SWDPIN(2)
   9427 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9428 #define	MDI_CLK		CTRL_SWDPIN(3)
   9429 
   9430 static void
   9431 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9432 {
   9433 	uint32_t i, v;
   9434 
   9435 	v = CSR_READ(sc, WMREG_CTRL);
   9436 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9437 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9438 
   9439 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9440 		if (data & i)
   9441 			v |= MDI_IO;
   9442 		else
   9443 			v &= ~MDI_IO;
   9444 		CSR_WRITE(sc, WMREG_CTRL, v);
   9445 		CSR_WRITE_FLUSH(sc);
   9446 		delay(10);
   9447 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9448 		CSR_WRITE_FLUSH(sc);
   9449 		delay(10);
   9450 		CSR_WRITE(sc, WMREG_CTRL, v);
   9451 		CSR_WRITE_FLUSH(sc);
   9452 		delay(10);
   9453 	}
   9454 }
   9455 
   9456 static uint32_t
   9457 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9458 {
   9459 	uint32_t v, i, data = 0;
   9460 
   9461 	v = CSR_READ(sc, WMREG_CTRL);
   9462 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9463 	v |= CTRL_SWDPIO(3);
   9464 
   9465 	CSR_WRITE(sc, WMREG_CTRL, v);
   9466 	CSR_WRITE_FLUSH(sc);
   9467 	delay(10);
   9468 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9469 	CSR_WRITE_FLUSH(sc);
   9470 	delay(10);
   9471 	CSR_WRITE(sc, WMREG_CTRL, v);
   9472 	CSR_WRITE_FLUSH(sc);
   9473 	delay(10);
   9474 
   9475 	for (i = 0; i < 16; i++) {
   9476 		data <<= 1;
   9477 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9478 		CSR_WRITE_FLUSH(sc);
   9479 		delay(10);
   9480 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9481 			data |= 1;
   9482 		CSR_WRITE(sc, WMREG_CTRL, v);
   9483 		CSR_WRITE_FLUSH(sc);
   9484 		delay(10);
   9485 	}
   9486 
   9487 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9488 	CSR_WRITE_FLUSH(sc);
   9489 	delay(10);
   9490 	CSR_WRITE(sc, WMREG_CTRL, v);
   9491 	CSR_WRITE_FLUSH(sc);
   9492 	delay(10);
   9493 
   9494 	return data;
   9495 }
   9496 
   9497 #undef MDI_IO
   9498 #undef MDI_DIR
   9499 #undef MDI_CLK
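
/*
 * Editor's note: the bit-banged sequences above implement an IEEE 802.3
 * clause 22 MDIO management frame: a 32-bit preamble of ones, a 2-bit
 * start (01), a 2-bit opcode (10 = read, 01 = write), 5 bits of PHY
 * address, 5 bits of register address, a 2-bit turnaround, and 16 data
 * bits, clocked MSB first on MDI_CLK with data on MDI_IO.
 */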
   9500 
   9501 /*
   9502  * wm_gmii_i82543_readreg:	[mii interface function]
   9503  *
   9504  *	Read a PHY register on the GMII (i82543 version).
   9505  */
   9506 static int
   9507 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   9508 {
   9509 	struct wm_softc *sc = device_private(self);
   9510 	int rv;
   9511 
   9512 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9513 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9514 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9515 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9516 
   9517 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9518 	    device_xname(sc->sc_dev), phy, reg, rv));
   9519 
   9520 	return rv;
   9521 }
   9522 
   9523 /*
   9524  * wm_gmii_i82543_writereg:	[mii interface function]
   9525  *
   9526  *	Write a PHY register on the GMII (i82543 version).
   9527  */
   9528 static void
   9529 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   9530 {
   9531 	struct wm_softc *sc = device_private(self);
   9532 
   9533 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9534 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9535 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9536 	    (MII_COMMAND_START << 30), 32);
   9537 }
   9538 
   9539 /*
   9540  * wm_gmii_mdic_readreg:	[mii interface function]
   9541  *
   9542  *	Read a PHY register on the GMII.
   9543  */
   9544 static int
   9545 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   9546 {
   9547 	struct wm_softc *sc = device_private(self);
   9548 	uint32_t mdic = 0;
   9549 	int i, rv;
   9550 
   9551 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9552 	    MDIC_REGADD(reg));
   9553 
   9554 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9555 		mdic = CSR_READ(sc, WMREG_MDIC);
   9556 		if (mdic & MDIC_READY)
   9557 			break;
   9558 		delay(50);
   9559 	}
   9560 
   9561 	if ((mdic & MDIC_READY) == 0) {
   9562 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9563 		    device_xname(sc->sc_dev), phy, reg);
   9564 		rv = 0;
   9565 	} else if (mdic & MDIC_E) {
   9566 #if 0 /* This is normal if no PHY is present. */
   9567 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9568 		    device_xname(sc->sc_dev), phy, reg);
   9569 #endif
   9570 		rv = 0;
   9571 	} else {
   9572 		rv = MDIC_DATA(mdic);
   9573 		if (rv == 0xffff)
   9574 			rv = 0;
   9575 	}
   9576 
   9577 	return rv;
   9578 }
   9579 
   9580 /*
   9581  * wm_gmii_mdic_writereg:	[mii interface function]
   9582  *
   9583  *	Write a PHY register on the GMII.
   9584  */
   9585 static void
   9586 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   9587 {
   9588 	struct wm_softc *sc = device_private(self);
   9589 	uint32_t mdic = 0;
   9590 	int i;
   9591 
   9592 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9593 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9594 
   9595 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9596 		mdic = CSR_READ(sc, WMREG_MDIC);
   9597 		if (mdic & MDIC_READY)
   9598 			break;
   9599 		delay(50);
   9600 	}
   9601 
   9602 	if ((mdic & MDIC_READY) == 0)
   9603 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9604 		    device_xname(sc->sc_dev), phy, reg);
   9605 	else if (mdic & MDIC_E)
   9606 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9607 		    device_xname(sc->sc_dev), phy, reg);
   9608 }
   9609 
   9610 /*
   9611  * wm_gmii_i82544_readreg:	[mii interface function]
   9612  *
   9613  *	Read a PHY register on the GMII.
   9614  */
   9615 static int
   9616 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   9617 {
   9618 	struct wm_softc *sc = device_private(self);
   9619 	int rv;
   9620 
   9621 	if (sc->phy.acquire(sc)) {
   9622 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9623 		    __func__);
   9624 		return 0;
   9625 	}
   9626 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   9627 	sc->phy.release(sc);
   9628 
   9629 	return rv;
   9630 }
   9631 
   9632 /*
   9633  * wm_gmii_i82544_writereg:	[mii interface function]
   9634  *
   9635  *	Write a PHY register on the GMII.
   9636  */
   9637 static void
   9638 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   9639 {
   9640 	struct wm_softc *sc = device_private(self);
   9641 
    9642 	if (sc->phy.acquire(sc)) {
    9643 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
    9644 		    __func__);
         		return;
    9645 	}
   9646 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9647 	sc->phy.release(sc);
   9648 }
   9649 
   9650 /*
   9651  * wm_gmii_i80003_readreg:	[mii interface function]
   9652  *
    9653  *	Read a PHY register on the kumeran interface.
    9654  * This could be handled by the PHY layer if we didn't have to lock the
    9655  * resource ...
   9656  */
   9657 static int
   9658 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   9659 {
   9660 	struct wm_softc *sc = device_private(self);
   9661 	int rv;
   9662 
   9663 	if (phy != 1) /* only one PHY on kumeran bus */
   9664 		return 0;
   9665 
   9666 	if (sc->phy.acquire(sc)) {
   9667 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9668 		    __func__);
   9669 		return 0;
   9670 	}
   9671 
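         	/*
         	 * GG82563 registers below GG82563_MIN_ALT_REG are reached
         	 * through the normal page-select register, higher ones
         	 * through the alternate page-select register.
         	 */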
   9672 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9673 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9674 		    reg >> GG82563_PAGE_SHIFT);
   9675 	} else {
   9676 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9677 		    reg >> GG82563_PAGE_SHIFT);
   9678 	}
    9679 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9680 	delay(200);
   9681 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9682 	delay(200);
   9683 	sc->phy.release(sc);
   9684 
   9685 	return rv;
   9686 }
   9687 
   9688 /*
   9689  * wm_gmii_i80003_writereg:	[mii interface function]
   9690  *
    9691  *	Write a PHY register on the kumeran interface.
    9692  * This could be handled by the PHY layer if we didn't have to lock the
    9693  * resource ...
   9694  */
   9695 static void
   9696 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   9697 {
   9698 	struct wm_softc *sc = device_private(self);
   9699 
   9700 	if (phy != 1) /* only one PHY on kumeran bus */
   9701 		return;
   9702 
   9703 	if (sc->phy.acquire(sc)) {
   9704 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9705 		    __func__);
   9706 		return;
   9707 	}
   9708 
   9709 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
   9710 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   9711 		    reg >> GG82563_PAGE_SHIFT);
   9712 	} else {
   9713 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   9714 		    reg >> GG82563_PAGE_SHIFT);
   9715 	}
    9716 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   9717 	delay(200);
   9718 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9719 	delay(200);
   9720 
   9721 	sc->phy.release(sc);
   9722 }
   9723 
   9724 /*
   9725  * wm_gmii_bm_readreg:	[mii interface function]
   9726  *
    9727  *	Read a PHY register on a BM PHY.
    9728  * This could be handled by the PHY layer if we didn't have to lock the
    9729  * resource ...
   9730  */
   9731 static int
   9732 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   9733 {
   9734 	struct wm_softc *sc = device_private(self);
   9735 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9736 	uint16_t val;
   9737 	int rv;
   9738 
   9739 	if (sc->phy.acquire(sc)) {
   9740 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9741 		    __func__);
   9742 		return 0;
   9743 	}
   9744 
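         	/*
         	 * Except on the 82574 and 82583, some registers (pages >= 768,
         	 * page 0 register 25 and register 31) are accessed at PHY
         	 * address 1.
         	 */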
   9745 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9746 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9747 		    || (reg == 31)) ? 1 : phy;
   9748 	/* Page 800 works differently than the rest so it has its own func */
   9749 	if (page == BM_WUC_PAGE) {
   9750 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9751 		rv = val;
   9752 		goto release;
   9753 	}
   9754 
   9755 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9756 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9757 		    && (sc->sc_type != WM_T_82583))
   9758 			wm_gmii_mdic_writereg(self, phy,
   9759 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9760 		else
   9761 			wm_gmii_mdic_writereg(self, phy,
   9762 			    BME1000_PHY_PAGE_SELECT, page);
   9763 	}
   9764 
   9765 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
   9766 
   9767 release:
   9768 	sc->phy.release(sc);
   9769 	return rv;
   9770 }
   9771 
   9772 /*
   9773  * wm_gmii_bm_writereg:	[mii interface function]
   9774  *
    9775  *	Write a PHY register on a BM PHY.
    9776  * This could be handled by the PHY layer if we didn't have to lock the
    9777  * resource ...
   9778  */
   9779 static void
   9780 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   9781 {
   9782 	struct wm_softc *sc = device_private(self);
   9783 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   9784 
   9785 	if (sc->phy.acquire(sc)) {
   9786 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9787 		    __func__);
   9788 		return;
   9789 	}
   9790 
   9791 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   9792 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   9793 		    || (reg == 31)) ? 1 : phy;
   9794 	/* Page 800 works differently than the rest so it has its own func */
   9795 	if (page == BM_WUC_PAGE) {
   9796 		uint16_t tmp;
   9797 
   9798 		tmp = val;
   9799 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9800 		goto release;
   9801 	}
   9802 
   9803 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9804 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   9805 		    && (sc->sc_type != WM_T_82583))
   9806 			wm_gmii_mdic_writereg(self, phy,
   9807 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   9808 		else
   9809 			wm_gmii_mdic_writereg(self, phy,
   9810 			    BME1000_PHY_PAGE_SELECT, page);
   9811 	}
   9812 
   9813 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
   9814 
   9815 release:
   9816 	sc->phy.release(sc);
   9817 }
   9818 
   9819 static void
    9820 wm_access_phy_wakeup_reg_bm(device_t self, int offset, uint16_t *val, int rd)
   9821 {
   9822 	struct wm_softc *sc = device_private(self);
   9823 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   9824 	uint16_t wuce, reg;
   9825 
   9826 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9827 		device_xname(sc->sc_dev), __func__));
   9828 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   9829 	if (sc->sc_type == WM_T_PCH) {
    9830 		/* XXX The e1000 driver does nothing here... why? */
   9831 	}
   9832 
   9833 	/*
   9834 	 * 1) Enable PHY wakeup register first.
   9835 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   9836 	 */
   9837 
   9838 	/* Set page 769 */
   9839 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9840 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9841 
   9842 	/* Read WUCE and save it */
   9843 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   9844 
   9845 	reg = wuce | BM_WUC_ENABLE_BIT;
   9846 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   9847 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
   9848 
   9849 	/* Select page 800 */
   9850 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9851 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   9852 
   9853 	/*
   9854 	 * 2) Access PHY wakeup register.
   9855 	 * See e1000_access_phy_wakeup_reg_bm.
   9856 	 */
   9857 
   9858 	/* Write page 800 */
   9859 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   9860 
   9861 	if (rd)
   9862 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   9863 	else
   9864 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   9865 
   9866 	/*
   9867 	 * 3) Disable PHY wakeup register.
   9868 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   9869 	 */
   9870 	/* Set page 769 */
   9871 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9872 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   9873 
   9874 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   9875 }
   9876 
   9877 /*
   9878  * wm_gmii_hv_readreg:	[mii interface function]
   9879  *
    9880  *	Read a PHY register on an HV PHY (PCH and newer).
    9881  * This could be handled by the PHY layer if we didn't have to lock the
    9882  * resource ...
   9883  */
   9884 static int
   9885 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   9886 {
   9887 	struct wm_softc *sc = device_private(self);
   9888 	int rv;
   9889 
   9890 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9891 		device_xname(sc->sc_dev), __func__));
   9892 	if (sc->phy.acquire(sc)) {
   9893 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9894 		    __func__);
   9895 		return 0;
   9896 	}
   9897 
   9898 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   9899 	sc->phy.release(sc);
   9900 	return rv;
   9901 }
   9902 
   9903 static int
   9904 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   9905 {
   9906 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9907 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9908 	uint16_t val;
   9909 	int rv;
   9910 
   9911 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9912 
   9913 	/* Page 800 works differently than the rest so it has its own func */
   9914 	if (page == BM_WUC_PAGE) {
   9915 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   9916 		return val;
   9917 	}
   9918 
    9919 	/*
    9920 	 * Pages below 768 (other than page 0) work differently than the
    9921 	 * rest and would need their own access function; not handled yet.
    9922 	 */
   9923 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9924 		printf("gmii_hv_readreg!!!\n");
   9925 		return 0;
   9926 	}
   9927 
   9928 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   9929 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   9930 		    page << BME1000_PAGE_SHIFT);
   9931 	}
   9932 
   9933 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
   9934 	return rv;
   9935 }
   9936 
   9937 /*
   9938  * wm_gmii_hv_writereg:	[mii interface function]
   9939  *
    9940  *	Write a PHY register on an HV PHY (PCH and newer).
    9941  * This could be handled by the PHY layer if we didn't have to lock the
    9942  * resource ...
   9943  */
   9944 static void
   9945 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   9946 {
   9947 	struct wm_softc *sc = device_private(self);
   9948 
   9949 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9950 		device_xname(sc->sc_dev), __func__));
   9951 
   9952 	if (sc->phy.acquire(sc)) {
   9953 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9954 		    __func__);
   9955 		return;
   9956 	}
   9957 
   9958 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   9959 	sc->phy.release(sc);
   9960 }
   9961 
   9962 static void
   9963 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   9964 {
   9965 	struct wm_softc *sc = device_private(self);
   9966 	uint16_t page = BM_PHY_REG_PAGE(reg);
   9967 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   9968 
   9969 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   9970 
   9971 	/* Page 800 works differently than the rest so it has its own func */
   9972 	if (page == BM_WUC_PAGE) {
   9973 		uint16_t tmp;
   9974 
   9975 		tmp = val;
   9976 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   9977 		return;
   9978 	}
   9979 
    9980 	/*
    9981 	 * Pages below 768 (other than page 0) work differently than the
    9982 	 * rest and would need their own access function; not handled yet.
    9983 	 */
   9984 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   9985 		printf("gmii_hv_writereg!!!\n");
   9986 		return;
   9987 	}
   9988 
   9989 	{
   9990 		/*
   9991 		 * XXX Workaround MDIO accesses being disabled after entering
   9992 		 * IEEE Power Down (whenever bit 11 of the PHY control
   9993 		 * register is set)
   9994 		 */
   9995 		if (sc->sc_phytype == WMPHY_82578) {
   9996 			struct mii_softc *child;
   9997 
   9998 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   9999 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10000 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10001 			    && ((val & (1 << 11)) != 0)) {
   10002 				printf("XXX need workaround\n");
   10003 			}
   10004 		}
   10005 
   10006 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10007 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   10008 			    page << BME1000_PAGE_SHIFT);
   10009 		}
   10010 	}
   10011 
   10012 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
   10013 }
   10014 
   10015 /*
   10016  * wm_gmii_82580_readreg:	[mii interface function]
   10017  *
   10018  *	Read a PHY register on the 82580 and I350.
   10019  * This could be handled by the PHY layer if we didn't have to lock the
    10020  * resource ...
   10021  */
   10022 static int
   10023 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   10024 {
   10025 	struct wm_softc *sc = device_private(self);
   10026 	int rv;
   10027 
   10028 	if (sc->phy.acquire(sc) != 0) {
   10029 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10030 		    __func__);
   10031 		return 0;
   10032 	}
   10033 
   10034 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   10035 
   10036 	sc->phy.release(sc);
   10037 	return rv;
   10038 }
   10039 
   10040 /*
   10041  * wm_gmii_82580_writereg:	[mii interface function]
   10042  *
   10043  *	Write a PHY register on the 82580 and I350.
   10044  * This could be handled by the PHY layer if we didn't have to lock the
    10045  * resource ...
   10046  */
   10047 static void
   10048 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   10049 {
   10050 	struct wm_softc *sc = device_private(self);
   10051 
   10052 	if (sc->phy.acquire(sc) != 0) {
   10053 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10054 		    __func__);
   10055 		return;
   10056 	}
   10057 
   10058 	wm_gmii_mdic_writereg(self, phy, reg, val);
   10059 
   10060 	sc->phy.release(sc);
   10061 }
   10062 
   10063 /*
   10064  * wm_gmii_gs40g_readreg:	[mii interface function]
   10065  *
    10066  *	Read a PHY register on the I210 and I211.
   10067  * This could be handled by the PHY layer if we didn't have to lock the
    10068  * resource ...
   10069  */
   10070 static int
   10071 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   10072 {
   10073 	struct wm_softc *sc = device_private(self);
   10074 	int page, offset;
   10075 	int rv;
   10076 
   10077 	/* Acquire semaphore */
   10078 	if (sc->phy.acquire(sc)) {
   10079 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10080 		    __func__);
   10081 		return 0;
   10082 	}
   10083 
   10084 	/* Page select */
   10085 	page = reg >> GS40G_PAGE_SHIFT;
   10086 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10087 
   10088 	/* Read reg */
   10089 	offset = reg & GS40G_OFFSET_MASK;
   10090 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   10091 
   10092 	sc->phy.release(sc);
   10093 	return rv;
   10094 }
   10095 
   10096 /*
   10097  * wm_gmii_gs40g_writereg:	[mii interface function]
   10098  *
   10099  *	Write a PHY register on the I210 and I211.
   10100  * This could be handled by the PHY layer if we didn't have to lock the
    10101  * resource ...
   10102  */
   10103 static void
   10104 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   10105 {
   10106 	struct wm_softc *sc = device_private(self);
   10107 	int page, offset;
   10108 
   10109 	/* Acquire semaphore */
   10110 	if (sc->phy.acquire(sc)) {
   10111 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10112 		    __func__);
   10113 		return;
   10114 	}
   10115 
   10116 	/* Page select */
   10117 	page = reg >> GS40G_PAGE_SHIFT;
   10118 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   10119 
   10120 	/* Write reg */
   10121 	offset = reg & GS40G_OFFSET_MASK;
   10122 	wm_gmii_mdic_writereg(self, phy, offset, val);
   10123 
   10124 	/* Release semaphore */
   10125 	sc->phy.release(sc);
   10126 }
   10127 
   10128 /*
   10129  * wm_gmii_statchg:	[mii interface function]
   10130  *
   10131  *	Callback from MII layer when media changes.
   10132  */
   10133 static void
   10134 wm_gmii_statchg(struct ifnet *ifp)
   10135 {
   10136 	struct wm_softc *sc = ifp->if_softc;
   10137 	struct mii_data *mii = &sc->sc_mii;
   10138 
   10139 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10140 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10141 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10142 
   10143 	/*
   10144 	 * Get flow control negotiation result.
   10145 	 */
   10146 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10147 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10148 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10149 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10150 	}
   10151 
   10152 	if (sc->sc_flowflags & IFM_FLOW) {
   10153 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10154 			sc->sc_ctrl |= CTRL_TFCE;
   10155 			sc->sc_fcrtl |= FCRTL_XONE;
   10156 		}
   10157 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10158 			sc->sc_ctrl |= CTRL_RFCE;
   10159 	}
   10160 
   10161 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10162 		DPRINTF(WM_DEBUG_LINK,
   10163 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10164 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10165 	} else {
   10166 		DPRINTF(WM_DEBUG_LINK,
   10167 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10168 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10169 	}
   10170 
   10171 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10172 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10173 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10174 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10175 	if (sc->sc_type == WM_T_80003) {
   10176 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10177 		case IFM_1000_T:
   10178 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10179 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
    10180 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
   10181 			break;
   10182 		default:
   10183 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10184 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
    10185 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
   10186 			break;
   10187 		}
   10188 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10189 	}
   10190 }
   10191 
   10192 /* kumeran related (80003, ICH* and PCH*) */
   10193 
   10194 /*
   10195  * wm_kmrn_readreg:
   10196  *
   10197  *	Read a kumeran register
   10198  */
   10199 static int
   10200 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   10201 {
   10202 	int rv;
   10203 
   10204 	if (sc->sc_type == WM_T_80003)
   10205 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10206 	else
   10207 		rv = sc->phy.acquire(sc);
   10208 	if (rv != 0) {
   10209 		aprint_error_dev(sc->sc_dev,
   10210 		    "%s: failed to get semaphore\n", __func__);
   10211 		return 0;
   10212 	}
   10213 
   10214 	rv = wm_kmrn_readreg_locked(sc, reg);
   10215 
   10216 	if (sc->sc_type == WM_T_80003)
   10217 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10218 	else
   10219 		sc->phy.release(sc);
   10220 
   10221 	return rv;
   10222 }
   10223 
   10224 static int
   10225 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   10226 {
   10227 	int rv;
   10228 
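          	/*
          	 * Writing the offset with REN set starts a Kumeran read;
          	 * after a short delay the data is valid in the low bits of
          	 * KUMCTRLSTA.
          	 */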
   10229 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10230 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10231 	    KUMCTRLSTA_REN);
   10232 	CSR_WRITE_FLUSH(sc);
   10233 	delay(2);
   10234 
   10235 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10236 
   10237 	return rv;
   10238 }
   10239 
   10240 /*
   10241  * wm_kmrn_writereg:
   10242  *
   10243  *	Write a kumeran register
   10244  */
   10245 static void
   10246 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   10247 {
   10248 	int rv;
   10249 
   10250 	if (sc->sc_type == WM_T_80003)
   10251 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10252 	else
   10253 		rv = sc->phy.acquire(sc);
   10254 	if (rv != 0) {
   10255 		aprint_error_dev(sc->sc_dev,
   10256 		    "%s: failed to get semaphore\n", __func__);
   10257 		return;
   10258 	}
   10259 
   10260 	wm_kmrn_writereg_locked(sc, reg, val);
   10261 
   10262 	if (sc->sc_type == WM_T_80003)
   10263 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10264 	else
   10265 		sc->phy.release(sc);
   10266 }
   10267 
   10268 static void
   10269 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   10270 {
   10271 
   10272 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10273 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10274 	    (val & KUMCTRLSTA_MASK));
   10275 }
   10276 
   10277 /* SGMII related */
   10278 
   10279 /*
   10280  * wm_sgmii_uses_mdio
   10281  *
   10282  * Check whether the transaction is to the internal PHY or the external
   10283  * MDIO interface. Return true if it's MDIO.
   10284  */
   10285 static bool
   10286 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10287 {
   10288 	uint32_t reg;
   10289 	bool ismdio = false;
   10290 
   10291 	switch (sc->sc_type) {
   10292 	case WM_T_82575:
   10293 	case WM_T_82576:
   10294 		reg = CSR_READ(sc, WMREG_MDIC);
   10295 		ismdio = ((reg & MDIC_DEST) != 0);
   10296 		break;
   10297 	case WM_T_82580:
   10298 	case WM_T_I350:
   10299 	case WM_T_I354:
   10300 	case WM_T_I210:
   10301 	case WM_T_I211:
   10302 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10303 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10304 		break;
   10305 	default:
   10306 		break;
   10307 	}
   10308 
   10309 	return ismdio;
   10310 }
   10311 
   10312 /*
   10313  * wm_sgmii_readreg:	[mii interface function]
   10314  *
   10315  *	Read a PHY register on the SGMII
   10316  * This could be handled by the PHY layer if we didn't have to lock the
    10317  * resource ...
   10318  */
   10319 static int
   10320 wm_sgmii_readreg(device_t self, int phy, int reg)
   10321 {
   10322 	struct wm_softc *sc = device_private(self);
   10323 	uint32_t i2ccmd;
   10324 	int i, rv;
   10325 
   10326 	if (sc->phy.acquire(sc)) {
   10327 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10328 		    __func__);
   10329 		return 0;
   10330 	}
   10331 
   10332 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10333 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10334 	    | I2CCMD_OPCODE_READ;
   10335 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10336 
   10337 	/* Poll the ready bit */
   10338 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10339 		delay(50);
   10340 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10341 		if (i2ccmd & I2CCMD_READY)
   10342 			break;
   10343 	}
   10344 	if ((i2ccmd & I2CCMD_READY) == 0)
   10345 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   10346 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10347 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10348 
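          	/* The I2C interface returns the data byte-swapped; undo it. */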
   10349 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10350 
   10351 	sc->phy.release(sc);
   10352 	return rv;
   10353 }
   10354 
   10355 /*
   10356  * wm_sgmii_writereg:	[mii interface function]
   10357  *
   10358  *	Write a PHY register on the SGMII.
   10359  * This could be handled by the PHY layer if we didn't have to lock the
    10360  * resource ...
   10361  */
   10362 static void
   10363 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   10364 {
   10365 	struct wm_softc *sc = device_private(self);
   10366 	uint32_t i2ccmd;
   10367 	int i;
   10368 	int val_swapped;
   10369 
   10370 	if (sc->phy.acquire(sc) != 0) {
   10371 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10372 		    __func__);
   10373 		return;
   10374 	}
   10375 	/* Swap the data bytes for the I2C interface */
   10376 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10377 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10378 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10379 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10380 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10381 
   10382 	/* Poll the ready bit */
   10383 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10384 		delay(50);
   10385 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10386 		if (i2ccmd & I2CCMD_READY)
   10387 			break;
   10388 	}
   10389 	if ((i2ccmd & I2CCMD_READY) == 0)
   10390 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   10391 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10392 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   10393 
   10394 	sc->phy.release(sc);
   10395 }
   10396 
   10397 /* TBI related */
   10398 
   10399 /*
   10400  * wm_tbi_mediainit:
   10401  *
   10402  *	Initialize media for use on 1000BASE-X devices.
   10403  */
   10404 static void
   10405 wm_tbi_mediainit(struct wm_softc *sc)
   10406 {
   10407 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10408 	const char *sep = "";
   10409 
   10410 	if (sc->sc_type < WM_T_82543)
   10411 		sc->sc_tipg = TIPG_WM_DFLT;
   10412 	else
   10413 		sc->sc_tipg = TIPG_LG_DFLT;
   10414 
   10415 	sc->sc_tbi_serdes_anegticks = 5;
   10416 
   10417 	/* Initialize our media structures */
   10418 	sc->sc_mii.mii_ifp = ifp;
   10419 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10420 
   10421 	if ((sc->sc_type >= WM_T_82575)
   10422 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10423 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10424 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10425 	else
   10426 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10427 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10428 
   10429 	/*
   10430 	 * SWD Pins:
   10431 	 *
   10432 	 *	0 = Link LED (output)
   10433 	 *	1 = Loss Of Signal (input)
   10434 	 */
   10435 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10436 
   10437 	/* XXX Perhaps this is only for TBI */
   10438 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10439 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10440 
   10441 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10442 		sc->sc_ctrl &= ~CTRL_LRST;
   10443 
   10444 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10445 
   10446 #define	ADD(ss, mm, dd)							\
   10447 do {									\
   10448 	aprint_normal("%s%s", sep, ss);					\
   10449 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10450 	sep = ", ";							\
   10451 } while (/*CONSTCOND*/0)
   10452 
   10453 	aprint_normal_dev(sc->sc_dev, "");
   10454 
   10455 	if (sc->sc_type == WM_T_I354) {
   10456 		uint32_t status;
   10457 
   10458 		status = CSR_READ(sc, WMREG_STATUS);
   10459 		if (((status & STATUS_2P5_SKU) != 0)
   10460 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
    10461 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX, ANAR_X_FD);
    10462 		} else
    10463 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX, ANAR_X_FD);
   10464 	} else if (sc->sc_type == WM_T_82545) {
   10465 		/* Only 82545 is LX (XXX except SFP) */
   10466 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10467 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10468 	} else {
   10469 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10470 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10471 	}
   10472 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10473 	aprint_normal("\n");
   10474 
   10475 #undef ADD
   10476 
   10477 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10478 }
   10479 
   10480 /*
   10481  * wm_tbi_mediachange:	[ifmedia interface function]
   10482  *
   10483  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10484  */
   10485 static int
   10486 wm_tbi_mediachange(struct ifnet *ifp)
   10487 {
   10488 	struct wm_softc *sc = ifp->if_softc;
   10489 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10490 	uint32_t status;
   10491 	int i;
   10492 
   10493 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10494 		/* XXX need some work for >= 82571 and < 82575 */
   10495 		if (sc->sc_type < WM_T_82575)
   10496 			return 0;
   10497 	}
   10498 
   10499 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10500 	    || (sc->sc_type >= WM_T_82575))
   10501 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10502 
   10503 	sc->sc_ctrl &= ~CTRL_LRST;
   10504 	sc->sc_txcw = TXCW_ANE;
   10505 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10506 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10507 	else if (ife->ifm_media & IFM_FDX)
   10508 		sc->sc_txcw |= TXCW_FD;
   10509 	else
   10510 		sc->sc_txcw |= TXCW_HD;
   10511 
   10512 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10513 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10514 
   10515 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10516 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10517 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10518 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10519 	CSR_WRITE_FLUSH(sc);
   10520 	delay(1000);
   10521 
   10522 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10523 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10524 
   10525 	/*
    10526 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set when
    10527 	 * the optics detect a signal; on older chips a signal reads as 0.
   10528 	 */
   10529 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10530 		/* Have signal; wait for the link to come up. */
   10531 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10532 			delay(10000);
   10533 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10534 				break;
   10535 		}
   10536 
   10537 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10538 			    device_xname(sc->sc_dev),i));
   10539 
   10540 		status = CSR_READ(sc, WMREG_STATUS);
   10541 		DPRINTF(WM_DEBUG_LINK,
   10542 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10543 			device_xname(sc->sc_dev),status, STATUS_LU));
   10544 		if (status & STATUS_LU) {
   10545 			/* Link is up. */
   10546 			DPRINTF(WM_DEBUG_LINK,
   10547 			    ("%s: LINK: set media -> link up %s\n",
   10548 			    device_xname(sc->sc_dev),
   10549 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10550 
   10551 			/*
   10552 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   10553 			 * so we should update sc->sc_ctrl
   10554 			 */
   10555 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10556 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10557 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10558 			if (status & STATUS_FD)
   10559 				sc->sc_tctl |=
   10560 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10561 			else
   10562 				sc->sc_tctl |=
   10563 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10564 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10565 				sc->sc_fcrtl |= FCRTL_XONE;
   10566 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10567 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10568 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10569 				      sc->sc_fcrtl);
   10570 			sc->sc_tbi_linkup = 1;
   10571 		} else {
   10572 			if (i == WM_LINKUP_TIMEOUT)
   10573 				wm_check_for_link(sc);
   10574 			/* Link is down. */
   10575 			DPRINTF(WM_DEBUG_LINK,
   10576 			    ("%s: LINK: set media -> link down\n",
   10577 			    device_xname(sc->sc_dev)));
   10578 			sc->sc_tbi_linkup = 0;
   10579 		}
   10580 	} else {
   10581 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10582 		    device_xname(sc->sc_dev)));
   10583 		sc->sc_tbi_linkup = 0;
   10584 	}
   10585 
   10586 	wm_tbi_serdes_set_linkled(sc);
   10587 
   10588 	return 0;
   10589 }
   10590 
   10591 /*
   10592  * wm_tbi_mediastatus:	[ifmedia interface function]
   10593  *
   10594  *	Get the current interface media status on a 1000BASE-X device.
   10595  */
   10596 static void
   10597 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10598 {
   10599 	struct wm_softc *sc = ifp->if_softc;
   10600 	uint32_t ctrl, status;
   10601 
   10602 	ifmr->ifm_status = IFM_AVALID;
   10603 	ifmr->ifm_active = IFM_ETHER;
   10604 
   10605 	status = CSR_READ(sc, WMREG_STATUS);
   10606 	if ((status & STATUS_LU) == 0) {
   10607 		ifmr->ifm_active |= IFM_NONE;
   10608 		return;
   10609 	}
   10610 
   10611 	ifmr->ifm_status |= IFM_ACTIVE;
   10612 	/* Only 82545 is LX */
   10613 	if (sc->sc_type == WM_T_82545)
   10614 		ifmr->ifm_active |= IFM_1000_LX;
   10615 	else
   10616 		ifmr->ifm_active |= IFM_1000_SX;
   10617 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10618 		ifmr->ifm_active |= IFM_FDX;
   10619 	else
   10620 		ifmr->ifm_active |= IFM_HDX;
   10621 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10622 	if (ctrl & CTRL_RFCE)
   10623 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10624 	if (ctrl & CTRL_TFCE)
   10625 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10626 }
   10627 
   10628 /* XXX TBI only */
   10629 static int
   10630 wm_check_for_link(struct wm_softc *sc)
   10631 {
   10632 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10633 	uint32_t rxcw;
   10634 	uint32_t ctrl;
   10635 	uint32_t status;
   10636 	uint32_t sig;
   10637 
   10638 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10639 		/* XXX need some work for >= 82571 */
   10640 		if (sc->sc_type >= WM_T_82571) {
   10641 			sc->sc_tbi_linkup = 1;
   10642 			return 0;
   10643 		}
   10644 	}
   10645 
   10646 	rxcw = CSR_READ(sc, WMREG_RXCW);
   10647 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10648 	status = CSR_READ(sc, WMREG_STATUS);
   10649 
   10650 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   10651 
   10652 	DPRINTF(WM_DEBUG_LINK,
   10653 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   10654 		device_xname(sc->sc_dev), __func__,
   10655 		((ctrl & CTRL_SWDPIN(1)) == sig),
   10656 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   10657 
   10658 	/*
   10659 	 * SWDPIN   LU RXCW
   10660 	 *      0    0    0
   10661 	 *      0    0    1	(should not happen)
   10662 	 *      0    1    0	(should not happen)
   10663 	 *      0    1    1	(should not happen)
    10664 	 *      1    0    0	Disable autonegotiation and force link up
    10665 	 *      1    0    1	Got /C/ but no link yet
    10666 	 *      1    1    0	(link up)
    10667 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
   10668 	 *
   10669 	 */
   10670 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10671 	    && ((status & STATUS_LU) == 0)
   10672 	    && ((rxcw & RXCW_C) == 0)) {
   10673 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   10674 			__func__));
   10675 		sc->sc_tbi_linkup = 0;
   10676 		/* Disable auto-negotiation in the TXCW register */
   10677 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   10678 
   10679 		/*
   10680 		 * Force link-up and also force full-duplex.
   10681 		 *
    10682 		 * NOTE: The hardware updates CTRL's TFCE and RFCE bits
    10683 		 * automatically, so we should update sc->sc_ctrl
   10684 		 */
   10685 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   10686 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10687 	} else if (((status & STATUS_LU) != 0)
   10688 	    && ((rxcw & RXCW_C) != 0)
   10689 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   10690 		sc->sc_tbi_linkup = 1;
   10691 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   10692 			__func__));
   10693 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10694 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   10695 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   10696 	    && ((rxcw & RXCW_C) != 0)) {
   10697 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   10698 	} else {
   10699 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   10700 			status));
   10701 	}
   10702 
   10703 	return 0;
   10704 }
   10705 
   10706 /*
   10707  * wm_tbi_tick:
   10708  *
   10709  *	Check the link on TBI devices.
   10710  *	This function acts as mii_tick().
   10711  */
   10712 static void
   10713 wm_tbi_tick(struct wm_softc *sc)
   10714 {
   10715 	struct mii_data *mii = &sc->sc_mii;
   10716 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10717 	uint32_t status;
   10718 
   10719 	KASSERT(WM_CORE_LOCKED(sc));
   10720 
   10721 	status = CSR_READ(sc, WMREG_STATUS);
   10722 
   10723 	/* XXX is this needed? */
   10724 	(void)CSR_READ(sc, WMREG_RXCW);
   10725 	(void)CSR_READ(sc, WMREG_CTRL);
   10726 
   10727 	/* set link status */
   10728 	if ((status & STATUS_LU) == 0) {
   10729 		DPRINTF(WM_DEBUG_LINK,
   10730 		    ("%s: LINK: checklink -> down\n",
   10731 			device_xname(sc->sc_dev)));
   10732 		sc->sc_tbi_linkup = 0;
   10733 	} else if (sc->sc_tbi_linkup == 0) {
   10734 		DPRINTF(WM_DEBUG_LINK,
   10735 		    ("%s: LINK: checklink -> up %s\n",
   10736 			device_xname(sc->sc_dev),
   10737 			(status & STATUS_FD) ? "FDX" : "HDX"));
   10738 		sc->sc_tbi_linkup = 1;
   10739 		sc->sc_tbi_serdes_ticks = 0;
   10740 	}
   10741 
   10742 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   10743 		goto setled;
   10744 
   10745 	if ((status & STATUS_LU) == 0) {
   10746 		sc->sc_tbi_linkup = 0;
   10747 		/* If the timer expired, retry autonegotiation */
   10748 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10749 		    && (++sc->sc_tbi_serdes_ticks
   10750 			>= sc->sc_tbi_serdes_anegticks)) {
   10751 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10752 			sc->sc_tbi_serdes_ticks = 0;
   10753 			/*
   10754 			 * Reset the link, and let autonegotiation do
   10755 			 * its thing
   10756 			 */
   10757 			sc->sc_ctrl |= CTRL_LRST;
   10758 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10759 			CSR_WRITE_FLUSH(sc);
   10760 			delay(1000);
   10761 			sc->sc_ctrl &= ~CTRL_LRST;
   10762 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10763 			CSR_WRITE_FLUSH(sc);
   10764 			delay(1000);
   10765 			CSR_WRITE(sc, WMREG_TXCW,
   10766 			    sc->sc_txcw & ~TXCW_ANE);
   10767 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10768 		}
   10769 	}
   10770 
   10771 setled:
   10772 	wm_tbi_serdes_set_linkled(sc);
   10773 }
   10774 
   10775 /* SERDES related */
   10776 static void
   10777 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   10778 {
   10779 	uint32_t reg;
   10780 
   10781 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10782 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   10783 		return;
   10784 
   10785 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   10786 	reg |= PCS_CFG_PCS_EN;
   10787 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   10788 
   10789 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   10790 	reg &= ~CTRL_EXT_SWDPIN(3);
   10791 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   10792 	CSR_WRITE_FLUSH(sc);
   10793 }
   10794 
   10795 static int
   10796 wm_serdes_mediachange(struct ifnet *ifp)
   10797 {
   10798 	struct wm_softc *sc = ifp->if_softc;
   10799 	bool pcs_autoneg = true; /* XXX */
   10800 	uint32_t ctrl_ext, pcs_lctl, reg;
   10801 
   10802 	/* XXX Currently, this function is not called on 8257[12] */
   10803 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10804 	    || (sc->sc_type >= WM_T_82575))
   10805 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10806 
   10807 	wm_serdes_power_up_link_82575(sc);
   10808 
   10809 	sc->sc_ctrl |= CTRL_SLU;
   10810 
   10811 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   10812 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   10813 
   10814 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   10815 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   10816 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   10817 	case CTRL_EXT_LINK_MODE_SGMII:
   10818 		pcs_autoneg = true;
   10819 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   10820 		break;
   10821 	case CTRL_EXT_LINK_MODE_1000KX:
   10822 		pcs_autoneg = false;
   10823 		/* FALLTHROUGH */
   10824 	default:
   10825 		if ((sc->sc_type == WM_T_82575)
   10826 		    || (sc->sc_type == WM_T_82576)) {
   10827 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   10828 				pcs_autoneg = false;
   10829 		}
   10830 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   10831 		    | CTRL_FRCFDX;
   10832 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   10833 	}
   10834 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10835 
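          	/*
          	 * Either restart autonegotiation, advertising both pause
          	 * bits, or force the speed/duplex chosen above together with
          	 * the flow-control setting.
          	 */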
   10836 	if (pcs_autoneg) {
   10837 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   10838 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   10839 
   10840 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   10841 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   10842 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   10843 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   10844 	} else
   10845 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   10846 
   10847 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   10848 
   10849 
   10850 	return 0;
   10851 }
   10852 
   10853 static void
   10854 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10855 {
   10856 	struct wm_softc *sc = ifp->if_softc;
   10857 	struct mii_data *mii = &sc->sc_mii;
   10858 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10859 	uint32_t pcs_adv, pcs_lpab, reg;
   10860 
   10861 	ifmr->ifm_status = IFM_AVALID;
   10862 	ifmr->ifm_active = IFM_ETHER;
   10863 
   10864 	/* Check PCS */
   10865 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10866 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   10867 		ifmr->ifm_active |= IFM_NONE;
   10868 		sc->sc_tbi_linkup = 0;
   10869 		goto setled;
   10870 	}
   10871 
   10872 	sc->sc_tbi_linkup = 1;
   10873 	ifmr->ifm_status |= IFM_ACTIVE;
   10874 	if (sc->sc_type == WM_T_I354) {
   10875 		uint32_t status;
   10876 
   10877 		status = CSR_READ(sc, WMREG_STATUS);
   10878 		if (((status & STATUS_2P5_SKU) != 0)
   10879 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10880 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   10881 		} else
   10882 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   10883 	} else {
   10884 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   10885 		case PCS_LSTS_SPEED_10:
   10886 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   10887 			break;
   10888 		case PCS_LSTS_SPEED_100:
   10889 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   10890 			break;
   10891 		case PCS_LSTS_SPEED_1000:
   10892 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10893 			break;
   10894 		default:
   10895 			device_printf(sc->sc_dev, "Unknown speed\n");
   10896 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   10897 			break;
   10898 		}
   10899 	}
   10900 	if ((reg & PCS_LSTS_FDX) != 0)
   10901 		ifmr->ifm_active |= IFM_FDX;
   10902 	else
   10903 		ifmr->ifm_active |= IFM_HDX;
   10904 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   10905 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   10906 		/* Check flow */
   10907 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10908 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   10909 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   10910 			goto setled;
   10911 		}
   10912 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   10913 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   10914 		DPRINTF(WM_DEBUG_LINK,
   10915 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
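          		/*
          		 * Resolve flow control from the advertised and link
          		 * partner pause bits: symmetric pause when both sides
          		 * advertise SYM, otherwise TX-only or RX-only pause
          		 * depending on the ASYM bits.
          		 */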
   10916 		if ((pcs_adv & TXCW_SYM_PAUSE)
   10917 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   10918 			mii->mii_media_active |= IFM_FLOW
   10919 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   10920 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   10921 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10922 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   10923 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10924 			mii->mii_media_active |= IFM_FLOW
   10925 			    | IFM_ETH_TXPAUSE;
   10926 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   10927 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   10928 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   10929 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   10930 			mii->mii_media_active |= IFM_FLOW
   10931 			    | IFM_ETH_RXPAUSE;
   10932 		}
   10933 	}
   10934 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   10935 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   10936 setled:
   10937 	wm_tbi_serdes_set_linkled(sc);
   10938 }
   10939 
   10940 /*
   10941  * wm_serdes_tick:
   10942  *
   10943  *	Check the link on serdes devices.
   10944  */
   10945 static void
   10946 wm_serdes_tick(struct wm_softc *sc)
   10947 {
   10948 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10949 	struct mii_data *mii = &sc->sc_mii;
   10950 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   10951 	uint32_t reg;
   10952 
   10953 	KASSERT(WM_CORE_LOCKED(sc));
   10954 
   10955 	mii->mii_media_status = IFM_AVALID;
   10956 	mii->mii_media_active = IFM_ETHER;
   10957 
   10958 	/* Check PCS */
   10959 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   10960 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   10961 		mii->mii_media_status |= IFM_ACTIVE;
   10962 		sc->sc_tbi_linkup = 1;
   10963 		sc->sc_tbi_serdes_ticks = 0;
   10964 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   10965 		if ((reg & PCS_LSTS_FDX) != 0)
   10966 			mii->mii_media_active |= IFM_FDX;
   10967 		else
   10968 			mii->mii_media_active |= IFM_HDX;
   10969 	} else {
   10970 		mii->mii_media_status |= IFM_NONE;
   10971 		sc->sc_tbi_linkup = 0;
   10972 		/* If the timer expired, retry autonegotiation */
   10973 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10974 		    && (++sc->sc_tbi_serdes_ticks
   10975 			>= sc->sc_tbi_serdes_anegticks)) {
   10976 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   10977 			sc->sc_tbi_serdes_ticks = 0;
   10978 			/* XXX */
   10979 			wm_serdes_mediachange(ifp);
   10980 		}
   10981 	}
   10982 
   10983 	wm_tbi_serdes_set_linkled(sc);
   10984 }
   10985 
   10986 /* SFP related */
   10987 
   10988 static int
   10989 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   10990 {
   10991 	uint32_t i2ccmd;
   10992 	int i;
   10993 
   10994 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   10995 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10996 
   10997 	/* Poll the ready bit */
   10998 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10999 		delay(50);
   11000 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11001 		if (i2ccmd & I2CCMD_READY)
   11002 			break;
   11003 	}
   11004 	if ((i2ccmd & I2CCMD_READY) == 0)
   11005 		return -1;
   11006 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11007 		return -1;
   11008 
   11009 	*data = i2ccmd & 0x00ff;
   11010 
   11011 	return 0;
   11012 }
   11013 
   11014 static uint32_t
   11015 wm_sfp_get_media_type(struct wm_softc *sc)
   11016 {
   11017 	uint32_t ctrl_ext;
   11018 	uint8_t val = 0;
   11019 	int timeout = 3;
   11020 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11021 	int rv = -1;
   11022 
   11023 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11024 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11025 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11026 	CSR_WRITE_FLUSH(sc);
   11027 
   11028 	/* Read SFP module data */
   11029 	while (timeout) {
   11030 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11031 		if (rv == 0)
   11032 			break;
   11033 		delay(100*1000); /* XXX too big */
   11034 		timeout--;
   11035 	}
   11036 	if (rv != 0)
   11037 		goto out;
   11038 	switch (val) {
   11039 	case SFF_SFP_ID_SFF:
   11040 		aprint_normal_dev(sc->sc_dev,
   11041 		    "Module/Connector soldered to board\n");
   11042 		break;
   11043 	case SFF_SFP_ID_SFP:
   11044 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11045 		break;
   11046 	case SFF_SFP_ID_UNKNOWN:
   11047 		goto out;
   11048 	default:
   11049 		break;
   11050 	}
   11051 
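          	/*
          	 * Read the module's Ethernet compliance codes and map them
          	 * to one of the driver's media types.
          	 */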
   11052 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11053 	if (rv != 0) {
   11054 		goto out;
   11055 	}
   11056 
   11057 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11058 		mediatype = WM_MEDIATYPE_SERDES;
   11059 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   11060 		sc->sc_flags |= WM_F_SGMII;
   11061 		mediatype = WM_MEDIATYPE_COPPER;
   11062 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   11063 		sc->sc_flags |= WM_F_SGMII;
   11064 		mediatype = WM_MEDIATYPE_SERDES;
   11065 	}
   11066 
   11067 out:
   11068 	/* Restore I2C interface setting */
   11069 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11070 
   11071 	return mediatype;
   11072 }
   11073 
   11074 /*
   11075  * NVM related.
   11076  * Microwire, SPI (w/wo EERD) and Flash.
   11077  */
   11078 
   11079 /* Both spi and uwire */
   11080 
   11081 /*
   11082  * wm_eeprom_sendbits:
   11083  *
   11084  *	Send a series of bits to the EEPROM.
   11085  */
   11086 static void
   11087 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11088 {
   11089 	uint32_t reg;
   11090 	int x;
   11091 
   11092 	reg = CSR_READ(sc, WMREG_EECD);
   11093 
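          	/*
          	 * Shift the bits out MSB first: place each bit on DI and
          	 * pulse SK; the EEPROM latches DI on the rising edge of SK.
          	 */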
   11094 	for (x = nbits; x > 0; x--) {
   11095 		if (bits & (1U << (x - 1)))
   11096 			reg |= EECD_DI;
   11097 		else
   11098 			reg &= ~EECD_DI;
   11099 		CSR_WRITE(sc, WMREG_EECD, reg);
   11100 		CSR_WRITE_FLUSH(sc);
   11101 		delay(2);
   11102 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11103 		CSR_WRITE_FLUSH(sc);
   11104 		delay(2);
   11105 		CSR_WRITE(sc, WMREG_EECD, reg);
   11106 		CSR_WRITE_FLUSH(sc);
   11107 		delay(2);
   11108 	}
   11109 }
   11110 
   11111 /*
   11112  * wm_eeprom_recvbits:
   11113  *
   11114  *	Receive a series of bits from the EEPROM.
   11115  */
   11116 static void
   11117 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11118 {
   11119 	uint32_t reg, val;
   11120 	int x;
   11121 
   11122 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11123 
   11124 	val = 0;
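          	/*
          	 * Shift the bits in MSB first: raise SK, sample DO, then
          	 * lower SK again.
          	 */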
   11125 	for (x = nbits; x > 0; x--) {
   11126 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11127 		CSR_WRITE_FLUSH(sc);
   11128 		delay(2);
   11129 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11130 			val |= (1U << (x - 1));
   11131 		CSR_WRITE(sc, WMREG_EECD, reg);
   11132 		CSR_WRITE_FLUSH(sc);
   11133 		delay(2);
   11134 	}
   11135 	*valp = val;
   11136 }
   11137 
   11138 /* Microwire */
   11139 
   11140 /*
   11141  * wm_nvm_read_uwire:
   11142  *
   11143  *	Read a word from the EEPROM using the MicroWire protocol.
   11144  */
   11145 static int
   11146 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11147 {
   11148 	uint32_t reg, val;
   11149 	int i;
   11150 
   11151 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11152 		device_xname(sc->sc_dev), __func__));
   11153 
   11154 	for (i = 0; i < wordcnt; i++) {
   11155 		/* Clear SK and DI. */
   11156 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11157 		CSR_WRITE(sc, WMREG_EECD, reg);
   11158 
   11159 		/*
   11160 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11161 		 * and Xen.
   11162 		 *
   11163 		 * We use this workaround only for 82540 because qemu's
   11164 		 * e1000 act as 82540.
   11165 		 */
   11166 		if (sc->sc_type == WM_T_82540) {
   11167 			reg |= EECD_SK;
   11168 			CSR_WRITE(sc, WMREG_EECD, reg);
   11169 			reg &= ~EECD_SK;
   11170 			CSR_WRITE(sc, WMREG_EECD, reg);
   11171 			CSR_WRITE_FLUSH(sc);
   11172 			delay(2);
   11173 		}
   11174 		/* XXX: end of workaround */
   11175 
   11176 		/* Set CHIP SELECT. */
   11177 		reg |= EECD_CS;
   11178 		CSR_WRITE(sc, WMREG_EECD, reg);
   11179 		CSR_WRITE_FLUSH(sc);
   11180 		delay(2);
   11181 
   11182 		/* Shift in the READ command. */
   11183 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11184 
   11185 		/* Shift in address. */
   11186 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11187 
   11188 		/* Shift out the data. */
   11189 		wm_eeprom_recvbits(sc, &val, 16);
   11190 		data[i] = val & 0xffff;
   11191 
   11192 		/* Clear CHIP SELECT. */
   11193 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11194 		CSR_WRITE(sc, WMREG_EECD, reg);
   11195 		CSR_WRITE_FLUSH(sc);
   11196 		delay(2);
   11197 	}
   11198 
   11199 	return 0;
   11200 }
   11201 
   11202 /* SPI */
   11203 
   11204 /*
   11205  * Set SPI and FLASH related information from the EECD register.
   11206  * For 82541 and 82547, the word size is taken from EEPROM.
   11207  */
   11208 static int
   11209 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11210 {
   11211 	int size;
   11212 	uint32_t reg;
   11213 	uint16_t data;
   11214 
   11215 	reg = CSR_READ(sc, WMREG_EECD);
   11216 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11217 
   11218 	/* Read the size of NVM from EECD by default */
   11219 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11220 	switch (sc->sc_type) {
   11221 	case WM_T_82541:
   11222 	case WM_T_82541_2:
   11223 	case WM_T_82547:
   11224 	case WM_T_82547_2:
   11225 		/* Set dummy value to access EEPROM */
   11226 		sc->sc_nvm_wordsize = 64;
   11227 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11228 		reg = data;
   11229 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11230 		if (size == 0)
   11231 			size = 6; /* 64 word size */
   11232 		else
   11233 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11234 		break;
   11235 	case WM_T_80003:
   11236 	case WM_T_82571:
   11237 	case WM_T_82572:
   11238 	case WM_T_82573: /* SPI case */
   11239 	case WM_T_82574: /* SPI case */
   11240 	case WM_T_82583: /* SPI case */
   11241 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11242 		if (size > 14)
   11243 			size = 14;
   11244 		break;
   11245 	case WM_T_82575:
   11246 	case WM_T_82576:
   11247 	case WM_T_82580:
   11248 	case WM_T_I350:
   11249 	case WM_T_I354:
   11250 	case WM_T_I210:
   11251 	case WM_T_I211:
   11252 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11253 		if (size > 15)
   11254 			size = 15;
   11255 		break;
   11256 	default:
   11257 		aprint_error_dev(sc->sc_dev,
   11258 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11259 		return -1;
   11260 		break;
   11261 	}
   11262 
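          	/* At this point "size" holds log2 of the NVM word count. */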
   11263 	sc->sc_nvm_wordsize = 1 << size;
   11264 
   11265 	return 0;
   11266 }
   11267 
   11268 /*
   11269  * wm_nvm_ready_spi:
   11270  *
   11271  *	Wait for a SPI EEPROM to be ready for commands.
   11272  */
   11273 static int
   11274 wm_nvm_ready_spi(struct wm_softc *sc)
   11275 {
   11276 	uint32_t val;
   11277 	int usec;
   11278 
   11279 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11280 		device_xname(sc->sc_dev), __func__));
   11281 
   11282 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11283 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11284 		wm_eeprom_recvbits(sc, &val, 8);
   11285 		if ((val & SPI_SR_RDY) == 0)
   11286 			break;
   11287 	}
   11288 	if (usec >= SPI_MAX_RETRIES) {
    11289 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11290 		return 1;
   11291 	}
   11292 	return 0;
   11293 }
   11294 
   11295 /*
   11296  * wm_nvm_read_spi:
   11297  *
    11298  *	Read a word from the EEPROM using the SPI protocol.
   11299  */
   11300 static int
   11301 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11302 {
   11303 	uint32_t reg, val;
   11304 	int i;
   11305 	uint8_t opc;
   11306 
   11307 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11308 		device_xname(sc->sc_dev), __func__));
   11309 
   11310 	/* Clear SK and CS. */
   11311 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11312 	CSR_WRITE(sc, WMREG_EECD, reg);
   11313 	CSR_WRITE_FLUSH(sc);
   11314 	delay(2);
   11315 
   11316 	if (wm_nvm_ready_spi(sc))
   11317 		return 1;
   11318 
   11319 	/* Toggle CS to flush commands. */
   11320 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11321 	CSR_WRITE_FLUSH(sc);
   11322 	delay(2);
   11323 	CSR_WRITE(sc, WMREG_EECD, reg);
   11324 	CSR_WRITE_FLUSH(sc);
   11325 	delay(2);
   11326 
   11327 	opc = SPI_OPC_READ;
   11328 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11329 		opc |= SPI_OPC_A8;
   11330 
   11331 	wm_eeprom_sendbits(sc, opc, 8);
   11332 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11333 
   11334 	for (i = 0; i < wordcnt; i++) {
   11335 		wm_eeprom_recvbits(sc, &val, 16);
   11336 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11337 	}
   11338 
   11339 	/* Raise CS and clear SK. */
   11340 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11341 	CSR_WRITE(sc, WMREG_EECD, reg);
   11342 	CSR_WRITE_FLUSH(sc);
   11343 	delay(2);
   11344 
   11345 	return 0;
   11346 }
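
          /*
           * A sketch of the byte swap in the loop above (value
           * illustrative): the EEPROM shifts the low byte of each word out
           * first, so a stored word 0x3412 arrives in 'val' as 0x1234, and
           *
           *	data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
           *
           * recovers 0x3412.
           */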
   11347 
    11348 /* EERD */
   11349 
   11350 static int
   11351 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11352 {
   11353 	uint32_t attempts = 100000;
   11354 	uint32_t i, reg = 0;
   11355 	int32_t done = -1;
   11356 
   11357 	for (i = 0; i < attempts; i++) {
   11358 		reg = CSR_READ(sc, rw);
   11359 
   11360 		if (reg & EERD_DONE) {
   11361 			done = 0;
   11362 			break;
   11363 		}
   11364 		delay(5);
   11365 	}
   11366 
   11367 	return done;
   11368 }
   11369 
   11370 static int
   11371 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11372     uint16_t *data)
   11373 {
   11374 	int i, eerd = 0;
   11375 	int error = 0;
   11376 
   11377 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11378 		device_xname(sc->sc_dev), __func__));
   11379 
   11380 	for (i = 0; i < wordcnt; i++) {
   11381 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11382 
   11383 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11384 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11385 		if (error != 0)
   11386 			break;
   11387 
   11388 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11389 	}
   11390 
   11391 	return error;
   11392 }
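
          /*
           * The EERD access pattern, shown for one word (a sketch; 0x10 is
           * an arbitrary example address):
           *
           *	CSR_WRITE(sc, WMREG_EERD,
           *	    (0x10 << EERD_ADDR_SHIFT) | EERD_START);
           *	(void)wm_poll_eerd_eewr_done(sc, WMREG_EERD);
           *	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
           */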
   11393 
   11394 /* Flash */
   11395 
   11396 static int
   11397 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11398 {
   11399 	uint32_t eecd;
   11400 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11401 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11402 	uint8_t sig_byte = 0;
   11403 
   11404 	switch (sc->sc_type) {
   11405 	case WM_T_PCH_SPT:
   11406 		/*
   11407 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11408 		 * sector valid bits from the NVM.
   11409 		 */
   11410 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11411 		if ((*bank == 0) || (*bank == 1)) {
   11412 			aprint_error_dev(sc->sc_dev,
   11413 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11414 				*bank);
   11415 			return -1;
   11416 		} else {
   11417 			*bank = *bank - 2;
   11418 			return 0;
   11419 		}
   11420 	case WM_T_ICH8:
   11421 	case WM_T_ICH9:
   11422 		eecd = CSR_READ(sc, WMREG_EECD);
   11423 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11424 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11425 			return 0;
   11426 		}
   11427 		/* FALLTHROUGH */
   11428 	default:
   11429 		/* Default to 0 */
   11430 		*bank = 0;
   11431 
   11432 		/* Check bank 0 */
   11433 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11434 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11435 			*bank = 0;
   11436 			return 0;
   11437 		}
   11438 
   11439 		/* Check bank 1 */
   11440 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11441 		    &sig_byte);
   11442 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11443 			*bank = 1;
   11444 			return 0;
   11445 		}
   11446 	}
   11447 
   11448 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11449 		device_xname(sc->sc_dev)));
   11450 	return -1;
   11451 }
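
          /*
           * For reference: on PCH_SPT the CTRL_EXT_NVMVS values 0 and 1
           * mean no valid bank, while 2 selects bank 0 and 3 selects bank 1
           * (hence the "*bank - 2" above).  Older parts instead read the
           * signature byte of each bank and match it against
           * ICH_NVM_SIG_VALUE.
           */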
   11452 
   11453 /******************************************************************************
   11454  * This function does initial flash setup so that a new read/write/erase cycle
   11455  * can be started.
   11456  *
   11457  * sc - The pointer to the hw structure
   11458  ****************************************************************************/
   11459 static int32_t
   11460 wm_ich8_cycle_init(struct wm_softc *sc)
   11461 {
   11462 	uint16_t hsfsts;
   11463 	int32_t error = 1;
   11464 	int32_t i     = 0;
   11465 
   11466 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11467 
    11468 	/* Check the Flash Descriptor Valid bit in Hw status */
   11469 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11470 		return error;
   11471 	}
   11472 
    11473 	/* Clear FCERR and DAEL in Hw status by writing 1s */
   11475 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11476 
   11477 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11478 
    11479 	/*
    11480 	 * To start a new cycle we should either have a hardware SPI
    11481 	 * cycle-in-progress bit to check against, or the FDONE bit
    11482 	 * should read as 1 after a hardware reset so that either can
    11483 	 * tell whether a cycle is in progress or has completed.  We
    11484 	 * should also have a software semaphore guarding FDONE or the
    11485 	 * cycle-in-progress bit, so that accesses to those bits from
    11486 	 * two threads are serialized and two threads cannot start a
    11487 	 * cycle at the same time.
    11488 	 */
   11489 
   11490 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11491 		/*
   11492 		 * There is no cycle running at present, so we can start a
   11493 		 * cycle
   11494 		 */
   11495 
   11496 		/* Begin by setting Flash Cycle Done. */
   11497 		hsfsts |= HSFSTS_DONE;
   11498 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11499 		error = 0;
   11500 	} else {
   11501 		/*
    11502 		 * Otherwise poll for some time so the current cycle has a
   11503 		 * chance to end before giving up.
   11504 		 */
   11505 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11506 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11507 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11508 				error = 0;
   11509 				break;
   11510 			}
   11511 			delay(1);
   11512 		}
   11513 		if (error == 0) {
   11514 			/*
    11515 			 * The previous cycle ended before the timeout;
    11516 			 * now set the Flash Cycle Done.
   11517 			 */
   11518 			hsfsts |= HSFSTS_DONE;
   11519 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11520 		}
   11521 	}
   11522 	return error;
   11523 }
   11524 
   11525 /******************************************************************************
   11526  * This function starts a flash cycle and waits for its completion
   11527  *
   11528  * sc - The pointer to the hw structure
   11529  ****************************************************************************/
   11530 static int32_t
   11531 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11532 {
   11533 	uint16_t hsflctl;
   11534 	uint16_t hsfsts;
   11535 	int32_t error = 1;
   11536 	uint32_t i = 0;
   11537 
   11538 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11539 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11540 	hsflctl |= HSFCTL_GO;
   11541 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11542 
   11543 	/* Wait till FDONE bit is set to 1 */
   11544 	do {
   11545 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11546 		if (hsfsts & HSFSTS_DONE)
   11547 			break;
   11548 		delay(1);
   11549 		i++;
   11550 	} while (i < timeout);
    11551 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   11552 		error = 0;
   11553 
   11554 	return error;
   11555 }
   11556 
   11557 /******************************************************************************
   11558  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11559  *
   11560  * sc - The pointer to the hw structure
   11561  * index - The index of the byte or word to read.
   11562  * size - Size of data to read, 1=byte 2=word, 4=dword
   11563  * data - Pointer to the word to store the value read.
   11564  *****************************************************************************/
   11565 static int32_t
   11566 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11567     uint32_t size, uint32_t *data)
   11568 {
   11569 	uint16_t hsfsts;
   11570 	uint16_t hsflctl;
   11571 	uint32_t flash_linear_address;
   11572 	uint32_t flash_data = 0;
   11573 	int32_t error = 1;
   11574 	int32_t count = 0;
   11575 
    11576 	if (size < 1 || size > 4 || data == NULL ||
   11577 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11578 		return error;
   11579 
   11580 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11581 	    sc->sc_ich8_flash_base;
   11582 
   11583 	do {
   11584 		delay(1);
   11585 		/* Steps */
   11586 		error = wm_ich8_cycle_init(sc);
   11587 		if (error)
   11588 			break;
   11589 
   11590 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11591 		/* BCOUNT holds size - 1 (0 = 1 byte ... 3 = 4 bytes). */
   11592 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11593 		    & HSFCTL_BCOUNT_MASK;
   11594 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11595 		if (sc->sc_type == WM_T_PCH_SPT) {
   11596 			/*
    11597 			 * In SPT, this register is in LAN memory space, not
    11598 			 * flash.  Therefore, only 32-bit access is supported.
   11599 			 */
   11600 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11601 			    (uint32_t)hsflctl);
   11602 		} else
   11603 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11604 
   11605 		/*
    11606 		 * Write the last 24 bits of index into the Flash Linear
    11607 		 * Address field of the Flash Address register.
    11608 		 */
    11609 		/* TODO: check the index against the size of the flash */
   11610 
   11611 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11612 
   11613 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11614 
   11615 		/*
    11616 		 * If FCERR is set, clear it and retry the whole
    11617 		 * sequence a few more times; otherwise read the data
    11618 		 * out of Flash Data0, which returns it least
    11619 		 * significant byte first.
   11620 		 */
   11621 		if (error == 0) {
   11622 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   11623 			if (size == 1)
   11624 				*data = (uint8_t)(flash_data & 0x000000FF);
   11625 			else if (size == 2)
   11626 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   11627 			else if (size == 4)
   11628 				*data = (uint32_t)flash_data;
   11629 			break;
   11630 		} else {
   11631 			/*
   11632 			 * If we've gotten here, then things are probably
   11633 			 * completely hosed, but if the error condition is
   11634 			 * detected, it won't hurt to give it another try...
   11635 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   11636 			 */
   11637 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11638 			if (hsfsts & HSFSTS_ERR) {
   11639 				/* Repeat for some time before giving up. */
   11640 				continue;
   11641 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   11642 				break;
   11643 		}
   11644 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   11645 
   11646 	return error;
   11647 }
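
          /*
           * Shape of one read attempt above, as a summary: initialize the
           * cycle (wm_ich8_cycle_init), program BCOUNT and ICH_CYCLE_READ
           * into HSFCTL, write the linear address to FADDR, kick the cycle
           * (wm_ich8_flash_cycle) and, on success, pull the result out of
           * FDATA0; on FCERR the whole sequence is retried up to
           * ICH_FLASH_CYCLE_REPEAT_COUNT times.
           */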
   11648 
   11649 /******************************************************************************
   11650  * Reads a single byte from the NVM using the ICH8 flash access registers.
   11651  *
   11652  * sc - pointer to wm_hw structure
   11653  * index - The index of the byte to read.
   11654  * data - Pointer to a byte to store the value read.
   11655  *****************************************************************************/
   11656 static int32_t
   11657 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   11658 {
   11659 	int32_t status;
   11660 	uint32_t word = 0;
   11661 
   11662 	status = wm_read_ich8_data(sc, index, 1, &word);
   11663 	if (status == 0)
   11664 		*data = (uint8_t)word;
   11665 	else
   11666 		*data = 0;
   11667 
   11668 	return status;
   11669 }
   11670 
   11671 /******************************************************************************
   11672  * Reads a word from the NVM using the ICH8 flash access registers.
   11673  *
   11674  * sc - pointer to wm_hw structure
   11675  * index - The starting byte index of the word to read.
   11676  * data - Pointer to a word to store the value read.
   11677  *****************************************************************************/
   11678 static int32_t
   11679 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   11680 {
   11681 	int32_t status;
   11682 	uint32_t word = 0;
   11683 
   11684 	status = wm_read_ich8_data(sc, index, 2, &word);
   11685 	if (status == 0)
   11686 		*data = (uint16_t)word;
   11687 	else
   11688 		*data = 0;
   11689 
   11690 	return status;
   11691 }
   11692 
   11693 /******************************************************************************
   11694  * Reads a dword from the NVM using the ICH8 flash access registers.
   11695  *
   11696  * sc - pointer to wm_hw structure
    11697  * index - The starting byte index of the dword to read.
    11698  * data - Pointer to a dword to store the value read.
   11699  *****************************************************************************/
   11700 static int32_t
   11701 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   11702 {
   11703 	int32_t status;
   11704 
   11705 	status = wm_read_ich8_data(sc, index, 4, data);
   11706 	return status;
   11707 }
   11708 
   11709 /******************************************************************************
   11710  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   11711  * register.
   11712  *
   11713  * sc - Struct containing variables accessed by shared code
   11714  * offset - offset of word in the EEPROM to read
   11715  * data - word read from the EEPROM
   11716  * words - number of words to read
   11717  *****************************************************************************/
   11718 static int
   11719 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11720 {
   11721 	int32_t  error = 0;
   11722 	uint32_t flash_bank = 0;
   11723 	uint32_t act_offset = 0;
   11724 	uint32_t bank_offset = 0;
   11725 	uint16_t word = 0;
   11726 	uint16_t i = 0;
   11727 
   11728 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11729 		device_xname(sc->sc_dev), __func__));
   11730 
   11731 	/*
   11732 	 * We need to know which is the valid flash bank.  In the event
   11733 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11734 	 * managing flash_bank.  So it cannot be trusted and needs
   11735 	 * to be updated with each read.
   11736 	 */
   11737 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11738 	if (error) {
   11739 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11740 			device_xname(sc->sc_dev)));
   11741 		flash_bank = 0;
   11742 	}
   11743 
   11744 	/*
   11745 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11746 	 * size
   11747 	 */
   11748 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11749 
   11750 	error = wm_get_swfwhw_semaphore(sc);
   11751 	if (error) {
   11752 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11753 		    __func__);
   11754 		return error;
   11755 	}
   11756 
   11757 	for (i = 0; i < words; i++) {
   11758 		/* The NVM part needs a byte offset, hence * 2 */
   11759 		act_offset = bank_offset + ((offset + i) * 2);
   11760 		error = wm_read_ich8_word(sc, act_offset, &word);
   11761 		if (error) {
   11762 			aprint_error_dev(sc->sc_dev,
   11763 			    "%s: failed to read NVM\n", __func__);
   11764 			break;
   11765 		}
   11766 		data[i] = word;
   11767 	}
   11768 
   11769 	wm_put_swfwhw_semaphore(sc);
   11770 	return error;
   11771 }
   11772 
   11773 /******************************************************************************
   11774  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   11775  * register.
   11776  *
   11777  * sc - Struct containing variables accessed by shared code
   11778  * offset - offset of word in the EEPROM to read
   11779  * data - word read from the EEPROM
   11780  * words - number of words to read
   11781  *****************************************************************************/
   11782 static int
   11783 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11784 {
   11785 	int32_t  error = 0;
   11786 	uint32_t flash_bank = 0;
   11787 	uint32_t act_offset = 0;
   11788 	uint32_t bank_offset = 0;
   11789 	uint32_t dword = 0;
   11790 	uint16_t i = 0;
   11791 
   11792 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11793 		device_xname(sc->sc_dev), __func__));
   11794 
   11795 	/*
   11796 	 * We need to know which is the valid flash bank.  In the event
   11797 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   11798 	 * managing flash_bank.  So it cannot be trusted and needs
   11799 	 * to be updated with each read.
   11800 	 */
   11801 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   11802 	if (error) {
   11803 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   11804 			device_xname(sc->sc_dev)));
   11805 		flash_bank = 0;
   11806 	}
   11807 
   11808 	/*
   11809 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   11810 	 * size
   11811 	 */
   11812 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   11813 
   11814 	error = wm_get_swfwhw_semaphore(sc);
   11815 	if (error) {
   11816 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11817 		    __func__);
   11818 		return error;
   11819 	}
   11820 
   11821 	for (i = 0; i < words; i++) {
   11822 		/* The NVM part needs a byte offset, hence * 2 */
   11823 		act_offset = bank_offset + ((offset + i) * 2);
   11824 		/* but we must read dword aligned, so mask ... */
   11825 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   11826 		if (error) {
   11827 			aprint_error_dev(sc->sc_dev,
   11828 			    "%s: failed to read NVM\n", __func__);
   11829 			break;
   11830 		}
   11831 		/* ... and pick out low or high word */
   11832 		if ((act_offset & 0x2) == 0)
   11833 			data[i] = (uint16_t)(dword & 0xFFFF);
   11834 		else
   11835 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   11836 	}
   11837 
   11838 	wm_put_swfwhw_semaphore(sc);
   11839 	return error;
   11840 }
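
          /*
           * Alignment example for the loop above (values illustrative):
           * NVM word 3 on bank 0 gives act_offset = 6, so the flash is
           * read at byte offset 4 (6 & ~0x3), and since (6 & 0x2) != 0 the
           * word is taken from the high half:
           *
           *	wm_read_ich8_dword(sc, 0x4, &dword);
           *	data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
           */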
   11841 
   11842 /* iNVM */
   11843 
   11844 static int
   11845 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   11846 {
   11847 	int32_t  rv = 0;
   11848 	uint32_t invm_dword;
   11849 	uint16_t i;
   11850 	uint8_t record_type, word_address;
   11851 
   11852 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11853 		device_xname(sc->sc_dev), __func__));
   11854 
   11855 	for (i = 0; i < INVM_SIZE; i++) {
   11856 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   11857 		/* Get record type */
   11858 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   11859 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   11860 			break;
   11861 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   11862 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   11863 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   11864 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   11865 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   11866 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   11867 			if (word_address == address) {
   11868 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   11869 				rv = 0;
   11870 				break;
   11871 			}
   11872 		}
   11873 	}
   11874 
   11875 	return rv;
   11876 }
   11877 
   11878 static int
   11879 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   11880 {
   11881 	int rv = 0;
   11882 	int i;
   11883 
   11884 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11885 		device_xname(sc->sc_dev), __func__));
   11886 
   11887 	for (i = 0; i < words; i++) {
   11888 		switch (offset + i) {
   11889 		case NVM_OFF_MACADDR:
   11890 		case NVM_OFF_MACADDR1:
   11891 		case NVM_OFF_MACADDR2:
   11892 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   11893 			if (rv != 0) {
   11894 				data[i] = 0xffff;
   11895 				rv = -1;
   11896 			}
   11897 			break;
   11898 		case NVM_OFF_CFG2:
   11899 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11900 			if (rv != 0) {
   11901 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   11902 				rv = 0;
   11903 			}
   11904 			break;
   11905 		case NVM_OFF_CFG4:
   11906 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11907 			if (rv != 0) {
   11908 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   11909 				rv = 0;
   11910 			}
   11911 			break;
   11912 		case NVM_OFF_LED_1_CFG:
   11913 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11914 			if (rv != 0) {
   11915 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   11916 				rv = 0;
   11917 			}
   11918 			break;
   11919 		case NVM_OFF_LED_0_2_CFG:
   11920 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11921 			if (rv != 0) {
   11922 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   11923 				rv = 0;
   11924 			}
   11925 			break;
   11926 		case NVM_OFF_ID_LED_SETTINGS:
   11927 			rv = wm_nvm_read_word_invm(sc, offset, data);
   11928 			if (rv != 0) {
   11929 				*data = ID_LED_RESERVED_FFFF;
   11930 				rv = 0;
   11931 			}
   11932 			break;
   11933 		default:
   11934 			DPRINTF(WM_DEBUG_NVM,
   11935 			    ("NVM word 0x%02x is not mapped.\n", offset));
   11936 			*data = NVM_RESERVED_WORD;
   11937 			break;
   11938 		}
   11939 	}
   11940 
   11941 	return rv;
   11942 }
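
          /*
           * Example of the fallback behaviour above: a failed read of one
           * of the MAC address words returns 0xffff in the buffer and
           * propagates rv = -1, while the config and LED words quietly
           * substitute the I211 power-on defaults and report success.
           */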
   11943 
    11944 /* Locking, NVM type detection, checksum/version validation and read */
   11945 
   11946 /*
   11947  * wm_nvm_acquire:
   11948  *
   11949  *	Perform the EEPROM handshake required on some chips.
   11950  */
   11951 static int
   11952 wm_nvm_acquire(struct wm_softc *sc)
   11953 {
   11954 	uint32_t reg;
   11955 	int x;
   11956 	int ret = 0;
   11957 
   11958 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11959 		device_xname(sc->sc_dev), __func__));
   11960 
   11961 	if (sc->sc_type >= WM_T_ICH8) {
   11962 		ret = wm_get_nvm_ich8lan(sc);
   11963 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   11964 		ret = wm_get_swfwhw_semaphore(sc);
   11965 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   11966 		/* This will also do wm_get_swsm_semaphore() if needed */
   11967 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   11968 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11969 		ret = wm_get_swsm_semaphore(sc);
   11970 	}
   11971 
   11972 	if (ret) {
   11973 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   11974 			__func__);
   11975 		return 1;
   11976 	}
   11977 
   11978 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   11979 		reg = CSR_READ(sc, WMREG_EECD);
   11980 
   11981 		/* Request EEPROM access. */
   11982 		reg |= EECD_EE_REQ;
   11983 		CSR_WRITE(sc, WMREG_EECD, reg);
   11984 
   11985 		/* ..and wait for it to be granted. */
   11986 		for (x = 0; x < 1000; x++) {
   11987 			reg = CSR_READ(sc, WMREG_EECD);
   11988 			if (reg & EECD_EE_GNT)
   11989 				break;
   11990 			delay(5);
   11991 		}
   11992 		if ((reg & EECD_EE_GNT) == 0) {
   11993 			aprint_error_dev(sc->sc_dev,
   11994 			    "could not acquire EEPROM GNT\n");
   11995 			reg &= ~EECD_EE_REQ;
   11996 			CSR_WRITE(sc, WMREG_EECD, reg);
   11997 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   11998 				wm_put_swfwhw_semaphore(sc);
   11999 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   12000 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12001 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12002 				wm_put_swsm_semaphore(sc);
   12003 			return 1;
   12004 		}
   12005 	}
   12006 
   12007 	return 0;
   12008 }
   12009 
   12010 /*
   12011  * wm_nvm_release:
   12012  *
   12013  *	Release the EEPROM mutex.
   12014  */
   12015 static void
   12016 wm_nvm_release(struct wm_softc *sc)
   12017 {
   12018 	uint32_t reg;
   12019 
   12020 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12021 		device_xname(sc->sc_dev), __func__));
   12022 
   12023 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   12024 		reg = CSR_READ(sc, WMREG_EECD);
   12025 		reg &= ~EECD_EE_REQ;
   12026 		CSR_WRITE(sc, WMREG_EECD, reg);
   12027 	}
   12028 
   12029 	if (sc->sc_type >= WM_T_ICH8) {
   12030 		wm_put_nvm_ich8lan(sc);
   12031 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   12032 		wm_put_swfwhw_semaphore(sc);
   12033 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   12034 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12035 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   12036 		wm_put_swsm_semaphore(sc);
   12037 }
   12038 
   12039 static int
   12040 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12041 {
   12042 	uint32_t eecd = 0;
   12043 
   12044 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12045 	    || sc->sc_type == WM_T_82583) {
   12046 		eecd = CSR_READ(sc, WMREG_EECD);
   12047 
   12048 		/* Isolate bits 15 & 16 */
   12049 		eecd = ((eecd >> 15) & 0x03);
   12050 
   12051 		/* If both bits are set, device is Flash type */
   12052 		if (eecd == 0x03)
   12053 			return 0;
   12054 	}
   12055 	return 1;
   12056 }
   12057 
   12058 static int
   12059 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12060 {
   12061 	uint32_t eec;
   12062 
   12063 	eec = CSR_READ(sc, WMREG_EEC);
   12064 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12065 		return 1;
   12066 
   12067 	return 0;
   12068 }
   12069 
   12070 /*
   12071  * wm_nvm_validate_checksum
   12072  *
   12073  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12074  */
   12075 static int
   12076 wm_nvm_validate_checksum(struct wm_softc *sc)
   12077 {
   12078 	uint16_t checksum;
   12079 	uint16_t eeprom_data;
   12080 #ifdef WM_DEBUG
   12081 	uint16_t csum_wordaddr, valid_checksum;
   12082 #endif
   12083 	int i;
   12084 
   12085 	checksum = 0;
   12086 
   12087 	/* Don't check for I211 */
   12088 	if (sc->sc_type == WM_T_I211)
   12089 		return 0;
   12090 
   12091 #ifdef WM_DEBUG
   12092 	if (sc->sc_type == WM_T_PCH_LPT) {
   12093 		csum_wordaddr = NVM_OFF_COMPAT;
   12094 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12095 	} else {
   12096 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12097 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12098 	}
   12099 
   12100 	/* Dump EEPROM image for debug */
   12101 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12102 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12103 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12104 		/* XXX PCH_SPT? */
   12105 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12106 		if ((eeprom_data & valid_checksum) == 0) {
   12107 			DPRINTF(WM_DEBUG_NVM,
   12108 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12109 				device_xname(sc->sc_dev), eeprom_data,
   12110 				    valid_checksum));
   12111 		}
   12112 	}
   12113 
   12114 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12115 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12116 		for (i = 0; i < NVM_SIZE; i++) {
   12117 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12118 				printf("XXXX ");
   12119 			else
   12120 				printf("%04hx ", eeprom_data);
   12121 			if (i % 8 == 7)
   12122 				printf("\n");
   12123 		}
   12124 	}
   12125 
   12126 #endif /* WM_DEBUG */
   12127 
   12128 	for (i = 0; i < NVM_SIZE; i++) {
   12129 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12130 			return 1;
   12131 		checksum += eeprom_data;
   12132 	}
   12133 
   12134 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12135 #ifdef WM_DEBUG
   12136 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12137 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12138 #endif
   12139 	}
   12140 
   12141 	return 0;
   12142 }
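
          /*
           * The invariant being checked above, as a sketch: the image's
           * checksum word is chosen at programming time so that
           *
           *	(word[0] + word[1] + ... + word[NVM_SIZE - 1]) & 0xffff
           *	    == NVM_CHECKSUM
           *
           * Note that on a mismatch this function only logs under WM_DEBUG
           * and still returns 0.
           */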
   12143 
   12144 static void
   12145 wm_nvm_version_invm(struct wm_softc *sc)
   12146 {
   12147 	uint32_t dword;
   12148 
   12149 	/*
    12150 	 * Linux's code to decode the version is very strange, so we
    12151 	 * don't follow that algorithm and just decode word 61 as the
    12152 	 * document describes.  Perhaps it's not perfect, though...
   12153 	 *
   12154 	 * Example:
   12155 	 *
   12156 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   12157 	 */
   12158 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12159 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12160 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12161 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12162 }
   12163 
   12164 static void
   12165 wm_nvm_version(struct wm_softc *sc)
   12166 {
   12167 	uint16_t major, minor, build, patch;
   12168 	uint16_t uid0, uid1;
   12169 	uint16_t nvm_data;
   12170 	uint16_t off;
   12171 	bool check_version = false;
   12172 	bool check_optionrom = false;
   12173 	bool have_build = false;
   12174 	bool have_uid = true;
   12175 
   12176 	/*
   12177 	 * Version format:
   12178 	 *
   12179 	 * XYYZ
   12180 	 * X0YZ
   12181 	 * X0YY
   12182 	 *
   12183 	 * Example:
   12184 	 *
   12185 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12186 	 *	82571	0x50a6	5.10.6?
   12187 	 *	82572	0x506a	5.6.10?
   12188 	 *	82572EI	0x5069	5.6.9?
   12189 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12190 	 *		0x2013	2.1.3?
    12191 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12192 	 */
   12193 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12194 	switch (sc->sc_type) {
   12195 	case WM_T_82571:
   12196 	case WM_T_82572:
   12197 	case WM_T_82574:
   12198 	case WM_T_82583:
   12199 		check_version = true;
   12200 		check_optionrom = true;
   12201 		have_build = true;
   12202 		break;
   12203 	case WM_T_82575:
   12204 	case WM_T_82576:
   12205 	case WM_T_82580:
   12206 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12207 			check_version = true;
   12208 		break;
   12209 	case WM_T_I211:
   12210 		wm_nvm_version_invm(sc);
   12211 		have_uid = false;
   12212 		goto printver;
   12213 	case WM_T_I210:
   12214 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12215 			wm_nvm_version_invm(sc);
   12216 			have_uid = false;
   12217 			goto printver;
   12218 		}
   12219 		/* FALLTHROUGH */
   12220 	case WM_T_I350:
   12221 	case WM_T_I354:
   12222 		check_version = true;
   12223 		check_optionrom = true;
   12224 		break;
   12225 	default:
   12226 		return;
   12227 	}
   12228 	if (check_version) {
   12229 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12230 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12231 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12232 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12233 			build = nvm_data & NVM_BUILD_MASK;
   12234 			have_build = true;
   12235 		} else
   12236 			minor = nvm_data & 0x00ff;
   12237 
    12238 		/* The minor number is BCD-coded; convert it to decimal */
   12239 		minor = (minor / 16) * 10 + (minor % 16);
   12240 		sc->sc_nvm_ver_major = major;
   12241 		sc->sc_nvm_ver_minor = minor;
   12242 
   12243 printver:
   12244 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12245 		    sc->sc_nvm_ver_minor);
   12246 		if (have_build) {
   12247 			sc->sc_nvm_ver_build = build;
   12248 			aprint_verbose(".%d", build);
   12249 		}
   12250 	}
   12251 	if (check_optionrom) {
   12252 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12253 		/* Option ROM Version */
   12254 		if ((off != 0x0000) && (off != 0xffff)) {
   12255 			off += NVM_COMBO_VER_OFF;
   12256 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12257 			wm_nvm_read(sc, off, 1, &uid0);
   12258 			if ((uid0 != 0) && (uid0 != 0xffff)
   12259 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12260 				/* 16bits */
   12261 				major = uid0 >> 8;
   12262 				build = (uid0 << 8) | (uid1 >> 8);
   12263 				patch = uid1 & 0x00ff;
   12264 				aprint_verbose(", option ROM Version %d.%d.%d",
   12265 				    major, build, patch);
   12266 			}
   12267 		}
   12268 	}
   12269 
   12270 	if (have_uid) {
   12271 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12272 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12273 	}
   12274 }
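
          /*
           * Worked example of the decode above, using an entry from the
           * table of examples: nvm_data 0x50a2 gives major 5, minor 0x0a
           * and build 2; the conversion turns the minor into
           * (0x0a / 16) * 10 + (0x0a % 16) = 10, i.e. version 5.10.2.
           */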
   12275 
   12276 /*
   12277  * wm_nvm_read:
   12278  *
   12279  *	Read data from the serial EEPROM.
   12280  */
   12281 static int
   12282 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12283 {
   12284 	int rv;
   12285 
   12286 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12287 		device_xname(sc->sc_dev), __func__));
   12288 
   12289 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12290 		return 1;
   12291 
   12292 	if (wm_nvm_acquire(sc))
   12293 		return 1;
   12294 
   12295 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12296 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12297 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   12298 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   12299 	else if (sc->sc_type == WM_T_PCH_SPT)
   12300 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   12301 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   12302 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   12303 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   12304 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   12305 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   12306 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   12307 	else
   12308 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   12309 
   12310 	wm_nvm_release(sc);
   12311 	return rv;
   12312 }
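
          /*
           * All NVM consumers go through this wrapper; a typical call looks
           * like (a sketch):
           *
           *	uint16_t word;
           *	if (wm_nvm_read(sc, NVM_OFF_ID_LED_SETTINGS, 1, &word) != 0)
           *		(treat the NVM contents as invalid)
           *
           * with the acquire/release pair serializing the access against
           * firmware and the other ports.
           */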
   12313 
   12314 /*
   12315  * Hardware semaphores.
    12316  * Very complex...
   12317  */
   12318 
   12319 static int
   12320 wm_get_null(struct wm_softc *sc)
   12321 {
   12322 
   12323 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12324 		device_xname(sc->sc_dev), __func__));
   12325 	return 0;
   12326 }
   12327 
   12328 static void
   12329 wm_put_null(struct wm_softc *sc)
   12330 {
   12331 
   12332 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12333 		device_xname(sc->sc_dev), __func__));
   12334 	return;
   12335 }
   12336 
   12337 /*
   12338  * Get hardware semaphore.
   12339  * Same as e1000_get_hw_semaphore_generic()
   12340  */
   12341 static int
   12342 wm_get_swsm_semaphore(struct wm_softc *sc)
   12343 {
   12344 	int32_t timeout;
   12345 	uint32_t swsm;
   12346 
   12347 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12348 		device_xname(sc->sc_dev), __func__));
   12349 	KASSERT(sc->sc_nvm_wordsize > 0);
   12350 
   12351 	/* Get the SW semaphore. */
   12352 	timeout = sc->sc_nvm_wordsize + 1;
   12353 	while (timeout) {
   12354 		swsm = CSR_READ(sc, WMREG_SWSM);
   12355 
   12356 		if ((swsm & SWSM_SMBI) == 0)
   12357 			break;
   12358 
   12359 		delay(50);
   12360 		timeout--;
   12361 	}
   12362 
   12363 	if (timeout == 0) {
   12364 		aprint_error_dev(sc->sc_dev,
   12365 		    "could not acquire SWSM SMBI\n");
   12366 		return 1;
   12367 	}
   12368 
   12369 	/* Get the FW semaphore. */
   12370 	timeout = sc->sc_nvm_wordsize + 1;
   12371 	while (timeout) {
   12372 		swsm = CSR_READ(sc, WMREG_SWSM);
   12373 		swsm |= SWSM_SWESMBI;
   12374 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12375 		/* If we managed to set the bit we got the semaphore. */
   12376 		swsm = CSR_READ(sc, WMREG_SWSM);
   12377 		if (swsm & SWSM_SWESMBI)
   12378 			break;
   12379 
   12380 		delay(50);
   12381 		timeout--;
   12382 	}
   12383 
   12384 	if (timeout == 0) {
   12385 		aprint_error_dev(sc->sc_dev,
   12386 		    "could not acquire SWSM SWESMBI\n");
   12387 		/* Release semaphores */
   12388 		wm_put_swsm_semaphore(sc);
   12389 		return 1;
   12390 	}
   12391 	return 0;
   12392 }
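
          /*
           * The handshake above is two-staged (a summary): SMBI serializes
           * software agents, and SWESMBI then arbitrates software against
           * firmware.  Only a read-back of SWESMBI as set proves the
           * semaphore was won; the write does not stick while firmware
           * holds it.
           */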
   12393 
   12394 /*
   12395  * Put hardware semaphore.
   12396  * Same as e1000_put_hw_semaphore_generic()
   12397  */
   12398 static void
   12399 wm_put_swsm_semaphore(struct wm_softc *sc)
   12400 {
   12401 	uint32_t swsm;
   12402 
   12403 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12404 		device_xname(sc->sc_dev), __func__));
   12405 
   12406 	swsm = CSR_READ(sc, WMREG_SWSM);
   12407 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12408 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12409 }
   12410 
   12411 /*
   12412  * Get SW/FW semaphore.
   12413  * Same as e1000_acquire_swfw_sync_82575().
   12414  */
   12415 static int
   12416 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12417 {
   12418 	uint32_t swfw_sync;
   12419 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12420 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   12421 	int timeout = 200;
   12422 
   12423 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12424 		device_xname(sc->sc_dev), __func__));
   12425 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12426 
   12427 	for (timeout = 0; timeout < 200; timeout++) {
   12428 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12429 			if (wm_get_swsm_semaphore(sc)) {
   12430 				aprint_error_dev(sc->sc_dev,
   12431 				    "%s: failed to get semaphore\n",
   12432 				    __func__);
   12433 				return 1;
   12434 			}
   12435 		}
   12436 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12437 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12438 			swfw_sync |= swmask;
   12439 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12440 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   12441 				wm_put_swsm_semaphore(sc);
   12442 			return 0;
   12443 		}
   12444 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   12445 			wm_put_swsm_semaphore(sc);
   12446 		delay(5000);
   12447 	}
   12448 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12449 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12450 	return 1;
   12451 }
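
          /*
           * Bit layout used above, for reference: the software claim on a
           * resource lives at (mask << SWFW_SOFT_SHIFT) and the firmware
           * claim at (mask << SWFW_FIRM_SHIFT).  The resource is free only
           * when both bits read back clear, and it is taken by setting the
           * software bit alone while holding the SWSM semaphore.
           */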
   12452 
   12453 static void
   12454 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12455 {
   12456 	uint32_t swfw_sync;
   12457 
   12458 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12459 		device_xname(sc->sc_dev), __func__));
   12460 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   12461 
   12462 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   12463 		while (wm_get_swsm_semaphore(sc) != 0)
   12464 			continue;
   12465 	}
   12466 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12467 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12468 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12469 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   12470 		wm_put_swsm_semaphore(sc);
   12471 }
   12472 
   12473 static int
   12474 wm_get_phy_82575(struct wm_softc *sc)
   12475 {
   12476 
   12477 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12478 		device_xname(sc->sc_dev), __func__));
   12479 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12480 }
   12481 
   12482 static void
   12483 wm_put_phy_82575(struct wm_softc *sc)
   12484 {
   12485 
   12486 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12487 		device_xname(sc->sc_dev), __func__));
   12488 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12489 }
   12490 
   12491 static int
   12492 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12493 {
   12494 	uint32_t ext_ctrl;
   12495 	int timeout = 200;
   12496 
   12497 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12498 		device_xname(sc->sc_dev), __func__));
   12499 
   12500 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12501 	for (timeout = 0; timeout < 200; timeout++) {
   12502 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12503 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12504 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12505 
   12506 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12507 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12508 			return 0;
   12509 		delay(5000);
   12510 	}
   12511 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12512 	    device_xname(sc->sc_dev), ext_ctrl);
   12513 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12514 	return 1;
   12515 }
   12516 
   12517 static void
   12518 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12519 {
   12520 	uint32_t ext_ctrl;
   12521 
   12522 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12523 		device_xname(sc->sc_dev), __func__));
   12524 
   12525 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12526 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12527 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12528 
   12529 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12530 }
   12531 
   12532 static int
   12533 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12534 {
   12535 	uint32_t ext_ctrl;
   12536 	int timeout;
   12537 
   12538 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12539 		device_xname(sc->sc_dev), __func__));
   12540 	mutex_enter(sc->sc_ich_phymtx);
   12541 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12542 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12543 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12544 			break;
   12545 		delay(1000);
   12546 	}
   12547 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12548 		printf("%s: SW has already locked the resource\n",
   12549 		    device_xname(sc->sc_dev));
   12550 		goto out;
   12551 	}
   12552 
   12553 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12554 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12555 	for (timeout = 0; timeout < 1000; timeout++) {
   12556 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12557 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12558 			break;
   12559 		delay(1000);
   12560 	}
   12561 	if (timeout >= 1000) {
   12562 		printf("%s: failed to acquire semaphore\n",
   12563 		    device_xname(sc->sc_dev));
   12564 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12565 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12566 		goto out;
   12567 	}
   12568 	return 0;
   12569 
   12570 out:
   12571 	mutex_exit(sc->sc_ich_phymtx);
   12572 	return 1;
   12573 }
   12574 
   12575 static void
   12576 wm_put_swflag_ich8lan(struct wm_softc *sc)
   12577 {
   12578 	uint32_t ext_ctrl;
   12579 
   12580 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12581 		device_xname(sc->sc_dev), __func__));
   12582 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12583 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   12584 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12585 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12586 	} else {
   12587 		printf("%s: Semaphore unexpectedly released\n",
   12588 		    device_xname(sc->sc_dev));
   12589 	}
   12590 
   12591 	mutex_exit(sc->sc_ich_phymtx);
   12592 }
   12593 
   12594 static int
   12595 wm_get_nvm_ich8lan(struct wm_softc *sc)
   12596 {
   12597 
   12598 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12599 		device_xname(sc->sc_dev), __func__));
   12600 	mutex_enter(sc->sc_ich_nvmmtx);
   12601 
   12602 	return 0;
   12603 }
   12604 
   12605 static void
   12606 wm_put_nvm_ich8lan(struct wm_softc *sc)
   12607 {
   12608 
   12609 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12610 		device_xname(sc->sc_dev), __func__));
   12611 	mutex_exit(sc->sc_ich_nvmmtx);
   12612 }
   12613 
   12614 static int
   12615 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   12616 {
   12617 	int i = 0;
   12618 	uint32_t reg;
   12619 
   12620 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12621 		device_xname(sc->sc_dev), __func__));
   12622 
   12623 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12624 	do {
   12625 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   12626 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   12627 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12628 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   12629 			break;
   12630 		delay(2*1000);
   12631 		i++;
   12632 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   12633 
   12634 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   12635 		wm_put_hw_semaphore_82573(sc);
   12636 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   12637 		    device_xname(sc->sc_dev));
   12638 		return -1;
   12639 	}
   12640 
   12641 	return 0;
   12642 }
   12643 
   12644 static void
   12645 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   12646 {
   12647 	uint32_t reg;
   12648 
   12649 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12650 		device_xname(sc->sc_dev), __func__));
   12651 
   12652 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12653 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12654 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12655 }
   12656 
   12657 /*
   12658  * Management mode and power management related subroutines.
   12659  * BMC, AMT, suspend/resume and EEE.
   12660  */
   12661 
   12662 #ifdef WM_WOL
   12663 static int
   12664 wm_check_mng_mode(struct wm_softc *sc)
   12665 {
   12666 	int rv;
   12667 
   12668 	switch (sc->sc_type) {
   12669 	case WM_T_ICH8:
   12670 	case WM_T_ICH9:
   12671 	case WM_T_ICH10:
   12672 	case WM_T_PCH:
   12673 	case WM_T_PCH2:
   12674 	case WM_T_PCH_LPT:
   12675 	case WM_T_PCH_SPT:
   12676 		rv = wm_check_mng_mode_ich8lan(sc);
   12677 		break;
   12678 	case WM_T_82574:
   12679 	case WM_T_82583:
   12680 		rv = wm_check_mng_mode_82574(sc);
   12681 		break;
   12682 	case WM_T_82571:
   12683 	case WM_T_82572:
   12684 	case WM_T_82573:
   12685 	case WM_T_80003:
   12686 		rv = wm_check_mng_mode_generic(sc);
   12687 		break;
   12688 	default:
    12689 		/* Nothing to do */
   12690 		rv = 0;
   12691 		break;
   12692 	}
   12693 
   12694 	return rv;
   12695 }
   12696 
   12697 static int
   12698 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   12699 {
   12700 	uint32_t fwsm;
   12701 
   12702 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12703 
   12704 	if (((fwsm & FWSM_FW_VALID) != 0)
   12705 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12706 		return 1;
   12707 
   12708 	return 0;
   12709 }
   12710 
   12711 static int
   12712 wm_check_mng_mode_82574(struct wm_softc *sc)
   12713 {
   12714 	uint16_t data;
   12715 
   12716 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12717 
   12718 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   12719 		return 1;
   12720 
   12721 	return 0;
   12722 }
   12723 
   12724 static int
   12725 wm_check_mng_mode_generic(struct wm_softc *sc)
   12726 {
   12727 	uint32_t fwsm;
   12728 
   12729 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12730 
   12731 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   12732 		return 1;
   12733 
   12734 	return 0;
   12735 }
   12736 #endif /* WM_WOL */
   12737 
   12738 static int
   12739 wm_enable_mng_pass_thru(struct wm_softc *sc)
   12740 {
   12741 	uint32_t manc, fwsm, factps;
   12742 
   12743 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   12744 		return 0;
   12745 
   12746 	manc = CSR_READ(sc, WMREG_MANC);
   12747 
   12748 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   12749 		device_xname(sc->sc_dev), manc));
   12750 	if ((manc & MANC_RECV_TCO_EN) == 0)
   12751 		return 0;
   12752 
   12753 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   12754 		fwsm = CSR_READ(sc, WMREG_FWSM);
   12755 		factps = CSR_READ(sc, WMREG_FACTPS);
   12756 		if (((factps & FACTPS_MNGCG) == 0)
   12757 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   12758 			return 1;
    12759 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   12760 		uint16_t data;
   12761 
   12762 		factps = CSR_READ(sc, WMREG_FACTPS);
   12763 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   12764 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   12765 			device_xname(sc->sc_dev), factps, data));
   12766 		if (((factps & FACTPS_MNGCG) == 0)
   12767 		    && ((data & NVM_CFG2_MNGM_MASK)
   12768 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   12769 			return 1;
   12770 	} else if (((manc & MANC_SMBUS_EN) != 0)
   12771 	    && ((manc & MANC_ASF_EN) == 0))
   12772 		return 1;
   12773 
   12774 	return 0;
   12775 }
   12776 
   12777 static bool
   12778 wm_phy_resetisblocked(struct wm_softc *sc)
   12779 {
   12780 	bool blocked = false;
   12781 	uint32_t reg;
   12782 	int i = 0;
   12783 
   12784 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12785 		device_xname(sc->sc_dev), __func__));
   12786 
   12787 	switch (sc->sc_type) {
   12788 	case WM_T_ICH8:
   12789 	case WM_T_ICH9:
   12790 	case WM_T_ICH10:
   12791 	case WM_T_PCH:
   12792 	case WM_T_PCH2:
   12793 	case WM_T_PCH_LPT:
   12794 	case WM_T_PCH_SPT:
   12795 		do {
   12796 			reg = CSR_READ(sc, WMREG_FWSM);
   12797 			if ((reg & FWSM_RSPCIPHY) == 0) {
   12798 				blocked = true;
   12799 				delay(10*1000);
   12800 				continue;
   12801 			}
   12802 			blocked = false;
   12803 		} while (blocked && (i++ < 30));
   12804 		return blocked;
   12806 	case WM_T_82571:
   12807 	case WM_T_82572:
   12808 	case WM_T_82573:
   12809 	case WM_T_82574:
   12810 	case WM_T_82583:
   12811 	case WM_T_80003:
   12812 		reg = CSR_READ(sc, WMREG_MANC);
   12813 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   12814 			return true;
   12815 		else
   12816 			return false;
   12818 	default:
   12819 		/* no problem */
   12820 		break;
   12821 	}
   12822 
   12823 	return false;
   12824 }
   12825 
   12826 static void
   12827 wm_get_hw_control(struct wm_softc *sc)
   12828 {
   12829 	uint32_t reg;
   12830 
   12831 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12832 		device_xname(sc->sc_dev), __func__));
   12833 
   12834 	if (sc->sc_type == WM_T_82573) {
   12835 		reg = CSR_READ(sc, WMREG_SWSM);
   12836 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   12837 	} else if (sc->sc_type >= WM_T_82571) {
   12838 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12839 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   12840 	}
   12841 }
   12842 
   12843 static void
   12844 wm_release_hw_control(struct wm_softc *sc)
   12845 {
   12846 	uint32_t reg;
   12847 
   12848 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12849 		device_xname(sc->sc_dev), __func__));
   12850 
   12851 	if (sc->sc_type == WM_T_82573) {
   12852 		reg = CSR_READ(sc, WMREG_SWSM);
   12853 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   12854 	} else if (sc->sc_type >= WM_T_82571) {
   12855 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12856 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   12857 	}
   12858 }
   12859 
   12860 static void
   12861 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   12862 {
   12863 	uint32_t reg;
   12864 
   12865 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12866 		device_xname(sc->sc_dev), __func__));
   12867 
   12868 	if (sc->sc_type < WM_T_PCH2)
   12869 		return;
   12870 
   12871 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   12872 
   12873 	if (gate)
   12874 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   12875 	else
   12876 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   12877 
   12878 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   12879 }
   12880 
   12881 static void
   12882 wm_smbustopci(struct wm_softc *sc)
   12883 {
   12884 	uint32_t fwsm, reg;
   12885 	int rv = 0;
   12886 
   12887 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12888 		device_xname(sc->sc_dev), __func__));
   12889 
   12890 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   12891 	wm_gate_hw_phy_config_ich8lan(sc, true);
   12892 
   12893 	/* Disable ULP */
   12894 	wm_ulp_disable(sc);
   12895 
   12896 	/* Acquire PHY semaphore */
   12897 	sc->phy.acquire(sc);
   12898 
   12899 	fwsm = CSR_READ(sc, WMREG_FWSM);
   12900 	switch (sc->sc_type) {
   12901 	case WM_T_PCH_LPT:
   12902 	case WM_T_PCH_SPT:
   12903 		if (wm_phy_is_accessible_pchlan(sc))
   12904 			break;
   12905 
   12906 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12907 		reg |= CTRL_EXT_FORCE_SMBUS;
   12908 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12909 #if 0
   12910 		/* XXX Isn't this required??? */
   12911 		CSR_WRITE_FLUSH(sc);
   12912 #endif
   12913 		delay(50 * 1000);
   12914 		/* FALLTHROUGH */
   12915 	case WM_T_PCH2:
   12916 		if (wm_phy_is_accessible_pchlan(sc) == true)
   12917 			break;
   12918 		/* FALLTHROUGH */
   12919 	case WM_T_PCH:
   12920 		if (sc->sc_type == WM_T_PCH)
   12921 			if ((fwsm & FWSM_FW_VALID) != 0)
   12922 				break;
   12923 
   12924 		if (wm_phy_resetisblocked(sc) == true) {
   12925 			printf("XXX reset is blocked(3)\n");
   12926 			break;
   12927 		}
   12928 
   12929 		wm_toggle_lanphypc_pch_lpt(sc);
   12930 
   12931 		if (sc->sc_type >= WM_T_PCH_LPT) {
   12932 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12933 				break;
   12934 
   12935 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12936 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   12937 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12938 
   12939 			if (wm_phy_is_accessible_pchlan(sc) == true)
   12940 				break;
   12941 			rv = -1;
   12942 		}
   12943 		break;
   12944 	default:
   12945 		break;
   12946 	}
   12947 
   12948 	/* Release semaphore */
   12949 	sc->phy.release(sc);
   12950 
   12951 	if (rv == 0) {
   12952 		if (wm_phy_resetisblocked(sc)) {
   12953 			printf("XXX reset is blocked(4)\n");
   12954 			goto out;
   12955 		}
   12956 		wm_reset_phy(sc);
   12957 		if (wm_phy_resetisblocked(sc))
   12958 			printf("XXX reset is blocked(4)\n");
   12959 	}
   12960 
   12961 out:
   12962 	/*
   12963 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   12964 	 */
   12965 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   12966 		delay(10*1000);
   12967 		wm_gate_hw_phy_config_ich8lan(sc, false);
   12968 	}
   12969 }
   12970 
   12971 static void
   12972 wm_init_manageability(struct wm_softc *sc)
   12973 {
   12974 
   12975 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12976 		device_xname(sc->sc_dev), __func__));
   12977 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   12978 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   12979 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   12980 
   12981 		/* Disable hardware interception of ARP */
   12982 		manc &= ~MANC_ARP_EN;
   12983 
   12984 		/* Enable receiving management packets to the host */
   12985 		if (sc->sc_type >= WM_T_82571) {
   12986 			manc |= MANC_EN_MNG2HOST;
    12987 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   12988 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   12989 		}
   12990 
   12991 		CSR_WRITE(sc, WMREG_MANC, manc);
   12992 	}
   12993 }
   12994 
   12995 static void
   12996 wm_release_manageability(struct wm_softc *sc)
   12997 {
   12998 
   12999 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13000 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13001 
   13002 		manc |= MANC_ARP_EN;
   13003 		if (sc->sc_type >= WM_T_82571)
   13004 			manc &= ~MANC_EN_MNG2HOST;
   13005 
   13006 		CSR_WRITE(sc, WMREG_MANC, manc);
   13007 	}
   13008 }
   13009 
   13010 static void
   13011 wm_get_wakeup(struct wm_softc *sc)
   13012 {
   13013 
   13014 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13015 	switch (sc->sc_type) {
   13016 	case WM_T_82573:
   13017 	case WM_T_82583:
   13018 		sc->sc_flags |= WM_F_HAS_AMT;
   13019 		/* FALLTHROUGH */
   13020 	case WM_T_80003:
   13021 	case WM_T_82575:
   13022 	case WM_T_82576:
   13023 	case WM_T_82580:
   13024 	case WM_T_I350:
   13025 	case WM_T_I354:
   13026 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13027 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13028 		/* FALLTHROUGH */
   13029 	case WM_T_82541:
   13030 	case WM_T_82541_2:
   13031 	case WM_T_82547:
   13032 	case WM_T_82547_2:
   13033 	case WM_T_82571:
   13034 	case WM_T_82572:
   13035 	case WM_T_82574:
   13036 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13037 		break;
   13038 	case WM_T_ICH8:
   13039 	case WM_T_ICH9:
   13040 	case WM_T_ICH10:
   13041 	case WM_T_PCH:
   13042 	case WM_T_PCH2:
   13043 	case WM_T_PCH_LPT:
   13044 	case WM_T_PCH_SPT:
   13045 		sc->sc_flags |= WM_F_HAS_AMT;
   13046 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13047 		break;
   13048 	default:
   13049 		break;
   13050 	}
   13051 
   13052 	/* 1: HAS_MANAGE */
   13053 	if (wm_enable_mng_pass_thru(sc) != 0)
   13054 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13055 
    13056 	/*
    13057 	 * Note that the WOL flags are set later, after the EEPROM
    13058 	 * related reset has been done.
    13059 	 */
   13060 }
   13061 
    13062 /*
    13063  * Unconfigure Ultra Low Power (ULP) mode.  Only for PCH_LPT and newer,
    13064  * excluding devices that don't support ULP (see the check below).
    13065  */
   13066 static void
   13067 wm_ulp_disable(struct wm_softc *sc)
   13068 {
   13069 	uint32_t reg;
   13070 	int i = 0;
   13071 
   13072 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13073 		device_xname(sc->sc_dev), __func__));
   13074 	/* Exclude old devices */
   13075 	if ((sc->sc_type < WM_T_PCH_LPT)
   13076 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13077 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13078 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13079 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13080 		return;
   13081 
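          	/*
          	 * There are two ways out of ULP: if the ME firmware is
          	 * active, ask it to undo ULP through the H2ME register;
          	 * otherwise the host has to unwind the PHY and MAC state
          	 * by hand below.
          	 */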
   13082 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
    13083 		/* Request that the ME un-configure ULP mode in the PHY */
   13084 		reg = CSR_READ(sc, WMREG_H2ME);
   13085 		reg &= ~H2ME_ULP;
   13086 		reg |= H2ME_ENFORCE_SETTINGS;
   13087 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13088 
   13089 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13090 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13091 			if (i++ == 30) {
   13092 				printf("%s timed out\n", __func__);
   13093 				return;
   13094 			}
   13095 			delay(10 * 1000);
   13096 		}
   13097 		reg = CSR_READ(sc, WMREG_H2ME);
   13098 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13099 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13100 
   13101 		return;
   13102 	}
   13103 
   13104 	/* Acquire semaphore */
   13105 	sc->phy.acquire(sc);
   13106 
   13107 	/* Toggle LANPHYPC */
   13108 	wm_toggle_lanphypc_pch_lpt(sc);
   13109 
   13110 	/* Unforce SMBus mode in PHY */
   13111 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13112 	if (reg == 0x0000 || reg == 0xffff) {
   13113 		uint32_t reg2;
   13114 
   13115 		printf("%s: Force SMBus first.\n", __func__);
   13116 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13117 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13118 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13119 		delay(50 * 1000);
   13120 
   13121 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13122 	}
   13123 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13124 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13125 
   13126 	/* Unforce SMBus mode in MAC */
   13127 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13128 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13129 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13130 
   13131 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13132 	reg |= HV_PM_CTRL_K1_ENA;
   13133 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13134 
   13135 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13136 	reg &= ~(I218_ULP_CONFIG1_IND
   13137 	    | I218_ULP_CONFIG1_STICKY_ULP
   13138 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13139 	    | I218_ULP_CONFIG1_WOL_HOST
   13140 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13141 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13142 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13143 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13144 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13145 	reg |= I218_ULP_CONFIG1_START;
   13146 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13147 
   13148 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13149 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13150 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13151 
   13152 	/* Release semaphore */
   13153 	sc->phy.release(sc);
   13154 	wm_gmii_reset(sc);
   13155 	delay(50 * 1000);
   13156 }
   13157 
   13158 /* WOL in the newer chipset interfaces (pchlan) */
   13159 static void
   13160 wm_enable_phy_wakeup(struct wm_softc *sc)
   13161 {
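          	/*
          	 * XXX Not implemented yet.  For reference, Intel's drivers
          	 * copy the MAC's receive address (RAR) and multicast (MTA)
          	 * tables into the PHY's wakeup register space and then arm
          	 * wakeup in both the MAC and the PHY; the outline below
          	 * follows that sequence.
          	 */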
   13162 #if 0
   13163 	uint16_t preg;
   13164 
   13165 	/* Copy MAC RARs to PHY RARs */
   13166 
   13167 	/* Copy MAC MTA to PHY MTA */
   13168 
   13169 	/* Configure PHY Rx Control register */
   13170 
   13171 	/* Enable PHY wakeup in MAC register */
   13172 
   13173 	/* Configure and enable PHY wakeup in PHY registers */
   13174 
   13175 	/* Activate PHY wakeup */
   13176 
   13177 	/* XXX */
   13178 #endif
   13179 }
   13180 
   13181 /* Power down workaround on D3 */
   13182 static void
   13183 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13184 {
   13185 	uint32_t reg;
   13186 	int i;
   13187 
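          	/*
          	 * Try at most twice: if the VR power-down mode does not
          	 * read back as "shutdown" on the first pass, reset the PHY
          	 * and try once more.
          	 */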
   13188 	for (i = 0; i < 2; i++) {
   13189 		/* Disable link */
   13190 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13191 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13192 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13193 
   13194 		/*
   13195 		 * Call gig speed drop workaround on Gig disable before
   13196 		 * accessing any PHY registers
   13197 		 */
   13198 		if (sc->sc_type == WM_T_ICH8)
   13199 			wm_gig_downshift_workaround_ich8lan(sc);
   13200 
   13201 		/* Write VR power-down enable */
   13202 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13203 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13204 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13205 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13206 
   13207 		/* Read it back and test */
   13208 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13209 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13210 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13211 			break;
   13212 
   13213 		/* Issue PHY reset and repeat at most one more time */
   13214 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13215 	}
   13216 }
   13217 
   13218 static void
   13219 wm_enable_wakeup(struct wm_softc *sc)
   13220 {
    13221 	uint32_t reg;
          	int pmreg;
   13222 	pcireg_t pmode;
   13223 
   13224 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13225 		device_xname(sc->sc_dev), __func__));
   13226 
   13227 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13228 		&pmreg, NULL) == 0)
   13229 		return;
   13230 
   13231 	/* Advertise the wakeup capability */
   13232 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13233 	    | CTRL_SWDPIN(3));
   13234 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13235 
   13236 	/* ICH workaround */
   13237 	switch (sc->sc_type) {
   13238 	case WM_T_ICH8:
   13239 	case WM_T_ICH9:
   13240 	case WM_T_ICH10:
   13241 	case WM_T_PCH:
   13242 	case WM_T_PCH2:
   13243 	case WM_T_PCH_LPT:
   13244 	case WM_T_PCH_SPT:
   13245 		/* Disable gig during WOL */
   13246 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13247 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13248 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13249 		if (sc->sc_type == WM_T_PCH)
   13250 			wm_gmii_reset(sc);
   13251 
   13252 		/* Power down workaround */
   13253 		if (sc->sc_phytype == WMPHY_82577) {
   13254 			struct mii_softc *child;
   13255 
   13256 			/* Assume that the PHY is copper */
   13257 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13258 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
    13259 				/* page 768, reg 25: magic value */
    13260 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
          				    (768 << 5) | 25, 0x0444);
   13261 		}
   13262 		break;
   13263 	default:
   13264 		break;
   13265 	}
   13266 
   13267 	/* Keep the laser running on fiber adapters */
   13268 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13269 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13270 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13271 		reg |= CTRL_EXT_SWDPIN(3);
   13272 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13273 	}
   13274 
   13275 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
    13276 #if 0	/* for multicast packets */
   13277 	reg |= WUFC_MC;
   13278 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13279 #endif
   13280 
   13281 	if (sc->sc_type >= WM_T_PCH)
   13282 		wm_enable_phy_wakeup(sc);
   13283 	else {
   13284 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13285 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13286 	}
   13287 
   13288 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13289 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13290 		|| (sc->sc_type == WM_T_PCH2))
   13291 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13292 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13293 
   13294 	/* Request PME */
   13295 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13296 #if 0
   13297 	/* Disable WOL */
   13298 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13299 #else
   13300 	/* For WOL */
   13301 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13302 #endif
   13303 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13304 }
   13305 
    13306 /* LPLU (Low Power Link Up) */
   13307 
   13308 static void
   13309 wm_lplu_d0_disable(struct wm_softc *sc)
   13310 {
   13311 	uint32_t reg;
   13312 
   13313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13314 		device_xname(sc->sc_dev), __func__));
   13315 
   13316 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13317 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13318 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13319 }
   13320 
   13321 static void
   13322 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   13323 {
   13324 	uint32_t reg;
   13325 
   13326 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13327 		device_xname(sc->sc_dev), __func__));
   13328 
   13329 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13330 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13331 	reg |= HV_OEM_BITS_ANEGNOW;
   13332 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13333 }
   13334 
    13335 /* EEE (Energy Efficient Ethernet) */
   13336 
   13337 static void
   13338 wm_set_eee_i350(struct wm_softc *sc)
   13339 {
   13340 	uint32_t ipcnfg, eeer;
   13341 
   13342 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13343 	eeer = CSR_READ(sc, WMREG_EEER);
   13344 
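          	/*
          	 * EEE (IEEE 802.3az) lets both link partners drop into Low
          	 * Power Idle (LPI) between packets.  Advertise EEE at 100M
          	 * and 1G and enable LPI in both directions when WM_F_EEE is
          	 * set; otherwise mask it all off.
          	 */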
   13345 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13346 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13347 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13348 		    | EEER_LPI_FC);
   13349 	} else {
   13350 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13351 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13352 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13353 		    | EEER_LPI_FC);
   13354 	}
   13355 
   13356 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13357 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13358 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13359 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13360 }
   13361 
   13362 /*
   13363  * Workarounds (mainly PHY related).
    13364  * Normally, PHY-specific workarounds live in the PHY drivers.
   13365  */
   13366 
   13367 /* Work-around for 82566 Kumeran PCS lock loss */
   13368 static void
   13369 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13370 {
   13371 #if 0
   13372 	int miistatus, active, i;
   13373 	int reg;
   13374 
   13375 	miistatus = sc->sc_mii.mii_media_status;
   13376 
   13377 	/* If the link is not up, do nothing */
   13378 	if ((miistatus & IFM_ACTIVE) == 0)
   13379 		return;
   13380 
   13381 	active = sc->sc_mii.mii_media_active;
   13382 
   13383 	/* Nothing to do if the link is other than 1Gbps */
   13384 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   13385 		return;
   13386 
   13387 	for (i = 0; i < 10; i++) {
   13388 		/* read twice */
   13389 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13390 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13391 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13392 			goto out;	/* GOOD! */
   13393 
   13394 		/* Reset the PHY */
   13395 		wm_gmii_reset(sc);
   13396 		delay(5*1000);
   13397 	}
   13398 
   13399 	/* Disable GigE link negotiation */
   13400 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13401 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13402 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13403 
   13404 	/*
   13405 	 * Call gig speed drop workaround on Gig disable before accessing
   13406 	 * any PHY registers.
   13407 	 */
   13408 	wm_gig_downshift_workaround_ich8lan(sc);
   13409 
   13410 out:
   13411 	return;
   13412 #endif
   13413 }
   13414 
    13415 /* Workaround: without this, WOL from S5 stops working */
   13416 static void
   13417 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13418 {
   13419 	uint16_t kmrn_reg;
   13420 
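          	/*
          	 * Pulse the Kumeran near-end loopback diagnostic bit (set,
          	 * then clear).  Per the corresponding Intel workaround, this
          	 * kicks the IGP3 PHY out of the state that breaks WOL from
          	 * S5 after a gigabit downshift.
          	 */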
   13421 	/* Only for igp3 */
   13422 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13423 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   13424 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   13425 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13426 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13427 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   13428 	}
   13429 }
   13430 
   13431 /*
   13432  * Workaround for pch's PHYs
   13433  * XXX should be moved to new PHY driver?
   13434  */
   13435 static void
   13436 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13437 {
   13438 
   13439 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13440 		device_xname(sc->sc_dev), __func__));
   13441 	KASSERT(sc->sc_type == WM_T_PCH);
   13442 
   13443 	if (sc->sc_phytype == WMPHY_82577)
   13444 		wm_set_mdio_slow_mode_hv(sc);
   13445 
   13446 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13447 
    13448 	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13449 
   13450 	/* 82578 */
   13451 	if (sc->sc_phytype == WMPHY_82578) {
   13452 		struct mii_softc *child;
   13453 
   13454 		/*
   13455 		 * Return registers to default by doing a soft reset then
   13456 		 * writing 0x3140 to the control register
   13457 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13458 		 */
   13459 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13460 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13461 			PHY_RESET(child);
   13462 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13463 			    0x3140);
   13464 		}
   13465 	}
   13466 
   13467 	/* Select page 0 */
   13468 	sc->phy.acquire(sc);
   13469 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13470 	sc->phy.release(sc);
   13471 
    13472 	/*
    13473 	 * Configure the K1 Si workaround during PHY reset, assuming link
    13474 	 * is up, so that K1 is disabled if the link runs at 1Gbps.
    13475 	 */
   13476 	wm_k1_gig_workaround_hv(sc, 1);
   13477 }
   13478 
   13479 static void
   13480 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13481 {
   13482 
   13483 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13484 		device_xname(sc->sc_dev), __func__));
   13485 	KASSERT(sc->sc_type == WM_T_PCH2);
   13486 
   13487 	wm_set_mdio_slow_mode_hv(sc);
   13488 }
   13489 
   13490 static int
   13491 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13492 {
   13493 	int k1_enable = sc->sc_nvm_k1_enabled;
   13494 
   13495 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13496 		device_xname(sc->sc_dev), __func__));
   13497 
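          	/*
          	 * K1 is a Kumeran-interface power-save state that is known
          	 * not to work at 1Gbps on these PHYs, so force it off
          	 * whenever link is reported up.
          	 */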
   13498 	if (sc->phy.acquire(sc) != 0)
   13499 		return -1;
   13500 
   13501 	if (link) {
   13502 		k1_enable = 0;
   13503 
   13504 		/* Link stall fix for link up */
    13505 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
          		    0x0100);
   13506 	} else {
   13507 		/* Link stall fix for link down */
    13508 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
          		    0x4100);
   13509 	}
   13510 
   13511 	wm_configure_k1_ich8lan(sc, k1_enable);
   13512 	sc->phy.release(sc);
   13513 
   13514 	return 0;
   13515 }
   13516 
   13517 static void
   13518 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13519 {
   13520 	uint32_t reg;
   13521 
   13522 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13523 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13524 	    reg | HV_KMRN_MDIO_SLOW);
   13525 }
   13526 
   13527 static void
   13528 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13529 {
   13530 	uint32_t ctrl, ctrl_ext, tmp;
   13531 	uint16_t kmrn_reg;
   13532 
   13533 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   13534 
   13535 	if (k1_enable)
   13536 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   13537 	else
   13538 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   13539 
   13540 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   13541 
   13542 	delay(20);
   13543 
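          	/*
          	 * Briefly force the MAC speed (CTRL_FRCSPD with the speed
          	 * bits cleared, plus CTRL_EXT_SPD_BYPS) so that the Kumeran
          	 * interface latches the new K1 setting, then restore the
          	 * original CTRL and CTRL_EXT values.
          	 */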
   13544 	ctrl = CSR_READ(sc, WMREG_CTRL);
   13545 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   13546 
   13547 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   13548 	tmp |= CTRL_FRCSPD;
   13549 
   13550 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   13551 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   13552 	CSR_WRITE_FLUSH(sc);
   13553 	delay(20);
   13554 
   13555 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   13556 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   13557 	CSR_WRITE_FLUSH(sc);
   13558 	delay(20);
   13559 }
   13560 
    13561 /* Special case - the 82575 needs manual init ... */
   13562 static void
   13563 wm_reset_init_script_82575(struct wm_softc *sc)
   13564 {
    13565 	/*
    13566 	 * Remark: this is untested code - we have no board without EEPROM.
    13567 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
    13568 	 */
   13569 
   13570 	/* SerDes configuration via SERDESCTRL */
   13571 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   13572 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   13573 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   13574 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   13575 
   13576 	/* CCM configuration via CCMCTL register */
   13577 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   13578 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   13579 
   13580 	/* PCIe lanes configuration */
   13581 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   13582 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   13583 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   13584 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   13585 
   13586 	/* PCIe PLL Configuration */
   13587 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   13588 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   13589 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   13590 }
   13591 
   13592 static void
   13593 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   13594 {
   13595 	uint32_t reg;
   13596 	uint16_t nvmword;
   13597 	int rv;
   13598 
   13599 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   13600 		return;
   13601 
   13602 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   13603 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   13604 	if (rv != 0) {
   13605 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   13606 		    __func__);
   13607 		return;
   13608 	}
   13609 
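          	/*
          	 * Re-apply the MDIO routing from the NVM, which is
          	 * presumably lost across reset: MDICNFG_DEST selects the
          	 * external MDIO interface and MDICNFG_COM_MDIO shares the
          	 * MDIO lines across functions.
          	 */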
   13610 	reg = CSR_READ(sc, WMREG_MDICNFG);
   13611 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   13612 		reg |= MDICNFG_DEST;
   13613 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   13614 		reg |= MDICNFG_COM_MDIO;
   13615 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13616 }
   13617 
   13618 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   13619 
   13620 static bool
   13621 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   13622 {
   13623 	int i;
   13624 	uint32_t reg;
   13625 	uint16_t id1, id2;
   13626 
   13627 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13628 		device_xname(sc->sc_dev), __func__));
   13629 	id1 = id2 = 0xffff;
   13630 	for (i = 0; i < 2; i++) {
   13631 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   13632 		if (MII_INVALIDID(id1))
   13633 			continue;
   13634 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   13635 		if (MII_INVALIDID(id2))
   13636 			continue;
   13637 		break;
   13638 	}
    13639 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
    13640 		goto out;
   13642 
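          	/*
          	 * In case the PHY needs to be in MDIO slow mode, set slow
          	 * mode and retry reading the PHY ID.  PCH_LPT and newer are
          	 * instead recovered by unforcing SMBus mode below.
          	 */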
   13643 	if (sc->sc_type < WM_T_PCH_LPT) {
   13644 		sc->phy.release(sc);
   13645 		wm_set_mdio_slow_mode_hv(sc);
   13646 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   13647 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   13648 		sc->phy.acquire(sc);
   13649 	}
   13650 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   13651 		printf("XXX return with false\n");
   13652 		return false;
   13653 	}
   13654 out:
   13655 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   13656 		/* Only unforce SMBus if ME is not active */
   13657 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   13658 			/* Unforce SMBus mode in PHY */
   13659 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   13660 			    CV_SMB_CTRL);
   13661 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13662 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   13663 			    CV_SMB_CTRL, reg);
   13664 
   13665 			/* Unforce SMBus mode in MAC */
   13666 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13667 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13668 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13669 		}
   13670 	}
   13671 	return true;
   13672 }
   13673 
   13674 static void
   13675 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   13676 {
   13677 	uint32_t reg;
   13678 	int i;
   13679 
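          	/*
          	 * LANPHYPC is the LAN PHY Power Control signal.  Driving it
          	 * via CTRL_LANPHYPC_OVERRIDE/_VALUE and then releasing the
          	 * override power-cycles the PHY.
          	 */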
   13680 	/* Set PHY Config Counter to 50msec */
   13681 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   13682 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   13683 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   13684 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   13685 
   13686 	/* Toggle LANPHYPC */
   13687 	reg = CSR_READ(sc, WMREG_CTRL);
   13688 	reg |= CTRL_LANPHYPC_OVERRIDE;
   13689 	reg &= ~CTRL_LANPHYPC_VALUE;
   13690 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13691 	CSR_WRITE_FLUSH(sc);
   13692 	delay(1000);
   13693 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   13694 	CSR_WRITE(sc, WMREG_CTRL, reg);
   13695 	CSR_WRITE_FLUSH(sc);
   13696 
   13697 	if (sc->sc_type < WM_T_PCH_LPT)
   13698 		delay(50 * 1000);
   13699 	else {
   13700 		i = 20;
   13701 
   13702 		do {
   13703 			delay(5 * 1000);
   13704 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   13705 		    && i--);
   13706 
   13707 		delay(30 * 1000);
   13708 	}
   13709 }
   13710 
   13711 static int
   13712 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   13713 {
   13714 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   13715 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   13716 	uint32_t rxa;
   13717 	uint16_t scale = 0, lat_enc = 0;
   13718 	int64_t lat_ns, value;
   13719 
   13720 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13721 		device_xname(sc->sc_dev), __func__));
   13722 
   13723 	if (link) {
   13724 		pcireg_t preg;
   13725 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   13726 
   13727 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   13728 
   13729 		/*
   13730 		 * Determine the maximum latency tolerated by the device.
   13731 		 *
   13732 		 * Per the PCIe spec, the tolerated latencies are encoded as
   13733 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   13734 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   13735 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   13736 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   13737 		 */
   13738 		lat_ns = ((int64_t)rxa * 1024 -
   13739 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
   13740 		if (lat_ns < 0)
   13741 			lat_ns = 0;
   13742 		else {
   13743 			uint32_t status;
   13744 			uint16_t speed;
   13745 
   13746 			status = CSR_READ(sc, WMREG_STATUS);
   13747 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
   13748 			case STATUS_SPEED_10:
   13749 				speed = 10;
   13750 				break;
   13751 			case STATUS_SPEED_100:
   13752 				speed = 100;
   13753 				break;
   13754 			case STATUS_SPEED_1000:
   13755 				speed = 1000;
   13756 				break;
   13757 			default:
   13758 				printf("%s: Unknown speed (status = %08x)\n",
   13759 				    device_xname(sc->sc_dev), status);
   13760 				return -1;
   13761 			}
   13762 			lat_ns /= speed;
   13763 		}
   13764 		value = lat_ns;
   13765 
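          		/*
          		 * Worked example with a made-up latency: lat_ns = 66000
          		 * does not fit in 10 bits at scale 0 or 1 (66000, then
          		 * howmany(66000, 32) = 2063), but at scale 2 it becomes
          		 * howmany(2063, 32) = 65, so the encoding is scale = 2,
          		 * value = 65, i.e. 65 * 2^10 = 66560ns.
          		 */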
   13766 		while (value > LTRV_VALUE) {
    13767 			scale++;
   13768 			value = howmany(value, __BIT(5));
   13769 		}
   13770 		if (scale > LTRV_SCALE_MAX) {
   13771 			printf("%s: Invalid LTR latency scale %d\n",
   13772 			    device_xname(sc->sc_dev), scale);
   13773 			return -1;
   13774 		}
   13775 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   13776 
   13777 		/* Determine the maximum latency tolerated by the platform */
   13778 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13779 		    WM_PCI_LTR_CAP_LPT);
   13780 		max_snoop = preg & 0xffff;
   13781 		max_nosnoop = preg >> 16;
   13782 
   13783 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   13784 
    13785 		if (lat_enc > max_ltr_enc)
    13786 			lat_enc = max_ltr_enc;
   13788 	}
    13789 	/* Snoop and No-Snoop latencies are the same */
   13790 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   13791 	CSR_WRITE(sc, WMREG_LTRV, reg);
   13792 
   13793 	return 0;
   13794 }
   13795 
   13796 /*
   13797  * I210 Errata 25 and I211 Errata 10
   13798  * Slow System Clock.
   13799  */
   13800 static void
   13801 wm_pll_workaround_i210(struct wm_softc *sc)
   13802 {
   13803 	uint32_t mdicnfg, wuc;
   13804 	uint32_t reg;
   13805 	pcireg_t pcireg;
    13806 	int pmreg;
   13807 	uint16_t nvmword, tmp_nvmword;
   13808 	int phyval;
   13809 	bool wa_done = false;
   13810 	int i;
   13811 
   13812 	/* Save WUC and MDICNFG registers */
   13813 	wuc = CSR_READ(sc, WMREG_WUC);
   13814 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   13815 
   13816 	reg = mdicnfg & ~MDICNFG_DEST;
   13817 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   13818 
   13819 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   13820 		nvmword = INVM_DEFAULT_AL;
   13821 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   13822 
   13823 	/* Get Power Management cap offset */
   13824 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13825 		&pmreg, NULL) == 0)
   13826 		return;
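          	/*
          	 * While the PHY PLL reads back as unconfigured: reset the
          	 * internal PHY, rewrite the autoload word with the PLL
          	 * workaround bits set, bounce the function through D3hot and
          	 * back to D0 so the PLL can relock, then restore the
          	 * original autoload word.  Retry up to WM_MAX_PLL_TRIES.
          	 */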
   13827 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   13828 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   13829 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   13830 
   13831 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   13832 			break; /* OK */
   13833 		}
   13834 
   13835 		wa_done = true;
   13836 		/* Directly reset the internal PHY */
   13837 		reg = CSR_READ(sc, WMREG_CTRL);
   13838 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   13839 
   13840 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13841 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   13842 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13843 
   13844 		CSR_WRITE(sc, WMREG_WUC, 0);
   13845 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   13846 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13847 
   13848 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   13849 		    pmreg + PCI_PMCSR);
   13850 		pcireg |= PCI_PMCSR_STATE_D3;
   13851 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13852 		    pmreg + PCI_PMCSR, pcireg);
   13853 		delay(1000);
   13854 		pcireg &= ~PCI_PMCSR_STATE_D3;
   13855 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   13856 		    pmreg + PCI_PMCSR, pcireg);
   13857 
   13858 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   13859 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   13860 
   13861 		/* Restore WUC register */
   13862 		CSR_WRITE(sc, WMREG_WUC, wuc);
   13863 	}
   13864 
   13865 	/* Restore MDICNFG setting */
   13866 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   13867 	if (wa_done)
   13868 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   13869 }
   13870