      1 /*	$NetBSD: if_wm.c,v 1.531 2017/07/26 06:48:49 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- TX Multi queue improvement (refine queue selection logic)
     77  *	- Split header buffer for newer descriptors
     78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.531 2017/07/26 06:48:49 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #include "opt_if_wm.h"
     91 #endif
     92 
     93 #include <sys/param.h>
     94 #include <sys/systm.h>
     95 #include <sys/callout.h>
     96 #include <sys/mbuf.h>
     97 #include <sys/malloc.h>
     98 #include <sys/kmem.h>
     99 #include <sys/kernel.h>
    100 #include <sys/socket.h>
    101 #include <sys/ioctl.h>
    102 #include <sys/errno.h>
    103 #include <sys/device.h>
    104 #include <sys/queue.h>
    105 #include <sys/syslog.h>
    106 #include <sys/interrupt.h>
    107 #include <sys/cpu.h>
    108 #include <sys/pcq.h>
    109 
    110 #include <sys/rndsource.h>
    111 
    112 #include <net/if.h>
    113 #include <net/if_dl.h>
    114 #include <net/if_media.h>
    115 #include <net/if_ether.h>
    116 
    117 #include <net/bpf.h>
    118 
    119 #include <netinet/in.h>			/* XXX for struct ip */
    120 #include <netinet/in_systm.h>		/* XXX for struct ip */
    121 #include <netinet/ip.h>			/* XXX for struct ip */
    122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    124 
    125 #include <sys/bus.h>
    126 #include <sys/intr.h>
    127 #include <machine/endian.h>
    128 
    129 #include <dev/mii/mii.h>
    130 #include <dev/mii/miivar.h>
    131 #include <dev/mii/miidevs.h>
    132 #include <dev/mii/mii_bitbang.h>
    133 #include <dev/mii/ikphyreg.h>
    134 #include <dev/mii/igphyreg.h>
    135 #include <dev/mii/igphyvar.h>
    136 #include <dev/mii/inbmphyreg.h>
    137 #include <dev/mii/ihphyreg.h>
    138 
    139 #include <dev/pci/pcireg.h>
    140 #include <dev/pci/pcivar.h>
    141 #include <dev/pci/pcidevs.h>
    142 
    143 #include <dev/pci/if_wmreg.h>
    144 #include <dev/pci/if_wmvar.h>
    145 
    146 #ifdef WM_DEBUG
    147 #define	WM_DEBUG_LINK		__BIT(0)
    148 #define	WM_DEBUG_TX		__BIT(1)
    149 #define	WM_DEBUG_RX		__BIT(2)
    150 #define	WM_DEBUG_GMII		__BIT(3)
    151 #define	WM_DEBUG_MANAGE		__BIT(4)
    152 #define	WM_DEBUG_NVM		__BIT(5)
    153 #define	WM_DEBUG_INIT		__BIT(6)
    154 #define	WM_DEBUG_LOCK		__BIT(7)
    155 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    156     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
    157 
    158 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
    159 #else
    160 #define	DPRINTF(x, y)	/* nothing */
    161 #endif /* WM_DEBUG */
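/*
 * Illustrative only: because DPRINTF() expands to "printf y", the
 * printf() arguments must be passed as a single parenthesized group.
 * A minimal usage sketch (the message text here is hypothetical):
 */
#if 0
	DPRINTF(WM_DEBUG_TX, ("%s: TDT advanced to %d\n",
	    device_xname(sc->sc_dev), txq->txq_next));
#endif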
    162 
    163 #ifdef NET_MPSAFE
    164 #define WM_MPSAFE	1
    165 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    166 #else
    167 #define CALLOUT_FLAGS	0
    168 #endif
    169 
    170 /*
    171  * The maximum number of interrupts used by this driver.
    172  */
    173 #define WM_MAX_NQUEUEINTR	16
    174 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
    175 
    176 #ifndef WM_DISABLE_MSI
    177 #define	WM_DISABLE_MSI 0
    178 #endif
    179 #ifndef WM_DISABLE_MSIX
    180 #define	WM_DISABLE_MSIX 0
    181 #endif
    182 
    183 int wm_disable_msi = WM_DISABLE_MSI;
    184 int wm_disable_msix = WM_DISABLE_MSIX;
    185 
    186 /*
    187  * Transmit descriptor list size.  Due to errata, we can only have
    188  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    189  * on >= 82544.  We tell the upper layers that they can queue a lot
    190  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    191  * of them at a time.
    192  *
    193  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    194  * chains containing many small mbufs have been observed in zero-copy
    195  * situations with jumbo frames.
    196  */
    197 #define	WM_NTXSEGS		256
    198 #define	WM_IFQUEUELEN		256
    199 #define	WM_TXQUEUELEN_MAX	64
    200 #define	WM_TXQUEUELEN_MAX_82547	16
    201 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    202 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    203 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    204 #define	WM_NTXDESC_82542	256
    205 #define	WM_NTXDESC_82544	4096
    206 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    207 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    208 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
    209 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    210 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
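/*
 * Illustrative only: the descriptor and job counts are powers of two,
 * so the WM_NEXT*() macros wrap with a mask instead of a modulo.  For
 * example, with WM_NTXDESC(txq) == 4096 the mask is 0xfff, and
 * WM_NEXTTX(txq, 4095) == (4096 & 0xfff) == 0, i.e. the index wraps
 * back to the head of the ring without a division.
 */
#if 0
	int next = WM_NEXTTX(txq, txq->txq_next);	/* wraps at ring end */
#endif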
    211 
    212 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    213 
    214 #define	WM_TXINTERQSIZE		256
    215 
    216 /*
    217  * Receive descriptor list size.  We have one Rx buffer for normal
    218  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    219  * packet.  We allocate 256 receive descriptors, each with a 2k
    220  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    221  */
    222 #define	WM_NRXDESC		256
    223 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    224 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    225 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
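/*
 * Worked example for the sizing comment above, assuming a 9014-byte
 * jumbo frame: such a packet spans howmany(9014, MCLBYTES) == 5
 * two-kilobyte clusters, so the 256-entry ring holds about
 * 256 / 5 == 51 of them, which the comment rounds down to 50.
 */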
    226 
    227 #ifndef WM_RX_PROCESS_LIMIT_DEFAULT
    228 #define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
    229 #endif
    230 #ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
    231 #define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
    232 #endif
    233 
    234 typedef union txdescs {
    235 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    236 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    237 } txdescs_t;
    238 
    239 typedef union rxdescs {
    240 	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
    241 	ext_rxdesc_t      sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
    242 	nq_rxdesc_t      sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
    243 } rxdescs_t;
    244 
    245 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
    246 #define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))
    247 
    248 /*
    249  * Software state for transmit jobs.
    250  */
    251 struct wm_txsoft {
    252 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    253 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    254 	int txs_firstdesc;		/* first descriptor in packet */
    255 	int txs_lastdesc;		/* last descriptor in packet */
    256 	int txs_ndesc;			/* # of descriptors used */
    257 };
    258 
    259 /*
    260  * Software state for receive buffers.  Each descriptor gets a
    261  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    262  * more than one buffer, we chain them together.
    263  */
    264 struct wm_rxsoft {
    265 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    266 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    267 };
    268 
    269 #define WM_LINKUP_TIMEOUT	50
    270 
    271 static uint16_t swfwphysem[] = {
    272 	SWFW_PHY0_SM,
    273 	SWFW_PHY1_SM,
    274 	SWFW_PHY2_SM,
    275 	SWFW_PHY3_SM
    276 };
    277 
    278 static const uint32_t wm_82580_rxpbs_table[] = {
    279 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    280 };
    281 
    282 struct wm_softc;
    283 
    284 #ifdef WM_EVENT_COUNTERS
    285 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
    286 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
    287 	struct evcnt qname##_ev_##evname;
    288 
    289 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
    290 	do {								\
    291 		snprintf((q)->qname##_##evname##_evcnt_name,		\
    292 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
    293 		    "%s%02d%s", #qname, (qnum), #evname);		\
    294 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
    295 		    (evtype), NULL, (xname),				\
    296 		    (q)->qname##_##evname##_evcnt_name);		\
    297 	} while (0)
    298 
    299 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    300 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
    301 
    302 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
    303 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
    304 
    305 #define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
    306 	evcnt_detach(&(q)->qname##_ev_##evname);
    307 #endif /* WM_EVENT_COUNTERS */
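/*
 * Illustrative expansion of the token pasting above:
 * WM_Q_EVCNT_DEFINE(txq, txdw) declares roughly the pair below, and
 * WM_Q_EVCNT_ATTACH() later snprintf()s a name such as "txq00txdw"
 * into the buffer.  sizeof("qname##XX##evname") is 18, which exactly
 * fits the longest generated name, "txq00txfifo_stall", plus NUL.
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif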
    308 
    309 struct wm_txqueue {
    310 	kmutex_t *txq_lock;		/* lock for tx operations */
    311 
    312 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
    313 
    314 	/* Software state for the transmit descriptors. */
    315 	int txq_num;			/* must be a power of two */
    316 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    317 
    318 	/* TX control data structures. */
    319 	int txq_ndesc;			/* must be a power of two */
    320 	size_t txq_descsize;		/* size of a Tx descriptor */
    321 	txdescs_t *txq_descs_u;
    322 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    323 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    324 	int txq_desc_rseg;		/* real number of control segments */
    325 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    326 #define	txq_descs	txq_descs_u->sctxu_txdescs
    327 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    328 
    329 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    330 
    331 	int txq_free;			/* number of free Tx descriptors */
    332 	int txq_next;			/* next ready Tx descriptor */
    333 
    334 	int txq_sfree;			/* number of free Tx jobs */
    335 	int txq_snext;			/* next free Tx job */
    336 	int txq_sdirty;			/* dirty Tx jobs */
    337 
    338 	/* These 4 variables are used only on the 82547. */
    339 	int txq_fifo_size;		/* Tx FIFO size */
    340 	int txq_fifo_head;		/* current head of FIFO */
    341 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    342 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    343 
    344 	/*
    345 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
    346 	 * CPUs.  This queue mediates between them without blocking.
    347 	 */
    348 	pcq_t *txq_interq;
    349 
    350 	/*
    351 	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
    352 	 * to manage the Tx H/W queue's busy flag.
    353 	 */
    354 	int txq_flags;			/* flags for H/W queue, see below */
    355 #define	WM_TXQ_NO_SPACE	0x1
    356 
    357 	bool txq_stopping;
    358 
    359 	uint32_t txq_packets;		/* for AIM */
    360 	uint32_t txq_bytes;		/* for AIM */
    361 #ifdef WM_EVENT_COUNTERS
    362 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
    363 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
    364 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
    365 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
    366 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
    367 						/* XXX not used? */
    368 
    369 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
    370 	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
    371 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
    372 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
    373 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
    374 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
    375 
    376 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
    377 
    378 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
    379 
    380 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
    381 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    382 #endif /* WM_EVENT_COUNTERS */
    383 };
    384 
    385 struct wm_rxqueue {
    386 	kmutex_t *rxq_lock;		/* lock for rx operations */
    387 
    388 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
    389 
    390 	/* Software state for the receive descriptors. */
    391 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    392 
    393 	/* RX control data structures. */
    394 	int rxq_ndesc;			/* must be a power of two */
    395 	size_t rxq_descsize;		/* size of an Rx descriptor */
    396 	rxdescs_t *rxq_descs_u;
    397 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    398 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    399 	int rxq_desc_rseg;		/* real number of control segments */
    400 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    401 #define	rxq_descs	rxq_descs_u->sctxu_rxdescs
    402 #define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
    403 #define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs
    404 
    405 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    406 
    407 	int rxq_ptr;			/* next ready Rx desc/queue ent */
    408 	int rxq_discard;
    409 	int rxq_len;
    410 	struct mbuf *rxq_head;
    411 	struct mbuf *rxq_tail;
    412 	struct mbuf **rxq_tailp;
    413 
    414 	bool rxq_stopping;
    415 
    416 	uint32_t rxq_packets;		/* for AIM */
    417 	uint32_t rxq_bytes;		/* for AIM */
    418 #ifdef WM_EVENT_COUNTERS
    419 	WM_Q_EVCNT_DEFINE(rxq, rxintr)		/* Rx interrupts */
    420 
    421 	WM_Q_EVCNT_DEFINE(rxq, rxipsum)		/* IP checksums checked in-bound */
    422 	WM_Q_EVCNT_DEFINE(rxq, rxtusum)		/* TCP/UDP cksums checked in-bound */
    423 #endif
    424 };
    425 
    426 struct wm_queue {
    427 	int wmq_id;			/* index of transmit and receive queues */
    428 	int wmq_intr_idx;		/* index of MSI-X tables */
    429 
    430 	uint32_t wmq_itr;		/* interrupt interval per queue. */
    431 	bool wmq_set_itr;
    432 
    433 	struct wm_txqueue wmq_txq;
    434 	struct wm_rxqueue wmq_rxq;
    435 
    436 	void *wmq_si;
    437 };
    438 
    439 struct wm_phyop {
    440 	int (*acquire)(struct wm_softc *);
    441 	void (*release)(struct wm_softc *);
    442 	int reset_delay_us;
    443 };
    444 
    445 struct wm_nvmop {
    446 	int (*acquire)(struct wm_softc *);
    447 	void (*release)(struct wm_softc *);
    448 	int (*read)(struct wm_softc *, int, int, uint16_t *);
    449 };
    450 
    451 /*
    452  * Software state per device.
    453  */
    454 struct wm_softc {
    455 	device_t sc_dev;		/* generic device information */
    456 	bus_space_tag_t sc_st;		/* bus space tag */
    457 	bus_space_handle_t sc_sh;	/* bus space handle */
    458 	bus_size_t sc_ss;		/* bus space size */
    459 	bus_space_tag_t sc_iot;		/* I/O space tag */
    460 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    461 	bus_size_t sc_ios;		/* I/O space size */
    462 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    463 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    464 	bus_size_t sc_flashs;		/* flash registers space size */
    465 	off_t sc_flashreg_offset;	/*
    466 					 * offset to flash registers from
    467 					 * start of BAR
    468 					 */
    469 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    470 
    471 	struct ethercom sc_ethercom;	/* ethernet common data */
    472 	struct mii_data sc_mii;		/* MII/media information */
    473 
    474 	pci_chipset_tag_t sc_pc;
    475 	pcitag_t sc_pcitag;
    476 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    477 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    478 
    479 	uint16_t sc_pcidevid;		/* PCI device ID */
    480 	wm_chip_type sc_type;		/* MAC type */
    481 	int sc_rev;			/* MAC revision */
    482 	wm_phy_type sc_phytype;		/* PHY type */
    483 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    484 #define	WM_MEDIATYPE_UNKNOWN		0x00
    485 #define	WM_MEDIATYPE_FIBER		0x01
    486 #define	WM_MEDIATYPE_COPPER		0x02
    487 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    488 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    489 	int sc_flags;			/* flags; see below */
    490 	int sc_if_flags;		/* last if_flags */
    491 	int sc_flowflags;		/* 802.3x flow control flags */
    492 	int sc_align_tweak;
    493 
    494 	void *sc_ihs[WM_MAX_NINTR];	/*
    495 					 * interrupt cookie.
    496 					 * - legacy and msi use sc_ihs[0] only
    497 					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
    498 					 */
    499 	pci_intr_handle_t *sc_intrs;	/*
    500 					 * legacy and msi use sc_intrs[0] only
    501 					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
    502 					 */
    503 	int sc_nintrs;			/* number of interrupts */
    504 
    505 	int sc_link_intr_idx;		/* index of MSI-X tables */
    506 
    507 	callout_t sc_tick_ch;		/* tick callout */
    508 	bool sc_core_stopping;
    509 
    510 	int sc_nvm_ver_major;
    511 	int sc_nvm_ver_minor;
    512 	int sc_nvm_ver_build;
    513 	int sc_nvm_addrbits;		/* NVM address bits */
    514 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    515 	int sc_ich8_flash_base;
    516 	int sc_ich8_flash_bank_size;
    517 	int sc_nvm_k1_enabled;
    518 
    519 	int sc_nqueues;
    520 	struct wm_queue *sc_queue;
    521 	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
    522 	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */
    523 
    524 	int sc_affinity_offset;
    525 
    526 #ifdef WM_EVENT_COUNTERS
    527 	/* Event counters. */
    528 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    529 
    530 	/* WM_T_82542_2_1 only */
    531 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    532 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    533 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    534 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    535 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    536 #endif /* WM_EVENT_COUNTERS */
    537 
    538 	/* This variable is used only on the 82547. */
    539 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    540 
    541 	uint32_t sc_ctrl;		/* prototype CTRL register */
    542 #if 0
    543 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    544 #endif
    545 	uint32_t sc_icr;		/* prototype interrupt bits */
    546 	uint32_t sc_itr_init;		/* prototype intr throttling reg */
    547 	uint32_t sc_tctl;		/* prototype TCTL register */
    548 	uint32_t sc_rctl;		/* prototype RCTL register */
    549 	uint32_t sc_txcw;		/* prototype TXCW register */
    550 	uint32_t sc_tipg;		/* prototype TIPG register */
    551 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    552 	uint32_t sc_pba;		/* prototype PBA register */
    553 
    554 	int sc_tbi_linkup;		/* TBI link status */
    555 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    556 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    557 
    558 	int sc_mchash_type;		/* multicast filter offset */
    559 
    560 	krndsource_t rnd_source;	/* random source */
    561 
    562 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
    563 
    564 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    565 	kmutex_t *sc_ich_phymtx;	/*
    566 					 * 82574/82583/ICH/PCH specific PHY
    567 					 * mutex. For 82574/82583, the mutex
    568 					 * is used for both PHY and NVM.
    569 					 */
    570 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
    571 
    572 	struct wm_phyop phy;
    573 	struct wm_nvmop nvm;
    574 };
    575 
    576 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    577 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    578 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
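/*
 * A minimal usage sketch (illustrative only): the WM_CORE_*() macros
 * degrade to no-ops when sc_core_lock is NULL, so callers need not
 * distinguish the MP-safe and non-MP-safe configurations.
 */
#if 0
	WM_CORE_LOCK(sc);
	KASSERT(WM_CORE_LOCKED(sc));
	/* ... modify shared softc state ... */
	WM_CORE_UNLOCK(sc);
#endif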
    579 
    580 #define	WM_RXCHAIN_RESET(rxq)						\
    581 do {									\
    582 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    583 	*(rxq)->rxq_tailp = NULL;					\
    584 	(rxq)->rxq_len = 0;						\
    585 } while (/*CONSTCOND*/0)
    586 
    587 #define	WM_RXCHAIN_LINK(rxq, m)						\
    588 do {									\
    589 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    590 	(rxq)->rxq_tailp = &(m)->m_next;				\
    591 } while (/*CONSTCOND*/0)
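/*
 * A sketch of how the Rx chain macros cooperate while reassembling a
 * packet that spans several descriptors (m1 and m2 are hypothetical
 * mbufs, one per completed descriptor):
 */
#if 0
	WM_RXCHAIN_RESET(rxq);		/* rxq_head == NULL, rxq_len == 0 */
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == rxq_tail == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
#endif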
    592 
    593 #ifdef WM_EVENT_COUNTERS
    594 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    595 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    596 
    597 #define WM_Q_EVCNT_INCR(qname, evname)			\
    598 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
    599 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
    600 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
    601 #else /* !WM_EVENT_COUNTERS */
    602 #define	WM_EVCNT_INCR(ev)	/* nothing */
    603 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    604 
    605 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
    606 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
    607 #endif /* !WM_EVENT_COUNTERS */
    608 
    609 #define	CSR_READ(sc, reg)						\
    610 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    611 #define	CSR_WRITE(sc, reg, val)						\
    612 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    613 #define	CSR_WRITE_FLUSH(sc)						\
    614 	(void) CSR_READ((sc), WMREG_STATUS)
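/*
 * Illustrative only: CSR_WRITE_FLUSH() forces posted PCI(e) writes out
 * to the device by reading STATUS back.  A typical pattern (WMREG_CTRL
 * and CTRL_RST are assumed from if_wmreg.h):
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
	CSR_WRITE_FLUSH(sc);	/* make sure the write reached the chip */
	delay(10000);		/* only now is it safe to time the reset */
#endif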
    615 
    616 #define ICH8_FLASH_READ32(sc, reg)					\
    617 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    618 	    (reg) + sc->sc_flashreg_offset)
    619 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
    620 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
    621 	    (reg) + sc->sc_flashreg_offset, (data))
    622 
    623 #define ICH8_FLASH_READ16(sc, reg)					\
    624 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    625 	    (reg) + sc->sc_flashreg_offset)
    626 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
    627 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
    628 	    (reg) + sc->sc_flashreg_offset, (data))
    629 
    630 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
    631 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))
    632 
    633 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    634 #define	WM_CDTXADDR_HI(txq, x)						\
    635 	(sizeof(bus_addr_t) == 8 ?					\
    636 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    637 
    638 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    639 #define	WM_CDRXADDR_HI(rxq, x)						\
    640 	(sizeof(bus_addr_t) == 8 ?					\
    641 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
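/*
 * Illustrative only: these helpers split a ring's bus address into the
 * 32-bit halves that the base-address register pairs expect; with a
 * 32-bit bus_addr_t the high half is simply 0.  A sketch (the indexed
 * TDBAH/TDBAL register names are assumptions here):
 */
#if 0
	CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
	CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
#endif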
    642 
    643 /*
    644  * Register read/write functions other than CSR_{READ|WRITE}().
    646  */
    647 #if 0
    648 static inline uint32_t wm_io_read(struct wm_softc *, int);
    649 #endif
    650 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    651 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    652 	uint32_t, uint32_t);
    653 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    654 
    655 /*
    656  * Descriptor sync/init functions.
    657  */
    658 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    659 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    660 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    661 
    662 /*
    663  * Device driver interface functions and commonly used functions.
    664  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    665  */
    666 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    667 static int	wm_match(device_t, cfdata_t, void *);
    668 static void	wm_attach(device_t, device_t, void *);
    669 static int	wm_detach(device_t, int);
    670 static bool	wm_suspend(device_t, const pmf_qual_t *);
    671 static bool	wm_resume(device_t, const pmf_qual_t *);
    672 static void	wm_watchdog(struct ifnet *);
    673 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
    674 static void	wm_tick(void *);
    675 static int	wm_ifflags_cb(struct ethercom *);
    676 static int	wm_ioctl(struct ifnet *, u_long, void *);
    677 /* MAC address related */
    678 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    679 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    680 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    681 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    682 static void	wm_set_filter(struct wm_softc *);
    683 /* Reset and init related */
    684 static void	wm_set_vlan(struct wm_softc *);
    685 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    686 static void	wm_get_auto_rd_done(struct wm_softc *);
    687 static void	wm_lan_init_done(struct wm_softc *);
    688 static void	wm_get_cfg_done(struct wm_softc *);
    689 static void	wm_phy_post_reset(struct wm_softc *);
    690 static void	wm_write_smbus_addr(struct wm_softc *);
    691 static void	wm_init_lcd_from_nvm(struct wm_softc *);
    692 static void	wm_initialize_hardware_bits(struct wm_softc *);
    693 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    694 static void	wm_reset_phy(struct wm_softc *);
    695 static void	wm_flush_desc_rings(struct wm_softc *);
    696 static void	wm_reset(struct wm_softc *);
    697 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    698 static void	wm_rxdrain(struct wm_rxqueue *);
    699 static void	wm_rss_getkey(uint8_t *);
    700 static void	wm_init_rss(struct wm_softc *);
    701 static void	wm_adjust_qnum(struct wm_softc *, int);
    702 static inline bool	wm_is_using_msix(struct wm_softc *);
    703 static inline bool	wm_is_using_multiqueue(struct wm_softc *);
    704 static int	wm_softint_establish(struct wm_softc *, int, int);
    705 static int	wm_setup_legacy(struct wm_softc *);
    706 static int	wm_setup_msix(struct wm_softc *);
    707 static int	wm_init(struct ifnet *);
    708 static int	wm_init_locked(struct ifnet *);
    709 static void	wm_turnon(struct wm_softc *);
    710 static void	wm_turnoff(struct wm_softc *);
    711 static void	wm_stop(struct ifnet *, int);
    712 static void	wm_stop_locked(struct ifnet *, int);
    713 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    714 static void	wm_82547_txfifo_stall(void *);
    715 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    716 static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
    717 /* DMA related */
    718 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    719 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    720 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    721 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    722     struct wm_txqueue *);
    723 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    724 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    725 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    726     struct wm_rxqueue *);
    727 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    728 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    729 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    730 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    731 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    732 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    733 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    734     struct wm_txqueue *);
    735 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    736     struct wm_rxqueue *);
    737 static int	wm_alloc_txrx_queues(struct wm_softc *);
    738 static void	wm_free_txrx_queues(struct wm_softc *);
    739 static int	wm_init_txrx_queues(struct wm_softc *);
    740 /* Start */
    741 static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    742     struct wm_txsoft *, uint32_t *, uint8_t *);
    743 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
    744 static void	wm_start(struct ifnet *);
    745 static void	wm_start_locked(struct ifnet *);
    746 static int	wm_transmit(struct ifnet *, struct mbuf *);
    747 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
    748 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    749 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    750     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
    751 static void	wm_nq_start(struct ifnet *);
    752 static void	wm_nq_start_locked(struct ifnet *);
    753 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
    754 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
    755 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
    756 static void	wm_deferred_start_locked(struct wm_txqueue *);
    757 static void	wm_handle_queue(void *);
    758 /* Interrupt */
    759 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
    760 static void	wm_rxeof(struct wm_rxqueue *, u_int);
    761 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    762 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    763 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    764 static void	wm_linkintr(struct wm_softc *, uint32_t);
    765 static int	wm_intr_legacy(void *);
    766 static inline void	wm_txrxintr_disable(struct wm_queue *);
    767 static inline void	wm_txrxintr_enable(struct wm_queue *);
    768 static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
    769 static int	wm_txrxintr_msix(void *);
    770 static int	wm_linkintr_msix(void *);
    771 
    772 /*
    773  * Media related.
    774  * GMII, SGMII, TBI, SERDES and SFP.
    775  */
    776 /* Common */
    777 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    778 /* GMII related */
    779 static void	wm_gmii_reset(struct wm_softc *);
    780 static void	wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t, uint16_t);
    781 static int	wm_get_phy_id_82575(struct wm_softc *);
    782 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    783 static int	wm_gmii_mediachange(struct ifnet *);
    784 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    785 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    786 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    787 static int	wm_gmii_i82543_readreg(device_t, int, int);
    788 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    789 static int	wm_gmii_mdic_readreg(device_t, int, int);
    790 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
    791 static int	wm_gmii_i82544_readreg(device_t, int, int);
    792 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    793 static int	wm_gmii_i80003_readreg(device_t, int, int);
    794 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    795 static int	wm_gmii_bm_readreg(device_t, int, int);
    796 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    797 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    798 static int	wm_gmii_hv_readreg(device_t, int, int);
    799 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
    800 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    801 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
    802 static int	wm_gmii_82580_readreg(device_t, int, int);
    803 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    804 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    805 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    806 static void	wm_gmii_statchg(struct ifnet *);
    807 /*
    808  * Kumeran related (80003, ICH* and PCH*).
    809  * These functions are not for accessing MII registers but for accessing
    810  * Kumeran-specific registers.
    811  */
    812 static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
    813 static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
    814 static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
    815 static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
    816 /* SGMII */
    817 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    818 static int	wm_sgmii_readreg(device_t, int, int);
    819 static void	wm_sgmii_writereg(device_t, int, int, int);
    820 /* TBI related */
    821 static void	wm_tbi_mediainit(struct wm_softc *);
    822 static int	wm_tbi_mediachange(struct ifnet *);
    823 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    824 static int	wm_check_for_link(struct wm_softc *);
    825 static void	wm_tbi_tick(struct wm_softc *);
    826 /* SERDES related */
    827 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    828 static int	wm_serdes_mediachange(struct ifnet *);
    829 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    830 static void	wm_serdes_tick(struct wm_softc *);
    831 /* SFP related */
    832 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    833 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    834 
    835 /*
    836  * NVM related.
    837  * Microwire, SPI (w/wo EERD) and Flash.
    838  */
    839 /* Misc functions */
    840 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    841 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    842 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    843 /* Microwire */
    844 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    845 /* SPI */
    846 static int	wm_nvm_ready_spi(struct wm_softc *);
    847 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    848 /* Reading via EERD */
    849 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    850 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    851 /* Flash */
    852 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    853     unsigned int *);
    854 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    855 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    856 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    857 	uint32_t *);
    858 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    859 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    860 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
    861 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    862 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
    863 /* iNVM */
    864 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    865 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    866 /* Locking, NVM type detection, checksum validation and reading */
    867 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    868 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    869 static int	wm_nvm_validate_checksum(struct wm_softc *);
    870 static void	wm_nvm_version_invm(struct wm_softc *);
    871 static void	wm_nvm_version(struct wm_softc *);
    872 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    873 
    874 /*
    875  * Hardware semaphores.
    876  * Very complex...
    877  */
    878 static int	wm_get_null(struct wm_softc *);
    879 static void	wm_put_null(struct wm_softc *);
    880 static int	wm_get_eecd(struct wm_softc *);
    881 static void	wm_put_eecd(struct wm_softc *);
    882 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
    883 static void	wm_put_swsm_semaphore(struct wm_softc *);
    884 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    885 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    886 static int	wm_get_nvm_80003(struct wm_softc *);
    887 static void	wm_put_nvm_80003(struct wm_softc *);
    888 static int	wm_get_nvm_82571(struct wm_softc *);
    889 static void	wm_put_nvm_82571(struct wm_softc *);
    890 static int	wm_get_phy_82575(struct wm_softc *);
    891 static void	wm_put_phy_82575(struct wm_softc *);
    892 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
    893 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    894 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
    895 static void	wm_put_swflag_ich8lan(struct wm_softc *);
    896 static int	wm_get_nvm_ich8lan(struct wm_softc *);
    897 static void	wm_put_nvm_ich8lan(struct wm_softc *);
    898 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    899 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    900 
    901 /*
    902  * Management mode and power management related subroutines.
    903  * BMC, AMT, suspend/resume and EEE.
    904  */
    905 #if 0
    906 static int	wm_check_mng_mode(struct wm_softc *);
    907 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    908 static int	wm_check_mng_mode_82574(struct wm_softc *);
    909 static int	wm_check_mng_mode_generic(struct wm_softc *);
    910 #endif
    911 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    912 static bool	wm_phy_resetisblocked(struct wm_softc *);
    913 static void	wm_get_hw_control(struct wm_softc *);
    914 static void	wm_release_hw_control(struct wm_softc *);
    915 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
    916 static void	wm_smbustopci(struct wm_softc *);
    917 static void	wm_init_manageability(struct wm_softc *);
    918 static void	wm_release_manageability(struct wm_softc *);
    919 static void	wm_get_wakeup(struct wm_softc *);
    920 static void	wm_ulp_disable(struct wm_softc *);
    921 static void	wm_enable_phy_wakeup(struct wm_softc *);
    922 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    923 static void	wm_enable_wakeup(struct wm_softc *);
    924 /* LPLU (Low Power Link Up) */
    925 static void	wm_lplu_d0_disable(struct wm_softc *);
    926 /* EEE */
    927 static void	wm_set_eee_i350(struct wm_softc *);
    928 
    929 /*
    930  * Workarounds (mainly PHY related).
    931  * Basically, PHY's workarounds are in the PHY drivers.
    932  */
    933 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    934 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    935 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    936 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    937 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    938 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    939 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    940 static void	wm_reset_init_script_82575(struct wm_softc *);
    941 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    942 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
    943 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
    944 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
    945 static void	wm_pll_workaround_i210(struct wm_softc *);
    946 static void	wm_legacy_irq_quirk_spt(struct wm_softc *);
    947 
    948 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    949     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    950 
    951 /*
    952  * Devices supported by this driver.
    953  */
    954 static const struct wm_product {
    955 	pci_vendor_id_t		wmp_vendor;
    956 	pci_product_id_t	wmp_product;
    957 	const char		*wmp_name;
    958 	wm_chip_type		wmp_type;
    959 	uint32_t		wmp_flags;
    960 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    961 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    962 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    963 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    964 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    965 } wm_products[] = {
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    967 	  "Intel i82542 1000BASE-X Ethernet",
    968 	  WM_T_82542_2_1,	WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    971 	  "Intel i82543GC 1000BASE-X Ethernet",
    972 	  WM_T_82543,		WMP_F_FIBER },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    975 	  "Intel i82543GC 1000BASE-T Ethernet",
    976 	  WM_T_82543,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    979 	  "Intel i82544EI 1000BASE-T Ethernet",
    980 	  WM_T_82544,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    983 	  "Intel i82544EI 1000BASE-X Ethernet",
    984 	  WM_T_82544,		WMP_F_FIBER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    987 	  "Intel i82544GC 1000BASE-T Ethernet",
    988 	  WM_T_82544,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    991 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    992 	  WM_T_82544,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    995 	  "Intel i82540EM 1000BASE-T Ethernet",
    996 	  WM_T_82540,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    999 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
   1000 	  WM_T_82540,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
   1003 	  "Intel i82540EP 1000BASE-T Ethernet",
   1004 	  WM_T_82540,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
   1007 	  "Intel i82540EP 1000BASE-T Ethernet",
   1008 	  WM_T_82540,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
   1011 	  "Intel i82540EP 1000BASE-T Ethernet",
   1012 	  WM_T_82540,		WMP_F_COPPER },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
   1015 	  "Intel i82545EM 1000BASE-T Ethernet",
   1016 	  WM_T_82545,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
   1019 	  "Intel i82545GM 1000BASE-T Ethernet",
   1020 	  WM_T_82545_3,		WMP_F_COPPER },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
   1023 	  "Intel i82545GM 1000BASE-X Ethernet",
   1024 	  WM_T_82545_3,		WMP_F_FIBER },
   1025 
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
   1027 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
   1028 	  WM_T_82545_3,		WMP_F_SERDES },
   1029 
   1030 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
   1031 	  "Intel i82546EB 1000BASE-T Ethernet",
   1032 	  WM_T_82546,		WMP_F_COPPER },
   1033 
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
   1035 	  "Intel i82546EB 1000BASE-T Ethernet",
   1036 	  WM_T_82546,		WMP_F_COPPER },
   1037 
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
   1039 	  "Intel i82545EM 1000BASE-X Ethernet",
   1040 	  WM_T_82545,		WMP_F_FIBER },
   1041 
   1042 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
   1043 	  "Intel i82546EB 1000BASE-X Ethernet",
   1044 	  WM_T_82546,		WMP_F_FIBER },
   1045 
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
   1047 	  "Intel i82546GB 1000BASE-T Ethernet",
   1048 	  WM_T_82546_3,		WMP_F_COPPER },
   1049 
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
   1051 	  "Intel i82546GB 1000BASE-X Ethernet",
   1052 	  WM_T_82546_3,		WMP_F_FIBER },
   1053 
   1054 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
   1055 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
   1056 	  WM_T_82546_3,		WMP_F_SERDES },
   1057 
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
   1059 	  "i82546GB quad-port Gigabit Ethernet",
   1060 	  WM_T_82546_3,		WMP_F_COPPER },
   1061 
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
   1063 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
   1064 	  WM_T_82546_3,		WMP_F_COPPER },
   1065 
   1066 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
   1067 	  "Intel PRO/1000MT (82546GB)",
   1068 	  WM_T_82546_3,		WMP_F_COPPER },
   1069 
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
   1071 	  "Intel i82541EI 1000BASE-T Ethernet",
   1072 	  WM_T_82541,		WMP_F_COPPER },
   1073 
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
   1075 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
   1076 	  WM_T_82541,		WMP_F_COPPER },
   1077 
   1078 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
   1079 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
   1080 	  WM_T_82541,		WMP_F_COPPER },
   1081 
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
   1083 	  "Intel i82541ER 1000BASE-T Ethernet",
   1084 	  WM_T_82541_2,		WMP_F_COPPER },
   1085 
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
   1087 	  "Intel i82541GI 1000BASE-T Ethernet",
   1088 	  WM_T_82541_2,		WMP_F_COPPER },
   1089 
   1090 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
   1091 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
   1092 	  WM_T_82541_2,		WMP_F_COPPER },
   1093 
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
   1095 	  "Intel i82541PI 1000BASE-T Ethernet",
   1096 	  WM_T_82541_2,		WMP_F_COPPER },
   1097 
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
   1099 	  "Intel i82547EI 1000BASE-T Ethernet",
   1100 	  WM_T_82547,		WMP_F_COPPER },
   1101 
   1102 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
   1103 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
   1104 	  WM_T_82547,		WMP_F_COPPER },
   1105 
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
   1107 	  "Intel i82547GI 1000BASE-T Ethernet",
   1108 	  WM_T_82547_2,		WMP_F_COPPER },
   1109 
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
   1111 	  "Intel PRO/1000 PT (82571EB)",
   1112 	  WM_T_82571,		WMP_F_COPPER },
   1113 
   1114 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
   1115 	  "Intel PRO/1000 PF (82571EB)",
   1116 	  WM_T_82571,		WMP_F_FIBER },
   1117 
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
   1119 	  "Intel PRO/1000 PB (82571EB)",
   1120 	  WM_T_82571,		WMP_F_SERDES },
   1121 
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
   1123 	  "Intel PRO/1000 QT (82571EB)",
   1124 	  WM_T_82571,		WMP_F_COPPER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
   1127 	  "Intel PRO/1000 PT Quad Port Server Adapter",
   1128 	  WM_T_82571,		WMP_F_COPPER, },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
   1131 	  "Intel Gigabit PT Quad Port Server ExpressModule",
   1132 	  WM_T_82571,		WMP_F_COPPER, },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
   1135 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
   1136 	  WM_T_82571,		WMP_F_SERDES, },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
   1139 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
   1140 	  WM_T_82571,		WMP_F_SERDES, },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
   1143 	  "Intel 82571EB Quad 1000baseX Ethernet",
   1144 	  WM_T_82571,		WMP_F_FIBER, },
   1145 
   1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
   1147 	  "Intel i82572EI 1000baseT Ethernet",
   1148 	  WM_T_82572,		WMP_F_COPPER },
   1149 
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
   1151 	  "Intel i82572EI 1000baseX Ethernet",
   1152 	  WM_T_82572,		WMP_F_FIBER },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
   1155 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
   1156 	  WM_T_82572,		WMP_F_SERDES },
   1157 
   1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
   1159 	  "Intel i82572EI 1000baseT Ethernet",
   1160 	  WM_T_82572,		WMP_F_COPPER },
   1161 
   1162 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
   1163 	  "Intel i82573E",
   1164 	  WM_T_82573,		WMP_F_COPPER },
   1165 
   1166 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
   1167 	  "Intel i82573E IAMT",
   1168 	  WM_T_82573,		WMP_F_COPPER },
   1169 
   1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
   1171 	  "Intel i82573L Gigabit Ethernet",
   1172 	  WM_T_82573,		WMP_F_COPPER },
   1173 
   1174 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
   1175 	  "Intel i82574L",
   1176 	  WM_T_82574,		WMP_F_COPPER },
   1177 
   1178 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
   1179 	  "Intel i82574L",
   1180 	  WM_T_82574,		WMP_F_COPPER },
   1181 
   1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
   1183 	  "Intel i82583V",
   1184 	  WM_T_82583,		WMP_F_COPPER },
   1185 
   1186 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1187 	  "i80003 dual 1000baseT Ethernet",
   1188 	  WM_T_80003,		WMP_F_COPPER },
   1189 
   1190 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1191 	  "i80003 dual 1000baseX Ethernet",
   1192 	  WM_T_80003,		WMP_F_COPPER },
   1193 
   1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1195 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1196 	  WM_T_80003,		WMP_F_SERDES },
   1197 
   1198 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1199 	  "Intel i80003 1000baseT Ethernet",
   1200 	  WM_T_80003,		WMP_F_COPPER },
   1201 
   1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1203 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1204 	  WM_T_80003,		WMP_F_SERDES },
   1205 
   1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1207 	  "Intel i82801H (M_AMT) LAN Controller",
   1208 	  WM_T_ICH8,		WMP_F_COPPER },
   1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1210 	  "Intel i82801H (AMT) LAN Controller",
   1211 	  WM_T_ICH8,		WMP_F_COPPER },
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1213 	  "Intel i82801H LAN Controller",
   1214 	  WM_T_ICH8,		WMP_F_COPPER },
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1216 	  "Intel i82801H (IFE) 10/100 LAN Controller",
   1217 	  WM_T_ICH8,		WMP_F_COPPER },
   1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1219 	  "Intel i82801H (M) LAN Controller",
   1220 	  WM_T_ICH8,		WMP_F_COPPER },
   1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1222 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
   1223 	  WM_T_ICH8,		WMP_F_COPPER },
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1225 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
   1226 	  WM_T_ICH8,		WMP_F_COPPER },
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
   1228 	  "82567V-3 LAN Controller",
   1229 	  WM_T_ICH8,		WMP_F_COPPER },
   1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1231 	  "82801I (AMT) LAN Controller",
   1232 	  WM_T_ICH9,		WMP_F_COPPER },
   1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1234 	  "82801I 10/100 LAN Controller",
   1235 	  WM_T_ICH9,		WMP_F_COPPER },
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1237 	  "82801I (G) 10/100 LAN Controller",
   1238 	  WM_T_ICH9,		WMP_F_COPPER },
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1240 	  "82801I (GT) 10/100 LAN Controller",
   1241 	  WM_T_ICH9,		WMP_F_COPPER },
   1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1243 	  "82801I (C) LAN Controller",
   1244 	  WM_T_ICH9,		WMP_F_COPPER },
   1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1246 	  "82801I mobile LAN Controller",
   1247 	  WM_T_ICH9,		WMP_F_COPPER },
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
   1249 	  "82801I mobile (V) LAN Controller",
   1250 	  WM_T_ICH9,		WMP_F_COPPER },
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1252 	  "82801I mobile (AMT) LAN Controller",
   1253 	  WM_T_ICH9,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1255 	  "82567LM-4 LAN Controller",
   1256 	  WM_T_ICH9,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1258 	  "82567LM-2 LAN Controller",
   1259 	  WM_T_ICH10,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1261 	  "82567LF-2 LAN Controller",
   1262 	  WM_T_ICH10,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1264 	  "82567LM-3 LAN Controller",
   1265 	  WM_T_ICH10,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1267 	  "82567LF-3 LAN Controller",
   1268 	  WM_T_ICH10,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1270 	  "82567V-2 LAN Controller",
   1271 	  WM_T_ICH10,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1273 	  "82567V-3? LAN Controller",
   1274 	  WM_T_ICH10,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1276 	  "HANKSVILLE LAN Controller",
   1277 	  WM_T_ICH10,		WMP_F_COPPER },
   1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1279 	  "PCH LAN (82577LM) Controller",
   1280 	  WM_T_PCH,		WMP_F_COPPER },
   1281 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1282 	  "PCH LAN (82577LC) Controller",
   1283 	  WM_T_PCH,		WMP_F_COPPER },
   1284 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1285 	  "PCH LAN (82578DM) Controller",
   1286 	  WM_T_PCH,		WMP_F_COPPER },
   1287 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1288 	  "PCH LAN (82578DC) Controller",
   1289 	  WM_T_PCH,		WMP_F_COPPER },
   1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1291 	  "PCH2 LAN (82579LM) Controller",
   1292 	  WM_T_PCH2,		WMP_F_COPPER },
   1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1294 	  "PCH2 LAN (82579V) Controller",
   1295 	  WM_T_PCH2,		WMP_F_COPPER },
   1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1297 	  "82575EB dual-1000baseT Ethernet",
   1298 	  WM_T_82575,		WMP_F_COPPER },
   1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1300 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1301 	  WM_T_82575,		WMP_F_SERDES },
   1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1303 	  "82575GB quad-1000baseT Ethernet",
   1304 	  WM_T_82575,		WMP_F_COPPER },
   1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1306 	  "82575GB quad-1000baseT Ethernet (PM)",
   1307 	  WM_T_82575,		WMP_F_COPPER },
   1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1309 	  "82576 1000BaseT Ethernet",
   1310 	  WM_T_82576,		WMP_F_COPPER },
   1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1312 	  "82576 1000BaseX Ethernet",
   1313 	  WM_T_82576,		WMP_F_FIBER },
   1314 
   1315 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1316 	  "82576 gigabit Ethernet (SERDES)",
   1317 	  WM_T_82576,		WMP_F_SERDES },
   1318 
   1319 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1320 	  "82576 quad-1000BaseT Ethernet",
   1321 	  WM_T_82576,		WMP_F_COPPER },
   1322 
   1323 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1324 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1325 	  WM_T_82576,		WMP_F_COPPER },
   1326 
   1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1328 	  "82576 gigabit Ethernet",
   1329 	  WM_T_82576,		WMP_F_COPPER },
   1330 
   1331 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1332 	  "82576 gigabit Ethernet (SERDES)",
   1333 	  WM_T_82576,		WMP_F_SERDES },
   1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1335 	  "82576 quad-gigabit Ethernet (SERDES)",
   1336 	  WM_T_82576,		WMP_F_SERDES },
   1337 
   1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1339 	  "82580 1000BaseT Ethernet",
   1340 	  WM_T_82580,		WMP_F_COPPER },
   1341 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1342 	  "82580 1000BaseX Ethernet",
   1343 	  WM_T_82580,		WMP_F_FIBER },
   1344 
   1345 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1346 	  "82580 1000BaseT Ethernet (SERDES)",
   1347 	  WM_T_82580,		WMP_F_SERDES },
   1348 
   1349 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1350 	  "82580 gigabit Ethernet (SGMII)",
   1351 	  WM_T_82580,		WMP_F_COPPER },
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1353 	  "82580 dual-1000BaseT Ethernet",
   1354 	  WM_T_82580,		WMP_F_COPPER },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1357 	  "82580 quad-1000BaseX Ethernet",
   1358 	  WM_T_82580,		WMP_F_FIBER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1361 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1362 	  WM_T_82580,		WMP_F_COPPER },
   1363 
   1364 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1365 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1366 	  WM_T_82580,		WMP_F_SERDES },
   1367 
   1368 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1369 	  "DH89XXCC 1000BASE-KX Ethernet",
   1370 	  WM_T_82580,		WMP_F_SERDES },
   1371 
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1373 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1374 	  WM_T_82580,		WMP_F_SERDES },
   1375 
   1376 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1377 	  "I350 Gigabit Network Connection",
   1378 	  WM_T_I350,		WMP_F_COPPER },
   1379 
   1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1381 	  "I350 Gigabit Fiber Network Connection",
   1382 	  WM_T_I350,		WMP_F_FIBER },
   1383 
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1385 	  "I350 Gigabit Backplane Connection",
   1386 	  WM_T_I350,		WMP_F_SERDES },
   1387 
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1389 	  "I350 Quad Port Gigabit Ethernet",
   1390 	  WM_T_I350,		WMP_F_SERDES },
   1391 
   1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1393 	  "I350 Gigabit Connection",
   1394 	  WM_T_I350,		WMP_F_COPPER },
   1395 
   1396 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1397 	  "I354 Gigabit Ethernet (KX)",
   1398 	  WM_T_I354,		WMP_F_SERDES },
   1399 
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1401 	  "I354 Gigabit Ethernet (SGMII)",
   1402 	  WM_T_I354,		WMP_F_COPPER },
   1403 
   1404 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1405 	  "I354 Gigabit Ethernet (2.5G)",
   1406 	  WM_T_I354,		WMP_F_COPPER },
   1407 
   1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1409 	  "I210-T1 Ethernet Server Adapter",
   1410 	  WM_T_I210,		WMP_F_COPPER },
   1411 
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1413 	  "I210 Ethernet (Copper OEM)",
   1414 	  WM_T_I210,		WMP_F_COPPER },
   1415 
   1416 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1417 	  "I210 Ethernet (Copper IT)",
   1418 	  WM_T_I210,		WMP_F_COPPER },
   1419 
   1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1421 	  "I210 Ethernet (FLASH less)",
   1422 	  WM_T_I210,		WMP_F_COPPER },
   1423 
   1424 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1425 	  "I210 Gigabit Ethernet (Fiber)",
   1426 	  WM_T_I210,		WMP_F_FIBER },
   1427 
   1428 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1429 	  "I210 Gigabit Ethernet (SERDES)",
   1430 	  WM_T_I210,		WMP_F_SERDES },
   1431 
   1432 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1433 	  "I210 Gigabit Ethernet (FLASH less)",
   1434 	  WM_T_I210,		WMP_F_SERDES },
   1435 
   1436 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1437 	  "I210 Gigabit Ethernet (SGMII)",
   1438 	  WM_T_I210,		WMP_F_COPPER },
   1439 
   1440 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1441 	  "I211 Ethernet (COPPER)",
   1442 	  WM_T_I211,		WMP_F_COPPER },
   1443 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1444 	  "I217 V Ethernet Connection",
   1445 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1446 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1447 	  "I217 LM Ethernet Connection",
   1448 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1449 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1450 	  "I218 V Ethernet Connection",
   1451 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1452 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1453 	  "I218 V Ethernet Connection",
   1454 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1455 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1456 	  "I218 V Ethernet Connection",
   1457 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1458 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1459 	  "I218 LM Ethernet Connection",
   1460 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1461 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1462 	  "I218 LM Ethernet Connection",
   1463 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1464 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1465 	  "I218 LM Ethernet Connection",
   1466 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1467 #if 0
   1468 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1469 	  "I219 V Ethernet Connection",
   1470 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1471 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1472 	  "I219 V Ethernet Connection",
   1473 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1474 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1475 	  "I219 V Ethernet Connection",
   1476 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1478 	  "I219 V Ethernet Connection",
   1479 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1481 	  "I219 LM Ethernet Connection",
   1482 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1484 	  "I219 LM Ethernet Connection",
   1485 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1487 	  "I219 LM Ethernet Connection",
   1488 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1490 	  "I219 LM Ethernet Connection",
   1491 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1493 	  "I219 LM Ethernet Connection",
   1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1495 #endif
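         	/* Sentinel: wm_lookup() stops at the first entry with a NULL name. */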
   1496 	{ 0,			0,
   1497 	  NULL,
   1498 	  0,			0 },
   1499 };
   1500 
   1501 /*
   1502  * Register read/write functions.
   1503  * Other than CSR_{READ|WRITE}().
   1504  */
   1505 
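         /*
          * The I/O BAR provides a two-register indirect window: the target
          * register offset is latched at I/O offset 0, and the data is then
          * read or written at I/O offset 4.
          */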
   1506 #if 0 /* Not currently used */
   1507 static inline uint32_t
   1508 wm_io_read(struct wm_softc *sc, int reg)
   1509 {
   1510 
   1511 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1512 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1513 }
   1514 #endif
   1515 
   1516 static inline void
   1517 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1518 {
   1519 
   1520 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1521 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1522 }
   1523 
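         /*
          * Write an 8-bit value to an indirectly addressed 82575 controller
          * register: latch the data and target offset, then poll in 5us
          * steps for the ready bit, warning on timeout.
          */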
   1524 static inline void
   1525 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1526     uint32_t data)
   1527 {
   1528 	uint32_t regval;
   1529 	int i;
   1530 
   1531 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1532 
   1533 	CSR_WRITE(sc, reg, regval);
   1534 
   1535 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1536 		delay(5);
   1537 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1538 			break;
   1539 	}
   1540 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1541 		aprint_error("%s: WARNING:"
   1542 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1543 		    device_xname(sc->sc_dev), reg);
   1544 	}
   1545 }
   1546 
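         /*
          * Split a DMA address across the two little-endian 32-bit halves
          * of a wiseman_addr_t; on platforms with a 32-bit bus_addr_t the
          * high word is simply zero.
          */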
   1547 static inline void
   1548 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1549 {
   1550 	wa->wa_low = htole32(v & 0xffffffffU);
   1551 	if (sizeof(bus_addr_t) == 8)
   1552 		wa->wa_high = htole32((uint64_t) v >> 32);
   1553 	else
   1554 		wa->wa_high = 0;
   1555 }
   1556 
   1557 /*
   1558  * Descriptor sync/init functions.
   1559  */
   1560 static inline void
   1561 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1562 {
   1563 	struct wm_softc *sc = txq->txq_sc;
   1564 
   1565 	/* If it will wrap around, sync to the end of the ring. */
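         	/*
         	 * For example, with a 256-descriptor ring, start == 250 and
         	 * num == 10: sync descriptors 250..255 here, then 0..3 below.
         	 */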
   1566 	if ((start + num) > WM_NTXDESC(txq)) {
   1567 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1568 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1569 		    (WM_NTXDESC(txq) - start), ops);
   1570 		num -= (WM_NTXDESC(txq) - start);
   1571 		start = 0;
   1572 	}
   1573 
   1574 	/* Now sync whatever is left. */
   1575 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1576 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1577 }
   1578 
   1579 static inline void
   1580 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1581 {
   1582 	struct wm_softc *sc = rxq->rxq_sc;
   1583 
   1584 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1585 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
   1586 }
   1587 
   1588 static inline void
   1589 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1590 {
   1591 	struct wm_softc *sc = rxq->rxq_sc;
   1592 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1593 	struct mbuf *m = rxs->rxs_mbuf;
   1594 
   1595 	/*
   1596 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1597 	 * so that the payload after the Ethernet header is aligned
   1598 	 * to a 4-byte boundary.
    1599 	 *
   1600 	 * XXX BRAINDAMAGE ALERT!
   1601 	 * The stupid chip uses the same size for every buffer, which
   1602 	 * is set in the Receive Control register.  We are using the 2K
   1603 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1604 	 * reason, we can't "scoot" packets longer than the standard
   1605 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1606 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1607 	 * the upper layer copy the headers.
   1608 	 */
   1609 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1610 
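         	/*
         	 * Three receive descriptor formats are in use: the 82574 takes
         	 * extended descriptors, NEWQUEUE (82575 and later) devices take
         	 * the advanced "nq" format, and everything else takes the
         	 * original wiseman layout.
         	 */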
   1611 	if (sc->sc_type == WM_T_82574) {
   1612 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
   1613 		rxd->erx_data.erxd_addr =
   1614 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1615 		rxd->erx_data.erxd_dd = 0;
   1616 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   1617 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
   1618 
   1619 		rxd->nqrx_data.nrxd_paddr =
   1620 			htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1621 		/* Currently, split header is not supported. */
   1622 		rxd->nqrx_data.nrxd_haddr = 0;
   1623 	} else {
   1624 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1625 
   1626 		wm_set_dma_addr(&rxd->wrx_addr,
   1627 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1628 		rxd->wrx_len = 0;
   1629 		rxd->wrx_cksum = 0;
   1630 		rxd->wrx_status = 0;
   1631 		rxd->wrx_errors = 0;
   1632 		rxd->wrx_special = 0;
   1633 	}
   1634 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1635 
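         	/*
         	 * Tell the chip about the freshly initialized descriptor by
         	 * updating the receive ring tail pointer.
         	 */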
   1636 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1637 }
   1638 
   1639 /*
   1640  * Device driver interface functions and commonly used functions.
   1641  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1642  */
   1643 
   1644 /* Lookup supported device table */
   1645 static const struct wm_product *
   1646 wm_lookup(const struct pci_attach_args *pa)
   1647 {
   1648 	const struct wm_product *wmp;
   1649 
   1650 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1651 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1652 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1653 			return wmp;
   1654 	}
   1655 	return NULL;
   1656 }
   1657 
   1658 /* The match function (ca_match) */
   1659 static int
   1660 wm_match(device_t parent, cfdata_t cf, void *aux)
   1661 {
   1662 	struct pci_attach_args *pa = aux;
   1663 
   1664 	if (wm_lookup(pa) != NULL)
   1665 		return 1;
   1666 
   1667 	return 0;
   1668 }
   1669 
   1670 /* The attach function (ca_attach) */
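         /*
          * Roughly: identify the chip, map its registers, set up interrupts,
          * select the NVM and PHY access methods, read the MAC address and
          * configuration from the NVM, pick the media type, and finally
          * attach the network interface.
          */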
   1671 static void
   1672 wm_attach(device_t parent, device_t self, void *aux)
   1673 {
   1674 	struct wm_softc *sc = device_private(self);
   1675 	struct pci_attach_args *pa = aux;
   1676 	prop_dictionary_t dict;
   1677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1678 	pci_chipset_tag_t pc = pa->pa_pc;
   1679 	int counts[PCI_INTR_TYPE_SIZE];
   1680 	pci_intr_type_t max_type;
   1681 	const char *eetype, *xname;
   1682 	bus_space_tag_t memt;
   1683 	bus_space_handle_t memh;
   1684 	bus_size_t memsize;
   1685 	int memh_valid;
   1686 	int i, error;
   1687 	const struct wm_product *wmp;
   1688 	prop_data_t ea;
   1689 	prop_number_t pn;
   1690 	uint8_t enaddr[ETHER_ADDR_LEN];
   1691 	char buf[256];
   1692 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1693 	pcireg_t preg, memtype;
   1694 	uint16_t eeprom_data, apme_mask;
   1695 	bool force_clear_smbi;
   1696 	uint32_t link_mode;
   1697 	uint32_t reg;
   1698 
   1699 	sc->sc_dev = self;
   1700 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1701 	sc->sc_core_stopping = false;
   1702 
   1703 	wmp = wm_lookup(pa);
   1704 #ifdef DIAGNOSTIC
   1705 	if (wmp == NULL) {
   1706 		printf("\n");
   1707 		panic("wm_attach: impossible");
   1708 	}
   1709 #endif
   1710 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1711 
   1712 	sc->sc_pc = pa->pa_pc;
   1713 	sc->sc_pcitag = pa->pa_tag;
   1714 
   1715 	if (pci_dma64_available(pa))
   1716 		sc->sc_dmat = pa->pa_dmat64;
   1717 	else
   1718 		sc->sc_dmat = pa->pa_dmat;
   1719 
   1720 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1721 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1722 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1723 
   1724 	sc->sc_type = wmp->wmp_type;
   1725 
   1726 	/* Set default function pointers */
   1727 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
   1728 	sc->phy.release = sc->nvm.release = wm_put_null;
   1729 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
   1730 
   1731 	if (sc->sc_type < WM_T_82543) {
   1732 		if (sc->sc_rev < 2) {
   1733 			aprint_error_dev(sc->sc_dev,
   1734 			    "i82542 must be at least rev. 2\n");
   1735 			return;
   1736 		}
   1737 		if (sc->sc_rev < 3)
   1738 			sc->sc_type = WM_T_82542_2_0;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Disable MSI for Errata:
   1743 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1744 	 *
   1745 	 *  82544: Errata 25
   1746 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1747 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1748 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1749 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1750 	 *
   1751 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1752 	 *
   1753 	 *  82571 & 82572: Errata 63
   1754 	 */
   1755 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1756 	    || (sc->sc_type == WM_T_82572))
   1757 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1758 
   1759 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1760 	    || (sc->sc_type == WM_T_82580)
   1761 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1762 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1763 		sc->sc_flags |= WM_F_NEWQUEUE;
   1764 
   1765 	/* Set device properties (mactype) */
   1766 	dict = device_properties(sc->sc_dev);
   1767 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1768 
   1769 	/*
    1770 	 * Map the device.  All devices support memory-mapped access,
   1771 	 * and it is really required for normal operation.
   1772 	 */
   1773 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1774 	switch (memtype) {
   1775 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1776 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1777 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1778 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1779 		break;
   1780 	default:
   1781 		memh_valid = 0;
   1782 		break;
   1783 	}
   1784 
   1785 	if (memh_valid) {
   1786 		sc->sc_st = memt;
   1787 		sc->sc_sh = memh;
   1788 		sc->sc_ss = memsize;
   1789 	} else {
   1790 		aprint_error_dev(sc->sc_dev,
   1791 		    "unable to map device registers\n");
   1792 		return;
   1793 	}
   1794 
   1795 	/*
   1796 	 * In addition, i82544 and later support I/O mapped indirect
   1797 	 * register access.  It is not desirable (nor supported in
   1798 	 * this driver) to use it for normal operation, though it is
   1799 	 * required to work around bugs in some chip versions.
   1800 	 */
   1801 	if (sc->sc_type >= WM_T_82544) {
   1802 		/* First we have to find the I/O BAR. */
   1803 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1804 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1805 			if (memtype == PCI_MAPREG_TYPE_IO)
   1806 				break;
   1807 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1808 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1809 				i += 4;	/* skip high bits, too */
   1810 		}
   1811 		if (i < PCI_MAPREG_END) {
   1812 			/*
   1813 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
    1814 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1815 			 * That's not a problem because newer chips don't
    1816 			 * have this bug.
    1817 			 *
    1818 			 * The i8254x apparently doesn't respond when the
    1819 			 * I/O BAR is 0, which looks as if it hasn't
    1820 			 * been configured.
   1821 			 */
   1822 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1823 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1824 				aprint_error_dev(sc->sc_dev,
   1825 				    "WARNING: I/O BAR at zero.\n");
   1826 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1827 					0, &sc->sc_iot, &sc->sc_ioh,
   1828 					NULL, &sc->sc_ios) == 0) {
   1829 				sc->sc_flags |= WM_F_IOH_VALID;
   1830 			} else {
   1831 				aprint_error_dev(sc->sc_dev,
   1832 				    "WARNING: unable to map I/O space\n");
   1833 			}
   1834 		}
   1835 
   1836 	}
   1837 
   1838 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1839 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1840 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1841 	if (sc->sc_type < WM_T_82542_2_1)
   1842 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1843 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1844 
   1845 	/* power up chip */
   1846 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1847 	    NULL)) && error != EOPNOTSUPP) {
   1848 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1849 		return;
   1850 	}
   1851 
   1852 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1853 
   1854 	/* Allocation settings */
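         	/* MSI-X: one vector per Tx/Rx queue pair plus one for link status. */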
   1855 	max_type = PCI_INTR_TYPE_MSIX;
   1856 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1857 	counts[PCI_INTR_TYPE_MSI] = 1;
   1858 	counts[PCI_INTR_TYPE_INTX] = 1;
   1859 	/* overridden by disable flags */
   1860 	if (wm_disable_msi != 0) {
   1861 		counts[PCI_INTR_TYPE_MSI] = 0;
   1862 		if (wm_disable_msix != 0) {
   1863 			max_type = PCI_INTR_TYPE_INTX;
   1864 			counts[PCI_INTR_TYPE_MSIX] = 0;
   1865 		}
   1866 	} else if (wm_disable_msix != 0) {
   1867 		max_type = PCI_INTR_TYPE_MSI;
   1868 		counts[PCI_INTR_TYPE_MSIX] = 0;
   1869 	}
   1870 
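         /*
          * Interrupt establishment falls back MSI-X -> MSI -> INTx: if
          * setting up handlers for the allocated type fails, release the
          * vectors and retry with the next weaker type.
          */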
   1871 alloc_retry:
   1872 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1873 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1874 		return;
   1875 	}
   1876 
   1877 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1878 		error = wm_setup_msix(sc);
   1879 		if (error) {
   1880 			pci_intr_release(pc, sc->sc_intrs,
   1881 			    counts[PCI_INTR_TYPE_MSIX]);
   1882 
   1883 			/* Setup for MSI: Disable MSI-X */
   1884 			max_type = PCI_INTR_TYPE_MSI;
   1885 			counts[PCI_INTR_TYPE_MSI] = 1;
   1886 			counts[PCI_INTR_TYPE_INTX] = 1;
   1887 			goto alloc_retry;
   1888 		}
    1889 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1890 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1891 		error = wm_setup_legacy(sc);
   1892 		if (error) {
   1893 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1894 			    counts[PCI_INTR_TYPE_MSI]);
   1895 
   1896 			/* The next try is for INTx: Disable MSI */
   1897 			max_type = PCI_INTR_TYPE_INTX;
   1898 			counts[PCI_INTR_TYPE_INTX] = 1;
   1899 			goto alloc_retry;
   1900 		}
   1901 	} else {
   1902 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1903 		error = wm_setup_legacy(sc);
   1904 		if (error) {
   1905 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1906 			    counts[PCI_INTR_TYPE_INTX]);
   1907 			return;
   1908 		}
   1909 	}
   1910 
   1911 	/*
   1912 	 * Check the function ID (unit number of the chip).
   1913 	 */
   1914 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1915 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1916 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1917 	    || (sc->sc_type == WM_T_82580)
   1918 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1919 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1920 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1921 	else
   1922 		sc->sc_funcid = 0;
   1923 
   1924 	/*
   1925 	 * Determine a few things about the bus we're connected to.
   1926 	 */
   1927 	if (sc->sc_type < WM_T_82543) {
   1928 		/* We don't really know the bus characteristics here. */
   1929 		sc->sc_bus_speed = 33;
   1930 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1931 		/*
    1932 		 * CSA (Communication Streaming Architecture) is about as
    1933 		 * fast as a 32-bit, 66MHz PCI bus.
   1934 		 */
   1935 		sc->sc_flags |= WM_F_CSA;
   1936 		sc->sc_bus_speed = 66;
   1937 		aprint_verbose_dev(sc->sc_dev,
   1938 		    "Communication Streaming Architecture\n");
   1939 		if (sc->sc_type == WM_T_82547) {
   1940 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1941 			callout_setfunc(&sc->sc_txfifo_ch,
   1942 					wm_82547_txfifo_stall, sc);
   1943 			aprint_verbose_dev(sc->sc_dev,
   1944 			    "using 82547 Tx FIFO stall work-around\n");
   1945 		}
   1946 	} else if (sc->sc_type >= WM_T_82571) {
   1947 		sc->sc_flags |= WM_F_PCIE;
   1948 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1949 		    && (sc->sc_type != WM_T_ICH10)
   1950 		    && (sc->sc_type != WM_T_PCH)
   1951 		    && (sc->sc_type != WM_T_PCH2)
   1952 		    && (sc->sc_type != WM_T_PCH_LPT)
   1953 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1954 			/* ICH* and PCH* have no PCIe capability registers */
   1955 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1956 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1957 				NULL) == 0)
   1958 				aprint_error_dev(sc->sc_dev,
   1959 				    "unable to find PCIe capability\n");
   1960 		}
   1961 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1962 	} else {
   1963 		reg = CSR_READ(sc, WMREG_STATUS);
   1964 		if (reg & STATUS_BUS64)
   1965 			sc->sc_flags |= WM_F_BUS64;
   1966 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1967 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1968 
   1969 			sc->sc_flags |= WM_F_PCIX;
   1970 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1971 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1972 				aprint_error_dev(sc->sc_dev,
   1973 				    "unable to find PCIX capability\n");
   1974 			else if (sc->sc_type != WM_T_82545_3 &&
   1975 				 sc->sc_type != WM_T_82546_3) {
   1976 				/*
   1977 				 * Work around a problem caused by the BIOS
   1978 				 * setting the max memory read byte count
   1979 				 * incorrectly.
   1980 				 */
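         				/*
         				 * E.g. an MMRBC of 2048 bytes
         				 * (bytecnt 2) with a 1024-byte
         				 * maximum (maxb 1) is clamped
         				 * back down to 1024 bytes.
         				 */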
   1981 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1982 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1983 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1984 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1985 
   1986 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1987 				    PCIX_CMD_BYTECNT_SHIFT;
   1988 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1989 				    PCIX_STATUS_MAXB_SHIFT;
   1990 				if (bytecnt > maxb) {
   1991 					aprint_verbose_dev(sc->sc_dev,
   1992 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1993 					    512 << bytecnt, 512 << maxb);
   1994 					pcix_cmd = (pcix_cmd &
   1995 					    ~PCIX_CMD_BYTECNT_MASK) |
   1996 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1997 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1998 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1999 					    pcix_cmd);
   2000 				}
   2001 			}
   2002 		}
   2003 		/*
   2004 		 * The quad port adapter is special; it has a PCIX-PCIX
   2005 		 * bridge on the board, and can run the secondary bus at
   2006 		 * a higher speed.
   2007 		 */
   2008 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   2009 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   2010 								      : 66;
   2011 		} else if (sc->sc_flags & WM_F_PCIX) {
   2012 			switch (reg & STATUS_PCIXSPD_MASK) {
   2013 			case STATUS_PCIXSPD_50_66:
   2014 				sc->sc_bus_speed = 66;
   2015 				break;
   2016 			case STATUS_PCIXSPD_66_100:
   2017 				sc->sc_bus_speed = 100;
   2018 				break;
   2019 			case STATUS_PCIXSPD_100_133:
   2020 				sc->sc_bus_speed = 133;
   2021 				break;
   2022 			default:
   2023 				aprint_error_dev(sc->sc_dev,
   2024 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   2025 				    reg & STATUS_PCIXSPD_MASK);
   2026 				sc->sc_bus_speed = 66;
   2027 				break;
   2028 			}
   2029 		} else
   2030 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   2031 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   2032 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   2033 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   2034 	}
   2035 
   2036 	/* clear interesting stat counters */
   2037 	CSR_READ(sc, WMREG_COLC);
   2038 	CSR_READ(sc, WMREG_RXERRC);
   2039 
   2040 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   2041 	    || (sc->sc_type >= WM_T_ICH8))
   2042 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2043 	if (sc->sc_type >= WM_T_ICH8)
   2044 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2045 
    2046 	/* Set up NVM access methods and PHY/NVM semaphore callbacks */
   2047 	switch (sc->sc_type) {
   2048 	case WM_T_82542_2_0:
   2049 	case WM_T_82542_2_1:
   2050 	case WM_T_82543:
   2051 	case WM_T_82544:
   2052 		/* Microwire */
   2053 		sc->nvm.read = wm_nvm_read_uwire;
   2054 		sc->sc_nvm_wordsize = 64;
   2055 		sc->sc_nvm_addrbits = 6;
   2056 		break;
   2057 	case WM_T_82540:
   2058 	case WM_T_82545:
   2059 	case WM_T_82545_3:
   2060 	case WM_T_82546:
   2061 	case WM_T_82546_3:
   2062 		/* Microwire */
   2063 		sc->nvm.read = wm_nvm_read_uwire;
   2064 		reg = CSR_READ(sc, WMREG_EECD);
   2065 		if (reg & EECD_EE_SIZE) {
   2066 			sc->sc_nvm_wordsize = 256;
   2067 			sc->sc_nvm_addrbits = 8;
   2068 		} else {
   2069 			sc->sc_nvm_wordsize = 64;
   2070 			sc->sc_nvm_addrbits = 6;
   2071 		}
   2072 		sc->sc_flags |= WM_F_LOCK_EECD;
   2073 		sc->nvm.acquire = wm_get_eecd;
   2074 		sc->nvm.release = wm_put_eecd;
   2075 		break;
   2076 	case WM_T_82541:
   2077 	case WM_T_82541_2:
   2078 	case WM_T_82547:
   2079 	case WM_T_82547_2:
   2080 		reg = CSR_READ(sc, WMREG_EECD);
   2081 		if (reg & EECD_EE_TYPE) {
   2082 			/* SPI */
   2083 			sc->nvm.read = wm_nvm_read_spi;
   2084 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2085 			wm_nvm_set_addrbits_size_eecd(sc);
   2086 		} else {
   2087 			/* Microwire */
   2088 			sc->nvm.read = wm_nvm_read_uwire;
   2089 			if ((reg & EECD_EE_ABITS) != 0) {
   2090 				sc->sc_nvm_wordsize = 256;
   2091 				sc->sc_nvm_addrbits = 8;
   2092 			} else {
   2093 				sc->sc_nvm_wordsize = 64;
   2094 				sc->sc_nvm_addrbits = 6;
   2095 			}
   2096 		}
   2097 		sc->sc_flags |= WM_F_LOCK_EECD;
   2098 		sc->nvm.acquire = wm_get_eecd;
   2099 		sc->nvm.release = wm_put_eecd;
   2100 		break;
   2101 	case WM_T_82571:
   2102 	case WM_T_82572:
   2103 		/* SPI */
   2104 		sc->nvm.read = wm_nvm_read_eerd;
    2105 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2106 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2107 		wm_nvm_set_addrbits_size_eecd(sc);
   2108 		sc->phy.acquire = wm_get_swsm_semaphore;
   2109 		sc->phy.release = wm_put_swsm_semaphore;
   2110 		sc->nvm.acquire = wm_get_nvm_82571;
   2111 		sc->nvm.release = wm_put_nvm_82571;
   2112 		break;
   2113 	case WM_T_82573:
   2114 	case WM_T_82574:
   2115 	case WM_T_82583:
   2116 		sc->nvm.read = wm_nvm_read_eerd;
    2117 		/* Don't use WM_F_LOCK_EECD because we use EERD */
   2118 		if (sc->sc_type == WM_T_82573) {
   2119 			sc->phy.acquire = wm_get_swsm_semaphore;
   2120 			sc->phy.release = wm_put_swsm_semaphore;
   2121 			sc->nvm.acquire = wm_get_nvm_82571;
   2122 			sc->nvm.release = wm_put_nvm_82571;
   2123 		} else {
   2124 			/* Both PHY and NVM use the same semaphore. */
   2125 			sc->phy.acquire = sc->nvm.acquire
   2126 			    = wm_get_swfwhw_semaphore;
   2127 			sc->phy.release = sc->nvm.release
   2128 			    = wm_put_swfwhw_semaphore;
   2129 		}
   2130 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2131 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2132 			sc->sc_nvm_wordsize = 2048;
   2133 		} else {
   2134 			/* SPI */
   2135 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2136 			wm_nvm_set_addrbits_size_eecd(sc);
   2137 		}
   2138 		break;
   2139 	case WM_T_82575:
   2140 	case WM_T_82576:
   2141 	case WM_T_82580:
   2142 	case WM_T_I350:
   2143 	case WM_T_I354:
   2144 	case WM_T_80003:
   2145 		/* SPI */
   2146 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2147 		wm_nvm_set_addrbits_size_eecd(sc);
    2148 		if ((sc->sc_type == WM_T_80003)
   2149 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
   2150 			sc->nvm.read = wm_nvm_read_eerd;
   2151 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2152 		} else {
   2153 			sc->nvm.read = wm_nvm_read_spi;
   2154 			sc->sc_flags |= WM_F_LOCK_EECD;
   2155 		}
   2156 		sc->phy.acquire = wm_get_phy_82575;
   2157 		sc->phy.release = wm_put_phy_82575;
   2158 		sc->nvm.acquire = wm_get_nvm_80003;
   2159 		sc->nvm.release = wm_put_nvm_80003;
   2160 		break;
   2161 	case WM_T_ICH8:
   2162 	case WM_T_ICH9:
   2163 	case WM_T_ICH10:
   2164 	case WM_T_PCH:
   2165 	case WM_T_PCH2:
   2166 	case WM_T_PCH_LPT:
   2167 		sc->nvm.read = wm_nvm_read_ich8;
   2168 		/* FLASH */
   2169 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2170 		sc->sc_nvm_wordsize = 2048;
   2171 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2172 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2173 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2174 			aprint_error_dev(sc->sc_dev,
   2175 			    "can't map FLASH registers\n");
   2176 			goto out;
   2177 		}
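         		/*
         		 * GFPREG describes the NVM region of the flash in
         		 * sector units: the low field holds the first sector,
         		 * the field at bit 16 the last.  Convert these to a
         		 * byte offset and a per-bank size in 16-bit words,
         		 * assuming two banks.
         		 */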
   2178 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2179 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2180 		    ICH_FLASH_SECTOR_SIZE;
   2181 		sc->sc_ich8_flash_bank_size =
   2182 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2183 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2184 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2185 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2186 		sc->sc_flashreg_offset = 0;
   2187 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2188 		sc->phy.release = wm_put_swflag_ich8lan;
   2189 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2190 		sc->nvm.release = wm_put_nvm_ich8lan;
   2191 		break;
   2192 	case WM_T_PCH_SPT:
   2193 		sc->nvm.read = wm_nvm_read_spt;
   2194 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2195 		sc->sc_flags |= WM_F_EEPROM_FLASH;
   2196 		sc->sc_flasht = sc->sc_st;
   2197 		sc->sc_flashh = sc->sc_sh;
   2198 		sc->sc_ich8_flash_base = 0;
   2199 		sc->sc_nvm_wordsize =
   2200 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2201 			* NVM_SIZE_MULTIPLIER;
    2202 		/* The value is in bytes; we want words */
   2203 		sc->sc_nvm_wordsize /= 2;
   2204 		/* assume 2 banks */
   2205 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2206 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2207 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2208 		sc->phy.release = wm_put_swflag_ich8lan;
   2209 		sc->nvm.acquire = wm_get_nvm_ich8lan;
   2210 		sc->nvm.release = wm_put_nvm_ich8lan;
   2211 		break;
   2212 	case WM_T_I210:
   2213 	case WM_T_I211:
   2214 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2215 			sc->nvm.read = wm_nvm_read_eerd;
   2216 			/* Don't use WM_F_LOCK_EECD because we use EERD */
   2217 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2218 			wm_nvm_set_addrbits_size_eecd(sc);
   2219 		} else {
   2220 			sc->nvm.read = wm_nvm_read_invm;
   2221 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2222 			sc->sc_nvm_wordsize = INVM_SIZE;
   2223 		}
   2224 		sc->phy.acquire = wm_get_phy_82575;
   2225 		sc->phy.release = wm_put_phy_82575;
   2226 		sc->nvm.acquire = wm_get_nvm_80003;
   2227 		sc->nvm.release = wm_put_nvm_80003;
   2228 		break;
   2229 	default:
   2230 		break;
   2231 	}
   2232 
   2233 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2234 	switch (sc->sc_type) {
   2235 	case WM_T_82571:
   2236 	case WM_T_82572:
   2237 		reg = CSR_READ(sc, WMREG_SWSM2);
   2238 		if ((reg & SWSM2_LOCK) == 0) {
   2239 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2240 			force_clear_smbi = true;
   2241 		} else
   2242 			force_clear_smbi = false;
   2243 		break;
   2244 	case WM_T_82573:
   2245 	case WM_T_82574:
   2246 	case WM_T_82583:
   2247 		force_clear_smbi = true;
   2248 		break;
   2249 	default:
   2250 		force_clear_smbi = false;
   2251 		break;
   2252 	}
   2253 	if (force_clear_smbi) {
   2254 		reg = CSR_READ(sc, WMREG_SWSM);
   2255 		if ((reg & SWSM_SMBI) != 0)
   2256 			aprint_error_dev(sc->sc_dev,
   2257 			    "Please update the Bootagent\n");
   2258 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2259 	}
   2260 
   2261 	/*
    2262 	 * Defer printing the EEPROM type until after verifying the checksum.
   2263 	 * This allows the EEPROM type to be printed correctly in the case
   2264 	 * that no EEPROM is attached.
   2265 	 */
   2266 	/*
   2267 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2268 	 * this for later, so we can fail future reads from the EEPROM.
   2269 	 */
   2270 	if (wm_nvm_validate_checksum(sc)) {
   2271 		/*
    2272 		 * Retry the check, because some PCI-e parts fail the
    2273 		 * first one due to the link being in a sleep state.
   2274 		 */
   2275 		if (wm_nvm_validate_checksum(sc))
   2276 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2277 	}
   2278 
   2279 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2280 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2281 	else {
   2282 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2283 		    sc->sc_nvm_wordsize);
   2284 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2285 			aprint_verbose("iNVM");
   2286 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2287 			aprint_verbose("FLASH(HW)");
   2288 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2289 			aprint_verbose("FLASH");
   2290 		else {
   2291 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2292 				eetype = "SPI";
   2293 			else
   2294 				eetype = "MicroWire";
   2295 			aprint_verbose("(%d address bits) %s EEPROM",
   2296 			    sc->sc_nvm_addrbits, eetype);
   2297 		}
   2298 	}
   2299 	wm_nvm_version(sc);
   2300 	aprint_verbose("\n");
   2301 
   2302 	/*
    2303 	 * XXX This is the first call to wm_gmii_setup_phytype; the result
    2304 	 * might be incorrect.
   2305 	 */
   2306 	wm_gmii_setup_phytype(sc, 0, 0);
   2307 
   2308 	/* Reset the chip to a known state. */
   2309 	wm_reset(sc);
   2310 
   2311 	/* Check for I21[01] PLL workaround */
   2312 	if (sc->sc_type == WM_T_I210)
   2313 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2314 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2315 		/* NVM image release 3.25 has a workaround */
   2316 		if ((sc->sc_nvm_ver_major < 3)
   2317 		    || ((sc->sc_nvm_ver_major == 3)
   2318 			&& (sc->sc_nvm_ver_minor < 25))) {
   2319 			aprint_verbose_dev(sc->sc_dev,
   2320 			    "ROM image version %d.%d is older than 3.25\n",
   2321 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2322 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2323 		}
   2324 	}
   2325 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2326 		wm_pll_workaround_i210(sc);
   2327 
   2328 	wm_get_wakeup(sc);
   2329 
   2330 	/* Non-AMT based hardware can now take control from firmware */
   2331 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2332 		wm_get_hw_control(sc);
   2333 
   2334 	/*
   2335 	 * Read the Ethernet address from the EEPROM, if not first found
   2336 	 * in device properties.
   2337 	 */
   2338 	ea = prop_dictionary_get(dict, "mac-address");
   2339 	if (ea != NULL) {
   2340 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2341 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2342 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2343 	} else {
   2344 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2345 			aprint_error_dev(sc->sc_dev,
   2346 			    "unable to read Ethernet address\n");
   2347 			goto out;
   2348 		}
   2349 	}
   2350 
   2351 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2352 	    ether_sprintf(enaddr));
   2353 
   2354 	/*
   2355 	 * Read the config info from the EEPROM, and set up various
   2356 	 * bits in the control registers based on their contents.
   2357 	 */
   2358 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2359 	if (pn != NULL) {
   2360 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2361 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2362 	} else {
   2363 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2364 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2365 			goto out;
   2366 		}
   2367 	}
   2368 
   2369 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2370 	if (pn != NULL) {
   2371 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2372 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2373 	} else {
   2374 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2375 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2376 			goto out;
   2377 		}
   2378 	}
   2379 
   2380 	/* check for WM_F_WOL */
   2381 	switch (sc->sc_type) {
   2382 	case WM_T_82542_2_0:
   2383 	case WM_T_82542_2_1:
   2384 	case WM_T_82543:
   2385 		/* dummy? */
   2386 		eeprom_data = 0;
   2387 		apme_mask = NVM_CFG3_APME;
   2388 		break;
   2389 	case WM_T_82544:
   2390 		apme_mask = NVM_CFG2_82544_APM_EN;
   2391 		eeprom_data = cfg2;
   2392 		break;
   2393 	case WM_T_82546:
   2394 	case WM_T_82546_3:
   2395 	case WM_T_82571:
   2396 	case WM_T_82572:
   2397 	case WM_T_82573:
   2398 	case WM_T_82574:
   2399 	case WM_T_82583:
   2400 	case WM_T_80003:
   2401 	default:
   2402 		apme_mask = NVM_CFG3_APME;
   2403 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2404 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2405 		break;
   2406 	case WM_T_82575:
   2407 	case WM_T_82576:
   2408 	case WM_T_82580:
   2409 	case WM_T_I350:
   2410 	case WM_T_I354: /* XXX ok? */
   2411 	case WM_T_ICH8:
   2412 	case WM_T_ICH9:
   2413 	case WM_T_ICH10:
   2414 	case WM_T_PCH:
   2415 	case WM_T_PCH2:
   2416 	case WM_T_PCH_LPT:
   2417 	case WM_T_PCH_SPT:
   2418 		/* XXX The funcid should be checked on some devices */
   2419 		apme_mask = WUC_APME;
   2420 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2421 		break;
   2422 	}
   2423 
   2424 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2425 	if ((eeprom_data & apme_mask) != 0)
   2426 		sc->sc_flags |= WM_F_WOL;
   2427 
   2428 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2429 		/* Check NVM for autonegotiation */
   2430 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2431 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2432 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2433 		}
   2434 	}
   2435 
   2436 	/*
    2437 	 * XXX need special handling for some multi-port cards
    2438 	 * to disable a particular port.
   2439 	 */
   2440 
   2441 	if (sc->sc_type >= WM_T_82544) {
   2442 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2443 		if (pn != NULL) {
   2444 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2445 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2446 		} else {
   2447 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2448 				aprint_error_dev(sc->sc_dev,
   2449 				    "unable to read SWDPIN\n");
   2450 				goto out;
   2451 			}
   2452 		}
   2453 	}
   2454 
   2455 	if (cfg1 & NVM_CFG1_ILOS)
   2456 		sc->sc_ctrl |= CTRL_ILOS;
   2457 
   2458 	/*
   2459 	 * XXX
    2460 	 * This code isn't correct because pins 2 and 3 are located
    2461 	 * at different positions on newer chips. Check all datasheets.
    2462 	 *
    2463 	 * Until this is resolved, only apply it to chips up to the 82580.
   2464 	 */
   2465 	if (sc->sc_type <= WM_T_82580) {
   2466 		if (sc->sc_type >= WM_T_82544) {
   2467 			sc->sc_ctrl |=
   2468 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2469 			    CTRL_SWDPIO_SHIFT;
   2470 			sc->sc_ctrl |=
   2471 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2472 			    CTRL_SWDPINS_SHIFT;
   2473 		} else {
   2474 			sc->sc_ctrl |=
   2475 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2476 			    CTRL_SWDPIO_SHIFT;
   2477 		}
   2478 	}
   2479 
   2480 	/* XXX For other than 82580? */
   2481 	if (sc->sc_type == WM_T_82580) {
   2482 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2483 		if (nvmword & __BIT(13))
   2484 			sc->sc_ctrl |= CTRL_ILOS;
   2485 	}
   2486 
   2487 #if 0
   2488 	if (sc->sc_type >= WM_T_82544) {
   2489 		if (cfg1 & NVM_CFG1_IPS0)
   2490 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2491 		if (cfg1 & NVM_CFG1_IPS1)
   2492 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2493 		sc->sc_ctrl_ext |=
   2494 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2495 		    CTRL_EXT_SWDPIO_SHIFT;
   2496 		sc->sc_ctrl_ext |=
   2497 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2498 		    CTRL_EXT_SWDPINS_SHIFT;
   2499 	} else {
   2500 		sc->sc_ctrl_ext |=
   2501 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2502 		    CTRL_EXT_SWDPIO_SHIFT;
   2503 	}
   2504 #endif
   2505 
   2506 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2507 #if 0
   2508 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2509 #endif
   2510 
   2511 	if (sc->sc_type == WM_T_PCH) {
   2512 		uint16_t val;
   2513 
   2514 		/* Save the NVM K1 bit setting */
   2515 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2516 
   2517 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2518 			sc->sc_nvm_k1_enabled = 1;
   2519 		else
   2520 			sc->sc_nvm_k1_enabled = 0;
   2521 	}
   2522 
    2523 	/* Determine whether we're in GMII, TBI, SERDES or SGMII mode */
   2524 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2525 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2526 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2527 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2528 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2529 		/* Copper only */
   2530 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
    2531 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
    2532 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
    2533 	    || (sc->sc_type == WM_T_I211)) {
   2534 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2535 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2536 		switch (link_mode) {
   2537 		case CTRL_EXT_LINK_MODE_1000KX:
   2538 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2539 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2540 			break;
   2541 		case CTRL_EXT_LINK_MODE_SGMII:
   2542 			if (wm_sgmii_uses_mdio(sc)) {
   2543 				aprint_verbose_dev(sc->sc_dev,
   2544 				    "SGMII(MDIO)\n");
   2545 				sc->sc_flags |= WM_F_SGMII;
   2546 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2547 				break;
   2548 			}
   2549 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2550 			/*FALLTHROUGH*/
   2551 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2552 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2553 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2554 				if (link_mode
   2555 				    == CTRL_EXT_LINK_MODE_SGMII) {
   2556 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2557 					sc->sc_flags |= WM_F_SGMII;
   2558 				} else {
   2559 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2560 					aprint_verbose_dev(sc->sc_dev,
   2561 					    "SERDES\n");
   2562 				}
   2563 				break;
   2564 			}
   2565 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2566 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
   2567 
   2568 			/* Change current link mode setting */
   2569 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2570 			switch (sc->sc_mediatype) {
   2571 			case WM_MEDIATYPE_COPPER:
   2572 				reg |= CTRL_EXT_LINK_MODE_SGMII;
   2573 				break;
   2574 			case WM_MEDIATYPE_SERDES:
   2575 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2576 				break;
   2577 			default:
   2578 				break;
   2579 			}
   2580 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2581 			break;
   2582 		case CTRL_EXT_LINK_MODE_GMII:
   2583 		default:
   2584 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2585 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2586 			break;
   2587 		}
   2588 
    2589 		if ((sc->sc_flags & WM_F_SGMII) != 0)
    2590 			reg |= CTRL_EXT_I2C_ENA;
    2591 		else
    2592 			reg &= ~CTRL_EXT_I2C_ENA;
   2594 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2595 	} else if (sc->sc_type < WM_T_82543 ||
   2596 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2597 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2598 			aprint_error_dev(sc->sc_dev,
   2599 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2600 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2601 		}
   2602 	} else {
   2603 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
   2604 			aprint_error_dev(sc->sc_dev,
   2605 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2606 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2607 		}
   2608 	}
   2609 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
   2610 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
   2611 
   2612 	/* Set device properties (macflags) */
   2613 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2614 
   2615 	/* Initialize the media structures accordingly. */
   2616 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2617 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2618 	else
   2619 		wm_tbi_mediainit(sc); /* All others */
   2620 
   2621 	ifp = &sc->sc_ethercom.ec_if;
   2622 	xname = device_xname(sc->sc_dev);
   2623 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2624 	ifp->if_softc = sc;
   2625 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2626 #ifdef WM_MPSAFE
   2627 	ifp->if_extflags = IFEF_START_MPSAFE;
   2628 #endif
   2629 	ifp->if_ioctl = wm_ioctl;
   2630 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2631 		ifp->if_start = wm_nq_start;
   2632 		/*
   2633 		 * When the number of CPUs is one and the controller can use
    2634 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
    2635 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
    2636 		 * the other for link status changes.
   2637 		 * In this situation, wm_nq_transmit() is disadvantageous
   2638 		 * because of wm_select_txqueue() and pcq(9) overhead.
   2639 		 */
   2640 		if (wm_is_using_multiqueue(sc))
   2641 			ifp->if_transmit = wm_nq_transmit;
   2642 	} else {
   2643 		ifp->if_start = wm_start;
   2644 		/*
    2645 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
   2646 		 */
   2647 		if (wm_is_using_multiqueue(sc))
   2648 			ifp->if_transmit = wm_transmit;
   2649 	}
   2650 	ifp->if_watchdog = wm_watchdog;
   2651 	ifp->if_init = wm_init;
   2652 	ifp->if_stop = wm_stop;
   2653 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2654 	IFQ_SET_READY(&ifp->if_snd);
   2655 
   2656 	/* Check for jumbo frame */
   2657 	switch (sc->sc_type) {
   2658 	case WM_T_82573:
   2659 		/* XXX limited to 9234 if ASPM is disabled */
   2660 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2661 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2662 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2663 		break;
   2664 	case WM_T_82571:
   2665 	case WM_T_82572:
   2666 	case WM_T_82574:
   2667 	case WM_T_82575:
   2668 	case WM_T_82576:
   2669 	case WM_T_82580:
   2670 	case WM_T_I350:
    2671 	case WM_T_I354: /* XXX ok? */
   2672 	case WM_T_I210:
   2673 	case WM_T_I211:
   2674 	case WM_T_80003:
   2675 	case WM_T_ICH9:
   2676 	case WM_T_ICH10:
   2677 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2678 	case WM_T_PCH_LPT:
   2679 	case WM_T_PCH_SPT:
   2680 		/* XXX limited to 9234 */
   2681 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2682 		break;
   2683 	case WM_T_PCH:
   2684 		/* XXX limited to 4096 */
   2685 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2686 		break;
   2687 	case WM_T_82542_2_0:
   2688 	case WM_T_82542_2_1:
   2689 	case WM_T_82583:
   2690 	case WM_T_ICH8:
   2691 		/* No support for jumbo frame */
   2692 		break;
   2693 	default:
   2694 		/* ETHER_MAX_LEN_JUMBO */
   2695 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2696 		break;
   2697 	}
   2698 
    2699 	/* If we're an i82543 or greater, we can support VLANs. */
   2700 	if (sc->sc_type >= WM_T_82543)
   2701 		sc->sc_ethercom.ec_capabilities |=
   2702 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2703 
   2704 	/*
    2705 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2706 	 * on i82543 and later.
   2707 	 */
   2708 	if (sc->sc_type >= WM_T_82543) {
   2709 		ifp->if_capabilities |=
   2710 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2711 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2712 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2713 		    IFCAP_CSUM_TCPv6_Tx |
   2714 		    IFCAP_CSUM_UDPv6_Tx;
   2715 	}
   2716 
   2717 	/*
    2718 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2719 	 *
   2720 	 *	82541GI (8086:1076) ... no
   2721 	 *	82572EI (8086:10b9) ... yes
   2722 	 */
   2723 	if (sc->sc_type >= WM_T_82571) {
   2724 		ifp->if_capabilities |=
   2725 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2726 	}
   2727 
   2728 	/*
    2729 	 * If we're an i82544 or greater (except i82547), we can do
   2730 	 * TCP segmentation offload.
   2731 	 */
   2732 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2733 		ifp->if_capabilities |= IFCAP_TSOv4;
   2734 	}
   2735 
   2736 	if (sc->sc_type >= WM_T_82571) {
   2737 		ifp->if_capabilities |= IFCAP_TSOv6;
   2738 	}
   2739 
   2740 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
   2741 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
   2742 
   2743 #ifdef WM_MPSAFE
   2744 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2745 #else
   2746 	sc->sc_core_lock = NULL;
   2747 #endif
   2748 
   2749 	/* Attach the interface. */
   2750 	if_initialize(ifp);
   2751 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2752 	ether_ifattach(ifp, enaddr);
   2753 	if_register(ifp);
   2754 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2755 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2756 			  RND_FLAG_DEFAULT);
   2757 
   2758 #ifdef WM_EVENT_COUNTERS
   2759 	/* Attach event counters. */
   2760 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2761 	    NULL, xname, "linkintr");
   2762 
   2763 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2764 	    NULL, xname, "tx_xoff");
   2765 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2766 	    NULL, xname, "tx_xon");
   2767 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2768 	    NULL, xname, "rx_xoff");
   2769 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2770 	    NULL, xname, "rx_xon");
   2771 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2772 	    NULL, xname, "rx_macctl");
   2773 #endif /* WM_EVENT_COUNTERS */
   2774 
   2775 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2776 		pmf_class_network_register(self, ifp);
   2777 	else
   2778 		aprint_error_dev(self, "couldn't establish power handler\n");
   2779 
   2780 	sc->sc_flags |= WM_F_ATTACHED;
   2781  out:
   2782 	return;
   2783 }
   2784 
   2785 /* The detach function (ca_detach) */
   2786 static int
   2787 wm_detach(device_t self, int flags __unused)
   2788 {
   2789 	struct wm_softc *sc = device_private(self);
   2790 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2791 	int i;
   2792 
   2793 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2794 		return 0;
   2795 
   2796 	/* Stop the interface. Callouts are stopped in it. */
   2797 	wm_stop(ifp, 1);
   2798 
   2799 	pmf_device_deregister(self);
   2800 
   2801 #ifdef WM_EVENT_COUNTERS
   2802 	evcnt_detach(&sc->sc_ev_linkintr);
   2803 
   2804 	evcnt_detach(&sc->sc_ev_tx_xoff);
   2805 	evcnt_detach(&sc->sc_ev_tx_xon);
   2806 	evcnt_detach(&sc->sc_ev_rx_xoff);
   2807 	evcnt_detach(&sc->sc_ev_rx_xon);
   2808 	evcnt_detach(&sc->sc_ev_rx_macctl);
   2809 #endif /* WM_EVENT_COUNTERS */
   2810 
   2811 	/* Tell the firmware about the release */
   2812 	WM_CORE_LOCK(sc);
   2813 	wm_release_manageability(sc);
   2814 	wm_release_hw_control(sc);
   2815 	wm_enable_wakeup(sc);
   2816 	WM_CORE_UNLOCK(sc);
   2817 
   2818 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2819 
   2820 	/* Delete all remaining media. */
   2821 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2822 
   2823 	ether_ifdetach(ifp);
   2824 	if_detach(ifp);
   2825 	if_percpuq_destroy(sc->sc_ipq);
   2826 
   2827 	/* Unload RX dmamaps and free mbufs */
   2828 	for (i = 0; i < sc->sc_nqueues; i++) {
   2829 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2830 		mutex_enter(rxq->rxq_lock);
   2831 		wm_rxdrain(rxq);
   2832 		mutex_exit(rxq->rxq_lock);
   2833 	}
   2834 	/* Must unlock here */
   2835 
   2836 	/* Disestablish the interrupt handler */
   2837 	for (i = 0; i < sc->sc_nintrs; i++) {
   2838 		if (sc->sc_ihs[i] != NULL) {
   2839 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2840 			sc->sc_ihs[i] = NULL;
   2841 		}
   2842 	}
   2843 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2844 
   2845 	wm_free_txrx_queues(sc);
   2846 
   2847 	/* Unmap the registers */
   2848 	if (sc->sc_ss) {
   2849 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2850 		sc->sc_ss = 0;
   2851 	}
   2852 	if (sc->sc_ios) {
   2853 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2854 		sc->sc_ios = 0;
   2855 	}
   2856 	if (sc->sc_flashs) {
   2857 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2858 		sc->sc_flashs = 0;
   2859 	}
   2860 
   2861 	if (sc->sc_core_lock)
   2862 		mutex_obj_free(sc->sc_core_lock);
   2863 	if (sc->sc_ich_phymtx)
   2864 		mutex_obj_free(sc->sc_ich_phymtx);
   2865 	if (sc->sc_ich_nvmmtx)
   2866 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2867 
   2868 	return 0;
   2869 }
   2870 
   2871 static bool
   2872 wm_suspend(device_t self, const pmf_qual_t *qual)
   2873 {
   2874 	struct wm_softc *sc = device_private(self);
   2875 
   2876 	wm_release_manageability(sc);
   2877 	wm_release_hw_control(sc);
   2878 	wm_enable_wakeup(sc);
   2879 
   2880 	return true;
   2881 }
   2882 
   2883 static bool
   2884 wm_resume(device_t self, const pmf_qual_t *qual)
   2885 {
   2886 	struct wm_softc *sc = device_private(self);
   2887 
   2888 	wm_init_manageability(sc);
   2889 
   2890 	return true;
   2891 }
   2892 
   2893 /*
   2894  * wm_watchdog:		[ifnet interface function]
   2895  *
   2896  *	Watchdog timer handler.
   2897  */
   2898 static void
   2899 wm_watchdog(struct ifnet *ifp)
   2900 {
   2901 	int qid;
   2902 	struct wm_softc *sc = ifp->if_softc;
   2903 
   2904 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2905 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2906 
   2907 		wm_watchdog_txq(ifp, txq);
   2908 	}
   2909 
   2910 	/* Reset the interface. */
   2911 	(void) wm_init(ifp);
   2912 
   2913 	/*
    2914 	 * There is still some upper-layer processing which calls
    2915 	 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
   2916 	 */
   2917 	/* Try to get more packets going. */
   2918 	ifp->if_start(ifp);
   2919 }
   2920 
   2921 static void
   2922 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2923 {
   2924 	struct wm_softc *sc = ifp->if_softc;
   2925 
   2926 	/*
   2927 	 * Since we're using delayed interrupts, sweep up
   2928 	 * before we report an error.
   2929 	 */
   2930 	mutex_enter(txq->txq_lock);
   2931 	wm_txeof(sc, txq);
   2932 	mutex_exit(txq->txq_lock);
   2933 
   2934 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2935 #ifdef WM_DEBUG
   2936 		int i, j;
   2937 		struct wm_txsoft *txs;
   2938 #endif
   2939 		log(LOG_ERR,
   2940 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2941 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2942 		    txq->txq_next);
   2943 		ifp->if_oerrors++;
   2944 #ifdef WM_DEBUG
    2945 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2946 		    i = WM_NEXTTXS(txq, i)) {
    2947 			txs = &txq->txq_soft[i];
    2948 			printf("txs %d tx %d -> %d\n",
    2949 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2950 			for (j = txs->txs_firstdesc; ;
    2951 			    j = WM_NEXTTX(txq, j)) {
    2952 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2953 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2954 				printf("\t %#08x%08x\n",
    2955 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2956 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2957 				if (j == txs->txs_lastdesc)
    2958 					break;
    2959 			}
    2960 		}
   2961 #endif
   2962 	}
   2963 }
   2964 
   2965 /*
   2966  * wm_tick:
   2967  *
   2968  *	One second timer, used to check link status, sweep up
   2969  *	completed transmit jobs, etc.
   2970  */
   2971 static void
   2972 wm_tick(void *arg)
   2973 {
   2974 	struct wm_softc *sc = arg;
   2975 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2976 #ifndef WM_MPSAFE
   2977 	int s = splnet();
   2978 #endif
   2979 
   2980 	WM_CORE_LOCK(sc);
   2981 
   2982 	if (sc->sc_core_stopping)
   2983 		goto out;
   2984 
   2985 	if (sc->sc_type >= WM_T_82542_2_1) {
   2986 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2987 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2988 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2989 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2990 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2991 	}
   2992 
   2993 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2994 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2995 	    + CSR_READ(sc, WMREG_CRCERRS)
   2996 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2997 	    + CSR_READ(sc, WMREG_SYMERRC)
   2998 	    + CSR_READ(sc, WMREG_RXERRC)
   2999 	    + CSR_READ(sc, WMREG_SEC)
   3000 	    + CSR_READ(sc, WMREG_CEXTERR)
   3001 	    + CSR_READ(sc, WMREG_RLEC);
   3002 	/*
    3003 	 * WMREG_RNBC is incremented when there are no available buffers
    3004 	 * in host memory. It does not count dropped packets, because the
    3005 	 * ethernet controller can still receive packets in that case as
    3006 	 * long as there is space in the PHY's FIFO.
    3007 	 *
    3008 	 * If you want to track WMREG_RNBC, use a dedicated EVCNT of your
    3009 	 * own instead of if_iqdrops.
   3010 	 */
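	/*
	 * A minimal sketch (hypothetical, not part of this driver) of
	 * accumulating RNBC in such a private event counter, assuming a
	 * field sc_ev_rnbc attached with evcnt_attach_dynamic():
	 *
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */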
   3011 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
   3012 
   3013 	if (sc->sc_flags & WM_F_HAS_MII)
   3014 		mii_tick(&sc->sc_mii);
   3015 	else if ((sc->sc_type >= WM_T_82575)
   3016 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   3017 		wm_serdes_tick(sc);
   3018 	else
   3019 		wm_tbi_tick(sc);
   3020 
   3021 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   3022 out:
   3023 	WM_CORE_UNLOCK(sc);
   3024 #ifndef WM_MPSAFE
   3025 	splx(s);
   3026 #endif
   3027 }
   3028 
   3029 static int
   3030 wm_ifflags_cb(struct ethercom *ec)
   3031 {
   3032 	struct ifnet *ifp = &ec->ec_if;
   3033 	struct wm_softc *sc = ifp->if_softc;
   3034 	int rc = 0;
   3035 
   3036 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3037 		device_xname(sc->sc_dev), __func__));
   3038 
   3039 	WM_CORE_LOCK(sc);
   3040 
   3041 	int change = ifp->if_flags ^ sc->sc_if_flags;
   3042 	sc->sc_if_flags = ifp->if_flags;
   3043 
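	/*
	 * A change to any flag outside IFF_CANTCHANGE and IFF_DEBUG
	 * requires a full reinit (ENETRESET); IFF_PROMISC and
	 * IFF_ALLMULTI changes are handled below by just reprogramming
	 * the RX filter.
	 */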
   3044 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   3045 		rc = ENETRESET;
   3046 		goto out;
   3047 	}
   3048 
   3049 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   3050 		wm_set_filter(sc);
   3051 
   3052 	wm_set_vlan(sc);
   3053 
   3054 out:
   3055 	WM_CORE_UNLOCK(sc);
   3056 
   3057 	return rc;
   3058 }
   3059 
   3060 /*
   3061  * wm_ioctl:		[ifnet interface function]
   3062  *
   3063  *	Handle control requests from the operator.
   3064  */
   3065 static int
   3066 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   3067 {
   3068 	struct wm_softc *sc = ifp->if_softc;
   3069 	struct ifreq *ifr = (struct ifreq *) data;
   3070 	struct ifaddr *ifa = (struct ifaddr *)data;
   3071 	struct sockaddr_dl *sdl;
   3072 	int s, error;
   3073 
   3074 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3075 		device_xname(sc->sc_dev), __func__));
   3076 
   3077 #ifndef WM_MPSAFE
   3078 	s = splnet();
   3079 #endif
   3080 	switch (cmd) {
   3081 	case SIOCSIFMEDIA:
   3082 	case SIOCGIFMEDIA:
   3083 		WM_CORE_LOCK(sc);
   3084 		/* Flow control requires full-duplex mode. */
   3085 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   3086 		    (ifr->ifr_media & IFM_FDX) == 0)
   3087 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   3088 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   3089 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   3090 				/* We can do both TXPAUSE and RXPAUSE. */
   3091 				ifr->ifr_media |=
   3092 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   3093 			}
   3094 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   3095 		}
   3096 		WM_CORE_UNLOCK(sc);
   3097 #ifdef WM_MPSAFE
   3098 		s = splnet();
   3099 #endif
   3100 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   3101 #ifdef WM_MPSAFE
   3102 		splx(s);
   3103 #endif
   3104 		break;
   3105 	case SIOCINITIFADDR:
   3106 		WM_CORE_LOCK(sc);
   3107 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   3108 			sdl = satosdl(ifp->if_dl->ifa_addr);
   3109 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   3110 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   3111 			/* unicast address is first multicast entry */
   3112 			wm_set_filter(sc);
   3113 			error = 0;
   3114 			WM_CORE_UNLOCK(sc);
   3115 			break;
   3116 		}
   3117 		WM_CORE_UNLOCK(sc);
   3118 		/*FALLTHROUGH*/
   3119 	default:
   3120 #ifdef WM_MPSAFE
   3121 		s = splnet();
   3122 #endif
   3123 		/* It may call wm_start, so unlock here */
   3124 		error = ether_ioctl(ifp, cmd, data);
   3125 #ifdef WM_MPSAFE
   3126 		splx(s);
   3127 #endif
   3128 		if (error != ENETRESET)
   3129 			break;
   3130 
   3131 		error = 0;
   3132 
   3133 		if (cmd == SIOCSIFCAP) {
   3134 			error = (*ifp->if_init)(ifp);
   3135 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
    3136 			; /* Nothing to do */
   3137 		else if (ifp->if_flags & IFF_RUNNING) {
   3138 			/*
   3139 			 * Multicast list has changed; set the hardware filter
   3140 			 * accordingly.
   3141 			 */
   3142 			WM_CORE_LOCK(sc);
   3143 			wm_set_filter(sc);
   3144 			WM_CORE_UNLOCK(sc);
   3145 		}
   3146 		break;
   3147 	}
   3148 
   3149 #ifndef WM_MPSAFE
   3150 	splx(s);
   3151 #endif
   3152 	return error;
   3153 }
   3154 
   3155 /* MAC address related */
   3156 
   3157 /*
    3158  * Get the offset of the MAC address and return it.
    3159  * If an error occurs, use offset 0.
   3160  */
   3161 static uint16_t
   3162 wm_check_alt_mac_addr(struct wm_softc *sc)
   3163 {
   3164 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3165 	uint16_t offset = NVM_OFF_MACADDR;
   3166 
   3167 	/* Try to read alternative MAC address pointer */
   3168 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3169 		return 0;
   3170 
    3171 	/* Check whether the pointer is valid. */
   3172 	if ((offset == 0x0000) || (offset == 0xffff))
   3173 		return 0;
   3174 
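	/*
	 * The alternative MAC address area holds one address per LAN
	 * function; index it by this device's function ID.
	 */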
   3175 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3176 	/*
    3177 	 * Check whether the alternative MAC address is valid.
    3178 	 * Some cards have a non-0xffff pointer but don't actually
    3179 	 * use an alternative MAC address.
    3180 	 *
    3181 	 * Check that the multicast (I/G) bit is not set.
   3182 	 */
   3183 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   3184 		if (((myea[0] & 0xff) & 0x01) == 0)
   3185 			return offset; /* Found */
   3186 
   3187 	/* Not found */
   3188 	return 0;
   3189 }
   3190 
   3191 static int
   3192 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3193 {
   3194 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3195 	uint16_t offset = NVM_OFF_MACADDR;
   3196 	int do_invert = 0;
   3197 
   3198 	switch (sc->sc_type) {
   3199 	case WM_T_82580:
   3200 	case WM_T_I350:
   3201 	case WM_T_I354:
   3202 		/* EEPROM Top Level Partitioning */
   3203 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3204 		break;
   3205 	case WM_T_82571:
   3206 	case WM_T_82575:
   3207 	case WM_T_82576:
   3208 	case WM_T_80003:
   3209 	case WM_T_I210:
   3210 	case WM_T_I211:
   3211 		offset = wm_check_alt_mac_addr(sc);
   3212 		if (offset == 0)
   3213 			if ((sc->sc_funcid & 0x01) == 1)
   3214 				do_invert = 1;
   3215 		break;
   3216 	default:
   3217 		if ((sc->sc_funcid & 0x01) == 1)
   3218 			do_invert = 1;
   3219 		break;
   3220 	}
   3221 
   3222 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3223 		goto bad;
   3224 
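	/*
	 * Each NVM word is a little-endian 16-bit value; the low byte
	 * of word 0 is the first octet of the MAC address.
	 */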
   3225 	enaddr[0] = myea[0] & 0xff;
   3226 	enaddr[1] = myea[0] >> 8;
   3227 	enaddr[2] = myea[1] & 0xff;
   3228 	enaddr[3] = myea[1] >> 8;
   3229 	enaddr[4] = myea[2] & 0xff;
   3230 	enaddr[5] = myea[2] >> 8;
   3231 
   3232 	/*
   3233 	 * Toggle the LSB of the MAC address on the second port
   3234 	 * of some dual port cards.
   3235 	 */
   3236 	if (do_invert != 0)
   3237 		enaddr[5] ^= 1;
   3238 
   3239 	return 0;
   3240 
   3241  bad:
   3242 	return -1;
   3243 }
   3244 
   3245 /*
   3246  * wm_set_ral:
   3247  *
    3248  *	Set an entry in the receive address list.
   3249  */
   3250 static void
   3251 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3252 {
   3253 	uint32_t ral_lo, ral_hi, addrl, addrh;
   3254 	uint32_t wlock_mac;
   3255 	int rv;
   3256 
   3257 	if (enaddr != NULL) {
   3258 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3259 		    (enaddr[3] << 24);
   3260 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3261 		ral_hi |= RAL_AV;
   3262 	} else {
   3263 		ral_lo = 0;
   3264 		ral_hi = 0;
   3265 	}
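	/*
	 * RAL holds the low 32 bits of the station address and RAH the
	 * high 16 bits plus the Address Valid bit.  For example,
	 * 00:11:22:33:44:55 gives ral_lo = 0x33221100 and
	 * ral_hi = 0x5544 | RAL_AV.
	 */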
   3266 
   3267 	switch (sc->sc_type) {
   3268 	case WM_T_82542_2_0:
   3269 	case WM_T_82542_2_1:
   3270 	case WM_T_82543:
   3271 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
   3272 		CSR_WRITE_FLUSH(sc);
   3273 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
   3274 		CSR_WRITE_FLUSH(sc);
   3275 		break;
   3276 	case WM_T_PCH2:
   3277 	case WM_T_PCH_LPT:
   3278 	case WM_T_PCH_SPT:
   3279 		if (idx == 0) {
   3280 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3281 			CSR_WRITE_FLUSH(sc);
   3282 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3283 			CSR_WRITE_FLUSH(sc);
   3284 			return;
   3285 		}
   3286 		if (sc->sc_type != WM_T_PCH2) {
   3287 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
   3288 			    FWSM_WLOCK_MAC);
   3289 			addrl = WMREG_SHRAL(idx - 1);
   3290 			addrh = WMREG_SHRAH(idx - 1);
   3291 		} else {
   3292 			wlock_mac = 0;
   3293 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
   3294 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
   3295 		}
   3296 
   3297 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
   3298 			rv = wm_get_swflag_ich8lan(sc);
   3299 			if (rv != 0)
   3300 				return;
   3301 			CSR_WRITE(sc, addrl, ral_lo);
   3302 			CSR_WRITE_FLUSH(sc);
   3303 			CSR_WRITE(sc, addrh, ral_hi);
   3304 			CSR_WRITE_FLUSH(sc);
   3305 			wm_put_swflag_ich8lan(sc);
   3306 		}
   3307 
   3308 		break;
   3309 	default:
   3310 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
   3311 		CSR_WRITE_FLUSH(sc);
   3312 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
   3313 		CSR_WRITE_FLUSH(sc);
   3314 		break;
   3315 	}
   3316 }
   3317 
   3318 /*
   3319  * wm_mchash:
   3320  *
   3321  *	Compute the hash of the multicast address for the 4096-bit
   3322  *	multicast filter.
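 *
 *	For example, with sc_mchash_type 0 on a non-ICH/PCH chip, the
 *	address 01:00:5e:00:00:01 (enaddr[4] = 0x00, enaddr[5] = 0x01)
 *	hashes to (0x00 >> 4) | (0x01 << 4) = 0x010, selecting word 0,
 *	bit 16 of the multicast table.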
   3323  */
   3324 static uint32_t
   3325 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3326 {
   3327 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3328 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3329 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3330 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3331 	uint32_t hash;
   3332 
   3333 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3334 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3335 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3336 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3337 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3338 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3339 		return (hash & 0x3ff);
   3340 	}
   3341 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3342 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3343 
   3344 	return (hash & 0xfff);
   3345 }
   3346 
   3347 /*
   3348  * wm_set_filter:
   3349  *
   3350  *	Set up the receive filter.
   3351  */
   3352 static void
   3353 wm_set_filter(struct wm_softc *sc)
   3354 {
   3355 	struct ethercom *ec = &sc->sc_ethercom;
   3356 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3357 	struct ether_multi *enm;
   3358 	struct ether_multistep step;
   3359 	bus_addr_t mta_reg;
   3360 	uint32_t hash, reg, bit;
   3361 	int i, size, ralmax;
   3362 
   3363 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3364 		device_xname(sc->sc_dev), __func__));
   3365 
   3366 	if (sc->sc_type >= WM_T_82544)
   3367 		mta_reg = WMREG_CORDOVA_MTA;
   3368 	else
   3369 		mta_reg = WMREG_MTA;
   3370 
   3371 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3372 
   3373 	if (ifp->if_flags & IFF_BROADCAST)
   3374 		sc->sc_rctl |= RCTL_BAM;
   3375 	if (ifp->if_flags & IFF_PROMISC) {
   3376 		sc->sc_rctl |= RCTL_UPE;
   3377 		goto allmulti;
   3378 	}
   3379 
   3380 	/*
   3381 	 * Set the station address in the first RAL slot, and
   3382 	 * clear the remaining slots.
   3383 	 */
   3384 	if (sc->sc_type == WM_T_ICH8)
    3385 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3386 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3387 	    || (sc->sc_type == WM_T_PCH))
   3388 		size = WM_RAL_TABSIZE_ICH8;
   3389 	else if (sc->sc_type == WM_T_PCH2)
   3390 		size = WM_RAL_TABSIZE_PCH2;
    3391 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3392 		size = WM_RAL_TABSIZE_PCH_LPT;
   3393 	else if (sc->sc_type == WM_T_82575)
   3394 		size = WM_RAL_TABSIZE_82575;
   3395 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3396 		size = WM_RAL_TABSIZE_82576;
   3397 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3398 		size = WM_RAL_TABSIZE_I350;
   3399 	else
   3400 		size = WM_RAL_TABSIZE;
   3401 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3402 
   3403 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3404 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3405 		switch (i) {
   3406 		case 0:
   3407 			/* We can use all entries */
   3408 			ralmax = size;
   3409 			break;
   3410 		case 1:
   3411 			/* Only RAR[0] */
   3412 			ralmax = 1;
   3413 			break;
   3414 		default:
   3415 			/* available SHRA + RAR[0] */
   3416 			ralmax = i + 1;
   3417 		}
   3418 	} else
   3419 		ralmax = size;
   3420 	for (i = 1; i < size; i++) {
   3421 		if (i < ralmax)
   3422 			wm_set_ral(sc, NULL, i);
   3423 	}
   3424 
   3425 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3426 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3427 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3428 	    || (sc->sc_type == WM_T_PCH_SPT))
   3429 		size = WM_ICH8_MC_TABSIZE;
   3430 	else
   3431 		size = WM_MC_TABSIZE;
   3432 	/* Clear out the multicast table. */
   3433 	for (i = 0; i < size; i++) {
   3434 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3435 		CSR_WRITE_FLUSH(sc);
   3436 	}
   3437 
   3438 	ETHER_LOCK(ec);
   3439 	ETHER_FIRST_MULTI(step, ec, enm);
   3440 	while (enm != NULL) {
   3441 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3442 			ETHER_UNLOCK(ec);
   3443 			/*
   3444 			 * We must listen to a range of multicast addresses.
   3445 			 * For now, just accept all multicasts, rather than
   3446 			 * trying to set only those filter bits needed to match
   3447 			 * the range.  (At this time, the only use of address
   3448 			 * ranges is for IP multicast routing, for which the
   3449 			 * range is big enough to require all bits set.)
   3450 			 */
   3451 			goto allmulti;
   3452 		}
   3453 
   3454 		hash = wm_mchash(sc, enm->enm_addrlo);
   3455 
   3456 		reg = (hash >> 5);
   3457 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3458 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3459 		    || (sc->sc_type == WM_T_PCH2)
   3460 		    || (sc->sc_type == WM_T_PCH_LPT)
   3461 		    || (sc->sc_type == WM_T_PCH_SPT))
   3462 			reg &= 0x1f;
   3463 		else
   3464 			reg &= 0x7f;
   3465 		bit = hash & 0x1f;
   3466 
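		/*
		 * reg indexes a 32-bit word of the multicast table and
		 * bit selects a bit within that word; together they
		 * address one of the 4096 (1024 on ICH/PCH) filter
		 * bits.
		 */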
   3467 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3468 		hash |= 1U << bit;
   3469 
   3470 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3471 			/*
   3472 			 * 82544 Errata 9: Certain register cannot be written
   3473 			 * with particular alignments in PCI-X bus operation
   3474 			 * (FCAH, MTA and VFTA).
   3475 			 */
   3476 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3477 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3478 			CSR_WRITE_FLUSH(sc);
   3479 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3480 			CSR_WRITE_FLUSH(sc);
   3481 		} else {
   3482 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3483 			CSR_WRITE_FLUSH(sc);
   3484 		}
   3485 
   3486 		ETHER_NEXT_MULTI(step, enm);
   3487 	}
   3488 	ETHER_UNLOCK(ec);
   3489 
   3490 	ifp->if_flags &= ~IFF_ALLMULTI;
   3491 	goto setit;
   3492 
   3493  allmulti:
   3494 	ifp->if_flags |= IFF_ALLMULTI;
   3495 	sc->sc_rctl |= RCTL_MPE;
   3496 
   3497  setit:
   3498 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3499 }
   3500 
   3501 /* Reset and init related */
   3502 
   3503 static void
   3504 wm_set_vlan(struct wm_softc *sc)
   3505 {
   3506 
   3507 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3508 		device_xname(sc->sc_dev), __func__));
   3509 
   3510 	/* Deal with VLAN enables. */
   3511 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3512 		sc->sc_ctrl |= CTRL_VME;
   3513 	else
   3514 		sc->sc_ctrl &= ~CTRL_VME;
   3515 
   3516 	/* Write the control registers. */
   3517 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3518 }
   3519 
   3520 static void
   3521 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3522 {
   3523 	uint32_t gcr;
   3524 	pcireg_t ctrl2;
   3525 
   3526 	gcr = CSR_READ(sc, WMREG_GCR);
   3527 
   3528 	/* Only take action if timeout value is defaulted to 0 */
   3529 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3530 		goto out;
   3531 
   3532 	if ((gcr & GCR_CAP_VER2) == 0) {
   3533 		gcr |= GCR_CMPL_TMOUT_10MS;
   3534 		goto out;
   3535 	}
   3536 
   3537 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3538 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3539 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3540 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3541 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3542 
   3543 out:
   3544 	/* Disable completion timeout resend */
   3545 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3546 
   3547 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3548 }
   3549 
   3550 void
   3551 wm_get_auto_rd_done(struct wm_softc *sc)
   3552 {
   3553 	int i;
   3554 
    3555 	/* Wait for eeprom to reload */
   3556 	switch (sc->sc_type) {
   3557 	case WM_T_82571:
   3558 	case WM_T_82572:
   3559 	case WM_T_82573:
   3560 	case WM_T_82574:
   3561 	case WM_T_82583:
   3562 	case WM_T_82575:
   3563 	case WM_T_82576:
   3564 	case WM_T_82580:
   3565 	case WM_T_I350:
   3566 	case WM_T_I354:
   3567 	case WM_T_I210:
   3568 	case WM_T_I211:
   3569 	case WM_T_80003:
   3570 	case WM_T_ICH8:
   3571 	case WM_T_ICH9:
   3572 		for (i = 0; i < 10; i++) {
   3573 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3574 				break;
   3575 			delay(1000);
   3576 		}
   3577 		if (i == 10) {
   3578 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3579 			    "complete\n", device_xname(sc->sc_dev));
   3580 		}
   3581 		break;
   3582 	default:
   3583 		break;
   3584 	}
   3585 }
   3586 
   3587 void
   3588 wm_lan_init_done(struct wm_softc *sc)
   3589 {
   3590 	uint32_t reg = 0;
   3591 	int i;
   3592 
   3593 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3594 		device_xname(sc->sc_dev), __func__));
   3595 
   3596 	/* Wait for eeprom to reload */
   3597 	switch (sc->sc_type) {
   3598 	case WM_T_ICH10:
   3599 	case WM_T_PCH:
   3600 	case WM_T_PCH2:
   3601 	case WM_T_PCH_LPT:
   3602 	case WM_T_PCH_SPT:
   3603 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3604 			reg = CSR_READ(sc, WMREG_STATUS);
   3605 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3606 				break;
   3607 			delay(100);
   3608 		}
   3609 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3610 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3611 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3612 		}
   3613 		break;
   3614 	default:
   3615 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3616 		    __func__);
   3617 		break;
   3618 	}
   3619 
   3620 	reg &= ~STATUS_LAN_INIT_DONE;
   3621 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3622 }
   3623 
   3624 void
   3625 wm_get_cfg_done(struct wm_softc *sc)
   3626 {
   3627 	int mask;
   3628 	uint32_t reg;
   3629 	int i;
   3630 
   3631 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3632 		device_xname(sc->sc_dev), __func__));
   3633 
   3634 	/* Wait for eeprom to reload */
   3635 	switch (sc->sc_type) {
   3636 	case WM_T_82542_2_0:
   3637 	case WM_T_82542_2_1:
   3638 		/* null */
   3639 		break;
   3640 	case WM_T_82543:
   3641 	case WM_T_82544:
   3642 	case WM_T_82540:
   3643 	case WM_T_82545:
   3644 	case WM_T_82545_3:
   3645 	case WM_T_82546:
   3646 	case WM_T_82546_3:
   3647 	case WM_T_82541:
   3648 	case WM_T_82541_2:
   3649 	case WM_T_82547:
   3650 	case WM_T_82547_2:
   3651 	case WM_T_82573:
   3652 	case WM_T_82574:
   3653 	case WM_T_82583:
   3654 		/* generic */
   3655 		delay(10*1000);
   3656 		break;
   3657 	case WM_T_80003:
   3658 	case WM_T_82571:
   3659 	case WM_T_82572:
   3660 	case WM_T_82575:
   3661 	case WM_T_82576:
   3662 	case WM_T_82580:
   3663 	case WM_T_I350:
   3664 	case WM_T_I354:
   3665 	case WM_T_I210:
   3666 	case WM_T_I211:
   3667 		if (sc->sc_type == WM_T_82571) {
   3668 			/* Only 82571 shares port 0 */
   3669 			mask = EEMNGCTL_CFGDONE_0;
   3670 		} else
   3671 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
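		/* Poll the per-function CFGDONE bit in EEMNGCTL */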
   3672 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3673 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3674 				break;
   3675 			delay(1000);
   3676 		}
   3677 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3678 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3679 				device_xname(sc->sc_dev), __func__));
   3680 		}
   3681 		break;
   3682 	case WM_T_ICH8:
   3683 	case WM_T_ICH9:
   3684 	case WM_T_ICH10:
   3685 	case WM_T_PCH:
   3686 	case WM_T_PCH2:
   3687 	case WM_T_PCH_LPT:
   3688 	case WM_T_PCH_SPT:
   3689 		delay(10*1000);
   3690 		if (sc->sc_type >= WM_T_ICH10)
   3691 			wm_lan_init_done(sc);
   3692 		else
   3693 			wm_get_auto_rd_done(sc);
   3694 
   3695 		reg = CSR_READ(sc, WMREG_STATUS);
   3696 		if ((reg & STATUS_PHYRA) != 0)
   3697 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3698 		break;
   3699 	default:
   3700 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3701 		    __func__);
   3702 		break;
   3703 	}
   3704 }
   3705 
   3706 void
   3707 wm_phy_post_reset(struct wm_softc *sc)
   3708 {
   3709 	uint32_t reg;
   3710 
   3711 	/* This function is only for ICH8 and newer. */
   3712 	if (sc->sc_type < WM_T_ICH8)
   3713 		return;
   3714 
   3715 	if (wm_phy_resetisblocked(sc)) {
   3716 		/* XXX */
   3717 		device_printf(sc->sc_dev, "PHY is blocked\n");
   3718 		return;
   3719 	}
   3720 
   3721 	/* Allow time for h/w to get to quiescent state after reset */
   3722 	delay(10*1000);
   3723 
   3724 	/* Perform any necessary post-reset workarounds */
   3725 	if (sc->sc_type == WM_T_PCH)
   3726 		wm_hv_phy_workaround_ich8lan(sc);
   3727 	if (sc->sc_type == WM_T_PCH2)
   3728 		wm_lv_phy_workaround_ich8lan(sc);
   3729 
   3730 	/* Clear the host wakeup bit after lcd reset */
   3731 	if (sc->sc_type >= WM_T_PCH) {
   3732 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   3733 		    BM_PORT_GEN_CFG);
   3734 		reg &= ~BM_WUC_HOST_WU_BIT;
   3735 		wm_gmii_hv_writereg(sc->sc_dev, 2,
   3736 		    BM_PORT_GEN_CFG, reg);
   3737 	}
   3738 
   3739 	/* Configure the LCD with the extended configuration region in NVM */
   3740 	wm_init_lcd_from_nvm(sc);
   3741 
   3742 	/* Configure the LCD with the OEM bits in NVM */
   3743 }
   3744 
   3745 /* Only for PCH and newer */
   3746 static void
   3747 wm_write_smbus_addr(struct wm_softc *sc)
   3748 {
   3749 	uint32_t strap, freq;
   3750 	uint32_t phy_data;
   3751 
   3752 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3753 		device_xname(sc->sc_dev), __func__));
   3754 
   3755 	strap = CSR_READ(sc, WMREG_STRAP);
   3756 	freq = __SHIFTOUT(strap, STRAP_FREQ);
   3757 
   3758 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
   3759 
   3760 	phy_data &= ~HV_SMB_ADDR_ADDR;
   3761 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
   3762 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
   3763 
   3764 	if (sc->sc_phytype == WMPHY_I217) {
   3765 		/* Restore SMBus frequency */
    3766 		if (freq--) {
   3767 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
   3768 			    | HV_SMB_ADDR_FREQ_HIGH);
   3769 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
   3770 			    HV_SMB_ADDR_FREQ_LOW);
   3771 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
   3772 			    HV_SMB_ADDR_FREQ_HIGH);
   3773 		} else {
   3774 			DPRINTF(WM_DEBUG_INIT,
   3775 			    ("%s: %s Unsupported SMB frequency in PHY\n",
   3776 				device_xname(sc->sc_dev), __func__));
   3777 		}
   3778 	}
   3779 
   3780 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
   3781 }
   3782 
   3783 void
   3784 wm_init_lcd_from_nvm(struct wm_softc *sc)
   3785 {
   3786 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
   3787 	uint16_t phy_page = 0;
   3788 
   3789 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3790 		device_xname(sc->sc_dev), __func__));
   3791 
   3792 	switch (sc->sc_type) {
   3793 	case WM_T_ICH8:
   3794 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
   3795 		    || (sc->sc_phytype != WMPHY_IGP_3))
   3796 			return;
   3797 
   3798 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
   3799 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
   3800 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
   3801 			break;
   3802 		}
   3803 		/* FALLTHROUGH */
   3804 	case WM_T_PCH:
   3805 	case WM_T_PCH2:
   3806 	case WM_T_PCH_LPT:
   3807 	case WM_T_PCH_SPT:
   3808 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
   3809 		break;
   3810 	default:
   3811 		return;
   3812 	}
   3813 
   3814 	sc->phy.acquire(sc);
   3815 
   3816 	reg = CSR_READ(sc, WMREG_FEXTNVM);
   3817 	if ((reg & sw_cfg_mask) == 0)
   3818 		goto release;
   3819 
   3820 	/*
   3821 	 * Make sure HW does not configure LCD from PHY extended configuration
   3822 	 * before SW configuration
   3823 	 */
   3824 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
   3825 	if ((sc->sc_type < WM_T_PCH2)
   3826 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
   3827 		goto release;
   3828 
   3829 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
   3830 		device_xname(sc->sc_dev), __func__));
    3831 	/* The pointer is in DWORDs; shift to get the NVM word address */
   3832 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
   3833 
   3834 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
   3835 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
   3836 
   3837 	if (((sc->sc_type == WM_T_PCH)
   3838 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
   3839 	    || (sc->sc_type > WM_T_PCH)) {
   3840 		/*
   3841 		 * HW configures the SMBus address and LEDs when the OEM and
   3842 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
   3843 		 * are cleared, SW will configure them instead.
   3844 		 */
   3845 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
   3846 			device_xname(sc->sc_dev), __func__));
   3847 		wm_write_smbus_addr(sc);
   3848 
   3849 		reg = CSR_READ(sc, WMREG_LEDCTL);
   3850 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
   3851 	}
   3852 
   3853 	/* Configure LCD from extended configuration region. */
   3854 	for (i = 0; i < cnf_size; i++) {
   3855 		uint16_t reg_data, reg_addr;
   3856 
   3857 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
   3858 			goto release;
   3859 
   3860 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) !=0)
   3861 			goto release;
   3862 
   3863 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
   3864 			phy_page = reg_data;
   3865 
   3866 		reg_addr &= IGPHY_MAXREGADDR;
   3867 		reg_addr |= phy_page;
   3868 
   3869 		sc->phy.release(sc); /* XXX */
   3870 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
   3871 		sc->phy.acquire(sc); /* XXX */
   3872 	}
   3873 
   3874 release:
   3875 	sc->phy.release(sc);
   3876 	return;
   3877 }
   3878 
   3879 
   3880 /* Init hardware bits */
   3881 void
   3882 wm_initialize_hardware_bits(struct wm_softc *sc)
   3883 {
   3884 	uint32_t tarc0, tarc1, reg;
   3885 
   3886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3887 		device_xname(sc->sc_dev), __func__));
   3888 
   3889 	/* For 82571 variant, 80003 and ICHs */
   3890 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3891 	    || (sc->sc_type >= WM_T_80003)) {
   3892 
   3893 		/* Transmit Descriptor Control 0 */
   3894 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3895 		reg |= TXDCTL_COUNT_DESC;
   3896 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3897 
   3898 		/* Transmit Descriptor Control 1 */
   3899 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3900 		reg |= TXDCTL_COUNT_DESC;
   3901 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3902 
   3903 		/* TARC0 */
   3904 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3905 		switch (sc->sc_type) {
   3906 		case WM_T_82571:
   3907 		case WM_T_82572:
   3908 		case WM_T_82573:
   3909 		case WM_T_82574:
   3910 		case WM_T_82583:
   3911 		case WM_T_80003:
   3912 			/* Clear bits 30..27 */
   3913 			tarc0 &= ~__BITS(30, 27);
   3914 			break;
   3915 		default:
   3916 			break;
   3917 		}
   3918 
   3919 		switch (sc->sc_type) {
   3920 		case WM_T_82571:
   3921 		case WM_T_82572:
   3922 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3923 
   3924 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3925 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3926 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3927 			/* 8257[12] Errata No.7 */
    3928 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3929 
   3930 			/* TARC1 bit 28 */
   3931 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3932 				tarc1 &= ~__BIT(28);
   3933 			else
   3934 				tarc1 |= __BIT(28);
   3935 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3936 
   3937 			/*
   3938 			 * 8257[12] Errata No.13
    3939 			 * Disable Dynamic Clock Gating.
   3940 			 */
   3941 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3942 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3943 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3944 			break;
   3945 		case WM_T_82573:
   3946 		case WM_T_82574:
   3947 		case WM_T_82583:
   3948 			if ((sc->sc_type == WM_T_82574)
   3949 			    || (sc->sc_type == WM_T_82583))
   3950 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3951 
   3952 			/* Extended Device Control */
   3953 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3954 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3955 			reg |= __BIT(22);	/* Set bit 22 */
   3956 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3957 
   3958 			/* Device Control */
   3959 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3960 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3961 
   3962 			/* PCIe Control Register */
   3963 			/*
   3964 			 * 82573 Errata (unknown).
   3965 			 *
   3966 			 * 82574 Errata 25 and 82583 Errata 12
   3967 			 * "Dropped Rx Packets":
    3968 			 *   NVM Image Version 2.1.4 and newer does not have this bug.
   3969 			 */
   3970 			reg = CSR_READ(sc, WMREG_GCR);
   3971 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3972 			CSR_WRITE(sc, WMREG_GCR, reg);
   3973 
   3974 			if ((sc->sc_type == WM_T_82574)
   3975 			    || (sc->sc_type == WM_T_82583)) {
   3976 				/*
   3977 				 * Document says this bit must be set for
   3978 				 * proper operation.
   3979 				 */
   3980 				reg = CSR_READ(sc, WMREG_GCR);
   3981 				reg |= __BIT(22);
   3982 				CSR_WRITE(sc, WMREG_GCR, reg);
   3983 
   3984 				/*
    3985 				 * Apply a workaround for the hardware errata
    3986 				 * documented in the errata docs.  It fixes an
    3987 				 * issue where some error-prone or unreliable
    3988 				 * PCIe completions occur, particularly with
    3989 				 * ASPM enabled.  Without the fix, the issue
    3990 				 * can cause Tx timeouts.
   3991 				 */
   3992 				reg = CSR_READ(sc, WMREG_GCR2);
   3993 				reg |= __BIT(0);
   3994 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3995 			}
   3996 			break;
   3997 		case WM_T_80003:
   3998 			/* TARC0 */
   3999 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   4000 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    4001 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   4002 
   4003 			/* TARC1 bit 28 */
   4004 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4005 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4006 				tarc1 &= ~__BIT(28);
   4007 			else
   4008 				tarc1 |= __BIT(28);
   4009 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4010 			break;
   4011 		case WM_T_ICH8:
   4012 		case WM_T_ICH9:
   4013 		case WM_T_ICH10:
   4014 		case WM_T_PCH:
   4015 		case WM_T_PCH2:
   4016 		case WM_T_PCH_LPT:
   4017 		case WM_T_PCH_SPT:
   4018 			/* TARC0 */
   4019 			if ((sc->sc_type == WM_T_ICH8)
   4020 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   4021 				/* Set TARC0 bits 29 and 28 */
   4022 				tarc0 |= __BITS(29, 28);
   4023 			}
   4024 			/* Set TARC0 bits 23,24,26,27 */
   4025 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   4026 
   4027 			/* CTRL_EXT */
   4028 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4029 			reg |= __BIT(22);	/* Set bit 22 */
   4030 			/*
   4031 			 * Enable PHY low-power state when MAC is at D3
   4032 			 * w/o WoL
   4033 			 */
   4034 			if (sc->sc_type >= WM_T_PCH)
   4035 				reg |= CTRL_EXT_PHYPDEN;
   4036 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4037 
   4038 			/* TARC1 */
   4039 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   4040 			/* bit 28 */
   4041 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   4042 				tarc1 &= ~__BIT(28);
   4043 			else
   4044 				tarc1 |= __BIT(28);
   4045 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   4046 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   4047 
   4048 			/* Device Status */
   4049 			if (sc->sc_type == WM_T_ICH8) {
   4050 				reg = CSR_READ(sc, WMREG_STATUS);
   4051 				reg &= ~__BIT(31);
   4052 				CSR_WRITE(sc, WMREG_STATUS, reg);
   4053 
   4054 			}
   4055 
   4056 			/* IOSFPC */
   4057 			if (sc->sc_type == WM_T_PCH_SPT) {
   4058 				reg = CSR_READ(sc, WMREG_IOSFPC);
    4059 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   4060 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   4061 			}
   4062 			/*
   4063 			 * Work-around descriptor data corruption issue during
   4064 			 * NFS v2 UDP traffic, just disable the NFS filtering
   4065 			 * capability.
   4066 			 */
   4067 			reg = CSR_READ(sc, WMREG_RFCTL);
   4068 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   4069 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4070 			break;
   4071 		default:
   4072 			break;
   4073 		}
   4074 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   4075 
   4076 		switch (sc->sc_type) {
   4077 		/*
   4078 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
   4079 		 * Avoid RSS Hash Value bug.
   4080 		 */
   4081 		case WM_T_82571:
   4082 		case WM_T_82572:
   4083 		case WM_T_82573:
   4084 		case WM_T_80003:
   4085 		case WM_T_ICH8:
   4086 			reg = CSR_READ(sc, WMREG_RFCTL);
    4087 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   4088 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4089 			break;
   4090 		case WM_T_82574:
    4091 			/* Use extended Rx descriptors. */
   4092 			reg = CSR_READ(sc, WMREG_RFCTL);
   4093 			reg |= WMREG_RFCTL_EXSTEN;
   4094 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   4095 			break;
   4096 		default:
   4097 			break;
   4098 		}
   4099 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
   4100 		/*
   4101 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
   4102 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
   4103 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
   4104 		 * Correctly by the Device"
   4105 		 *
   4106 		 * I354(C2000) Errata AVR53:
   4107 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
   4108 		 * Hang"
   4109 		 */
   4110 		reg = CSR_READ(sc, WMREG_RFCTL);
   4111 		reg |= WMREG_RFCTL_IPV6EXDIS;
   4112 		CSR_WRITE(sc, WMREG_RFCTL, reg);
   4113 	}
   4114 }
   4115 
   4116 static uint32_t
   4117 wm_rxpbs_adjust_82580(uint32_t val)
   4118 {
   4119 	uint32_t rv = 0;
   4120 
   4121 	if (val < __arraycount(wm_82580_rxpbs_table))
   4122 		rv = wm_82580_rxpbs_table[val];
   4123 
   4124 	return rv;
   4125 }
   4126 
   4127 /*
   4128  * wm_reset_phy:
   4129  *
   4130  *	generic PHY reset function.
   4131  *	Same as e1000_phy_hw_reset_generic()
   4132  */
   4133 static void
   4134 wm_reset_phy(struct wm_softc *sc)
   4135 {
   4136 	uint32_t reg;
   4137 
   4138 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4139 		device_xname(sc->sc_dev), __func__));
   4140 	if (wm_phy_resetisblocked(sc))
   4141 		return;
   4142 
   4143 	sc->phy.acquire(sc);
   4144 
   4145 	reg = CSR_READ(sc, WMREG_CTRL);
   4146 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   4147 	CSR_WRITE_FLUSH(sc);
   4148 
   4149 	delay(sc->phy.reset_delay_us);
   4150 
   4151 	CSR_WRITE(sc, WMREG_CTRL, reg);
   4152 	CSR_WRITE_FLUSH(sc);
   4153 
   4154 	delay(150);
   4155 
   4156 	sc->phy.release(sc);
   4157 
   4158 	wm_get_cfg_done(sc);
   4159 	wm_phy_post_reset(sc);
   4160 }
   4161 
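/*
 * wm_flush_desc_rings:
 *
 *	Flush the TX and RX descriptor rings before a reset if the
 *	hardware reports a pending flush request (apparently an
 *	I219/PCH_SPT workaround): queue one dummy TX descriptor, then
 *	briefly re-enable the RX ring with adjusted thresholds so the
 *	flush can complete.
 */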
   4162 static void
   4163 wm_flush_desc_rings(struct wm_softc *sc)
   4164 {
   4165 	pcireg_t preg;
   4166 	uint32_t reg;
   4167 	struct wm_txqueue *txq;
   4168 	wiseman_txdesc_t *txd;
   4169 	int nexttx;
   4170 	uint32_t rctl;
   4171 
   4172 	/* First, disable MULR fix in FEXTNVM11 */
   4173 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
   4174 	reg |= FEXTNVM11_DIS_MULRFIX;
   4175 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   4176 
   4177 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4178 	reg = CSR_READ(sc, WMREG_TDLEN(0));
   4179 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
   4180 		return;
   4181 
   4182 	/* TX */
   4183 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
   4184 	    device_xname(sc->sc_dev), preg, reg);
   4185 	reg = CSR_READ(sc, WMREG_TCTL);
   4186 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
   4187 
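	/*
	 * Queue a single dummy 512-byte descriptor so that the pending
	 * flush request can complete.
	 */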
   4188 	txq = &sc->sc_queue[0].wmq_txq;
   4189 	nexttx = txq->txq_next;
   4190 	txd = &txq->txq_descs[nexttx];
   4191 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
    4192 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
   4193 	txd->wtx_fields.wtxu_status = 0;
   4194 	txd->wtx_fields.wtxu_options = 0;
   4195 	txd->wtx_fields.wtxu_vlan = 0;
   4196 
   4197 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4198 	    BUS_SPACE_BARRIER_WRITE);
   4199 
   4200 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   4201 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
   4202 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
   4203 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
   4204 	delay(250);
   4205 
   4206 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
   4207 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
   4208 		return;
   4209 
   4210 	/* RX */
   4211 	printf("%s: Need RX flush (reg = %08x)\n",
   4212 	    device_xname(sc->sc_dev), preg);
   4213 	rctl = CSR_READ(sc, WMREG_RCTL);
   4214 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4215 	CSR_WRITE_FLUSH(sc);
   4216 	delay(150);
   4217 
   4218 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
   4219 	/* zero the lower 14 bits (prefetch and host thresholds) */
   4220 	reg &= 0xffffc000;
   4221 	/*
   4222 	 * update thresholds: prefetch threshold to 31, host threshold
   4223 	 * to 1 and make sure the granularity is "descriptors" and not
   4224 	 * "cache lines"
   4225 	 */
   4226 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
   4227 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
   4228 
   4229 	/*
   4230 	 * momentarily enable the RX ring for the changes to take
   4231 	 * effect
   4232 	 */
   4233 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
   4234 	CSR_WRITE_FLUSH(sc);
   4235 	delay(150);
   4236 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
   4237 }
   4238 
   4239 /*
   4240  * wm_reset:
   4241  *
   4242  *	Reset the i82542 chip.
   4243  */
   4244 static void
   4245 wm_reset(struct wm_softc *sc)
   4246 {
   4247 	int phy_reset = 0;
   4248 	int i, error = 0;
   4249 	uint32_t reg;
   4250 	uint16_t kmreg;
   4251 	int rv;
   4252 
   4253 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4254 		device_xname(sc->sc_dev), __func__));
   4255 	KASSERT(sc->sc_type != 0);
   4256 
   4257 	/*
   4258 	 * Allocate on-chip memory according to the MTU size.
   4259 	 * The Packet Buffer Allocation register must be written
   4260 	 * before the chip is reset.
   4261 	 */
   4262 	switch (sc->sc_type) {
   4263 	case WM_T_82547:
   4264 	case WM_T_82547_2:
   4265 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4266 		    PBA_22K : PBA_30K;
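		/*
		 * The TX FIFO gets whatever remains of the 40KB packet
		 * buffer above the RX allocation, e.g. 10KB with
		 * PBA_30K.
		 */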
   4267 		for (i = 0; i < sc->sc_nqueues; i++) {
   4268 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4269 			txq->txq_fifo_head = 0;
   4270 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   4271 			txq->txq_fifo_size =
   4272 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   4273 			txq->txq_fifo_stall = 0;
   4274 		}
   4275 		break;
   4276 	case WM_T_82571:
   4277 	case WM_T_82572:
    4278 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   4279 	case WM_T_80003:
   4280 		sc->sc_pba = PBA_32K;
   4281 		break;
   4282 	case WM_T_82573:
   4283 		sc->sc_pba = PBA_12K;
   4284 		break;
   4285 	case WM_T_82574:
   4286 	case WM_T_82583:
   4287 		sc->sc_pba = PBA_20K;
   4288 		break;
   4289 	case WM_T_82576:
   4290 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   4291 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   4292 		break;
   4293 	case WM_T_82580:
   4294 	case WM_T_I350:
   4295 	case WM_T_I354:
   4296 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   4297 		break;
   4298 	case WM_T_I210:
   4299 	case WM_T_I211:
   4300 		sc->sc_pba = PBA_34K;
   4301 		break;
   4302 	case WM_T_ICH8:
   4303 		/* Workaround for a bit corruption issue in FIFO memory */
   4304 		sc->sc_pba = PBA_8K;
   4305 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   4306 		break;
   4307 	case WM_T_ICH9:
   4308 	case WM_T_ICH10:
   4309 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   4310 		    PBA_14K : PBA_10K;
   4311 		break;
   4312 	case WM_T_PCH:
   4313 	case WM_T_PCH2:
   4314 	case WM_T_PCH_LPT:
   4315 	case WM_T_PCH_SPT:
   4316 		sc->sc_pba = PBA_26K;
   4317 		break;
   4318 	default:
   4319 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   4320 		    PBA_40K : PBA_48K;
   4321 		break;
   4322 	}
   4323 	/*
   4324 	 * Only old or non-multiqueue devices have the PBA register
   4325 	 * XXX Need special handling for 82575.
   4326 	 */
   4327 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   4328 	    || (sc->sc_type == WM_T_82575))
   4329 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   4330 
   4331 	/* Prevent the PCI-E bus from sticking */
   4332 	if (sc->sc_flags & WM_F_PCIE) {
   4333 		int timeout = 800;
   4334 
   4335 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   4336 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   4337 
   4338 		while (timeout--) {
   4339 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   4340 			    == 0)
   4341 				break;
   4342 			delay(100);
   4343 		}
   4344 		if (timeout == 0)
   4345 			device_printf(sc->sc_dev,
   4346 			    "failed to disable busmastering\n");
   4347 	}
   4348 
   4349 	/* Set the completion timeout for interface */
   4350 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   4351 	    || (sc->sc_type == WM_T_82580)
   4352 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4353 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   4354 		wm_set_pcie_completion_timeout(sc);
   4355 
   4356 	/* Clear interrupt */
   4357 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4358 	if (wm_is_using_msix(sc)) {
   4359 		if (sc->sc_type != WM_T_82574) {
   4360 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4361 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4362 		} else {
   4363 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4364 		}
   4365 	}
   4366 
   4367 	/* Stop the transmit and receive processes. */
   4368 	CSR_WRITE(sc, WMREG_RCTL, 0);
   4369 	sc->sc_rctl &= ~RCTL_EN;
   4370 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   4371 	CSR_WRITE_FLUSH(sc);
   4372 
   4373 	/* XXX set_tbi_sbp_82543() */
   4374 
   4375 	delay(10*1000);
   4376 
   4377 	/* Must acquire the MDIO ownership before MAC reset */
   4378 	switch (sc->sc_type) {
   4379 	case WM_T_82573:
   4380 	case WM_T_82574:
   4381 	case WM_T_82583:
   4382 		error = wm_get_hw_semaphore_82573(sc);
   4383 		break;
   4384 	default:
   4385 		break;
   4386 	}
   4387 
   4388 	/*
   4389 	 * 82541 Errata 29? & 82547 Errata 28?
   4390 	 * See also the description about PHY_RST bit in CTRL register
   4391 	 * in 8254x_GBe_SDM.pdf.
   4392 	 */
   4393 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   4394 		CSR_WRITE(sc, WMREG_CTRL,
   4395 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   4396 		CSR_WRITE_FLUSH(sc);
   4397 		delay(5000);
   4398 	}
   4399 
   4400 	switch (sc->sc_type) {
   4401 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   4402 	case WM_T_82541:
   4403 	case WM_T_82541_2:
   4404 	case WM_T_82547:
   4405 	case WM_T_82547_2:
   4406 		/*
   4407 		 * On some chipsets, a reset through a memory-mapped write
   4408 		 * cycle can cause the chip to reset before completing the
   4409 		 * write cycle.  This causes major headache that can be
   4410 		 * avoided by issuing the reset via indirect register writes
   4411 		 * through I/O space.
   4412 		 *
   4413 		 * So, if we successfully mapped the I/O BAR at attach time,
   4414 		 * use that.  Otherwise, try our luck with a memory-mapped
   4415 		 * reset.
   4416 		 */
   4417 		if (sc->sc_flags & WM_F_IOH_VALID)
   4418 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   4419 		else
   4420 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   4421 		break;
   4422 	case WM_T_82545_3:
   4423 	case WM_T_82546_3:
   4424 		/* Use the shadow control register on these chips. */
   4425 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   4426 		break;
   4427 	case WM_T_80003:
   4428 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4429 		sc->phy.acquire(sc);
   4430 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4431 		sc->phy.release(sc);
   4432 		break;
   4433 	case WM_T_ICH8:
   4434 	case WM_T_ICH9:
   4435 	case WM_T_ICH10:
   4436 	case WM_T_PCH:
   4437 	case WM_T_PCH2:
   4438 	case WM_T_PCH_LPT:
   4439 	case WM_T_PCH_SPT:
   4440 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   4441 		if (wm_phy_resetisblocked(sc) == false) {
   4442 			/*
   4443 			 * Gate automatic PHY configuration by hardware on
   4444 			 * non-managed 82579
   4445 			 */
   4446 			if ((sc->sc_type == WM_T_PCH2)
   4447 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   4448 				== 0))
   4449 				wm_gate_hw_phy_config_ich8lan(sc, true);
   4450 
   4451 			reg |= CTRL_PHY_RESET;
   4452 			phy_reset = 1;
   4453 		} else
   4454 			printf("XXX reset is blocked!!!\n");
   4455 		sc->phy.acquire(sc);
   4456 		CSR_WRITE(sc, WMREG_CTRL, reg);
    4457 		/* Don't insert a completion barrier during reset */
   4458 		delay(20*1000);
   4459 		mutex_exit(sc->sc_ich_phymtx);
   4460 		break;
   4461 	case WM_T_82580:
   4462 	case WM_T_I350:
   4463 	case WM_T_I354:
   4464 	case WM_T_I210:
   4465 	case WM_T_I211:
   4466 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4467 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   4468 			CSR_WRITE_FLUSH(sc);
   4469 		delay(5000);
   4470 		break;
   4471 	case WM_T_82542_2_0:
   4472 	case WM_T_82542_2_1:
   4473 	case WM_T_82543:
   4474 	case WM_T_82540:
   4475 	case WM_T_82545:
   4476 	case WM_T_82546:
   4477 	case WM_T_82571:
   4478 	case WM_T_82572:
   4479 	case WM_T_82573:
   4480 	case WM_T_82574:
   4481 	case WM_T_82575:
   4482 	case WM_T_82576:
   4483 	case WM_T_82583:
   4484 	default:
   4485 		/* Everything else can safely use the documented method. */
   4486 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   4487 		break;
   4488 	}
   4489 
   4490 	/* Must release the MDIO ownership after MAC reset */
   4491 	switch (sc->sc_type) {
   4492 	case WM_T_82573:
   4493 	case WM_T_82574:
   4494 	case WM_T_82583:
   4495 		if (error == 0)
   4496 			wm_put_hw_semaphore_82573(sc);
   4497 		break;
   4498 	default:
   4499 		break;
   4500 	}
   4501 
   4502 	if (phy_reset != 0)
   4503 		wm_get_cfg_done(sc);
   4504 
   4505 	/* reload EEPROM */
   4506 	switch (sc->sc_type) {
   4507 	case WM_T_82542_2_0:
   4508 	case WM_T_82542_2_1:
   4509 	case WM_T_82543:
   4510 	case WM_T_82544:
   4511 		delay(10);
   4512 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4513 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4514 		CSR_WRITE_FLUSH(sc);
   4515 		delay(2000);
   4516 		break;
   4517 	case WM_T_82540:
   4518 	case WM_T_82545:
   4519 	case WM_T_82545_3:
   4520 	case WM_T_82546:
   4521 	case WM_T_82546_3:
   4522 		delay(5*1000);
   4523 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4524 		break;
   4525 	case WM_T_82541:
   4526 	case WM_T_82541_2:
   4527 	case WM_T_82547:
   4528 	case WM_T_82547_2:
   4529 		delay(20000);
   4530 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4531 		break;
   4532 	case WM_T_82571:
   4533 	case WM_T_82572:
   4534 	case WM_T_82573:
   4535 	case WM_T_82574:
   4536 	case WM_T_82583:
   4537 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4538 			delay(10);
   4539 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4540 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4541 			CSR_WRITE_FLUSH(sc);
   4542 		}
   4543 		/* check EECD_EE_AUTORD */
   4544 		wm_get_auto_rd_done(sc);
   4545 		/*
    4546 		 * PHY configuration from the NVM starts only after
    4547 		 * EECD_AUTO_RD is set.
   4548 		 */
   4549 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4550 		    || (sc->sc_type == WM_T_82583))
   4551 			delay(25*1000);
   4552 		break;
   4553 	case WM_T_82575:
   4554 	case WM_T_82576:
   4555 	case WM_T_82580:
   4556 	case WM_T_I350:
   4557 	case WM_T_I354:
   4558 	case WM_T_I210:
   4559 	case WM_T_I211:
   4560 	case WM_T_80003:
   4561 		/* check EECD_EE_AUTORD */
   4562 		wm_get_auto_rd_done(sc);
   4563 		break;
   4564 	case WM_T_ICH8:
   4565 	case WM_T_ICH9:
   4566 	case WM_T_ICH10:
   4567 	case WM_T_PCH:
   4568 	case WM_T_PCH2:
   4569 	case WM_T_PCH_LPT:
   4570 	case WM_T_PCH_SPT:
   4571 		break;
   4572 	default:
   4573 		panic("%s: unknown type\n", __func__);
   4574 	}
   4575 
   4576 	/* Check whether EEPROM is present or not */
   4577 	switch (sc->sc_type) {
   4578 	case WM_T_82575:
   4579 	case WM_T_82576:
   4580 	case WM_T_82580:
   4581 	case WM_T_I350:
   4582 	case WM_T_I354:
   4583 	case WM_T_ICH8:
   4584 	case WM_T_ICH9:
   4585 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4586 			/* Not found */
   4587 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4588 			if (sc->sc_type == WM_T_82575)
   4589 				wm_reset_init_script_82575(sc);
   4590 		}
   4591 		break;
   4592 	default:
   4593 		break;
   4594 	}
   4595 
   4596 	if (phy_reset != 0)
   4597 		wm_phy_post_reset(sc);
   4598 
   4599 	if ((sc->sc_type == WM_T_82580)
   4600 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4601 		/* clear global device reset status bit */
   4602 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4603 	}
   4604 
   4605 	/* Clear any pending interrupt events. */
   4606 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4607 	reg = CSR_READ(sc, WMREG_ICR);
   4608 	if (wm_is_using_msix(sc)) {
   4609 		if (sc->sc_type != WM_T_82574) {
   4610 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4611 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4612 		} else
   4613 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4614 	}
   4615 
   4616 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4617 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4618 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4619 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4620 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4621 		reg |= KABGTXD_BGSQLBIAS;
   4622 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4623 	}
   4624 
   4625 	/* reload sc_ctrl */
   4626 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4627 
   4628 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4629 		wm_set_eee_i350(sc);
   4630 
   4631 	/*
   4632 	 * For PCH, this write will make sure that any noise will be detected
   4633 	 * as a CRC error and be dropped rather than show up as a bad packet
    4634 	 * to the DMA engine.
   4635 	 */
   4636 	if (sc->sc_type == WM_T_PCH)
   4637 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4638 
   4639 	if (sc->sc_type >= WM_T_82544)
   4640 		CSR_WRITE(sc, WMREG_WUC, 0);
   4641 
   4642 	wm_reset_mdicnfg_82580(sc);
   4643 
   4644 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4645 		wm_pll_workaround_i210(sc);
   4646 
   4647 	if (sc->sc_type == WM_T_80003) {
   4648 		/* default to TRUE to enable the MDIC W/A */
   4649 		sc->sc_flags |= WM_F_80003_MDIC_WA;
   4650 
   4651 		rv = wm_kmrn_readreg(sc,
   4652 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
   4653 		if (rv == 0) {
   4654 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
   4655 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
   4656 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
   4657 			else
   4658 				sc->sc_flags |= WM_F_80003_MDIC_WA;
   4659 		}
   4660 	}
   4661 }
   4662 
   4663 /*
   4664  * wm_add_rxbuf:
   4665  *
    4666  *	Add a receive buffer to the indicated descriptor.
   4667  */
   4668 static int
   4669 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4670 {
   4671 	struct wm_softc *sc = rxq->rxq_sc;
   4672 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4673 	struct mbuf *m;
   4674 	int error;
   4675 
   4676 	KASSERT(mutex_owned(rxq->rxq_lock));
   4677 
   4678 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4679 	if (m == NULL)
   4680 		return ENOBUFS;
   4681 
   4682 	MCLGET(m, M_DONTWAIT);
   4683 	if ((m->m_flags & M_EXT) == 0) {
   4684 		m_freem(m);
   4685 		return ENOBUFS;
   4686 	}
   4687 
   4688 	if (rxs->rxs_mbuf != NULL)
   4689 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4690 
   4691 	rxs->rxs_mbuf = m;
   4692 
   4693 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4694 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4695 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4696 	if (error) {
   4697 		/* XXX XXX XXX */
   4698 		aprint_error_dev(sc->sc_dev,
   4699 		    "unable to load rx DMA map %d, error = %d\n",
   4700 		    idx, error);
   4701 		panic("wm_add_rxbuf");
   4702 	}
   4703 
   4704 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4705 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4706 
   4707 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4708 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4709 			wm_init_rxdesc(rxq, idx);
   4710 	} else
   4711 		wm_init_rxdesc(rxq, idx);
   4712 
   4713 	return 0;
   4714 }
   4715 
   4716 /*
   4717  * wm_rxdrain:
   4718  *
   4719  *	Drain the receive queue.
   4720  */
   4721 static void
   4722 wm_rxdrain(struct wm_rxqueue *rxq)
   4723 {
   4724 	struct wm_softc *sc = rxq->rxq_sc;
   4725 	struct wm_rxsoft *rxs;
   4726 	int i;
   4727 
   4728 	KASSERT(mutex_owned(rxq->rxq_lock));
   4729 
   4730 	for (i = 0; i < WM_NRXDESC; i++) {
   4731 		rxs = &rxq->rxq_soft[i];
   4732 		if (rxs->rxs_mbuf != NULL) {
   4733 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4734 			m_freem(rxs->rxs_mbuf);
   4735 			rxs->rxs_mbuf = NULL;
   4736 		}
   4737 	}
   4738 }
   4739 
   4740 
   4741 /*
    4742  * XXX copied from FreeBSD's sys/net/rss_config.c
   4743  */
   4744 /*
   4745  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4746  * effectiveness may be limited by algorithm choice and available entropy
   4747  * during the boot.
   4748  *
   4749  * XXXRW: And that we don't randomize it yet!
   4750  *
   4751  * This is the default Microsoft RSS specification key which is also
   4752  * the Chelsio T5 firmware default key.
   4753  */
   4754 #define RSS_KEYSIZE 40
   4755 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4756 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4757 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4758 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4759 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4760 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4761 };
   4762 
   4763 /*
    4764  * Caller must pass an array of size sizeof(wm_rss_key).
    4765  *
    4766  * XXX
    4767  * Since if_ixgbe may also use this function, it should not be an
    4768  * if_wm-specific function.
   4769  */
   4770 static void
   4771 wm_rss_getkey(uint8_t *key)
   4772 {
   4773 
   4774 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4775 }
   4776 
   4777 /*
   4778  * Setup registers for RSS.
   4779  *
    4780  * XXX VMDq is not yet supported.
   4781  */
   4782 static void
   4783 wm_init_rss(struct wm_softc *sc)
   4784 {
   4785 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4786 	int i;
   4787 
   4788 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4789 
   4790 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4791 		int qid, reta_ent;
   4792 
   4793 		qid  = i % sc->sc_nqueues;
    4794 		switch (sc->sc_type) {
   4795 		case WM_T_82574:
   4796 			reta_ent = __SHIFTIN(qid,
   4797 			    RETA_ENT_QINDEX_MASK_82574);
   4798 			break;
   4799 		case WM_T_82575:
   4800 			reta_ent = __SHIFTIN(qid,
   4801 			    RETA_ENT_QINDEX1_MASK_82575);
   4802 			break;
   4803 		default:
   4804 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4805 			break;
   4806 		}
   4807 
   4808 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4809 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4810 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4811 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4812 	}
   4813 
   4814 	wm_rss_getkey((uint8_t *)rss_key);
   4815 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4816 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4817 
   4818 	if (sc->sc_type == WM_T_82574)
   4819 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4820 	else
   4821 		mrqc = MRQC_ENABLE_RSS_MQ;
   4822 
   4823 	/*
   4824 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
   4825 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
   4826 	 */
   4827 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4828 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4829 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4830 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4831 
   4832 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4833 }
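
         /*
          * A sketch of how the hardware uses the registers programmed above
          * (illustration only; the exact hash-bit width used to index the
          * table is controller-dependent):
          *
          *	hash  = Toeplitz(wm_rss_key, packet's IP/TCP/UDP tuple);
          *	queue = RETA[hash & (RETA_NUM_ENTRIES - 1)];
          *
          * The full 32-bit hash is typically also written back to the host
          * in the Rx descriptor.
          */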
   4834 
   4835 /*
    4836  * Adjust the TX and RX queue numbers which the system actually uses.
    4837  *
    4838  * The numbers are affected by the following parameters:
    4839  *     - The number of hardware queues
   4840  *     - The number of MSI-X vectors (= "nvectors" argument)
   4841  *     - ncpu
   4842  */
   4843 static void
   4844 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4845 {
   4846 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4847 
   4848 	if (nvectors < 2) {
   4849 		sc->sc_nqueues = 1;
   4850 		return;
   4851 	}
   4852 
    4853 	switch (sc->sc_type) {
   4854 	case WM_T_82572:
   4855 		hw_ntxqueues = 2;
   4856 		hw_nrxqueues = 2;
   4857 		break;
   4858 	case WM_T_82574:
   4859 		hw_ntxqueues = 2;
   4860 		hw_nrxqueues = 2;
   4861 		break;
   4862 	case WM_T_82575:
   4863 		hw_ntxqueues = 4;
   4864 		hw_nrxqueues = 4;
   4865 		break;
   4866 	case WM_T_82576:
   4867 		hw_ntxqueues = 16;
   4868 		hw_nrxqueues = 16;
   4869 		break;
   4870 	case WM_T_82580:
   4871 	case WM_T_I350:
   4872 	case WM_T_I354:
   4873 		hw_ntxqueues = 8;
   4874 		hw_nrxqueues = 8;
   4875 		break;
   4876 	case WM_T_I210:
   4877 		hw_ntxqueues = 4;
   4878 		hw_nrxqueues = 4;
   4879 		break;
   4880 	case WM_T_I211:
   4881 		hw_ntxqueues = 2;
   4882 		hw_nrxqueues = 2;
   4883 		break;
   4884 		/*
    4885 		 * Since the following Ethernet controllers do not support
    4886 		 * MSI-X, this driver does not use multiqueue on them:
   4887 		 *     - WM_T_80003
   4888 		 *     - WM_T_ICH8
   4889 		 *     - WM_T_ICH9
   4890 		 *     - WM_T_ICH10
   4891 		 *     - WM_T_PCH
   4892 		 *     - WM_T_PCH2
   4893 		 *     - WM_T_PCH_LPT
   4894 		 */
   4895 	default:
   4896 		hw_ntxqueues = 1;
   4897 		hw_nrxqueues = 1;
   4898 		break;
   4899 	}
   4900 
   4901 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4902 
   4903 	/*
    4904 	 * Since more queues than MSI-X vectors cannot improve scaling, limit
    4905 	 * the number of queues actually used; one vector serves the link.
   4906 	 */
   4907 	if (nvectors < hw_nqueues + 1) {
   4908 		sc->sc_nqueues = nvectors - 1;
   4909 	} else {
   4910 		sc->sc_nqueues = hw_nqueues;
   4911 	}
   4912 
   4913 	/*
    4914 	 * Likewise, since more queues than CPUs cannot improve scaling,
    4915 	 * limit the number of queues actually used to ncpu.
   4916 	 */
   4917 	if (ncpu < sc->sc_nqueues)
   4918 		sc->sc_nqueues = ncpu;
   4919 }
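
         /*
          * Example with hypothetical numbers: an 82576 (16 hardware queues)
          * given nvectors = 5 and ncpu = 8 yields hw_nqueues = 16, but only
          * nvectors - 1 = 4 vectors remain for Tx/Rx after reserving one
          * for the link interrupt, so sc_nqueues becomes 4.  On a 2-CPU
          * machine the ncpu clamp would further reduce it to 2.
          */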
   4920 
   4921 static inline bool
   4922 wm_is_using_msix(struct wm_softc *sc)
   4923 {
   4924 
   4925 	return (sc->sc_nintrs > 1);
   4926 }
   4927 
   4928 static inline bool
   4929 wm_is_using_multiqueue(struct wm_softc *sc)
   4930 {
   4931 
   4932 	return (sc->sc_nqueues > 1);
   4933 }
   4934 
   4935 static int
   4936 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
   4937 {
   4938 	struct wm_queue *wmq = &sc->sc_queue[qidx];
   4939 	wmq->wmq_id = qidx;
   4940 	wmq->wmq_intr_idx = intr_idx;
   4941 	wmq->wmq_si = softint_establish(SOFTINT_NET
   4942 #ifdef WM_MPSAFE
   4943 	    | SOFTINT_MPSAFE
   4944 #endif
   4945 	    , wm_handle_queue, wmq);
   4946 	if (wmq->wmq_si != NULL)
   4947 		return 0;
   4948 
   4949 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
   4950 	    wmq->wmq_id);
   4951 
   4952 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   4953 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4954 	return ENOMEM;
   4955 }
   4956 
   4957 /*
   4958  * Both single interrupt MSI and INTx can use this function.
   4959  */
   4960 static int
   4961 wm_setup_legacy(struct wm_softc *sc)
   4962 {
   4963 	pci_chipset_tag_t pc = sc->sc_pc;
   4964 	const char *intrstr = NULL;
   4965 	char intrbuf[PCI_INTRSTR_LEN];
   4966 	int error;
   4967 
   4968 	error = wm_alloc_txrx_queues(sc);
   4969 	if (error) {
   4970 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4971 		    error);
   4972 		return ENOMEM;
   4973 	}
   4974 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4975 	    sizeof(intrbuf));
   4976 #ifdef WM_MPSAFE
   4977 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4978 #endif
   4979 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4980 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4981 	if (sc->sc_ihs[0] == NULL) {
    4982 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4983 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4984 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4985 		return ENOMEM;
   4986 	}
   4987 
   4988 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4989 	sc->sc_nintrs = 1;
   4990 
   4991 	return wm_softint_establish(sc, 0, 0);
   4992 }
   4993 
   4994 static int
   4995 wm_setup_msix(struct wm_softc *sc)
   4996 {
   4997 	void *vih;
   4998 	kcpuset_t *affinity;
   4999 	int qidx, error, intr_idx, txrx_established;
   5000 	pci_chipset_tag_t pc = sc->sc_pc;
   5001 	const char *intrstr = NULL;
   5002 	char intrbuf[PCI_INTRSTR_LEN];
   5003 	char intr_xname[INTRDEVNAMEBUF];
   5004 
   5005 	if (sc->sc_nqueues < ncpu) {
   5006 		/*
   5007 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    5008 		 * interrupts starts at CPU#1.
   5009 		 */
   5010 		sc->sc_affinity_offset = 1;
   5011 	} else {
   5012 		/*
    5013 		 * In this case, this device uses all CPUs, so we align the
    5014 		 * affinity cpu_index with the MSI-X vector number for clarity.
   5015 		 */
   5016 		sc->sc_affinity_offset = 0;
   5017 	}
   5018 
   5019 	error = wm_alloc_txrx_queues(sc);
   5020 	if (error) {
   5021 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   5022 		    error);
   5023 		return ENOMEM;
   5024 	}
   5025 
   5026 	kcpuset_create(&affinity, false);
   5027 	intr_idx = 0;
   5028 
   5029 	/*
   5030 	 * TX and RX
   5031 	 */
   5032 	txrx_established = 0;
   5033 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5034 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5035 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   5036 
   5037 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5038 		    sizeof(intrbuf));
   5039 #ifdef WM_MPSAFE
   5040 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   5041 		    PCI_INTR_MPSAFE, true);
   5042 #endif
   5043 		memset(intr_xname, 0, sizeof(intr_xname));
   5044 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   5045 		    device_xname(sc->sc_dev), qidx);
   5046 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5047 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   5048 		if (vih == NULL) {
   5049 			aprint_error_dev(sc->sc_dev,
   5050 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   5051 			    intrstr ? " at " : "",
   5052 			    intrstr ? intrstr : "");
   5053 
   5054 			goto fail;
   5055 		}
   5056 		kcpuset_zero(affinity);
   5057 		/* Round-robin affinity */
   5058 		kcpuset_set(affinity, affinity_to);
   5059 		error = interrupt_distribute(vih, affinity, NULL);
   5060 		if (error == 0) {
   5061 			aprint_normal_dev(sc->sc_dev,
   5062 			    "for TX and RX interrupting at %s affinity to %u\n",
   5063 			    intrstr, affinity_to);
   5064 		} else {
   5065 			aprint_normal_dev(sc->sc_dev,
   5066 			    "for TX and RX interrupting at %s\n", intrstr);
   5067 		}
   5068 		sc->sc_ihs[intr_idx] = vih;
   5069 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
   5070 			goto fail;
   5071 		txrx_established++;
   5072 		intr_idx++;
   5073 	}
   5074 
   5075 	/*
   5076 	 * LINK
   5077 	 */
   5078 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   5079 	    sizeof(intrbuf));
   5080 #ifdef WM_MPSAFE
   5081 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   5082 #endif
   5083 	memset(intr_xname, 0, sizeof(intr_xname));
   5084 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   5085 	    device_xname(sc->sc_dev));
   5086 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   5087 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   5088 	if (vih == NULL) {
   5089 		aprint_error_dev(sc->sc_dev,
   5090 		    "unable to establish MSI-X(for LINK)%s%s\n",
   5091 		    intrstr ? " at " : "",
   5092 		    intrstr ? intrstr : "");
   5093 
   5094 		goto fail;
   5095 	}
    5096 	/* Keep the default affinity for the LINK interrupt */
   5097 	aprint_normal_dev(sc->sc_dev,
   5098 	    "for LINK interrupting at %s\n", intrstr);
   5099 	sc->sc_ihs[intr_idx] = vih;
   5100 	sc->sc_link_intr_idx = intr_idx;
   5101 
   5102 	sc->sc_nintrs = sc->sc_nqueues + 1;
   5103 	kcpuset_destroy(affinity);
   5104 	return 0;
   5105 
   5106  fail:
   5107 	for (qidx = 0; qidx < txrx_established; qidx++) {
   5108 		struct wm_queue *wmq = &sc->sc_queue[qidx];
    5109 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
   5110 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   5111 	}
   5112 
   5113 	kcpuset_destroy(affinity);
   5114 	return ENOMEM;
   5115 }
   5116 
   5117 static void
   5118 wm_turnon(struct wm_softc *sc)
   5119 {
   5120 	int i;
   5121 
   5122 	KASSERT(WM_CORE_LOCKED(sc));
   5123 
    5124 	/*
    5125 	 * The stopping flags must be cleared in ascending order.
    5126 	 */
    5127 	for (i = 0; i < sc->sc_nqueues; i++) {
   5128 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5129 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5130 
   5131 		mutex_enter(txq->txq_lock);
   5132 		txq->txq_stopping = false;
   5133 		mutex_exit(txq->txq_lock);
   5134 
   5135 		mutex_enter(rxq->rxq_lock);
   5136 		rxq->rxq_stopping = false;
   5137 		mutex_exit(rxq->rxq_lock);
   5138 	}
   5139 
   5140 	sc->sc_core_stopping = false;
   5141 }
   5142 
   5143 static void
   5144 wm_turnoff(struct wm_softc *sc)
   5145 {
   5146 	int i;
   5147 
   5148 	KASSERT(WM_CORE_LOCKED(sc));
   5149 
   5150 	sc->sc_core_stopping = true;
   5151 
    5152 	/*
    5153 	 * The stopping flags must be set in ascending order.
    5154 	 */
    5155 	for (i = 0; i < sc->sc_nqueues; i++) {
   5156 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5157 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5158 
   5159 		mutex_enter(rxq->rxq_lock);
   5160 		rxq->rxq_stopping = true;
   5161 		mutex_exit(rxq->rxq_lock);
   5162 
   5163 		mutex_enter(txq->txq_lock);
   5164 		txq->txq_stopping = true;
   5165 		mutex_exit(txq->txq_lock);
   5166 	}
   5167 }
   5168 
   5169 /*
    5170  * Write the interrupt interval value to the ITR or EITR register.
   5171  */
   5172 static void
   5173 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
   5174 {
   5175 
   5176 	if (!wmq->wmq_set_itr)
   5177 		return;
   5178 
   5179 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5180 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
   5181 
   5182 		/*
    5183 		 * The 82575 doesn't have the CNT_INGR field,
    5184 		 * so overwrite the counter field in software.
   5185 		 */
   5186 		if (sc->sc_type == WM_T_82575)
   5187 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
   5188 		else
   5189 			eitr |= EITR_CNT_INGR;
   5190 
   5191 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
   5192 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
   5193 		/*
    5194 		 * The 82574 has both ITR and EITR. Set EITR when we use
    5195 		 * the multiqueue feature with MSI-X.
   5196 		 */
   5197 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
   5198 			    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
   5199 	} else {
   5200 		KASSERT(wmq->wmq_id == 0);
   5201 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
   5202 	}
   5203 
   5204 	wmq->wmq_set_itr = false;
   5205 }
   5206 
   5207 /*
   5208  * TODO
    5209  * The dynamic ITR calculation below is almost the same as Linux igb's,
    5210  * but it does not fit wm(4) well, so AIM remains disabled until we
    5211  * find an appropriate ITR calculation.
   5212  */
   5213 /*
    5214  * Calculate the interrupt interval value for wm_itrs_writereg() to
    5215  * write out. This function does not write the ITR/EITR register itself.
   5216  */
   5217 static void
   5218 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
   5219 {
   5220 #ifdef NOTYET
   5221 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   5222 	struct wm_txqueue *txq = &wmq->wmq_txq;
   5223 	uint32_t avg_size = 0;
   5224 	uint32_t new_itr;
   5225 
   5226 	if (rxq->rxq_packets)
    5227 		avg_size = rxq->rxq_bytes / rxq->rxq_packets;
   5228 	if (txq->txq_packets)
   5229 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
   5230 
   5231 	if (avg_size == 0) {
   5232 		new_itr = 450; /* restore default value */
   5233 		goto out;
   5234 	}
   5235 
   5236 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
   5237 	avg_size += 24;
   5238 
   5239 	/* Don't starve jumbo frames */
   5240 	avg_size = min(avg_size, 3000);
   5241 
   5242 	/* Give a little boost to mid-size frames */
   5243 	if ((avg_size > 300) && (avg_size < 1200))
   5244 		new_itr = avg_size / 3;
   5245 	else
   5246 		new_itr = avg_size / 2;
   5247 
   5248 out:
   5249 	/*
    5250 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
   5251 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
   5252 	 */
   5253 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
   5254 		new_itr *= 4;
   5255 
   5256 	if (new_itr != wmq->wmq_itr) {
   5257 		wmq->wmq_itr = new_itr;
   5258 		wmq->wmq_set_itr = true;
   5259 	} else
   5260 		wmq->wmq_set_itr = false;
   5261 
   5262 	rxq->rxq_packets = 0;
   5263 	rxq->rxq_bytes = 0;
   5264 	txq->txq_packets = 0;
   5265 	txq->txq_bytes = 0;
   5266 #endif
   5267 }
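
         /*
          * Worked example of the disabled calculation above, with
          * hypothetical traffic: an average frame of 1000 bytes becomes
          * 1024 after the 24-byte CRC/preamble/gap adjustment; as a
          * mid-size frame (300..1200), new_itr = 1024 / 3 = 341, which is
          * then scaled by 4 to 1364 for the non-82575 ITR/EITR units.
          */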
   5268 
   5269 /*
   5270  * wm_init:		[ifnet interface function]
   5271  *
   5272  *	Initialize the interface.
   5273  */
   5274 static int
   5275 wm_init(struct ifnet *ifp)
   5276 {
   5277 	struct wm_softc *sc = ifp->if_softc;
   5278 	int ret;
   5279 
   5280 	WM_CORE_LOCK(sc);
   5281 	ret = wm_init_locked(ifp);
   5282 	WM_CORE_UNLOCK(sc);
   5283 
   5284 	return ret;
   5285 }
   5286 
   5287 static int
   5288 wm_init_locked(struct ifnet *ifp)
   5289 {
   5290 	struct wm_softc *sc = ifp->if_softc;
   5291 	int i, j, trynum, error = 0;
   5292 	uint32_t reg;
   5293 
   5294 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5295 		device_xname(sc->sc_dev), __func__));
   5296 	KASSERT(WM_CORE_LOCKED(sc));
   5297 
   5298 	/*
    5299 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    5300 	 * There is a small but measurable benefit to avoiding the adjustment
    5301 	 * of the descriptor so that the headers are aligned, for a normal MTU,
   5302 	 * on such platforms.  One possibility is that the DMA itself is
   5303 	 * slightly more efficient if the front of the entire packet (instead
   5304 	 * of the front of the headers) is aligned.
   5305 	 *
   5306 	 * Note we must always set align_tweak to 0 if we are using
   5307 	 * jumbo frames.
   5308 	 */
   5309 #ifdef __NO_STRICT_ALIGNMENT
   5310 	sc->sc_align_tweak = 0;
   5311 #else
   5312 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   5313 		sc->sc_align_tweak = 0;
   5314 	else
   5315 		sc->sc_align_tweak = 2;
   5316 #endif /* __NO_STRICT_ALIGNMENT */
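
         	/*
         	 * Why the tweak is 2: the 14-byte Ethernet header plus a
         	 * 2-byte offset ends at byte 16, so the IP header that follows
         	 * becomes 4-byte aligned, as strict-alignment platforms
         	 * require.  With jumbo frames the payload could overflow the
         	 * cluster (MCLBYTES - 2), hence the tweak is forced to 0 above.
         	 */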
   5317 
   5318 	/* Cancel any pending I/O. */
   5319 	wm_stop_locked(ifp, 0);
   5320 
   5321 	/* update statistics before reset */
   5322 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   5323 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   5324 
   5325 	/* PCH_SPT hardware workaround */
   5326 	if (sc->sc_type == WM_T_PCH_SPT)
   5327 		wm_flush_desc_rings(sc);
   5328 
   5329 	/* Reset the chip to a known state. */
   5330 	wm_reset(sc);
   5331 
   5332 	/*
   5333 	 * AMT based hardware can now take control from firmware
   5334 	 * Do this after reset.
   5335 	 */
   5336 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   5337 		wm_get_hw_control(sc);
   5338 
   5339 	if ((sc->sc_type == WM_T_PCH_SPT) &&
   5340 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
   5341 		wm_legacy_irq_quirk_spt(sc);
   5342 
   5343 	/* Init hardware bits */
   5344 	wm_initialize_hardware_bits(sc);
   5345 
   5346 	/* Reset the PHY. */
   5347 	if (sc->sc_flags & WM_F_HAS_MII)
   5348 		wm_gmii_reset(sc);
   5349 
   5350 	/* Calculate (E)ITR value */
   5351 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
   5352 		/*
    5353 		 * For NEWQUEUE controllers' EITR (except for the 82575).
    5354 		 * The 82575's EITR should be set to the same throttling value
    5355 		 * as the older controllers' ITR, because the interrupts/sec
    5356 		 * calculation is the same: 1,000,000,000 / (N * 256).
    5357 		 *
    5358 		 * The 82574's EITR should be set to the same value as its ITR.
    5359 		 *
    5360 		 * For N interrupts/sec, set this value to 1,000,000 / N,
    5361 		 * in contrast to the ITR throttling value.
   5362 		 */
   5363 		sc->sc_itr_init = 450;
   5364 	} else if (sc->sc_type >= WM_T_82543) {
   5365 		/*
   5366 		 * Set up the interrupt throttling register (units of 256ns)
   5367 		 * Note that a footnote in Intel's documentation says this
   5368 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   5369 		 * or 10Mbit mode.  Empirically, it appears to be the case
   5370 		 * that that is also true for the 1024ns units of the other
   5371 		 * interrupt-related timer registers -- so, really, we ought
   5372 		 * to divide this value by 4 when the link speed is low.
   5373 		 *
   5374 		 * XXX implement this division at link speed change!
   5375 		 */
   5376 
   5377 		/*
   5378 		 * For N interrupts/sec, set this value to:
   5379 		 * 1,000,000,000 / (N * 256).  Note that we set the
   5380 		 * absolute and packet timer values to this value
   5381 		 * divided by 4 to get "simple timer" behavior.
   5382 		 */
   5383 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
   5384 	}
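
         	/*
         	 * Worked check of the values above: with the legacy ITR
         	 * formula 1,000,000,000 / (N * 256), the initial value 1500
         	 * corresponds to 10^9 / (1500 * 256) ~= 2604 interrupts/sec,
         	 * matching the comment.  With the NEWQUEUE EITR formula
         	 * 1,000,000 / N, the initial value 450 corresponds to
         	 * roughly 2222 interrupts/sec.
         	 */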
   5385 
   5386 	error = wm_init_txrx_queues(sc);
   5387 	if (error)
   5388 		goto out;
   5389 
   5390 	/*
   5391 	 * Clear out the VLAN table -- we don't use it (yet).
   5392 	 */
   5393 	CSR_WRITE(sc, WMREG_VET, 0);
   5394 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   5395 		trynum = 10; /* Due to hw errata */
   5396 	else
   5397 		trynum = 1;
   5398 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   5399 		for (j = 0; j < trynum; j++)
   5400 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   5401 
   5402 	/*
   5403 	 * Set up flow-control parameters.
   5404 	 *
   5405 	 * XXX Values could probably stand some tuning.
   5406 	 */
   5407 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   5408 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   5409 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   5410 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   5411 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   5412 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   5413 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   5414 	}
   5415 
   5416 	sc->sc_fcrtl = FCRTL_DFLT;
   5417 	if (sc->sc_type < WM_T_82543) {
   5418 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   5419 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   5420 	} else {
   5421 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   5422 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   5423 	}
   5424 
   5425 	if (sc->sc_type == WM_T_80003)
   5426 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   5427 	else
   5428 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   5429 
   5430 	/* Writes the control register. */
   5431 	wm_set_vlan(sc);
   5432 
   5433 	if (sc->sc_flags & WM_F_HAS_MII) {
   5434 		uint16_t kmreg;
   5435 
   5436 		switch (sc->sc_type) {
   5437 		case WM_T_80003:
   5438 		case WM_T_ICH8:
   5439 		case WM_T_ICH9:
   5440 		case WM_T_ICH10:
   5441 		case WM_T_PCH:
   5442 		case WM_T_PCH2:
   5443 		case WM_T_PCH_LPT:
   5444 		case WM_T_PCH_SPT:
   5445 			/*
    5446 			 * Set the MAC to wait the maximum time between each
    5447 			 * iteration and increase the max iterations when
    5448 			 * polling the PHY; this fixes erroneous timeouts at
   5449 			 * 10Mbps.
   5450 			 */
   5451 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   5452 			    0xFFFF);
   5453 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5454 			    &kmreg);
   5455 			kmreg |= 0x3F;
   5456 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
   5457 			    kmreg);
   5458 			break;
   5459 		default:
   5460 			break;
   5461 		}
   5462 
   5463 		if (sc->sc_type == WM_T_80003) {
   5464 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5465 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
   5466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5467 
   5468 			/* Bypass RX and TX FIFO's */
   5469 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   5470 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   5471 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   5472 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   5473 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   5474 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   5475 		}
   5476 	}
   5477 #if 0
   5478 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   5479 #endif
   5480 
   5481 	/* Set up checksum offload parameters. */
   5482 	reg = CSR_READ(sc, WMREG_RXCSUM);
   5483 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   5484 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   5485 		reg |= RXCSUM_IPOFL;
   5486 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   5487 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   5488 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   5489 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   5490 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5491 
   5492 	/* Set registers about MSI-X */
   5493 	if (wm_is_using_msix(sc)) {
   5494 		uint32_t ivar;
   5495 		struct wm_queue *wmq;
   5496 		int qid, qintr_idx;
   5497 
   5498 		if (sc->sc_type == WM_T_82575) {
   5499 			/* Interrupt control */
   5500 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5501 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   5502 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5503 
   5504 			/* TX and RX */
   5505 			for (i = 0; i < sc->sc_nqueues; i++) {
   5506 				wmq = &sc->sc_queue[i];
   5507 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   5508 				    EITR_TX_QUEUE(wmq->wmq_id)
   5509 				    | EITR_RX_QUEUE(wmq->wmq_id));
   5510 			}
   5511 			/* Link status */
   5512 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   5513 			    EITR_OTHER);
   5514 		} else if (sc->sc_type == WM_T_82574) {
   5515 			/* Interrupt control */
   5516 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   5517 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   5518 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   5519 
   5520 			/*
    5521 			 * Work around an issue with spurious interrupts
    5522 			 * in MSI-X mode.
    5523 			 * At wm_initialize_hardware_bits(), sc_nintrs is not
    5524 			 * initialized yet, so re-initialize WMREG_RFCTL here.
   5525 			 */
   5526 			reg = CSR_READ(sc, WMREG_RFCTL);
   5527 			reg |= WMREG_RFCTL_ACKDIS;
   5528 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   5529 
   5530 			ivar = 0;
   5531 			/* TX and RX */
   5532 			for (i = 0; i < sc->sc_nqueues; i++) {
   5533 				wmq = &sc->sc_queue[i];
   5534 				qid = wmq->wmq_id;
   5535 				qintr_idx = wmq->wmq_intr_idx;
   5536 
   5537 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5538 				    IVAR_TX_MASK_Q_82574(qid));
   5539 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   5540 				    IVAR_RX_MASK_Q_82574(qid));
   5541 			}
   5542 			/* Link status */
   5543 			ivar |= __SHIFTIN((IVAR_VALID_82574
   5544 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   5545 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   5546 		} else {
   5547 			/* Interrupt control */
   5548 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   5549 			    | GPIE_EIAME | GPIE_PBA);
   5550 
   5551 			switch (sc->sc_type) {
   5552 			case WM_T_82580:
   5553 			case WM_T_I350:
   5554 			case WM_T_I354:
   5555 			case WM_T_I210:
   5556 			case WM_T_I211:
   5557 				/* TX and RX */
   5558 				for (i = 0; i < sc->sc_nqueues; i++) {
   5559 					wmq = &sc->sc_queue[i];
   5560 					qid = wmq->wmq_id;
   5561 					qintr_idx = wmq->wmq_intr_idx;
   5562 
   5563 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   5564 					ivar &= ~IVAR_TX_MASK_Q(qid);
   5565 					ivar |= __SHIFTIN((qintr_idx
   5566 						| IVAR_VALID),
   5567 					    IVAR_TX_MASK_Q(qid));
   5568 					ivar &= ~IVAR_RX_MASK_Q(qid);
   5569 					ivar |= __SHIFTIN((qintr_idx
   5570 						| IVAR_VALID),
   5571 					    IVAR_RX_MASK_Q(qid));
   5572 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   5573 				}
   5574 				break;
   5575 			case WM_T_82576:
   5576 				/* TX and RX */
   5577 				for (i = 0; i < sc->sc_nqueues; i++) {
   5578 					wmq = &sc->sc_queue[i];
   5579 					qid = wmq->wmq_id;
   5580 					qintr_idx = wmq->wmq_intr_idx;
   5581 
   5582 					ivar = CSR_READ(sc,
   5583 					    WMREG_IVAR_Q_82576(qid));
   5584 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   5585 					ivar |= __SHIFTIN((qintr_idx
   5586 						| IVAR_VALID),
   5587 					    IVAR_TX_MASK_Q_82576(qid));
   5588 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   5589 					ivar |= __SHIFTIN((qintr_idx
   5590 						| IVAR_VALID),
   5591 					    IVAR_RX_MASK_Q_82576(qid));
   5592 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   5593 					    ivar);
   5594 				}
   5595 				break;
   5596 			default:
   5597 				break;
   5598 			}
   5599 
   5600 			/* Link status */
   5601 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   5602 			    IVAR_MISC_OTHER);
   5603 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   5604 		}
   5605 
   5606 		if (wm_is_using_multiqueue(sc)) {
   5607 			wm_init_rss(sc);
   5608 
   5609 			/*
    5610 			 * NOTE: Receive Full-Packet Checksum Offload
    5611 			 * is mutually exclusive with multiqueue; however,
    5612 			 * this is not the same as TCP/IP checksums, which
    5613 			 * still work.
    5614 			 */
   5615 			reg = CSR_READ(sc, WMREG_RXCSUM);
   5616 			reg |= RXCSUM_PCSD;
   5617 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   5618 		}
   5619 	}
   5620 
   5621 	/* Set up the interrupt registers. */
   5622 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5623 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   5624 	    ICR_RXO | ICR_RXT0;
   5625 	if (wm_is_using_msix(sc)) {
   5626 		uint32_t mask;
   5627 		struct wm_queue *wmq;
   5628 
   5629 		switch (sc->sc_type) {
   5630 		case WM_T_82574:
   5631 			mask = 0;
   5632 			for (i = 0; i < sc->sc_nqueues; i++) {
   5633 				wmq = &sc->sc_queue[i];
   5634 				mask |= ICR_TXQ(wmq->wmq_id);
   5635 				mask |= ICR_RXQ(wmq->wmq_id);
   5636 			}
   5637 			mask |= ICR_OTHER;
   5638 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
   5639 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
   5640 			break;
   5641 		default:
   5642 			if (sc->sc_type == WM_T_82575) {
   5643 				mask = 0;
   5644 				for (i = 0; i < sc->sc_nqueues; i++) {
   5645 					wmq = &sc->sc_queue[i];
   5646 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   5647 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   5648 				}
   5649 				mask |= EITR_OTHER;
   5650 			} else {
   5651 				mask = 0;
   5652 				for (i = 0; i < sc->sc_nqueues; i++) {
   5653 					wmq = &sc->sc_queue[i];
   5654 					mask |= 1 << wmq->wmq_intr_idx;
   5655 				}
   5656 				mask |= 1 << sc->sc_link_intr_idx;
   5657 			}
   5658 			CSR_WRITE(sc, WMREG_EIAC, mask);
   5659 			CSR_WRITE(sc, WMREG_EIAM, mask);
   5660 			CSR_WRITE(sc, WMREG_EIMS, mask);
   5661 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   5662 			break;
   5663 		}
   5664 	} else
   5665 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   5666 
   5667 	/* Set up the inter-packet gap. */
   5668 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   5669 
   5670 	if (sc->sc_type >= WM_T_82543) {
   5671 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5672 			struct wm_queue *wmq = &sc->sc_queue[qidx];
   5673 			wm_itrs_writereg(sc, wmq);
   5674 		}
   5675 		/*
    5676 		 * Link interrupts occur much less frequently than TX
    5677 		 * and RX interrupts, so we don't tune the
    5678 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way FreeBSD's
    5679 		 * if_igb does.
   5680 		 */
   5681 	}
   5682 
   5683 	/* Set the VLAN ethernetype. */
   5684 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5685 
   5686 	/*
   5687 	 * Set up the transmit control register; we start out with
    5688 	 * a collision distance suitable for FDX, but update it when
   5689 	 * we resolve the media type.
   5690 	 */
   5691 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5692 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5693 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5694 	if (sc->sc_type >= WM_T_82571)
   5695 		sc->sc_tctl |= TCTL_MULR;
   5696 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5697 
   5698 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    5699 		/* Write TDT after TCTL.EN is set. See the documentation. */
   5700 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5701 	}
   5702 
   5703 	if (sc->sc_type == WM_T_80003) {
   5704 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5705 		reg &= ~TCTL_EXT_GCEX_MASK;
   5706 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5707 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5708 	}
   5709 
   5710 	/* Set the media. */
   5711 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5712 		goto out;
   5713 
   5714 	/* Configure for OS presence */
   5715 	wm_init_manageability(sc);
   5716 
   5717 	/*
   5718 	 * Set up the receive control register; we actually program
   5719 	 * the register when we set the receive filter.  Use multicast
   5720 	 * address offset type 0.
   5721 	 *
   5722 	 * Only the i82544 has the ability to strip the incoming
   5723 	 * CRC, so we don't enable that feature.
   5724 	 */
   5725 	sc->sc_mchash_type = 0;
   5726 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5727 	    | RCTL_MO(sc->sc_mchash_type);
   5728 
   5729 	/*
    5730 	 * The 82574 uses the one-buffer extended Rx descriptor.
   5731 	 */
   5732 	if (sc->sc_type == WM_T_82574)
   5733 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
   5734 
   5735 	/*
   5736 	 * The I350 has a bug where it always strips the CRC whether
    5737 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5738 	 */
   5739 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5740 	    || (sc->sc_type == WM_T_I210))
   5741 		sc->sc_rctl |= RCTL_SECRC;
   5742 
   5743 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5744 	    && (ifp->if_mtu > ETHERMTU)) {
   5745 		sc->sc_rctl |= RCTL_LPE;
   5746 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5747 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5748 	}
   5749 
   5750 	if (MCLBYTES == 2048) {
   5751 		sc->sc_rctl |= RCTL_2k;
   5752 	} else {
   5753 		if (sc->sc_type >= WM_T_82543) {
   5754 			switch (MCLBYTES) {
   5755 			case 4096:
   5756 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5757 				break;
   5758 			case 8192:
   5759 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5760 				break;
   5761 			case 16384:
   5762 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5763 				break;
   5764 			default:
   5765 				panic("wm_init: MCLBYTES %d unsupported",
   5766 				    MCLBYTES);
   5767 				break;
   5768 			}
   5769 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   5770 	}
   5771 
   5772 	/* Enable ECC */
   5773 	switch (sc->sc_type) {
   5774 	case WM_T_82571:
   5775 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5776 		reg |= PBA_ECC_CORR_EN;
   5777 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5778 		break;
   5779 	case WM_T_PCH_LPT:
   5780 	case WM_T_PCH_SPT:
   5781 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5782 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5783 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5784 
   5785 		sc->sc_ctrl |= CTRL_MEHE;
   5786 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   5787 		break;
   5788 	default:
   5789 		break;
   5790 	}
   5791 
    5792 	/* On 82575 and later, set RDT only if RX is enabled */
   5793 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5794 		int qidx;
   5795 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5796 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5797 			for (i = 0; i < WM_NRXDESC; i++) {
   5798 				mutex_enter(rxq->rxq_lock);
   5799 				wm_init_rxdesc(rxq, i);
   5800 				mutex_exit(rxq->rxq_lock);
   5801 
   5802 			}
   5803 		}
   5804 	}
   5805 
   5806 	/* Set the receive filter. */
   5807 	wm_set_filter(sc);
   5808 
   5809 	wm_turnon(sc);
   5810 
   5811 	/* Start the one second link check clock. */
   5812 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5813 
   5814 	/* ...all done! */
   5815 	ifp->if_flags |= IFF_RUNNING;
   5816 	ifp->if_flags &= ~IFF_OACTIVE;
   5817 
   5818  out:
   5819 	sc->sc_if_flags = ifp->if_flags;
   5820 	if (error)
   5821 		log(LOG_ERR, "%s: interface not running\n",
   5822 		    device_xname(sc->sc_dev));
   5823 	return error;
   5824 }
   5825 
   5826 /*
   5827  * wm_stop:		[ifnet interface function]
   5828  *
   5829  *	Stop transmission on the interface.
   5830  */
   5831 static void
   5832 wm_stop(struct ifnet *ifp, int disable)
   5833 {
   5834 	struct wm_softc *sc = ifp->if_softc;
   5835 
   5836 	WM_CORE_LOCK(sc);
   5837 	wm_stop_locked(ifp, disable);
   5838 	WM_CORE_UNLOCK(sc);
   5839 }
   5840 
   5841 static void
   5842 wm_stop_locked(struct ifnet *ifp, int disable)
   5843 {
   5844 	struct wm_softc *sc = ifp->if_softc;
   5845 	struct wm_txsoft *txs;
   5846 	int i, qidx;
   5847 
   5848 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5849 		device_xname(sc->sc_dev), __func__));
   5850 	KASSERT(WM_CORE_LOCKED(sc));
   5851 
   5852 	wm_turnoff(sc);
   5853 
   5854 	/* Stop the one second clock. */
   5855 	callout_stop(&sc->sc_tick_ch);
   5856 
   5857 	/* Stop the 82547 Tx FIFO stall check timer. */
   5858 	if (sc->sc_type == WM_T_82547)
   5859 		callout_stop(&sc->sc_txfifo_ch);
   5860 
   5861 	if (sc->sc_flags & WM_F_HAS_MII) {
   5862 		/* Down the MII. */
   5863 		mii_down(&sc->sc_mii);
   5864 	} else {
   5865 #if 0
   5866 		/* Should we clear PHY's status properly? */
   5867 		wm_reset(sc);
   5868 #endif
   5869 	}
   5870 
   5871 	/* Stop the transmit and receive processes. */
   5872 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5873 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5874 	sc->sc_rctl &= ~RCTL_EN;
   5875 
   5876 	/*
   5877 	 * Clear the interrupt mask to ensure the device cannot assert its
   5878 	 * interrupt line.
   5879 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5880 	 * service any currently pending or shared interrupt.
   5881 	 */
   5882 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5883 	sc->sc_icr = 0;
   5884 	if (wm_is_using_msix(sc)) {
   5885 		if (sc->sc_type != WM_T_82574) {
   5886 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5887 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5888 		} else
   5889 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5890 	}
   5891 
   5892 	/* Release any queued transmit buffers. */
   5893 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5894 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5895 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5896 		mutex_enter(txq->txq_lock);
   5897 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5898 			txs = &txq->txq_soft[i];
   5899 			if (txs->txs_mbuf != NULL) {
    5900 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5901 				m_freem(txs->txs_mbuf);
   5902 				txs->txs_mbuf = NULL;
   5903 			}
   5904 		}
   5905 		mutex_exit(txq->txq_lock);
   5906 	}
   5907 
   5908 	/* Mark the interface as down and cancel the watchdog timer. */
   5909 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5910 	ifp->if_timer = 0;
   5911 
   5912 	if (disable) {
   5913 		for (i = 0; i < sc->sc_nqueues; i++) {
   5914 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5915 			mutex_enter(rxq->rxq_lock);
   5916 			wm_rxdrain(rxq);
   5917 			mutex_exit(rxq->rxq_lock);
   5918 		}
   5919 	}
   5920 
   5921 #if 0 /* notyet */
   5922 	if (sc->sc_type >= WM_T_82544)
   5923 		CSR_WRITE(sc, WMREG_WUC, 0);
   5924 #endif
   5925 }
   5926 
   5927 static void
   5928 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5929 {
   5930 	struct mbuf *m;
   5931 	int i;
   5932 
   5933 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5934 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5935 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5936 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5937 		    m->m_data, m->m_len, m->m_flags);
   5938 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5939 	    i, i == 1 ? "" : "s");
   5940 }
   5941 
   5942 /*
   5943  * wm_82547_txfifo_stall:
   5944  *
   5945  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5946  *	reset the FIFO pointers, and restart packet transmission.
   5947  */
   5948 static void
   5949 wm_82547_txfifo_stall(void *arg)
   5950 {
   5951 	struct wm_softc *sc = arg;
   5952 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5953 
   5954 	mutex_enter(txq->txq_lock);
   5955 
   5956 	if (txq->txq_stopping)
   5957 		goto out;
   5958 
   5959 	if (txq->txq_fifo_stall) {
   5960 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5961 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5962 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5963 			/*
   5964 			 * Packets have drained.  Stop transmitter, reset
   5965 			 * FIFO pointers, restart transmitter, and kick
   5966 			 * the packet queue.
   5967 			 */
   5968 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5969 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5970 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5971 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5972 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5973 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5974 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5975 			CSR_WRITE_FLUSH(sc);
   5976 
   5977 			txq->txq_fifo_head = 0;
   5978 			txq->txq_fifo_stall = 0;
   5979 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5980 		} else {
   5981 			/*
   5982 			 * Still waiting for packets to drain; try again in
   5983 			 * another tick.
   5984 			 */
   5985 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5986 		}
   5987 	}
   5988 
   5989 out:
   5990 	mutex_exit(txq->txq_lock);
   5991 }
   5992 
   5993 /*
   5994  * wm_82547_txfifo_bugchk:
   5995  *
   5996  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5997  *	prevent enqueueing a packet that would wrap around the end
    5998  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5999  *
   6000  *	We do this by checking the amount of space before the end
   6001  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   6002  *	the Tx FIFO, wait for all remaining packets to drain, reset
   6003  *	the internal FIFO pointers to the beginning, and restart
   6004  *	transmission on the interface.
   6005  */
   6006 #define	WM_FIFO_HDR		0x10
   6007 #define	WM_82547_PAD_LEN	0x3e0
   6008 static int
   6009 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   6010 {
   6011 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6012 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   6013 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   6014 
   6015 	/* Just return if already stalled. */
   6016 	if (txq->txq_fifo_stall)
   6017 		return 1;
   6018 
   6019 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   6020 		/* Stall only occurs in half-duplex mode. */
   6021 		goto send_packet;
   6022 	}
   6023 
   6024 	if (len >= WM_82547_PAD_LEN + space) {
   6025 		txq->txq_fifo_stall = 1;
   6026 		callout_schedule(&sc->sc_txfifo_ch, 1);
   6027 		return 1;
   6028 	}
   6029 
   6030  send_packet:
   6031 	txq->txq_fifo_head += len;
   6032 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   6033 		txq->txq_fifo_head -= txq->txq_fifo_size;
   6034 
   6035 	return 0;
   6036 }
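
         /*
          * Worked example with hypothetical sizes: assume an 8 KB (0x2000)
          * FIFO with txq_fifo_head at 0x1e00, so space = 0x200.  A 1000-byte
          * packet gives len = roundup(1000 + 0x10, 0x10) = 0x400; since
          * 0x400 < WM_82547_PAD_LEN + space (0x3e0 + 0x200 = 0x5e0), the
          * packet is sent and the head wraps to 0x1e00 + 0x400 - 0x2000 =
          * 0x200.  A packet with len >= 0x5e0 would instead stall the FIFO
          * and schedule wm_82547_txfifo_stall().
          */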
   6037 
   6038 static int
   6039 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6040 {
   6041 	int error;
   6042 
   6043 	/*
   6044 	 * Allocate the control data structures, and create and load the
   6045 	 * DMA map for it.
   6046 	 *
   6047 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6048 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6049 	 * both sets within the same 4G segment.
   6050 	 */
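         	/*
         	 * The 4G constraint is enforced by the 0x100000000ULL
         	 * "boundary" argument to bus_dmamem_alloc() below: bus_dma(9)
         	 * guarantees the allocated segment will not cross a boundary
         	 * of that size.
         	 */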
   6051 	if (sc->sc_type < WM_T_82544)
   6052 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   6053 	else
   6054 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   6055 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6056 		txq->txq_descsize = sizeof(nq_txdesc_t);
   6057 	else
   6058 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   6059 
   6060 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   6061 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   6062 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   6063 		aprint_error_dev(sc->sc_dev,
   6064 		    "unable to allocate TX control data, error = %d\n",
   6065 		    error);
   6066 		goto fail_0;
   6067 	}
   6068 
   6069 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   6070 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   6071 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6072 		aprint_error_dev(sc->sc_dev,
   6073 		    "unable to map TX control data, error = %d\n", error);
   6074 		goto fail_1;
   6075 	}
   6076 
   6077 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   6078 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   6079 		aprint_error_dev(sc->sc_dev,
   6080 		    "unable to create TX control data DMA map, error = %d\n",
   6081 		    error);
   6082 		goto fail_2;
   6083 	}
   6084 
   6085 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   6086 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   6087 		aprint_error_dev(sc->sc_dev,
   6088 		    "unable to load TX control data DMA map, error = %d\n",
   6089 		    error);
   6090 		goto fail_3;
   6091 	}
   6092 
   6093 	return 0;
   6094 
   6095  fail_3:
   6096 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6097  fail_2:
   6098 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6099 	    WM_TXDESCS_SIZE(txq));
   6100  fail_1:
   6101 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6102  fail_0:
   6103 	return error;
   6104 }
   6105 
   6106 static void
   6107 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   6108 {
   6109 
   6110 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   6111 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   6112 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   6113 	    WM_TXDESCS_SIZE(txq));
   6114 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   6115 }
   6116 
   6117 static int
   6118 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6119 {
   6120 	int error;
   6121 	size_t rxq_descs_size;
   6122 
   6123 	/*
   6124 	 * Allocate the control data structures, and create and load the
   6125 	 * DMA map for it.
   6126 	 *
   6127 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   6128 	 * memory.  So must Rx descriptors.  We simplify by allocating
   6129 	 * both sets within the same 4G segment.
   6130 	 */
   6131 	rxq->rxq_ndesc = WM_NRXDESC;
   6132 	if (sc->sc_type == WM_T_82574)
   6133 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
   6134 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   6135 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
   6136 	else
   6137 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
   6138 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
   6139 
   6140 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
   6141 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   6142 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   6143 		aprint_error_dev(sc->sc_dev,
   6144 		    "unable to allocate RX control data, error = %d\n",
   6145 		    error);
   6146 		goto fail_0;
   6147 	}
   6148 
   6149 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   6150 		    rxq->rxq_desc_rseg, rxq_descs_size,
   6151 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
   6152 		aprint_error_dev(sc->sc_dev,
   6153 		    "unable to map RX control data, error = %d\n", error);
   6154 		goto fail_1;
   6155 	}
   6156 
   6157 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
   6158 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   6159 		aprint_error_dev(sc->sc_dev,
   6160 		    "unable to create RX control data DMA map, error = %d\n",
   6161 		    error);
   6162 		goto fail_2;
   6163 	}
   6164 
   6165 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   6166 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
   6167 		aprint_error_dev(sc->sc_dev,
   6168 		    "unable to load RX control data DMA map, error = %d\n",
   6169 		    error);
   6170 		goto fail_3;
   6171 	}
   6172 
   6173 	return 0;
   6174 
   6175  fail_3:
   6176 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6177  fail_2:
   6178 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6179 	    rxq_descs_size);
   6180  fail_1:
   6181 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6182  fail_0:
   6183 	return error;
   6184 }
   6185 
   6186 static void
   6187 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6188 {
   6189 
   6190 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6191 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   6192 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
   6193 	    rxq->rxq_descsize * rxq->rxq_ndesc);
   6194 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   6195 }
   6196 
   6197 
   6198 static int
   6199 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6200 {
   6201 	int i, error;
   6202 
   6203 	/* Create the transmit buffer DMA maps. */
   6204 	WM_TXQUEUELEN(txq) =
   6205 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   6206 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   6207 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6208 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   6209 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   6210 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   6211 			aprint_error_dev(sc->sc_dev,
   6212 			    "unable to create Tx DMA map %d, error = %d\n",
   6213 			    i, error);
   6214 			goto fail;
   6215 		}
   6216 	}
   6217 
   6218 	return 0;
   6219 
   6220  fail:
   6221 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6222 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6223 			bus_dmamap_destroy(sc->sc_dmat,
   6224 			    txq->txq_soft[i].txs_dmamap);
   6225 	}
   6226 	return error;
   6227 }
   6228 
   6229 static void
   6230 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   6231 {
   6232 	int i;
   6233 
   6234 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   6235 		if (txq->txq_soft[i].txs_dmamap != NULL)
   6236 			bus_dmamap_destroy(sc->sc_dmat,
   6237 			    txq->txq_soft[i].txs_dmamap);
   6238 	}
   6239 }
   6240 
   6241 static int
   6242 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6243 {
   6244 	int i, error;
   6245 
   6246 	/* Create the receive buffer DMA maps. */
   6247 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6248 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   6249 			    MCLBYTES, 0, 0,
   6250 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   6251 			aprint_error_dev(sc->sc_dev,
   6252 			    "unable to create Rx DMA map %d error = %d\n",
   6253 			    i, error);
   6254 			goto fail;
   6255 		}
   6256 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   6257 	}
   6258 
   6259 	return 0;
   6260 
   6261  fail:
   6262 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6263 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6264 			bus_dmamap_destroy(sc->sc_dmat,
   6265 			    rxq->rxq_soft[i].rxs_dmamap);
   6266 	}
   6267 	return error;
   6268 }
   6269 
   6270 static void
   6271 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6272 {
   6273 	int i;
   6274 
   6275 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6276 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   6277 			bus_dmamap_destroy(sc->sc_dmat,
   6278 			    rxq->rxq_soft[i].rxs_dmamap);
   6279 	}
   6280 }
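
        /*
         * Unlike the descriptor rings, the per-slot RX maps created above
         * are single-segment, MCLBYTES-sized maps with no memory behind
         * them yet: the mbuf cluster for each slot is allocated and loaded
         * later by wm_add_rxbuf(), called from wm_init_rx_buffer().
         */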
   6281 
   6282 /*
   6283  * wm_alloc_txrx_queues:
   6284  *	Allocate {tx,rx} descriptors and {tx,rx} buffers.
   6285  */
   6286 static int
   6287 wm_alloc_txrx_queues(struct wm_softc *sc)
   6288 {
   6289 	int i, error, tx_done, rx_done;
   6290 
   6291 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   6292 	    KM_SLEEP);
   6293 	if (sc->sc_queue == NULL) {
   6294 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   6295 		error = ENOMEM;
   6296 		goto fail_0;
   6297 	}
   6298 
   6299 	/*
   6300 	 * For transmission
   6301 	 */
   6302 	error = 0;
   6303 	tx_done = 0;
   6304 	for (i = 0; i < sc->sc_nqueues; i++) {
   6305 #ifdef WM_EVENT_COUNTERS
   6306 		int j;
   6307 		const char *xname;
   6308 #endif
   6309 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6310 		txq->txq_sc = sc;
   6311 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6312 
   6313 		error = wm_alloc_tx_descs(sc, txq);
   6314 		if (error)
   6315 			break;
   6316 		error = wm_alloc_tx_buffer(sc, txq);
   6317 		if (error) {
   6318 			wm_free_tx_descs(sc, txq);
   6319 			break;
   6320 		}
   6321 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   6322 		if (txq->txq_interq == NULL) {
   6323 			wm_free_tx_descs(sc, txq);
   6324 			wm_free_tx_buffer(sc, txq);
   6325 			error = ENOMEM;
   6326 			break;
   6327 		}
   6328 
   6329 #ifdef WM_EVENT_COUNTERS
   6330 		xname = device_xname(sc->sc_dev);
   6331 
   6332 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   6333 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   6334 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   6335 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   6336 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   6337 
   6338 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   6339 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   6340 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   6341 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   6342 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   6343 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   6344 
   6345 		for (j = 0; j < WM_NTXSEGS; j++) {
   6346 			snprintf(txq->txq_txseg_evcnt_names[j],
   6347 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
   6348 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
   6349 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
   6350 		}
   6351 
   6352 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   6353 
   6354 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   6355 #endif /* WM_EVENT_COUNTERS */
   6356 
   6357 		tx_done++;
   6358 	}
   6359 	if (error)
   6360 		goto fail_1;
   6361 
   6362 	/*
   6363 	 * For receive
   6364 	 */
   6365 	error = 0;
   6366 	rx_done = 0;
   6367 	for (i = 0; i < sc->sc_nqueues; i++) {
   6368 #ifdef WM_EVENT_COUNTERS
   6369 		const char *xname;
   6370 #endif
   6371 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6372 		rxq->rxq_sc = sc;
   6373 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   6374 
   6375 		error = wm_alloc_rx_descs(sc, rxq);
   6376 		if (error)
   6377 			break;
   6378 
   6379 		error = wm_alloc_rx_buffer(sc, rxq);
   6380 		if (error) {
   6381 			wm_free_rx_descs(sc, rxq);
   6382 			break;
   6383 		}
   6384 
   6385 #ifdef WM_EVENT_COUNTERS
   6386 		xname = device_xname(sc->sc_dev);
   6387 
   6388 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   6389 
   6390 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   6391 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   6392 #endif /* WM_EVENT_COUNTERS */
   6393 
   6394 		rx_done++;
   6395 	}
   6396 	if (error)
   6397 		goto fail_2;
   6398 
   6399 	return 0;
   6400 
   6401  fail_2:
   6402 	for (i = 0; i < rx_done; i++) {
   6403 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6404 		wm_free_rx_buffer(sc, rxq);
   6405 		wm_free_rx_descs(sc, rxq);
   6406 		if (rxq->rxq_lock)
   6407 			mutex_obj_free(rxq->rxq_lock);
   6408 	}
   6409  fail_1:
   6410 	for (i = 0; i < tx_done; i++) {
   6411 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6412 		pcq_destroy(txq->txq_interq);
   6413 		wm_free_tx_buffer(sc, txq);
   6414 		wm_free_tx_descs(sc, txq);
   6415 		if (txq->txq_lock)
   6416 			mutex_obj_free(txq->txq_lock);
   6417 	}
   6418 
   6419 	kmem_free(sc->sc_queue,
   6420 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   6421  fail_0:
   6422 	return error;
   6423 }
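
        /*
         * The unwind above relies on the tx_done/rx_done counters: each loop
         * only increments its counter once a queue is fully set up, so on
         * failure just the [0, done) entries are torn down.  For example, if
         * wm_alloc_rx_descs() fails for queue 2, rx_done is 2 and fail_2
         * frees the RX state of queues 0 and 1 only, after which fail_1
         * frees the TX state of all tx_done queues.
         */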
   6424 
   6425 /*
   6426  * wm_free_txrx_queues:
   6427  *	Free {tx,rx} descriptors and {tx,rx} buffers.
   6428  */
   6429 static void
   6430 wm_free_txrx_queues(struct wm_softc *sc)
   6431 {
   6432 	int i;
   6433 
   6434 	for (i = 0; i < sc->sc_nqueues; i++) {
   6435 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   6436 
   6437 #ifdef WM_EVENT_COUNTERS
   6438 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
   6439 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
   6440 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
   6441 #endif /* WM_EVENT_COUNTERS */
   6442 
   6443 		wm_free_rx_buffer(sc, rxq);
   6444 		wm_free_rx_descs(sc, rxq);
   6445 		if (rxq->rxq_lock)
   6446 			mutex_obj_free(rxq->rxq_lock);
   6447 	}
   6448 
   6449 	for (i = 0; i < sc->sc_nqueues; i++) {
   6450 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   6451 		struct mbuf *m;
   6452 #ifdef WM_EVENT_COUNTERS
   6453 		int j;
   6454 
   6455 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
   6456 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
   6457 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
   6458 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
   6459 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
   6460 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
   6461 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
   6462 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
   6463 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
   6464 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
   6465 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
   6466 
   6467 		for (j = 0; j < WM_NTXSEGS; j++)
   6468 			evcnt_detach(&txq->txq_ev_txseg[j]);
   6469 
   6470 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
   6471 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
   6472 #endif /* WM_EVENT_COUNTERS */
   6473 
   6474 		/* drain txq_interq */
   6475 		while ((m = pcq_get(txq->txq_interq)) != NULL)
   6476 			m_freem(m);
   6477 		pcq_destroy(txq->txq_interq);
   6478 
   6479 		wm_free_tx_buffer(sc, txq);
   6480 		wm_free_tx_descs(sc, txq);
   6481 		if (txq->txq_lock)
   6482 			mutex_obj_free(txq->txq_lock);
   6483 	}
   6484 
   6485 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   6486 }
   6487 
   6488 static void
   6489 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6490 {
   6491 
   6492 	KASSERT(mutex_owned(txq->txq_lock));
   6493 
   6494 	/* Initialize the transmit descriptor ring. */
   6495 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   6496 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   6497 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6498 	txq->txq_free = WM_NTXDESC(txq);
   6499 	txq->txq_next = 0;
   6500 }
   6501 
   6502 static void
   6503 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6504     struct wm_txqueue *txq)
   6505 {
   6506 
   6507 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6508 		device_xname(sc->sc_dev), __func__));
   6509 	KASSERT(mutex_owned(txq->txq_lock));
   6510 
   6511 	if (sc->sc_type < WM_T_82543) {
   6512 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   6513 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   6514 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   6515 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   6516 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   6517 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   6518 	} else {
   6519 		int qid = wmq->wmq_id;
   6520 
   6521 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   6522 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   6523 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   6524 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   6525 
   6526 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6527 			/*
   6528 			 * Don't write TDT before TCTL.EN is set.
   6529 			 * See the datasheet.
   6530 			 */
   6531 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   6532 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   6533 			    | TXDCTL_WTHRESH(0));
   6534 		} else {
   6535 			/* XXX should update with AIM? */
   6536 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
   6537 			if (sc->sc_type >= WM_T_82540) {
   6538 				/* Should be the same value as TIDV */
   6539 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
   6540 			}
   6541 
   6542 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   6543 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   6544 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   6545 		}
   6546 	}
   6547 }
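
        /*
         * A quick map of the registers programmed above: TDBAL/TDBAH take
         * the low/high 32 bits of the ring's bus address, TDLEN its size in
         * bytes, and TDH/TDT the head and tail indices.  TDH == TDT means
         * the ring is empty; software advances TDT as it appends descriptors
         * and the chip advances TDH as it consumes them, so with TDH = 10
         * and TDT = 14, for example, descriptors 10..13 are owned by the
         * hardware.
         */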
   6548 
   6549 static void
   6550 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   6551 {
   6552 	int i;
   6553 
   6554 	KASSERT(mutex_owned(txq->txq_lock));
   6555 
   6556 	/* Initialize the transmit job descriptors. */
   6557 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   6558 		txq->txq_soft[i].txs_mbuf = NULL;
   6559 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   6560 	txq->txq_snext = 0;
   6561 	txq->txq_sdirty = 0;
   6562 }
   6563 
   6564 static void
   6565 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6566     struct wm_txqueue *txq)
   6567 {
   6568 
   6569 	KASSERT(mutex_owned(txq->txq_lock));
   6570 
   6571 	/*
   6572 	 * Set up some register offsets that are different between
   6573 	 * the i82542 and the i82543 and later chips.
   6574 	 */
   6575 	if (sc->sc_type < WM_T_82543)
   6576 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   6577 	else
   6578 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   6579 
   6580 	wm_init_tx_descs(sc, txq);
   6581 	wm_init_tx_regs(sc, wmq, txq);
   6582 	wm_init_tx_buffer(sc, txq);
   6583 }
   6584 
   6585 static void
   6586 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   6587     struct wm_rxqueue *rxq)
   6588 {
   6589 
   6590 	KASSERT(mutex_owned(rxq->rxq_lock));
   6591 
   6592 	/*
   6593 	 * Initialize the receive descriptor and receive job
   6594 	 * descriptor rings.
   6595 	 */
   6596 	if (sc->sc_type < WM_T_82543) {
   6597 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   6598 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   6599 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   6600 		    rxq->rxq_descsize * rxq->rxq_ndesc);
   6601 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   6602 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   6603 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   6604 
   6605 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   6606 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   6607 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   6608 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   6609 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   6610 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   6611 	} else {
   6612 		int qid = wmq->wmq_id;
   6613 
   6614 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   6615 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   6616 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_descsize * rxq->rxq_ndesc);
   6617 
   6618 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   6619 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
   6620 				panic("%s: MCLBYTES %d unsupported for "
   6621 				    "82575 or higher\n", __func__, MCLBYTES);
   6622 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
   6623 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
   6624 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   6625 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   6626 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   6627 			    | RXDCTL_WTHRESH(1));
   6628 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6629 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6630 		} else {
   6631 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   6632 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   6633 			/* XXX should update with AIM? */
   6634 			CSR_WRITE(sc, WMREG_RDTR, (wmq->wmq_itr / 4) | RDTR_FPD);
   6635 			/* MUST be the same value as RDTR */
   6636 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
   6637 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   6638 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   6639 		}
   6640 	}
   6641 }
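
        /*
         * SRRCTL expects the packet buffer size in 1 KB units (hence
         * SRRCTL_BSIZEPKT_SHIFT), which is what the panic guard above
         * checks MCLBYTES against.  Assuming the usual MCLBYTES of 2048
         * and a shift of 10:
         *
         *	MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT == 2048 >> 10 == 2
         *
         * i.e. the chip is told that each RX buffer holds 2 KB.
         */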
   6642 
   6643 static int
   6644 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   6645 {
   6646 	struct wm_rxsoft *rxs;
   6647 	int error, i;
   6648 
   6649 	KASSERT(mutex_owned(rxq->rxq_lock));
   6650 
   6651 	for (i = 0; i < rxq->rxq_ndesc; i++) {
   6652 		rxs = &rxq->rxq_soft[i];
   6653 		if (rxs->rxs_mbuf == NULL) {
   6654 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   6655 				log(LOG_ERR, "%s: unable to allocate or map "
   6656 				    "rx buffer %d, error = %d\n",
   6657 				    device_xname(sc->sc_dev), i, error);
   6658 				/*
   6659 				 * XXX Should attempt to run with fewer receive
   6660 				 * XXX buffers instead of just failing.
   6661 				 */
   6662 				wm_rxdrain(rxq);
   6663 				return ENOMEM;
   6664 			}
   6665 		} else {
   6666 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   6667 				wm_init_rxdesc(rxq, i);
   6668 			/*
   6669 			 * On 82575 and newer devices, the RX descriptors
   6670 			 * must be initialized after RCTL.EN is set in
   6671 			 * wm_set_filter().
   6672 			 */
   6673 		}
   6674 	}
   6675 	rxq->rxq_ptr = 0;
   6676 	rxq->rxq_discard = 0;
   6677 	WM_RXCHAIN_RESET(rxq);
   6678 
   6679 	return 0;
   6680 }
   6681 
   6682 static int
   6683 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6684     struct wm_rxqueue *rxq)
   6685 {
   6686 
   6687 	KASSERT(mutex_owned(rxq->rxq_lock));
   6688 
   6689 	/*
   6690 	 * Set up some register offsets that are different between
   6691 	 * the i82542 and the i82543 and later chips.
   6692 	 */
   6693 	if (sc->sc_type < WM_T_82543)
   6694 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6695 	else
   6696 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6697 
   6698 	wm_init_rx_regs(sc, wmq, rxq);
   6699 	return wm_init_rx_buffer(sc, rxq);
   6700 }
   6701 
   6702 /*
   6703  * wm_init_txrx_queues:
   6704  *	Initialize {tx,rx} descriptors and {tx,rx} buffers.
   6705  */
   6706 static int
   6707 wm_init_txrx_queues(struct wm_softc *sc)
   6708 {
   6709 	int i, error = 0;
   6710 
   6711 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6712 		device_xname(sc->sc_dev), __func__));
   6713 
   6714 	for (i = 0; i < sc->sc_nqueues; i++) {
   6715 		struct wm_queue *wmq = &sc->sc_queue[i];
   6716 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6717 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6718 
   6719 		/*
   6720 		 * TODO
   6721 		 * Currently, a constant value is used instead of AIM.
   6722 		 * Furthermore, the multiqueue path, which uses polling
   6723 		 * mode, gets a shorter interrupt interval than the default.
   6724 		 * More tuning and AIM are required.
   6725 		 */
   6726 		if (wm_is_using_multiqueue(sc))
   6727 			wmq->wmq_itr = 50;
   6728 		else
   6729 			wmq->wmq_itr = sc->sc_itr_init;
   6730 		wmq->wmq_set_itr = true;
   6731 
   6732 		mutex_enter(txq->txq_lock);
   6733 		wm_init_tx_queue(sc, wmq, txq);
   6734 		mutex_exit(txq->txq_lock);
   6735 
   6736 		mutex_enter(rxq->rxq_lock);
   6737 		error = wm_init_rx_queue(sc, wmq, rxq);
   6738 		mutex_exit(rxq->rxq_lock);
   6739 		if (error)
   6740 			break;
   6741 	}
   6742 
   6743 	return error;
   6744 }
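
        /*
         * The wmq_itr value chosen above is consumed by wm_init_tx_regs()
         * and wm_init_rx_regs(), which program the legacy delay registers
         * (TIDV/TADV, RDTR/RADV) at a quarter of it; with wmq_set_itr set,
         * the interrupt path presumably also rewrites the interrupt
         * moderation register the next time it runs.
         */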
   6745 
   6746 /*
   6747  * wm_tx_offload:
   6748  *
   6749  *	Set up TCP/IP checksumming parameters for the
   6750  *	specified packet.
   6751  */
   6752 static int
   6753 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6754     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
   6755 {
   6756 	struct mbuf *m0 = txs->txs_mbuf;
   6757 	struct livengood_tcpip_ctxdesc *t;
   6758 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6759 	uint32_t ipcse;
   6760 	struct ether_header *eh;
   6761 	int offset, iphl;
   6762 	uint8_t fields;
   6763 
   6764 	/*
   6765 	 * XXX It would be nice if the mbuf pkthdr had offset
   6766 	 * fields for the protocol headers.
   6767 	 */
   6768 
   6769 	eh = mtod(m0, struct ether_header *);
   6770 	switch (htons(eh->ether_type)) {
   6771 	case ETHERTYPE_IP:
   6772 	case ETHERTYPE_IPV6:
   6773 		offset = ETHER_HDR_LEN;
   6774 		break;
   6775 
   6776 	case ETHERTYPE_VLAN:
   6777 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6778 		break;
   6779 
   6780 	default:
   6781 		/*
   6782 		 * Don't support this protocol or encapsulation.
   6783 		 */
   6784 		*fieldsp = 0;
   6785 		*cmdp = 0;
   6786 		return 0;
   6787 	}
   6788 
   6789 	if ((m0->m_pkthdr.csum_flags &
   6790 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6791 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6792 	} else {
   6793 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6794 	}
   6795 	ipcse = offset + iphl - 1;
   6796 
   6797 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6798 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6799 	seg = 0;
   6800 	fields = 0;
   6801 
   6802 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6803 		int hlen = offset + iphl;
   6804 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6805 
   6806 		if (__predict_false(m0->m_len <
   6807 				    (hlen + sizeof(struct tcphdr)))) {
   6808 			/*
   6809 			 * TCP/IP headers are not in the first mbuf; we need
   6810 			 * to do this the slow and painful way.  Let's just
   6811 			 * hope this doesn't happen very often.
   6812 			 */
   6813 			struct tcphdr th;
   6814 
   6815 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6816 
   6817 			m_copydata(m0, hlen, sizeof(th), &th);
   6818 			if (v4) {
   6819 				struct ip ip;
   6820 
   6821 				m_copydata(m0, offset, sizeof(ip), &ip);
   6822 				ip.ip_len = 0;
   6823 				m_copyback(m0,
   6824 				    offset + offsetof(struct ip, ip_len),
   6825 				    sizeof(ip.ip_len), &ip.ip_len);
   6826 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6827 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6828 			} else {
   6829 				struct ip6_hdr ip6;
   6830 
   6831 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6832 				ip6.ip6_plen = 0;
   6833 				m_copyback(m0,
   6834 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6835 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6836 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6837 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6838 			}
   6839 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6840 			    sizeof(th.th_sum), &th.th_sum);
   6841 
   6842 			hlen += th.th_off << 2;
   6843 		} else {
   6844 			/*
   6845 			 * TCP/IP headers are in the first mbuf; we can do
   6846 			 * this the easy way.
   6847 			 */
   6848 			struct tcphdr *th;
   6849 
   6850 			if (v4) {
   6851 				struct ip *ip =
   6852 				    (void *)(mtod(m0, char *) + offset);
   6853 				th = (void *)(mtod(m0, char *) + hlen);
   6854 
   6855 				ip->ip_len = 0;
   6856 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6857 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6858 			} else {
   6859 				struct ip6_hdr *ip6 =
   6860 				    (void *)(mtod(m0, char *) + offset);
   6861 				th = (void *)(mtod(m0, char *) + hlen);
   6862 
   6863 				ip6->ip6_plen = 0;
   6864 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6865 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6866 			}
   6867 			hlen += th->th_off << 2;
   6868 		}
   6869 
   6870 		if (v4) {
   6871 			WM_Q_EVCNT_INCR(txq, txtso);
   6872 			cmdlen |= WTX_TCPIP_CMD_IP;
   6873 		} else {
   6874 			WM_Q_EVCNT_INCR(txq, txtso6);
   6875 			ipcse = 0;
   6876 		}
   6877 		cmd |= WTX_TCPIP_CMD_TSE;
   6878 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6879 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6880 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6881 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6882 	}
   6883 
   6884 	/*
   6885 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6886 	 * offload feature, if we load the context descriptor, we
   6887 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6888 	 */
   6889 
   6890 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6891 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6892 	    WTX_TCPIP_IPCSE(ipcse);
   6893 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6894 		WM_Q_EVCNT_INCR(txq, txipsum);
   6895 		fields |= WTX_IXSM;
   6896 	}
   6897 
   6898 	offset += iphl;
   6899 
   6900 	if (m0->m_pkthdr.csum_flags &
   6901 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6902 		WM_Q_EVCNT_INCR(txq, txtusum);
   6903 		fields |= WTX_TXSM;
   6904 		tucs = WTX_TCPIP_TUCSS(offset) |
   6905 		    WTX_TCPIP_TUCSO(offset +
   6906 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6907 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6908 	} else if ((m0->m_pkthdr.csum_flags &
   6909 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6910 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6911 		fields |= WTX_TXSM;
   6912 		tucs = WTX_TCPIP_TUCSS(offset) |
   6913 		    WTX_TCPIP_TUCSO(offset +
   6914 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6915 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6916 	} else {
   6917 		/* Just initialize it to a valid TCP context. */
   6918 		tucs = WTX_TCPIP_TUCSS(offset) |
   6919 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6920 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6921 	}
   6922 
   6923 	/*
   6924 	 * We don't have to write a context descriptor for every packet,
   6925 	 * except on the 82574: there a context descriptor must be written
   6926 	 * for every packet when two descriptor queues are used.
   6927 	 * Writing a context descriptor for every packet adds overhead,
   6928 	 * but it does not cause problems.
   6929 	 */
   6930 	/* Fill in the context descriptor. */
   6931 	t = (struct livengood_tcpip_ctxdesc *)
   6932 	    &txq->txq_descs[txq->txq_next];
   6933 	t->tcpip_ipcs = htole32(ipcs);
   6934 	t->tcpip_tucs = htole32(tucs);
   6935 	t->tcpip_cmdlen = htole32(cmdlen);
   6936 	t->tcpip_seg = htole32(seg);
   6937 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6938 
   6939 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6940 	txs->txs_ndesc++;
   6941 
   6942 	*cmdp = cmd;
   6943 	*fieldsp = fields;
   6944 
   6945 	return 0;
   6946 }
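
        /*
         * A worked example of the context fields computed above, for a plain
         * TCPv4 packet with a 14-byte Ethernet header and a 20-byte IP
         * header: IPCSS = 14 (start of the IP header), IPCSO = 14 + 10 = 24
         * (offset of ip_sum), IPCSE = 14 + 20 - 1 = 33 (last byte covered by
         * the IP checksum), TUCSS = 34 (start of the TCP header),
         * TUCSO = 34 + 16 = 50 (offset of th_sum) and TUCSE = 0, meaning
         * "checksum to the end of the packet".
         */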
   6947 
   6948 static inline int
   6949 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6950 {
   6951 	struct wm_softc *sc = ifp->if_softc;
   6952 	u_int cpuid = cpu_index(curcpu());
   6953 
   6954 	/*
   6955 	 * Currently, a simple distribution strategy.
   6956 	 * TODO:
   6957 	 * distribute by flowid (RSS hash value).
   6958 	 */
   6959 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
   6960 }
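
        /*
         * Adding ncpu before the subtraction keeps the dividend non-negative
         * even when sc_affinity_offset exceeds the current CPU index.  For
         * example, with ncpu = 8, sc_nqueues = 4 and sc_affinity_offset = 2,
         * CPU 3 selects queue (3 + 8 - 2) % 4 = 1.
         */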
   6961 
   6962 /*
   6963  * wm_start:		[ifnet interface function]
   6964  *
   6965  *	Start packet transmission on the interface.
   6966  */
   6967 static void
   6968 wm_start(struct ifnet *ifp)
   6969 {
   6970 	struct wm_softc *sc = ifp->if_softc;
   6971 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6972 
   6973 #ifdef WM_MPSAFE
   6974 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6975 #endif
   6976 	/*
   6977 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   6978 	 */
   6979 
   6980 	mutex_enter(txq->txq_lock);
   6981 	if (!txq->txq_stopping)
   6982 		wm_start_locked(ifp);
   6983 	mutex_exit(txq->txq_lock);
   6984 }
   6985 
   6986 static void
   6987 wm_start_locked(struct ifnet *ifp)
   6988 {
   6989 	struct wm_softc *sc = ifp->if_softc;
   6990 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6991 
   6992 	wm_send_common_locked(ifp, txq, false);
   6993 }
   6994 
   6995 static int
   6996 wm_transmit(struct ifnet *ifp, struct mbuf *m)
   6997 {
   6998 	int qid;
   6999 	struct wm_softc *sc = ifp->if_softc;
   7000 	struct wm_txqueue *txq;
   7001 
   7002 	qid = wm_select_txqueue(ifp, m);
   7003 	txq = &sc->sc_queue[qid].wmq_txq;
   7004 
   7005 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7006 		m_freem(m);
   7007 		WM_Q_EVCNT_INCR(txq, txdrop);
   7008 		return ENOBUFS;
   7009 	}
   7010 
   7011 	/*
   7012 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7013 	 */
   7014 	ifp->if_obytes += m->m_pkthdr.len;
   7015 	if (m->m_flags & M_MCAST)
   7016 		ifp->if_omcasts++;
   7017 
   7018 	if (mutex_tryenter(txq->txq_lock)) {
   7019 		if (!txq->txq_stopping)
   7020 			wm_transmit_locked(ifp, txq);
   7021 		mutex_exit(txq->txq_lock);
   7022 	}
   7023 
   7024 	return 0;
   7025 }
   7026 
   7027 static void
   7028 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7029 {
   7030 
   7031 	wm_send_common_locked(ifp, txq, true);
   7032 }
   7033 
   7034 static void
   7035 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7036     bool is_transmit)
   7037 {
   7038 	struct wm_softc *sc = ifp->if_softc;
   7039 	struct mbuf *m0;
   7040 	struct m_tag *mtag;
   7041 	struct wm_txsoft *txs;
   7042 	bus_dmamap_t dmamap;
   7043 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   7044 	bus_addr_t curaddr;
   7045 	bus_size_t seglen, curlen;
   7046 	uint32_t cksumcmd;
   7047 	uint8_t cksumfields;
   7048 
   7049 	KASSERT(mutex_owned(txq->txq_lock));
   7050 
   7051 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7052 		return;
   7053 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7054 		return;
   7055 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7056 		return;
   7057 
   7058 	/* Remember the previous number of free descriptors. */
   7059 	ofree = txq->txq_free;
   7060 
   7061 	/*
   7062 	 * Loop through the send queue, setting up transmit descriptors
   7063 	 * until we drain the queue, or use up all available transmit
   7064 	 * descriptors.
   7065 	 */
   7066 	for (;;) {
   7067 		m0 = NULL;
   7068 
   7069 		/* Get a work queue entry. */
   7070 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7071 			wm_txeof(sc, txq);
   7072 			if (txq->txq_sfree == 0) {
   7073 				DPRINTF(WM_DEBUG_TX,
   7074 				    ("%s: TX: no free job descriptors\n",
   7075 					device_xname(sc->sc_dev)));
   7076 				WM_Q_EVCNT_INCR(txq, txsstall);
   7077 				break;
   7078 			}
   7079 		}
   7080 
   7081 		/* Grab a packet off the queue. */
   7082 		if (is_transmit)
   7083 			m0 = pcq_get(txq->txq_interq);
   7084 		else
   7085 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7086 		if (m0 == NULL)
   7087 			break;
   7088 
   7089 		DPRINTF(WM_DEBUG_TX,
   7090 		    ("%s: TX: have packet to transmit: %p\n",
   7091 		    device_xname(sc->sc_dev), m0));
   7092 
   7093 		txs = &txq->txq_soft[txq->txq_snext];
   7094 		dmamap = txs->txs_dmamap;
   7095 
   7096 		use_tso = (m0->m_pkthdr.csum_flags &
   7097 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   7098 
   7099 		/*
   7100 		 * So says the Linux driver:
   7101 		 * The controller does a simple calculation to make sure
   7102 		 * there is enough room in the FIFO before initiating the
   7103 		 * DMA for each buffer.  The calc is:
   7104 		 *	4 = ceil(buffer len / MSS)
   7105 		 * To make sure we don't overrun the FIFO, adjust the max
   7106 		 * buffer len if the MSS drops.
   7107 		 */
   7108 		dmamap->dm_maxsegsz =
   7109 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   7110 		    ? m0->m_pkthdr.segsz << 2
   7111 		    : WTX_MAX_LEN;
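
        		/*
        		 * For example, with an MSS of 1460 the clamp above sets
        		 * dm_maxsegsz to 1460 << 2 = 5840, so no single DMA
        		 * segment spans more than four MSS-sized chunks of the
        		 * FIFO budget described in the comment.
        		 */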
   7112 
   7113 		/*
   7114 		 * Load the DMA map.  If this fails, the packet either
   7115 		 * didn't fit in the allotted number of segments, or we
   7116 		 * were short on resources.  For the too-many-segments
   7117 		 * case, we simply report an error and drop the packet,
   7118 		 * since we can't sanely copy a jumbo packet to a single
   7119 		 * buffer.
   7120 		 */
   7121 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7122 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7123 		if (error) {
   7124 			if (error == EFBIG) {
   7125 				WM_Q_EVCNT_INCR(txq, txdrop);
   7126 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7127 				    "DMA segments, dropping...\n",
   7128 				    device_xname(sc->sc_dev));
   7129 				wm_dump_mbuf_chain(sc, m0);
   7130 				m_freem(m0);
   7131 				continue;
   7132 			}
   7133 			/* Short on resources, just stop for now. */
   7134 			DPRINTF(WM_DEBUG_TX,
   7135 			    ("%s: TX: dmamap load failed: %d\n",
   7136 			    device_xname(sc->sc_dev), error));
   7137 			break;
   7138 		}
   7139 
   7140 		segs_needed = dmamap->dm_nsegs;
   7141 		if (use_tso) {
   7142 			/* For sentinel descriptor; see below. */
   7143 			segs_needed++;
   7144 		}
   7145 
   7146 		/*
   7147 		 * Ensure we have enough descriptors free to describe
   7148 		 * the packet.  Note, we always reserve one descriptor
   7149 		 * at the end of the ring due to the semantics of the
   7150 		 * TDT register, plus one more in the event we need
   7151 		 * to load offload context.
   7152 		 */
   7153 		if (segs_needed > txq->txq_free - 2) {
   7154 			/*
   7155 			 * Not enough free descriptors to transmit this
   7156 			 * packet.  We haven't committed anything yet,
   7157 			 * so just unload the DMA map, put the packet
   7158 			 * pack on the queue, and punt.  Notify the upper
   7159 			 * layer that there are no more slots left.
   7160 			 */
   7161 			DPRINTF(WM_DEBUG_TX,
   7162 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7163 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7164 			    segs_needed, txq->txq_free - 1));
   7165 			if (!is_transmit)
   7166 				ifp->if_flags |= IFF_OACTIVE;
   7167 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7168 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7169 			WM_Q_EVCNT_INCR(txq, txdstall);
   7170 			break;
   7171 		}
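
        		/*
        		 * The "- 2" above reserves one descriptor so that TDT
        		 * never fully catches up with TDH (a completely full
        		 * ring would be indistinguishable from an empty one)
        		 * and one more for a possible checksum context
        		 * descriptor.
        		 */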
   7172 
   7173 		/*
   7174 		 * Check for 82547 Tx FIFO bug.  We need to do this
   7175 		 * once we know we can transmit the packet, since we
   7176 		 * do some internal FIFO space accounting here.
   7177 		 */
   7178 		if (sc->sc_type == WM_T_82547 &&
   7179 		    wm_82547_txfifo_bugchk(sc, m0)) {
   7180 			DPRINTF(WM_DEBUG_TX,
   7181 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   7182 			    device_xname(sc->sc_dev)));
   7183 			if (!is_transmit)
   7184 				ifp->if_flags |= IFF_OACTIVE;
   7185 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7186 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7187 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   7188 			break;
   7189 		}
   7190 
   7191 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7192 
   7193 		DPRINTF(WM_DEBUG_TX,
   7194 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7195 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7196 
   7197 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7198 
   7199 		/*
   7200 		 * Store a pointer to the packet so that we can free it
   7201 		 * later.
   7202 		 *
   7203 		 * Initially, we consider the number of descriptors the
   7204 		 * packet uses to be the number of DMA segments.  This may be
   7205 		 * incremented by 1 if we do checksum offload (a descriptor
   7206 		 * is used to set the checksum context).
   7207 		 */
   7208 		txs->txs_mbuf = m0;
   7209 		txs->txs_firstdesc = txq->txq_next;
   7210 		txs->txs_ndesc = segs_needed;
   7211 
   7212 		/* Set up offload parameters for this packet. */
   7213 		if (m0->m_pkthdr.csum_flags &
   7214 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7215 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7216 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7217 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
   7218 					  &cksumfields) != 0) {
   7219 				/* Error message already displayed. */
   7220 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7221 				continue;
   7222 			}
   7223 		} else {
   7224 			cksumcmd = 0;
   7225 			cksumfields = 0;
   7226 		}
   7227 
   7228 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   7229 
   7230 		/* Sync the DMA map. */
   7231 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7232 		    BUS_DMASYNC_PREWRITE);
   7233 
   7234 		/* Initialize the transmit descriptor. */
   7235 		for (nexttx = txq->txq_next, seg = 0;
   7236 		     seg < dmamap->dm_nsegs; seg++) {
   7237 			for (seglen = dmamap->dm_segs[seg].ds_len,
   7238 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   7239 			     seglen != 0;
   7240 			     curaddr += curlen, seglen -= curlen,
   7241 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   7242 				curlen = seglen;
   7243 
   7244 				/*
   7245 				 * So says the Linux driver:
   7246 				 * Work around for premature descriptor
   7247 				 * write-backs in TSO mode.  Append a
   7248 				 * 4-byte sentinel descriptor.
   7249 				 */
   7250 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   7251 				    curlen > 8)
   7252 					curlen -= 4;
   7253 
   7254 				wm_set_dma_addr(
   7255 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   7256 				txq->txq_descs[nexttx].wtx_cmdlen
   7257 				    = htole32(cksumcmd | curlen);
   7258 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   7259 				    = 0;
   7260 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   7261 				    = cksumfields;
   7262 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7263 				lasttx = nexttx;
   7264 
   7265 				DPRINTF(WM_DEBUG_TX,
   7266 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   7267 				     "len %#04zx\n",
   7268 				    device_xname(sc->sc_dev), nexttx,
   7269 				    (uint64_t)curaddr, curlen));
   7270 			}
   7271 		}
   7272 
   7273 		KASSERT(lasttx != -1);
   7274 
   7275 		/*
   7276 		 * Set up the command byte on the last descriptor of
   7277 		 * the packet.  If we're in the interrupt delay window,
   7278 		 * delay the interrupt.
   7279 		 */
   7280 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7281 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7282 
   7283 		/*
   7284 		 * If VLANs are enabled and the packet has a VLAN tag, set
   7285 		 * up the descriptor to encapsulate the packet for us.
   7286 		 *
   7287 		 * This is only valid on the last descriptor of the packet.
   7288 		 */
   7289 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7290 			txq->txq_descs[lasttx].wtx_cmdlen |=
   7291 			    htole32(WTX_CMD_VLE);
   7292 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   7293 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7294 		}
   7295 
   7296 		txs->txs_lastdesc = lasttx;
   7297 
   7298 		DPRINTF(WM_DEBUG_TX,
   7299 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7300 		    device_xname(sc->sc_dev),
   7301 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7302 
   7303 		/* Sync the descriptors we're using. */
   7304 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7305 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7306 
   7307 		/* Give the packet to the chip. */
   7308 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7309 
   7310 		DPRINTF(WM_DEBUG_TX,
   7311 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7312 
   7313 		DPRINTF(WM_DEBUG_TX,
   7314 		    ("%s: TX: finished transmitting packet, job %d\n",
   7315 		    device_xname(sc->sc_dev), txq->txq_snext));
   7316 
   7317 		/* Advance the tx pointer. */
   7318 		txq->txq_free -= txs->txs_ndesc;
   7319 		txq->txq_next = nexttx;
   7320 
   7321 		txq->txq_sfree--;
   7322 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7323 
   7324 		/* Pass the packet to any BPF listeners. */
   7325 		bpf_mtap(ifp, m0);
   7326 	}
   7327 
   7328 	if (m0 != NULL) {
   7329 		if (!is_transmit)
   7330 			ifp->if_flags |= IFF_OACTIVE;
   7331 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7332 		WM_Q_EVCNT_INCR(txq, txdrop);
   7333 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7334 			__func__));
   7335 		m_freem(m0);
   7336 	}
   7337 
   7338 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7339 		/* No more slots; notify upper layer. */
   7340 		if (!is_transmit)
   7341 			ifp->if_flags |= IFF_OACTIVE;
   7342 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7343 	}
   7344 
   7345 	if (txq->txq_free != ofree) {
   7346 		/* Set a watchdog timer in case the chip flakes out. */
   7347 		ifp->if_timer = 5;
   7348 	}
   7349 }
   7350 
   7351 /*
   7352  * wm_nq_tx_offload:
   7353  *
   7354  *	Set up TCP/IP checksumming parameters for the
   7355  *	specified packet, for NEWQUEUE devices
   7356  */
   7357 static int
   7358 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   7359     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   7360 {
   7361 	struct mbuf *m0 = txs->txs_mbuf;
   7362 	struct m_tag *mtag;
   7363 	uint32_t vl_len, mssidx, cmdc;
   7364 	struct ether_header *eh;
   7365 	int offset, iphl;
   7366 
   7367 	/*
   7368 	 * XXX It would be nice if the mbuf pkthdr had offset
   7369 	 * fields for the protocol headers.
   7370 	 */
   7371 	*cmdlenp = 0;
   7372 	*fieldsp = 0;
   7373 
   7374 	eh = mtod(m0, struct ether_header *);
   7375 	switch (htons(eh->ether_type)) {
   7376 	case ETHERTYPE_IP:
   7377 	case ETHERTYPE_IPV6:
   7378 		offset = ETHER_HDR_LEN;
   7379 		break;
   7380 
   7381 	case ETHERTYPE_VLAN:
   7382 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   7383 		break;
   7384 
   7385 	default:
   7386 		/* Don't support this protocol or encapsulation. */
   7387 		*do_csum = false;
   7388 		return 0;
   7389 	}
   7390 	*do_csum = true;
   7391 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   7392 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   7393 
   7394 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   7395 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   7396 
   7397 	if ((m0->m_pkthdr.csum_flags &
   7398 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   7399 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   7400 	} else {
   7401 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   7402 	}
   7403 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   7404 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   7405 
   7406 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   7407 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   7408 		     << NQTXC_VLLEN_VLAN_SHIFT);
   7409 		*cmdlenp |= NQTX_CMD_VLE;
   7410 	}
   7411 
   7412 	mssidx = 0;
   7413 
   7414 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   7415 		int hlen = offset + iphl;
   7416 		int tcp_hlen;
   7417 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   7418 
   7419 		if (__predict_false(m0->m_len <
   7420 				    (hlen + sizeof(struct tcphdr)))) {
   7421 			/*
   7422 			 * TCP/IP headers are not in the first mbuf; we need
   7423 			 * to do this the slow and painful way.  Let's just
   7424 			 * hope this doesn't happen very often.
   7425 			 */
   7426 			struct tcphdr th;
   7427 
   7428 			WM_Q_EVCNT_INCR(txq, txtsopain);
   7429 
   7430 			m_copydata(m0, hlen, sizeof(th), &th);
   7431 			if (v4) {
   7432 				struct ip ip;
   7433 
   7434 				m_copydata(m0, offset, sizeof(ip), &ip);
   7435 				ip.ip_len = 0;
   7436 				m_copyback(m0,
   7437 				    offset + offsetof(struct ip, ip_len),
   7438 				    sizeof(ip.ip_len), &ip.ip_len);
   7439 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   7440 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   7441 			} else {
   7442 				struct ip6_hdr ip6;
   7443 
   7444 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   7445 				ip6.ip6_plen = 0;
   7446 				m_copyback(m0,
   7447 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   7448 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   7449 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   7450 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   7451 			}
   7452 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   7453 			    sizeof(th.th_sum), &th.th_sum);
   7454 
   7455 			tcp_hlen = th.th_off << 2;
   7456 		} else {
   7457 			/*
   7458 			 * TCP/IP headers are in the first mbuf; we can do
   7459 			 * this the easy way.
   7460 			 */
   7461 			struct tcphdr *th;
   7462 
   7463 			if (v4) {
   7464 				struct ip *ip =
   7465 				    (void *)(mtod(m0, char *) + offset);
   7466 				th = (void *)(mtod(m0, char *) + hlen);
   7467 
   7468 				ip->ip_len = 0;
   7469 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   7470 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   7471 			} else {
   7472 				struct ip6_hdr *ip6 =
   7473 				    (void *)(mtod(m0, char *) + offset);
   7474 				th = (void *)(mtod(m0, char *) + hlen);
   7475 
   7476 				ip6->ip6_plen = 0;
   7477 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   7478 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   7479 			}
   7480 			tcp_hlen = th->th_off << 2;
   7481 		}
   7482 		hlen += tcp_hlen;
   7483 		*cmdlenp |= NQTX_CMD_TSE;
   7484 
   7485 		if (v4) {
   7486 			WM_Q_EVCNT_INCR(txq, txtso);
   7487 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   7488 		} else {
   7489 			WM_Q_EVCNT_INCR(txq, txtso6);
   7490 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   7491 		}
   7492 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   7493 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7494 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   7495 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   7496 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   7497 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   7498 	} else {
   7499 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   7500 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   7501 	}
   7502 
   7503 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   7504 		*fieldsp |= NQTXD_FIELDS_IXSM;
   7505 		cmdc |= NQTXC_CMD_IP4;
   7506 	}
   7507 
   7508 	if (m0->m_pkthdr.csum_flags &
   7509 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7510 		WM_Q_EVCNT_INCR(txq, txtusum);
   7511 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   7512 			cmdc |= NQTXC_CMD_TCP;
   7513 		} else {
   7514 			cmdc |= NQTXC_CMD_UDP;
   7515 		}
   7516 		cmdc |= NQTXC_CMD_IP4;
   7517 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7518 	}
   7519 	if (m0->m_pkthdr.csum_flags &
   7520 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7521 		WM_Q_EVCNT_INCR(txq, txtusum6);
   7522 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   7523 			cmdc |= NQTXC_CMD_TCP;
   7524 		} else {
   7525 			cmdc |= NQTXC_CMD_UDP;
   7526 		}
   7527 		cmdc |= NQTXC_CMD_IP6;
   7528 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   7529 	}
   7530 
   7531 	/*
   7532 	 * We don't have to write a context descriptor for every packet on
   7533 	 * NEWQUEUE controllers, that is, 82575, 82576, 82580, I350, I354,
   7534 	 * I210 and I211. It is enough to write one per Tx queue for these
   7535 	 * controllers.
   7536 	 * Writing a context descriptor for every packet adds overhead,
   7537 	 * but it does not cause problems.
   7538 	 */
   7539 	/* Fill in the context descriptor. */
   7540 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   7541 	    htole32(vl_len);
   7542 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   7543 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   7544 	    htole32(cmdc);
   7545 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   7546 	    htole32(mssidx);
   7547 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   7548 	DPRINTF(WM_DEBUG_TX,
   7549 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   7550 	    txq->txq_next, 0, vl_len));
   7551 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   7552 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   7553 	txs->txs_ndesc++;
   7554 	return 0;
   7555 }
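
        /*
         * Unlike the legacy context descriptor, the advanced one built above
         * packs header lengths instead of byte offsets: vl_len carries the
         * MAC header length, IP header length and VLAN tag, and mssidx
         * carries the MSS and TCP header length for TSO.  For an untagged
         * TCPv4 packet with 20-byte IP and TCP headers, vl_len is
         * (14 << NQTXC_VLLEN_MACLEN_SHIFT) | (20 << NQTXC_VLLEN_IPLEN_SHIFT),
         * and with TSO at MSS 1460, mssidx is
         * (1460 << NQTXC_MSSIDX_MSS_SHIFT) | (20 << NQTXC_MSSIDX_L4LEN_SHIFT).
         */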
   7556 
   7557 /*
   7558  * wm_nq_start:		[ifnet interface function]
   7559  *
   7560  *	Start packet transmission on the interface for NEWQUEUE devices
   7561  */
   7562 static void
   7563 wm_nq_start(struct ifnet *ifp)
   7564 {
   7565 	struct wm_softc *sc = ifp->if_softc;
   7566 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7567 
   7568 #ifdef WM_MPSAFE
   7569 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   7570 #endif
   7571 	/*
   7572 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
   7573 	 */
   7574 
   7575 	mutex_enter(txq->txq_lock);
   7576 	if (!txq->txq_stopping)
   7577 		wm_nq_start_locked(ifp);
   7578 	mutex_exit(txq->txq_lock);
   7579 }
   7580 
   7581 static void
   7582 wm_nq_start_locked(struct ifnet *ifp)
   7583 {
   7584 	struct wm_softc *sc = ifp->if_softc;
   7585 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7586 
   7587 	wm_nq_send_common_locked(ifp, txq, false);
   7588 }
   7589 
   7590 static int
   7591 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   7592 {
   7593 	int qid;
   7594 	struct wm_softc *sc = ifp->if_softc;
   7595 	struct wm_txqueue *txq;
   7596 
   7597 	qid = wm_select_txqueue(ifp, m);
   7598 	txq = &sc->sc_queue[qid].wmq_txq;
   7599 
   7600 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   7601 		m_freem(m);
   7602 		WM_Q_EVCNT_INCR(txq, txdrop);
   7603 		return ENOBUFS;
   7604 	}
   7605 
   7606 	/*
   7607 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
   7608 	 */
   7609 	ifp->if_obytes += m->m_pkthdr.len;
   7610 	if (m->m_flags & M_MCAST)
   7611 		ifp->if_omcasts++;
   7612 
   7613 	/*
   7614 	 * There are two situations in which this mutex_tryenter() can
   7615 	 * fail at run time:
   7616 	 *     (1) contention with the interrupt handler (wm_txrxintr_msix())
   7617 	 *     (2) contention with the deferred if_start softint
   7618 	 *         (wm_handle_queue())
   7619 	 * In either case, the last packet enqueued to txq->txq_interq is
   7620 	 * still dequeued by wm_deferred_start_locked() on the other side,
   7621 	 * so the packet does not get stuck.
   7622 	 */
   7623 	if (mutex_tryenter(txq->txq_lock)) {
   7624 		if (!txq->txq_stopping)
   7625 			wm_nq_transmit_locked(ifp, txq);
   7626 		mutex_exit(txq->txq_lock);
   7627 	}
   7628 
   7629 	return 0;
   7630 }
   7631 
   7632 static void
   7633 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   7634 {
   7635 
   7636 	wm_nq_send_common_locked(ifp, txq, true);
   7637 }
   7638 
   7639 static void
   7640 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   7641     bool is_transmit)
   7642 {
   7643 	struct wm_softc *sc = ifp->if_softc;
   7644 	struct mbuf *m0;
   7645 	struct m_tag *mtag;
   7646 	struct wm_txsoft *txs;
   7647 	bus_dmamap_t dmamap;
   7648 	int error, nexttx, lasttx = -1, seg, segs_needed;
   7649 	bool do_csum, sent;
   7650 
   7651 	KASSERT(mutex_owned(txq->txq_lock));
   7652 
   7653 	if ((ifp->if_flags & IFF_RUNNING) == 0)
   7654 		return;
   7655 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
   7656 		return;
   7657 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   7658 		return;
   7659 
   7660 	sent = false;
   7661 
   7662 	/*
   7663 	 * Loop through the send queue, setting up transmit descriptors
   7664 	 * until we drain the queue, or use up all available transmit
   7665 	 * descriptors.
   7666 	 */
   7667 	for (;;) {
   7668 		m0 = NULL;
   7669 
   7670 		/* Get a work queue entry. */
   7671 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   7672 			wm_txeof(sc, txq);
   7673 			if (txq->txq_sfree == 0) {
   7674 				DPRINTF(WM_DEBUG_TX,
   7675 				    ("%s: TX: no free job descriptors\n",
   7676 					device_xname(sc->sc_dev)));
   7677 				WM_Q_EVCNT_INCR(txq, txsstall);
   7678 				break;
   7679 			}
   7680 		}
   7681 
   7682 		/* Grab a packet off the queue. */
   7683 		if (is_transmit)
   7684 			m0 = pcq_get(txq->txq_interq);
   7685 		else
   7686 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   7687 		if (m0 == NULL)
   7688 			break;
   7689 
   7690 		DPRINTF(WM_DEBUG_TX,
   7691 		    ("%s: TX: have packet to transmit: %p\n",
   7692 		    device_xname(sc->sc_dev), m0));
   7693 
   7694 		txs = &txq->txq_soft[txq->txq_snext];
   7695 		dmamap = txs->txs_dmamap;
   7696 
   7697 		/*
   7698 		 * Load the DMA map.  If this fails, the packet either
   7699 		 * didn't fit in the allotted number of segments, or we
   7700 		 * were short on resources.  For the too-many-segments
   7701 		 * case, we simply report an error and drop the packet,
   7702 		 * since we can't sanely copy a jumbo packet to a single
   7703 		 * buffer.
   7704 		 */
   7705 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   7706 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   7707 		if (error) {
   7708 			if (error == EFBIG) {
   7709 				WM_Q_EVCNT_INCR(txq, txdrop);
   7710 				log(LOG_ERR, "%s: Tx packet consumes too many "
   7711 				    "DMA segments, dropping...\n",
   7712 				    device_xname(sc->sc_dev));
   7713 				wm_dump_mbuf_chain(sc, m0);
   7714 				m_freem(m0);
   7715 				continue;
   7716 			}
   7717 			/* Short on resources, just stop for now. */
   7718 			DPRINTF(WM_DEBUG_TX,
   7719 			    ("%s: TX: dmamap load failed: %d\n",
   7720 			    device_xname(sc->sc_dev), error));
   7721 			break;
   7722 		}
   7723 
   7724 		segs_needed = dmamap->dm_nsegs;
   7725 
   7726 		/*
   7727 		 * Ensure we have enough descriptors free to describe
   7728 		 * the packet.  Note, we always reserve one descriptor
   7729 		 * at the end of the ring due to the semantics of the
   7730 		 * TDT register, plus one more in the event we need
   7731 		 * to load offload context.
   7732 		 */
   7733 		if (segs_needed > txq->txq_free - 2) {
   7734 			/*
   7735 			 * Not enough free descriptors to transmit this
   7736 			 * packet.  We haven't committed anything yet,
   7737 			 * so just unload the DMA map, put the packet
   7738 			 * pack on the queue, and punt.  Notify the upper
   7739 			 * layer that there are no more slots left.
   7740 			 */
   7741 			DPRINTF(WM_DEBUG_TX,
   7742 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   7743 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   7744 			    segs_needed, txq->txq_free - 1));
   7745 			if (!is_transmit)
   7746 				ifp->if_flags |= IFF_OACTIVE;
   7747 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   7748 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   7749 			WM_Q_EVCNT_INCR(txq, txdstall);
   7750 			break;
   7751 		}
   7752 
   7753 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   7754 
   7755 		DPRINTF(WM_DEBUG_TX,
   7756 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   7757 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   7758 
   7759 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   7760 
   7761 		/*
   7762 		 * Store a pointer to the packet so that we can free it
   7763 		 * later.
   7764 		 *
   7765 		 * Initially, we consider the number of descriptors the
   7766 		 * packet uses to be the number of DMA segments.  This may be
   7767 		 * incremented by 1 if we do checksum offload (a descriptor
   7768 		 * is used to set the checksum context).
   7769 		 */
   7770 		txs->txs_mbuf = m0;
   7771 		txs->txs_firstdesc = txq->txq_next;
   7772 		txs->txs_ndesc = segs_needed;
   7773 
   7774 		/* Set up offload parameters for this packet. */
   7775 		uint32_t cmdlen, fields, dcmdlen;
   7776 		if (m0->m_pkthdr.csum_flags &
   7777 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   7778 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7779 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   7780 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   7781 			    &do_csum) != 0) {
   7782 				/* Error message already displayed. */
   7783 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   7784 				continue;
   7785 			}
   7786 		} else {
   7787 			do_csum = false;
   7788 			cmdlen = 0;
   7789 			fields = 0;
   7790 		}
   7791 
   7792 		/* Sync the DMA map. */
   7793 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7794 		    BUS_DMASYNC_PREWRITE);
   7795 
   7796 		/* Initialize the first transmit descriptor. */
   7797 		nexttx = txq->txq_next;
   7798 		if (!do_csum) {
   7799 			/* setup a legacy descriptor */
   7800 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7801 			    dmamap->dm_segs[0].ds_addr);
   7802 			txq->txq_descs[nexttx].wtx_cmdlen =
   7803 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7804 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7805 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7806 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7807 			    NULL) {
   7808 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7809 				    htole32(WTX_CMD_VLE);
   7810 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7811 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7812 			} else {
   7813 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7814 			}
   7815 			dcmdlen = 0;
   7816 		} else {
   7817 			/* setup an advanced data descriptor */
   7818 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7819 			    htole64(dmamap->dm_segs[0].ds_addr);
   7820 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7821 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7822 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7823 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7824 			    htole32(fields);
   7825 			DPRINTF(WM_DEBUG_TX,
   7826 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7827 			    device_xname(sc->sc_dev), nexttx,
   7828 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7829 			DPRINTF(WM_DEBUG_TX,
   7830 			    ("\t 0x%08x%08x\n", fields,
   7831 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7832 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7833 		}
   7834 
   7835 		lasttx = nexttx;
   7836 		nexttx = WM_NEXTTX(txq, nexttx);
   7837 		/*
   7838 		 * Fill in the next descriptors. The legacy and advanced
   7839 		 * formats are the same from here on.
   7840 		 */
   7841 		for (seg = 1; seg < dmamap->dm_nsegs;
   7842 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7843 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7844 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7845 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7846 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7847 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7848 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7849 			lasttx = nexttx;
   7850 
   7851 			DPRINTF(WM_DEBUG_TX,
   7852 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7853 			     "len %#04zx\n",
   7854 			    device_xname(sc->sc_dev), nexttx,
   7855 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7856 			    dmamap->dm_segs[seg].ds_len));
   7857 		}
   7858 
   7859 		KASSERT(lasttx != -1);
   7860 
   7861 		/*
   7862 		 * Set up the command byte on the last descriptor of
   7863 		 * the packet.  If we're in the interrupt delay window,
   7864 		 * delay the interrupt.
   7865 		 */
   7866 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7867 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   7868 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7869 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7870 
   7871 		txs->txs_lastdesc = lasttx;
   7872 
   7873 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7874 		    device_xname(sc->sc_dev),
   7875 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7876 
   7877 		/* Sync the descriptors we're using. */
   7878 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7879 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7880 
   7881 		/* Give the packet to the chip. */
   7882 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7883 		sent = true;
   7884 
   7885 		DPRINTF(WM_DEBUG_TX,
   7886 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7887 
   7888 		DPRINTF(WM_DEBUG_TX,
   7889 		    ("%s: TX: finished transmitting packet, job %d\n",
   7890 		    device_xname(sc->sc_dev), txq->txq_snext));
   7891 
   7892 		/* Advance the tx pointer. */
   7893 		txq->txq_free -= txs->txs_ndesc;
   7894 		txq->txq_next = nexttx;
   7895 
   7896 		txq->txq_sfree--;
   7897 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7898 
   7899 		/* Pass the packet to any BPF listeners. */
   7900 		bpf_mtap(ifp, m0);
   7901 	}
   7902 
   7903 	if (m0 != NULL) {
   7904 		if (!is_transmit)
   7905 			ifp->if_flags |= IFF_OACTIVE;
   7906 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7907 		WM_Q_EVCNT_INCR(txq, txdrop);
   7908 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7909 			__func__));
   7910 		m_freem(m0);
   7911 	}
   7912 
   7913 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7914 		/* No more slots; notify upper layer. */
   7915 		if (!is_transmit)
   7916 			ifp->if_flags |= IFF_OACTIVE;
   7917 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7918 	}
   7919 
   7920 	if (sent) {
   7921 		/* Set a watchdog timer in case the chip flakes out. */
   7922 		ifp->if_timer = 5;
   7923 	}
   7924 }
   7925 
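/*
 * Illustrative sketch, not driver code: the send loop above hands finished
 * descriptors to the chip by writing the new "next" index to the TDT (tail)
 * register and accounts for ring space with txq_free/txq_next.  A minimal
 * model of that producer-side bookkeeping; the ex_* names are hypothetical
 * and assume a ring of ndesc slots where one slot is always kept empty so
 * that a full ring can be told apart from an empty one.
 */
#if 0
struct ex_txring {
	int	next;	/* first slot the producer may fill */
	int	dirty;	/* oldest slot not yet reclaimed */
	int	ndesc;	/* total slots in the ring */
};

static inline int
ex_txring_space(const struct ex_txring *r)
{

	/* Slots available to the producer, always leaving one unused. */
	return (r->dirty - r->next - 1 + r->ndesc) % r->ndesc;
}
#endif
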
   7926 static void
   7927 wm_deferred_start_locked(struct wm_txqueue *txq)
   7928 {
   7929 	struct wm_softc *sc = txq->txq_sc;
   7930 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7931 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7932 	int qid = wmq->wmq_id;
   7933 
   7934 	KASSERT(mutex_owned(txq->txq_lock));
   7935 
   7936 	if (txq->txq_stopping) {
   7937 		mutex_exit(txq->txq_lock);
   7938 		return;
   7939 	}
   7940 
   7941 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    7942 		/* XXX Needed for ALTQ or single-CPU systems. */
   7943 		if (qid == 0)
   7944 			wm_nq_start_locked(ifp);
   7945 		wm_nq_transmit_locked(ifp, txq);
   7946 	} else {
    7947 		/* XXX Needed for ALTQ or single-CPU systems. */
   7948 		if (qid == 0)
   7949 			wm_start_locked(ifp);
   7950 		wm_transmit_locked(ifp, txq);
   7951 	}
   7952 }
   7953 
   7954 /* Interrupt */
   7955 
   7956 /*
   7957  * wm_txeof:
   7958  *
   7959  *	Helper; handle transmit interrupts.
   7960  */
   7961 static int
   7962 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7963 {
   7964 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7965 	struct wm_txsoft *txs;
   7966 	bool processed = false;
   7967 	int count = 0;
   7968 	int i;
   7969 	uint8_t status;
   7970 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
   7971 
   7972 	KASSERT(mutex_owned(txq->txq_lock));
   7973 
   7974 	if (txq->txq_stopping)
   7975 		return 0;
   7976 
   7977 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
    7978 	/* For ALTQ and legacy (non-multiqueue) ethernet controllers. */
   7979 	if (wmq->wmq_id == 0)
   7980 		ifp->if_flags &= ~IFF_OACTIVE;
   7981 
   7982 	/*
   7983 	 * Go through the Tx list and free mbufs for those
   7984 	 * frames which have been transmitted.
   7985 	 */
   7986 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7987 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7988 		txs = &txq->txq_soft[i];
   7989 
   7990 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7991 			device_xname(sc->sc_dev), i));
   7992 
   7993 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7994 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7995 
   7996 		status =
   7997 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   7998 		if ((status & WTX_ST_DD) == 0) {
   7999 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   8000 			    BUS_DMASYNC_PREREAD);
   8001 			break;
   8002 		}
   8003 
   8004 		processed = true;
   8005 		count++;
   8006 		DPRINTF(WM_DEBUG_TX,
   8007 		    ("%s: TX: job %d done: descs %d..%d\n",
   8008 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   8009 		    txs->txs_lastdesc));
   8010 
   8011 		/*
   8012 		 * XXX We should probably be using the statistics
   8013 		 * XXX registers, but I don't know if they exist
   8014 		 * XXX on chips before the i82544.
   8015 		 */
   8016 
   8017 #ifdef WM_EVENT_COUNTERS
   8018 		if (status & WTX_ST_TU)
   8019 			WM_Q_EVCNT_INCR(txq, tu);
   8020 #endif /* WM_EVENT_COUNTERS */
   8021 
   8022 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   8023 			ifp->if_oerrors++;
   8024 			if (status & WTX_ST_LC)
   8025 				log(LOG_WARNING, "%s: late collision\n",
   8026 				    device_xname(sc->sc_dev));
   8027 			else if (status & WTX_ST_EC) {
   8028 				ifp->if_collisions += 16;
   8029 				log(LOG_WARNING, "%s: excessive collisions\n",
   8030 				    device_xname(sc->sc_dev));
   8031 			}
   8032 		} else
   8033 			ifp->if_opackets++;
   8034 
   8035 		txq->txq_packets++;
   8036 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
   8037 
   8038 		txq->txq_free += txs->txs_ndesc;
   8039 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   8040 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   8041 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   8042 		m_freem(txs->txs_mbuf);
   8043 		txs->txs_mbuf = NULL;
   8044 	}
   8045 
   8046 	/* Update the dirty transmit buffer pointer. */
   8047 	txq->txq_sdirty = i;
   8048 	DPRINTF(WM_DEBUG_TX,
   8049 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   8050 
   8051 	if (count != 0)
   8052 		rnd_add_uint32(&sc->rnd_source, count);
   8053 
   8054 	/*
   8055 	 * If there are no more pending transmissions, cancel the watchdog
   8056 	 * timer.
   8057 	 */
   8058 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   8059 		ifp->if_timer = 0;
   8060 
   8061 	return processed;
   8062 }
   8063 
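/*
 * Illustrative sketch, not driver code: wm_txeof() above reclaims completed
 * jobs by polling the Descriptor Done (DD) status bit that the hardware
 * writes back into the last descriptor of each job, stopping at the first
 * job still in flight.  The shape of that scan in isolation; EX_NTXJOBS and
 * ex_dd_set() are hypothetical stand-ins for the ring size and the
 * descriptor read:
 */
#if 0
#define EX_NTXJOBS	64	/* hypothetical job ring size */

static int
ex_tx_reclaim(int dirty, int pending, bool (*ex_dd_set)(int))
{
	int reclaimed = 0;

	while (pending-- > 0 && (*ex_dd_set)(dirty)) {
		/* Free the mbuf, unload the DMA map, credit descriptors. */
		dirty = (dirty + 1) % EX_NTXJOBS;
		reclaimed++;
	}

	return reclaimed;
}
#endif
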
   8064 static inline uint32_t
   8065 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
   8066 {
   8067 	struct wm_softc *sc = rxq->rxq_sc;
   8068 
   8069 	if (sc->sc_type == WM_T_82574)
   8070 		return EXTRXC_STATUS(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8071 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8072 		return NQRXC_STATUS(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8073 	else
   8074 		return rxq->rxq_descs[idx].wrx_status;
   8075 }
   8076 
   8077 static inline uint32_t
   8078 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
   8079 {
   8080 	struct wm_softc *sc = rxq->rxq_sc;
   8081 
   8082 	if (sc->sc_type == WM_T_82574)
   8083 		return EXTRXC_ERROR(rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
   8084 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8085 		return NQRXC_ERROR(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
   8086 	else
   8087 		return rxq->rxq_descs[idx].wrx_errors;
   8088 }
   8089 
   8090 static inline uint16_t
   8091 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
   8092 {
   8093 	struct wm_softc *sc = rxq->rxq_sc;
   8094 
   8095 	if (sc->sc_type == WM_T_82574)
   8096 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
   8097 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8098 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
   8099 	else
   8100 		return rxq->rxq_descs[idx].wrx_special;
   8101 }
   8102 
   8103 static inline int
   8104 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
   8105 {
   8106 	struct wm_softc *sc = rxq->rxq_sc;
   8107 
   8108 	if (sc->sc_type == WM_T_82574)
   8109 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
   8110 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8111 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
   8112 	else
   8113 		return rxq->rxq_descs[idx].wrx_len;
   8114 }
   8115 
   8116 #ifdef WM_DEBUG
   8117 static inline uint32_t
   8118 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
   8119 {
   8120 	struct wm_softc *sc = rxq->rxq_sc;
   8121 
   8122 	if (sc->sc_type == WM_T_82574)
   8123 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
   8124 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8125 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
   8126 	else
   8127 		return 0;
   8128 }
   8129 
   8130 static inline uint8_t
   8131 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
   8132 {
   8133 	struct wm_softc *sc = rxq->rxq_sc;
   8134 
   8135 	if (sc->sc_type == WM_T_82574)
   8136 		return EXTRXC_RSS_TYPE(rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
   8137 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8138 		return NQRXC_RSS_TYPE(rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
   8139 	else
   8140 		return 0;
   8141 }
   8142 #endif /* WM_DEBUG */
   8143 
   8144 static inline bool
   8145 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
   8146     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8147 {
   8148 
   8149 	if (sc->sc_type == WM_T_82574)
   8150 		return (status & ext_bit) != 0;
   8151 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8152 		return (status & nq_bit) != 0;
   8153 	else
   8154 		return (status & legacy_bit) != 0;
   8155 }
   8156 
   8157 static inline bool
   8158 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
   8159     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
   8160 {
   8161 
   8162 	if (sc->sc_type == WM_T_82574)
   8163 		return (error & ext_bit) != 0;
   8164 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   8165 		return (error & nq_bit) != 0;
   8166 	else
   8167 		return (error & legacy_bit) != 0;
   8168 }
   8169 
   8170 static inline bool
   8171 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
   8172 {
   8173 
   8174 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8175 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
   8176 		return true;
   8177 	else
   8178 		return false;
   8179 }
   8180 
   8181 static inline bool
   8182 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
   8183 {
   8184 	struct wm_softc *sc = rxq->rxq_sc;
   8185 
    8186 	/* XXX Is there a missing error bit for newqueue? */
   8187 	if (wm_rxdesc_is_set_error(sc, errors,
   8188 		WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE,
   8189 		EXTRXC_ERROR_CE|EXTRXC_ERROR_SE|EXTRXC_ERROR_SEQ|EXTRXC_ERROR_CXE|EXTRXC_ERROR_RXE,
   8190 		NQRXC_ERROR_RXE)) {
   8191 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE, EXTRXC_ERROR_SE, 0))
   8192 			log(LOG_WARNING, "%s: symbol error\n",
   8193 			    device_xname(sc->sc_dev));
   8194 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ, EXTRXC_ERROR_SEQ, 0))
   8195 			log(LOG_WARNING, "%s: receive sequence error\n",
   8196 			    device_xname(sc->sc_dev));
   8197 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE, EXTRXC_ERROR_CE, 0))
   8198 			log(LOG_WARNING, "%s: CRC error\n",
   8199 			    device_xname(sc->sc_dev));
   8200 		return true;
   8201 	}
   8202 
   8203 	return false;
   8204 }
   8205 
   8206 static inline bool
   8207 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
   8208 {
   8209 	struct wm_softc *sc = rxq->rxq_sc;
   8210 
   8211 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
   8212 		NQRXC_STATUS_DD)) {
   8213 		/* We have processed all of the receive descriptors. */
   8214 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
   8215 		return false;
   8216 	}
   8217 
   8218 	return true;
   8219 }
   8220 
   8221 static inline bool
   8222 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status, uint16_t vlantag,
   8223     struct mbuf *m)
   8224 {
   8225 	struct ifnet *ifp = &rxq->rxq_sc->sc_ethercom.ec_if;
   8226 
   8227 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
   8228 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
   8229 		VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), return false);
   8230 	}
   8231 
   8232 	return true;
   8233 }
   8234 
   8235 static inline void
   8236 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
   8237     uint32_t errors, struct mbuf *m)
   8238 {
   8239 	struct wm_softc *sc = rxq->rxq_sc;
   8240 
   8241 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
   8242 		if (wm_rxdesc_is_set_status(sc, status,
   8243 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
   8244 			WM_Q_EVCNT_INCR(rxq, rxipsum);
   8245 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   8246 			if (wm_rxdesc_is_set_error(sc, errors,
   8247 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
   8248 				m->m_pkthdr.csum_flags |=
   8249 					M_CSUM_IPv4_BAD;
   8250 		}
   8251 		if (wm_rxdesc_is_set_status(sc, status,
   8252 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
   8253 			/*
   8254 			 * Note: we don't know if this was TCP or UDP,
   8255 			 * so we just set both bits, and expect the
   8256 			 * upper layers to deal.
   8257 			 */
   8258 			WM_Q_EVCNT_INCR(rxq, rxtusum);
   8259 			m->m_pkthdr.csum_flags |=
   8260 				M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   8261 				M_CSUM_TCPv6 | M_CSUM_UDPv6;
   8262 			if (wm_rxdesc_is_set_error(sc, errors,
   8263 				WRX_ER_TCPE, EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
   8264 				m->m_pkthdr.csum_flags |=
   8265 					M_CSUM_TCP_UDP_BAD;
   8266 		}
   8267 	}
   8268 }
   8269 
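/*
 * Illustrative sketch, not driver code: because the hardware only reports
 * that an L4 checksum was checked, without distinguishing TCP from UDP,
 * wm_rxdesc_ensure_checksum() above sets both flag pairs and leaves the
 * decision to the protocol input path.  A consumer would typically test
 * the flag it cares about together with the _BAD bit, for example:
 */
#if 0
static bool
ex_tcp4_csum_ok(const struct mbuf *m)
{
	int flags = m->m_pkthdr.csum_flags;

	/* Verified by hardware iff checked and not flagged bad. */
	return (flags & M_CSUM_TCPv4) != 0 &&
	    (flags & M_CSUM_TCP_UDP_BAD) == 0;
}
#endif
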
   8270 /*
   8271  * wm_rxeof:
   8272  *
   8273  *	Helper; handle receive interrupts.
   8274  */
   8275 static void
   8276 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
   8277 {
   8278 	struct wm_softc *sc = rxq->rxq_sc;
   8279 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8280 	struct wm_rxsoft *rxs;
   8281 	struct mbuf *m;
   8282 	int i, len;
   8283 	int count = 0;
   8284 	uint32_t status, errors;
   8285 	uint16_t vlantag;
   8286 
   8287 	KASSERT(mutex_owned(rxq->rxq_lock));
   8288 
   8289 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   8290 		if (limit-- == 0) {
   8291 			rxq->rxq_ptr = i;
   8292 			break;
   8293 		}
   8294 
   8295 		rxs = &rxq->rxq_soft[i];
   8296 
   8297 		DPRINTF(WM_DEBUG_RX,
   8298 		    ("%s: RX: checking descriptor %d\n",
   8299 		    device_xname(sc->sc_dev), i));
    8300 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   8301 
   8302 		status = wm_rxdesc_get_status(rxq, i);
   8303 		errors = wm_rxdesc_get_errors(rxq, i);
   8304 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
   8305 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
   8306 #ifdef WM_DEBUG
   8307 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
   8308 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
   8309 #endif
   8310 
   8311 		if (!wm_rxdesc_dd(rxq, i, status)) {
   8312 			/*
    8313 			 * Update the receive pointer while holding
    8314 			 * rxq_lock, consistent with the counter increment.
   8315 			 */
   8316 			rxq->rxq_ptr = i;
   8317 			break;
   8318 		}
   8319 
   8320 		count++;
   8321 		if (__predict_false(rxq->rxq_discard)) {
   8322 			DPRINTF(WM_DEBUG_RX,
   8323 			    ("%s: RX: discarding contents of descriptor %d\n",
   8324 			    device_xname(sc->sc_dev), i));
   8325 			wm_init_rxdesc(rxq, i);
   8326 			if (wm_rxdesc_is_eop(rxq, status)) {
   8327 				/* Reset our state. */
   8328 				DPRINTF(WM_DEBUG_RX,
   8329 				    ("%s: RX: resetting rxdiscard -> 0\n",
   8330 				    device_xname(sc->sc_dev)));
   8331 				rxq->rxq_discard = 0;
   8332 			}
   8333 			continue;
   8334 		}
   8335 
   8336 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8337 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   8338 
   8339 		m = rxs->rxs_mbuf;
   8340 
   8341 		/*
   8342 		 * Add a new receive buffer to the ring, unless of
   8343 		 * course the length is zero. Treat the latter as a
   8344 		 * failed mapping.
   8345 		 */
   8346 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   8347 			/*
   8348 			 * Failed, throw away what we've done so
   8349 			 * far, and discard the rest of the packet.
   8350 			 */
   8351 			ifp->if_ierrors++;
   8352 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   8353 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   8354 			wm_init_rxdesc(rxq, i);
   8355 			if (!wm_rxdesc_is_eop(rxq, status))
   8356 				rxq->rxq_discard = 1;
   8357 			if (rxq->rxq_head != NULL)
   8358 				m_freem(rxq->rxq_head);
   8359 			WM_RXCHAIN_RESET(rxq);
   8360 			DPRINTF(WM_DEBUG_RX,
   8361 			    ("%s: RX: Rx buffer allocation failed, "
   8362 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   8363 			    rxq->rxq_discard ? " (discard)" : ""));
   8364 			continue;
   8365 		}
   8366 
   8367 		m->m_len = len;
   8368 		rxq->rxq_len += len;
   8369 		DPRINTF(WM_DEBUG_RX,
   8370 		    ("%s: RX: buffer at %p len %d\n",
   8371 		    device_xname(sc->sc_dev), m->m_data, len));
   8372 
   8373 		/* If this is not the end of the packet, keep looking. */
   8374 		if (!wm_rxdesc_is_eop(rxq, status)) {
   8375 			WM_RXCHAIN_LINK(rxq, m);
   8376 			DPRINTF(WM_DEBUG_RX,
   8377 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   8378 			    device_xname(sc->sc_dev), rxq->rxq_len));
   8379 			continue;
   8380 		}
   8381 
   8382 		/*
    8383 		 * Okay, we have the entire packet now.  The chip is
    8384 		 * configured to include the FCS except on I350 and I21[01]
    8385 		 * (not all chips can be configured to strip it), so we
    8386 		 * need to trim it.  We may need to adjust the length of
    8387 		 * the previous mbuf in the chain if the current mbuf is
    8388 		 * too short.
    8389 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    8390 		 * register is always set on I350, so we don't trim it.
   8391 		 */
   8392 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   8393 		    && (sc->sc_type != WM_T_I210)
   8394 		    && (sc->sc_type != WM_T_I211)) {
   8395 			if (m->m_len < ETHER_CRC_LEN) {
   8396 				rxq->rxq_tail->m_len
   8397 				    -= (ETHER_CRC_LEN - m->m_len);
   8398 				m->m_len = 0;
   8399 			} else
   8400 				m->m_len -= ETHER_CRC_LEN;
   8401 			len = rxq->rxq_len - ETHER_CRC_LEN;
   8402 		} else
   8403 			len = rxq->rxq_len;
   8404 
   8405 		WM_RXCHAIN_LINK(rxq, m);
   8406 
   8407 		*rxq->rxq_tailp = NULL;
   8408 		m = rxq->rxq_head;
   8409 
   8410 		WM_RXCHAIN_RESET(rxq);
   8411 
   8412 		DPRINTF(WM_DEBUG_RX,
   8413 		    ("%s: RX: have entire packet, len -> %d\n",
   8414 		    device_xname(sc->sc_dev), len));
   8415 
   8416 		/* If an error occurred, update stats and drop the packet. */
   8417 		if (wm_rxdesc_has_errors(rxq, errors)) {
   8418 			m_freem(m);
   8419 			continue;
   8420 		}
   8421 
   8422 		/* No errors.  Receive the packet. */
   8423 		m_set_rcvif(m, ifp);
   8424 		m->m_pkthdr.len = len;
   8425 		/*
    8426 		 * TODO:
    8427 		 * We should save the rsshash and rsstype in this mbuf.
   8428 		 */
   8429 		DPRINTF(WM_DEBUG_RX,
   8430 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
   8431 			device_xname(sc->sc_dev), rsstype, rsshash));
   8432 
   8433 		/*
   8434 		 * If VLANs are enabled, VLAN packets have been unwrapped
   8435 		 * for us.  Associate the tag with the packet.
   8436 		 */
   8437 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
   8438 			continue;
   8439 
   8440 		/* Set up checksum info for this packet. */
   8441 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
   8442 		/*
    8443 		 * Update the receive pointer while holding rxq_lock,
    8444 		 * consistent with the counter increment.
   8445 		 */
   8446 		rxq->rxq_ptr = i;
   8447 		rxq->rxq_packets++;
   8448 		rxq->rxq_bytes += len;
   8449 		mutex_exit(rxq->rxq_lock);
   8450 
   8451 		/* Pass it on. */
   8452 		if_percpuq_enqueue(sc->sc_ipq, m);
   8453 
   8454 		mutex_enter(rxq->rxq_lock);
   8455 
   8456 		if (rxq->rxq_stopping)
   8457 			break;
   8458 	}
   8459 
   8460 	if (count != 0)
   8461 		rnd_add_uint32(&sc->rnd_source, count);
   8462 
   8463 	DPRINTF(WM_DEBUG_RX,
   8464 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   8465 }
   8466 
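/*
 * Illustrative sketch, not driver code: the FCS trim in wm_rxeof() above
 * has to handle the case where the 4-byte CRC straddles the last two mbufs
 * of the chain.  Reduced to the two buffers involved ("tail" is the
 * previous mbuf, "m" the final one):
 */
#if 0
static void
ex_trim_fcs(struct mbuf *tail, struct mbuf *m)
{

	if (m->m_len < ETHER_CRC_LEN) {
		/* The CRC spills into the previous buffer; shorten both. */
		tail->m_len -= ETHER_CRC_LEN - m->m_len;
		m->m_len = 0;
	} else
		m->m_len -= ETHER_CRC_LEN;
}
#endif
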
   8467 /*
   8468  * wm_linkintr_gmii:
   8469  *
   8470  *	Helper; handle link interrupts for GMII.
   8471  */
   8472 static void
   8473 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   8474 {
   8475 
   8476 	KASSERT(WM_CORE_LOCKED(sc));
   8477 
   8478 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8479 		__func__));
   8480 
   8481 	if (icr & ICR_LSC) {
   8482 		uint32_t reg;
   8483 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   8484 
   8485 		if ((status & STATUS_LU) != 0) {
   8486 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8487 				device_xname(sc->sc_dev),
   8488 				(status & STATUS_FD) ? "FDX" : "HDX"));
   8489 		} else {
   8490 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8491 				device_xname(sc->sc_dev)));
   8492 		}
   8493 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   8494 			wm_gig_downshift_workaround_ich8lan(sc);
   8495 
   8496 		if ((sc->sc_type == WM_T_ICH8)
   8497 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   8498 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   8499 		}
   8500 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   8501 			device_xname(sc->sc_dev)));
   8502 		mii_pollstat(&sc->sc_mii);
   8503 		if (sc->sc_type == WM_T_82543) {
   8504 			int miistatus, active;
   8505 
   8506 			/*
   8507 			 * With 82543, we need to force speed and
   8508 			 * duplex on the MAC equal to what the PHY
   8509 			 * speed and duplex configuration is.
   8510 			 */
   8511 			miistatus = sc->sc_mii.mii_media_status;
   8512 
   8513 			if (miistatus & IFM_ACTIVE) {
   8514 				active = sc->sc_mii.mii_media_active;
   8515 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8516 				switch (IFM_SUBTYPE(active)) {
   8517 				case IFM_10_T:
   8518 					sc->sc_ctrl |= CTRL_SPEED_10;
   8519 					break;
   8520 				case IFM_100_TX:
   8521 					sc->sc_ctrl |= CTRL_SPEED_100;
   8522 					break;
   8523 				case IFM_1000_T:
   8524 					sc->sc_ctrl |= CTRL_SPEED_1000;
   8525 					break;
   8526 				default:
   8527 					/*
    8528 					 * Fiber?
    8529 					 * Should not enter here.
   8530 					 */
   8531 					printf("unknown media (%x)\n", active);
   8532 					break;
   8533 				}
   8534 				if (active & IFM_FDX)
   8535 					sc->sc_ctrl |= CTRL_FD;
   8536 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8537 			}
   8538 		} else if (sc->sc_type == WM_T_PCH) {
   8539 			wm_k1_gig_workaround_hv(sc,
   8540 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   8541 		}
   8542 
   8543 		if ((sc->sc_phytype == WMPHY_82578)
   8544 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   8545 			== IFM_1000_T)) {
   8546 
   8547 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   8548 				delay(200*1000); /* XXX too big */
   8549 
   8550 				/* Link stall fix for link up */
   8551 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8552 				    HV_MUX_DATA_CTRL,
   8553 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   8554 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   8555 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   8556 				    HV_MUX_DATA_CTRL,
   8557 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   8558 			}
   8559 		}
   8560 		/*
   8561 		 * I217 Packet Loss issue:
   8562 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
   8563 		 * on power up.
   8564 		 * Set the Beacon Duration for I217 to 8 usec
   8565 		 */
   8566 		if ((sc->sc_type == WM_T_PCH_LPT)
   8567 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8568 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
   8569 			reg &= ~FEXTNVM4_BEACON_DURATION;
   8570 			reg |= FEXTNVM4_BEACON_DURATION_8US;
   8571 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
   8572 		}
   8573 
   8574 		/* XXX Work-around I218 hang issue */
   8575 		/* e1000_k1_workaround_lpt_lp() */
   8576 
   8577 		if ((sc->sc_type == WM_T_PCH_LPT)
   8578 		    || (sc->sc_type == WM_T_PCH_SPT)) {
   8579 			/*
   8580 			 * Set platform power management values for Latency
   8581 			 * Tolerance Reporting (LTR)
   8582 			 */
   8583 			wm_platform_pm_pch_lpt(sc,
   8584 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
   8585 				    != 0));
   8586 		}
   8587 
   8588 		/* FEXTNVM6 K1-off workaround */
   8589 		if (sc->sc_type == WM_T_PCH_SPT) {
   8590 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
   8591 			if (CSR_READ(sc, WMREG_PCIEANACFG)
   8592 			    & FEXTNVM6_K1_OFF_ENABLE)
   8593 				reg |= FEXTNVM6_K1_OFF_ENABLE;
   8594 			else
   8595 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
   8596 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
   8597 		}
   8598 	} else if (icr & ICR_RXSEQ) {
    8599 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
   8600 			device_xname(sc->sc_dev)));
   8601 	}
   8602 }
   8603 
   8604 /*
   8605  * wm_linkintr_tbi:
   8606  *
   8607  *	Helper; handle link interrupts for TBI mode.
   8608  */
   8609 static void
   8610 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   8611 {
   8612 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8613 	uint32_t status;
   8614 
   8615 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8616 		__func__));
   8617 
   8618 	status = CSR_READ(sc, WMREG_STATUS);
   8619 	if (icr & ICR_LSC) {
   8620 		if (status & STATUS_LU) {
   8621 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   8622 			    device_xname(sc->sc_dev),
   8623 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   8624 			/*
   8625 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   8626 			 * so we should update sc->sc_ctrl
   8627 			 */
   8628 
   8629 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   8630 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8631 			sc->sc_fcrtl &= ~FCRTL_XONE;
   8632 			if (status & STATUS_FD)
   8633 				sc->sc_tctl |=
   8634 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8635 			else
   8636 				sc->sc_tctl |=
   8637 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8638 			if (sc->sc_ctrl & CTRL_TFCE)
   8639 				sc->sc_fcrtl |= FCRTL_XONE;
   8640 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8641 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   8642 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   8643 				      sc->sc_fcrtl);
   8644 			sc->sc_tbi_linkup = 1;
   8645 			if_link_state_change(ifp, LINK_STATE_UP);
   8646 		} else {
   8647 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8648 			    device_xname(sc->sc_dev)));
   8649 			sc->sc_tbi_linkup = 0;
   8650 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8651 		}
   8652 		/* Update LED */
   8653 		wm_tbi_serdes_set_linkled(sc);
   8654 	} else if (icr & ICR_RXSEQ) {
   8655 		DPRINTF(WM_DEBUG_LINK,
   8656 		    ("%s: LINK: Receive sequence error\n",
   8657 		    device_xname(sc->sc_dev)));
   8658 	}
   8659 }
   8660 
   8661 /*
   8662  * wm_linkintr_serdes:
   8663  *
    8664  *	Helper; handle link interrupts for SERDES mode.
   8665  */
   8666 static void
   8667 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   8668 {
   8669 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8670 	struct mii_data *mii = &sc->sc_mii;
   8671 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8672 	uint32_t pcs_adv, pcs_lpab, reg;
   8673 
   8674 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   8675 		__func__));
   8676 
   8677 	if (icr & ICR_LSC) {
   8678 		/* Check PCS */
   8679 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8680 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   8681 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
   8682 				device_xname(sc->sc_dev)));
   8683 			mii->mii_media_status |= IFM_ACTIVE;
   8684 			sc->sc_tbi_linkup = 1;
   8685 			if_link_state_change(ifp, LINK_STATE_UP);
   8686 		} else {
   8687 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   8688 				device_xname(sc->sc_dev)));
   8689 			mii->mii_media_status |= IFM_NONE;
   8690 			sc->sc_tbi_linkup = 0;
   8691 			if_link_state_change(ifp, LINK_STATE_DOWN);
   8692 			wm_tbi_serdes_set_linkled(sc);
   8693 			return;
   8694 		}
   8695 		mii->mii_media_active |= IFM_1000_SX;
   8696 		if ((reg & PCS_LSTS_FDX) != 0)
   8697 			mii->mii_media_active |= IFM_FDX;
   8698 		else
   8699 			mii->mii_media_active |= IFM_HDX;
   8700 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   8701 			/* Check flow */
   8702 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   8703 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   8704 				DPRINTF(WM_DEBUG_LINK,
   8705 				    ("XXX LINKOK but not ACOMP\n"));
   8706 				return;
   8707 			}
   8708 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   8709 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   8710 			DPRINTF(WM_DEBUG_LINK,
   8711 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   8712 			if ((pcs_adv & TXCW_SYM_PAUSE)
   8713 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   8714 				mii->mii_media_active |= IFM_FLOW
   8715 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   8716 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   8717 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8718 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   8719 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8720 				mii->mii_media_active |= IFM_FLOW
   8721 				    | IFM_ETH_TXPAUSE;
   8722 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   8723 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   8724 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   8725 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   8726 				mii->mii_media_active |= IFM_FLOW
   8727 				    | IFM_ETH_RXPAUSE;
   8728 		}
   8729 		/* Update LED */
   8730 		wm_tbi_serdes_set_linkled(sc);
   8731 	} else {
   8732 		DPRINTF(WM_DEBUG_LINK,
   8733 		    ("%s: LINK: Receive sequence error\n",
   8734 		    device_xname(sc->sc_dev)));
   8735 	}
   8736 }
   8737 
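/*
 * Illustrative sketch, not driver code: the pause resolution above follows
 * the usual 802.3 Annex 28B rules -- symmetric pause on both sides enables
 * flow control in both directions, while asymmetric advertisements can
 * yield TX-only or RX-only pause.  Condensed into one helper; all ex_*
 * names are hypothetical:
 */
#if 0
#define EX_PAUSE_TX	0x1	/* we may send PAUSE frames */
#define EX_PAUSE_RX	0x2	/* we honor received PAUSE frames */

static int
ex_resolve_pause(bool adv_sym, bool adv_asym, bool lp_sym, bool lp_asym)
{

	if (adv_sym && lp_sym)
		return EX_PAUSE_TX | EX_PAUSE_RX;
	if (!adv_sym && adv_asym && lp_sym && lp_asym)
		return EX_PAUSE_TX;
	if (adv_sym && adv_asym && !lp_sym && lp_asym)
		return EX_PAUSE_RX;
	return 0;
}
#endif
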
   8738 /*
   8739  * wm_linkintr:
   8740  *
   8741  *	Helper; handle link interrupts.
   8742  */
   8743 static void
   8744 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   8745 {
   8746 
   8747 	KASSERT(WM_CORE_LOCKED(sc));
   8748 
   8749 	if (sc->sc_flags & WM_F_HAS_MII)
   8750 		wm_linkintr_gmii(sc, icr);
   8751 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   8752 	    && (sc->sc_type >= WM_T_82575))
   8753 		wm_linkintr_serdes(sc, icr);
   8754 	else
   8755 		wm_linkintr_tbi(sc, icr);
   8756 }
   8757 
   8758 /*
   8759  * wm_intr_legacy:
   8760  *
   8761  *	Interrupt service routine for INTx and MSI.
   8762  */
   8763 static int
   8764 wm_intr_legacy(void *arg)
   8765 {
   8766 	struct wm_softc *sc = arg;
   8767 	struct wm_queue *wmq = &sc->sc_queue[0];
   8768 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8769 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8770 	uint32_t icr, rndval = 0;
   8771 	int handled = 0;
   8772 
   8773 	while (1 /* CONSTCOND */) {
   8774 		icr = CSR_READ(sc, WMREG_ICR);
   8775 		if ((icr & sc->sc_icr) == 0)
   8776 			break;
   8777 		if (handled == 0) {
   8778 			DPRINTF(WM_DEBUG_TX,
    8779 			    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   8780 		}
   8781 		if (rndval == 0)
   8782 			rndval = icr;
   8783 
   8784 		mutex_enter(rxq->rxq_lock);
   8785 
   8786 		if (rxq->rxq_stopping) {
   8787 			mutex_exit(rxq->rxq_lock);
   8788 			break;
   8789 		}
   8790 
   8791 		handled = 1;
   8792 
   8793 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8794 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   8795 			DPRINTF(WM_DEBUG_RX,
   8796 			    ("%s: RX: got Rx intr 0x%08x\n",
   8797 			    device_xname(sc->sc_dev),
   8798 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   8799 			WM_Q_EVCNT_INCR(rxq, rxintr);
   8800 		}
   8801 #endif
   8802 		wm_rxeof(rxq, UINT_MAX);
   8803 
   8804 		mutex_exit(rxq->rxq_lock);
   8805 		mutex_enter(txq->txq_lock);
   8806 
   8807 		if (txq->txq_stopping) {
   8808 			mutex_exit(txq->txq_lock);
   8809 			break;
   8810 		}
   8811 
   8812 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   8813 		if (icr & ICR_TXDW) {
   8814 			DPRINTF(WM_DEBUG_TX,
   8815 			    ("%s: TX: got TXDW interrupt\n",
   8816 			    device_xname(sc->sc_dev)));
   8817 			WM_Q_EVCNT_INCR(txq, txdw);
   8818 		}
   8819 #endif
   8820 		wm_txeof(sc, txq);
   8821 
   8822 		mutex_exit(txq->txq_lock);
   8823 		WM_CORE_LOCK(sc);
   8824 
   8825 		if (sc->sc_core_stopping) {
   8826 			WM_CORE_UNLOCK(sc);
   8827 			break;
   8828 		}
   8829 
   8830 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   8831 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8832 			wm_linkintr(sc, icr);
   8833 		}
   8834 
   8835 		WM_CORE_UNLOCK(sc);
   8836 
   8837 		if (icr & ICR_RXO) {
   8838 #if defined(WM_DEBUG)
   8839 			log(LOG_WARNING, "%s: Receive overrun\n",
   8840 			    device_xname(sc->sc_dev));
   8841 #endif /* defined(WM_DEBUG) */
   8842 		}
   8843 	}
   8844 
   8845 	rnd_add_uint32(&sc->rnd_source, rndval);
   8846 
   8847 	if (handled) {
   8848 		/* Try to get more packets going. */
   8849 		softint_schedule(wmq->wmq_si);
   8850 	}
   8851 
   8852 	return handled;
   8853 }
   8854 
   8855 static inline void
   8856 wm_txrxintr_disable(struct wm_queue *wmq)
   8857 {
   8858 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8859 
   8860 	if (sc->sc_type == WM_T_82574)
   8861 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8862 	else if (sc->sc_type == WM_T_82575)
   8863 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8864 	else
   8865 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
   8866 }
   8867 
   8868 static inline void
   8869 wm_txrxintr_enable(struct wm_queue *wmq)
   8870 {
   8871 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
   8872 
   8873 	wm_itrs_calculate(sc, wmq);
   8874 
   8875 	if (sc->sc_type == WM_T_82574)
   8876 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   8877 	else if (sc->sc_type == WM_T_82575)
   8878 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   8879 	else
   8880 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   8881 }
   8882 
   8883 static int
   8884 wm_txrxintr_msix(void *arg)
   8885 {
   8886 	struct wm_queue *wmq = arg;
   8887 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8888 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8889 	struct wm_softc *sc = txq->txq_sc;
   8890 	u_int limit = sc->sc_rx_intr_process_limit;
   8891 
   8892 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   8893 
   8894 	DPRINTF(WM_DEBUG_TX,
   8895 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   8896 
   8897 	wm_txrxintr_disable(wmq);
   8898 
   8899 	mutex_enter(txq->txq_lock);
   8900 
   8901 	if (txq->txq_stopping) {
   8902 		mutex_exit(txq->txq_lock);
   8903 		return 0;
   8904 	}
   8905 
   8906 	WM_Q_EVCNT_INCR(txq, txdw);
   8907 	wm_txeof(sc, txq);
    8908 	/* wm_deferred_start() is done in wm_handle_queue(). */
   8909 	mutex_exit(txq->txq_lock);
   8910 
   8911 	DPRINTF(WM_DEBUG_RX,
   8912 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   8913 	mutex_enter(rxq->rxq_lock);
   8914 
   8915 	if (rxq->rxq_stopping) {
   8916 		mutex_exit(rxq->rxq_lock);
   8917 		return 0;
   8918 	}
   8919 
   8920 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8921 	wm_rxeof(rxq, limit);
   8922 	mutex_exit(rxq->rxq_lock);
   8923 
   8924 	wm_itrs_writereg(sc, wmq);
   8925 
   8926 	softint_schedule(wmq->wmq_si);
   8927 
   8928 	return 1;
   8929 }
   8930 
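/*
 * Illustrative sketch, not driver code: wm_txrxintr_msix() above follows a
 * common "mask, poll a bounded amount, defer the rest" pattern -- the hard
 * interrupt masks its own vector, processes up to the interrupt-context
 * limit, and schedules a softint; wm_handle_queue() below then finishes
 * under the larger softint budget before unmasking.  In outline (every
 * ex_* name is a hypothetical placeholder):
 */
#if 0
static void
ex_queue_intr(struct ex_queue *q)
{

	ex_mask_vector(q);		/* keep this vector from re-firing */
	ex_process(q, EX_INTR_LIMIT);	/* bounded work in hard interrupt */
	ex_schedule_softint(q);		/* defer the remainder */
}

static void
ex_queue_softint(struct ex_queue *q)
{

	ex_process(q, EX_SOFT_LIMIT);	/* larger budget in softint */
	ex_unmask_vector(q);		/* re-arm only when caught up */
}
#endif
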
   8931 static void
   8932 wm_handle_queue(void *arg)
   8933 {
   8934 	struct wm_queue *wmq = arg;
   8935 	struct wm_txqueue *txq = &wmq->wmq_txq;
   8936 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   8937 	struct wm_softc *sc = txq->txq_sc;
   8938 	u_int limit = sc->sc_rx_process_limit;
   8939 
   8940 	mutex_enter(txq->txq_lock);
   8941 	if (txq->txq_stopping) {
   8942 		mutex_exit(txq->txq_lock);
   8943 		return;
   8944 	}
   8945 	wm_txeof(sc, txq);
   8946 	wm_deferred_start_locked(txq);
   8947 	mutex_exit(txq->txq_lock);
   8948 
   8949 	mutex_enter(rxq->rxq_lock);
   8950 	if (rxq->rxq_stopping) {
   8951 		mutex_exit(rxq->rxq_lock);
   8952 		return;
   8953 	}
   8954 	WM_Q_EVCNT_INCR(rxq, rxintr);
   8955 	wm_rxeof(rxq, limit);
   8956 	mutex_exit(rxq->rxq_lock);
   8957 
   8958 	wm_txrxintr_enable(wmq);
   8959 }
   8960 
   8961 /*
   8962  * wm_linkintr_msix:
   8963  *
   8964  *	Interrupt service routine for link status change for MSI-X.
   8965  */
   8966 static int
   8967 wm_linkintr_msix(void *arg)
   8968 {
   8969 	struct wm_softc *sc = arg;
   8970 	uint32_t reg;
   8971 
   8972 	DPRINTF(WM_DEBUG_LINK,
   8973 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   8974 
   8975 	reg = CSR_READ(sc, WMREG_ICR);
   8976 	WM_CORE_LOCK(sc);
   8977 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   8978 		goto out;
   8979 
   8980 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   8981 	wm_linkintr(sc, ICR_LSC);
   8982 
   8983 out:
   8984 	WM_CORE_UNLOCK(sc);
   8985 
   8986 	if (sc->sc_type == WM_T_82574)
   8987 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   8988 	else if (sc->sc_type == WM_T_82575)
   8989 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   8990 	else
   8991 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   8992 
   8993 	return 1;
   8994 }
   8995 
   8996 /*
   8997  * Media related.
   8998  * GMII, SGMII, TBI (and SERDES)
   8999  */
   9000 
   9001 /* Common */
   9002 
   9003 /*
   9004  * wm_tbi_serdes_set_linkled:
   9005  *
   9006  *	Update the link LED on TBI and SERDES devices.
   9007  */
   9008 static void
   9009 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   9010 {
   9011 
   9012 	if (sc->sc_tbi_linkup)
   9013 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   9014 	else
   9015 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   9016 
   9017 	/* 82540 or newer devices are active low */
   9018 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   9019 
   9020 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9021 }
   9022 
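/*
 * Illustrative sketch, not driver code: the conditional XOR above is a
 * compact way to invert a software-driven pin only on the chips where the
 * LED is active low.  The trick in isolation:
 */
#if 0
static inline uint32_t
ex_led_bit(bool linkup, bool active_low)
{
	uint32_t bit = linkup ? 1 : 0;

	/* XOR with 1 flips the sense; XOR with 0 leaves it alone. */
	return bit ^ (active_low ? 1 : 0);
}
#endif
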
   9023 /* GMII related */
   9024 
   9025 /*
   9026  * wm_gmii_reset:
   9027  *
   9028  *	Reset the PHY.
   9029  */
   9030 static void
   9031 wm_gmii_reset(struct wm_softc *sc)
   9032 {
   9033 	uint32_t reg;
   9034 	int rv;
   9035 
   9036 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9037 		device_xname(sc->sc_dev), __func__));
   9038 
   9039 	rv = sc->phy.acquire(sc);
   9040 	if (rv != 0) {
   9041 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9042 		    __func__);
   9043 		return;
   9044 	}
   9045 
   9046 	switch (sc->sc_type) {
   9047 	case WM_T_82542_2_0:
   9048 	case WM_T_82542_2_1:
   9049 		/* null */
   9050 		break;
   9051 	case WM_T_82543:
   9052 		/*
   9053 		 * With 82543, we need to force speed and duplex on the MAC
   9054 		 * equal to what the PHY speed and duplex configuration is.
   9055 		 * In addition, we need to perform a hardware reset on the PHY
   9056 		 * to take it out of reset.
   9057 		 */
   9058 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9059 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9060 
   9061 		/* The PHY reset pin is active-low. */
   9062 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9063 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   9064 		    CTRL_EXT_SWDPIN(4));
   9065 		reg |= CTRL_EXT_SWDPIO(4);
   9066 
   9067 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9068 		CSR_WRITE_FLUSH(sc);
   9069 		delay(10*1000);
   9070 
   9071 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   9072 		CSR_WRITE_FLUSH(sc);
   9073 		delay(150);
   9074 #if 0
   9075 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   9076 #endif
   9077 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   9078 		break;
   9079 	case WM_T_82544:	/* reset 10000us */
   9080 	case WM_T_82540:
   9081 	case WM_T_82545:
   9082 	case WM_T_82545_3:
   9083 	case WM_T_82546:
   9084 	case WM_T_82546_3:
   9085 	case WM_T_82541:
   9086 	case WM_T_82541_2:
   9087 	case WM_T_82547:
   9088 	case WM_T_82547_2:
   9089 	case WM_T_82571:	/* reset 100us */
   9090 	case WM_T_82572:
   9091 	case WM_T_82573:
   9092 	case WM_T_82574:
   9093 	case WM_T_82575:
   9094 	case WM_T_82576:
   9095 	case WM_T_82580:
   9096 	case WM_T_I350:
   9097 	case WM_T_I354:
   9098 	case WM_T_I210:
   9099 	case WM_T_I211:
   9100 	case WM_T_82583:
   9101 	case WM_T_80003:
   9102 		/* generic reset */
   9103 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9104 		CSR_WRITE_FLUSH(sc);
   9105 		delay(20000);
   9106 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9107 		CSR_WRITE_FLUSH(sc);
   9108 		delay(20000);
   9109 
   9110 		if ((sc->sc_type == WM_T_82541)
   9111 		    || (sc->sc_type == WM_T_82541_2)
   9112 		    || (sc->sc_type == WM_T_82547)
   9113 		    || (sc->sc_type == WM_T_82547_2)) {
    9114 			/* Workarounds for IGP are done in igp_reset(). */
   9115 			/* XXX add code to set LED after phy reset */
   9116 		}
   9117 		break;
   9118 	case WM_T_ICH8:
   9119 	case WM_T_ICH9:
   9120 	case WM_T_ICH10:
   9121 	case WM_T_PCH:
   9122 	case WM_T_PCH2:
   9123 	case WM_T_PCH_LPT:
   9124 	case WM_T_PCH_SPT:
   9125 		/* generic reset */
   9126 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   9127 		CSR_WRITE_FLUSH(sc);
   9128 		delay(100);
   9129 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9130 		CSR_WRITE_FLUSH(sc);
   9131 		delay(150);
   9132 		break;
   9133 	default:
   9134 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   9135 		    __func__);
   9136 		break;
   9137 	}
   9138 
   9139 	sc->phy.release(sc);
   9140 
   9141 	/* get_cfg_done */
   9142 	wm_get_cfg_done(sc);
   9143 
   9144 	/* extra setup */
   9145 	switch (sc->sc_type) {
   9146 	case WM_T_82542_2_0:
   9147 	case WM_T_82542_2_1:
   9148 	case WM_T_82543:
   9149 	case WM_T_82544:
   9150 	case WM_T_82540:
   9151 	case WM_T_82545:
   9152 	case WM_T_82545_3:
   9153 	case WM_T_82546:
   9154 	case WM_T_82546_3:
   9155 	case WM_T_82541_2:
   9156 	case WM_T_82547_2:
   9157 	case WM_T_82571:
   9158 	case WM_T_82572:
   9159 	case WM_T_82573:
   9160 	case WM_T_82574:
   9161 	case WM_T_82583:
   9162 	case WM_T_82575:
   9163 	case WM_T_82576:
   9164 	case WM_T_82580:
   9165 	case WM_T_I350:
   9166 	case WM_T_I354:
   9167 	case WM_T_I210:
   9168 	case WM_T_I211:
   9169 	case WM_T_80003:
   9170 		/* null */
   9171 		break;
   9172 	case WM_T_82541:
   9173 	case WM_T_82547:
    9174 		/* XXX Actively configure the LED after PHY reset. */
   9175 		break;
   9176 	case WM_T_ICH8:
   9177 	case WM_T_ICH9:
   9178 	case WM_T_ICH10:
   9179 	case WM_T_PCH:
   9180 	case WM_T_PCH2:
   9181 	case WM_T_PCH_LPT:
   9182 	case WM_T_PCH_SPT:
   9183 		wm_phy_post_reset(sc);
   9184 		break;
   9185 	default:
   9186 		panic("%s: unknown type\n", __func__);
   9187 		break;
   9188 	}
   9189 }
   9190 
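/*
 * Illustrative sketch, not driver code: most branches of wm_gmii_reset()
 * above implement the same recipe -- assert the PHY reset bit, flush the
 * posted write, hold the reset for a chip-specific time, then deassert and
 * wait for the PHY to come back.  The skeleton, with hypothetical ex_*
 * placeholders for the register operations:
 */
#if 0
static void
ex_phy_reset_pulse(uint32_t ctrl, unsigned hold_us, unsigned settle_us)
{

	ex_write_ctrl(ctrl | EX_PHY_RESET);	/* assert reset */
	ex_flush();				/* push the posted write out */
	ex_delay(hold_us);			/* chip-specific hold time */
	ex_write_ctrl(ctrl);			/* deassert reset */
	ex_flush();
	ex_delay(settle_us);			/* let the PHY come back up */
}
#endif
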
    9191 /*
    9192  * Set up sc_phytype and mii_{read|write}reg.
    9193  *
    9194  *  To identify the PHY type, the correct read/write function must be
    9195  * selected.  To select the correct read/write function, the PCI ID or MAC
    9196  * type is required without accessing PHY registers.
    9197  *
    9198  *  On the first call of this function, the PHY ID is not known yet.  Check
    9199  * the PCI ID or MAC type.  The list of PCI IDs may not be perfect, so the
    9200  * result might be incorrect.
    9201  *
    9202  *  In the second call, the PHY OUI and model are used to identify the PHY
    9203  * type.  It might not be perfect because of missing compare entries, but
    9204  * it would be better than the first call.
    9205  *
    9206  *  If the newly detected result differs from the previous assumption, a
    9207  * diagnostic message will be printed.
    9208  */
   9209 static void
   9210 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
   9211     uint16_t phy_model)
   9212 {
   9213 	device_t dev = sc->sc_dev;
   9214 	struct mii_data *mii = &sc->sc_mii;
   9215 	uint16_t new_phytype = WMPHY_UNKNOWN;
   9216 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
   9217 	mii_readreg_t new_readreg;
   9218 	mii_writereg_t new_writereg;
   9219 
   9220 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   9221 		device_xname(sc->sc_dev), __func__));
   9222 
   9223 	if (mii->mii_readreg == NULL) {
   9224 		/*
   9225 		 *  This is the first call of this function. For ICH and PCH
   9226 		 * variants, it's difficult to determine the PHY access method
   9227 		 * by sc_type, so use the PCI product ID for some devices.
   9228 		 */
   9229 
   9230 		switch (sc->sc_pcidevid) {
   9231 		case PCI_PRODUCT_INTEL_PCH_M_LM:
   9232 		case PCI_PRODUCT_INTEL_PCH_M_LC:
   9233 			/* 82577 */
   9234 			new_phytype = WMPHY_82577;
   9235 			break;
   9236 		case PCI_PRODUCT_INTEL_PCH_D_DM:
   9237 		case PCI_PRODUCT_INTEL_PCH_D_DC:
   9238 			/* 82578 */
   9239 			new_phytype = WMPHY_82578;
   9240 			break;
   9241 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   9242 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
   9243 			/* 82579 */
   9244 			new_phytype = WMPHY_82579;
   9245 			break;
   9246 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
   9247 		case PCI_PRODUCT_INTEL_82801I_BM:
   9248 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
   9249 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   9250 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   9251 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   9252 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   9253 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   9254 			/* ICH8, 9, 10 with 82567 */
   9255 			new_phytype = WMPHY_BM;
   9256 			break;
   9257 		default:
   9258 			break;
   9259 		}
   9260 	} else {
   9261 		/* It's not the first call. Use PHY OUI and model */
   9262 		switch (phy_oui) {
   9263 		case MII_OUI_ATHEROS: /* XXX ??? */
   9264 			switch (phy_model) {
   9265 			case 0x0004: /* XXX */
   9266 				new_phytype = WMPHY_82578;
   9267 				break;
   9268 			default:
   9269 				break;
   9270 			}
   9271 			break;
   9272 		case MII_OUI_xxMARVELL:
   9273 			switch (phy_model) {
   9274 			case MII_MODEL_xxMARVELL_I210:
   9275 				new_phytype = WMPHY_I210;
   9276 				break;
   9277 			case MII_MODEL_xxMARVELL_E1011:
   9278 			case MII_MODEL_xxMARVELL_E1000_3:
   9279 			case MII_MODEL_xxMARVELL_E1000_5:
   9280 			case MII_MODEL_xxMARVELL_E1112:
   9281 				new_phytype = WMPHY_M88;
   9282 				break;
   9283 			case MII_MODEL_xxMARVELL_E1149:
   9284 				new_phytype = WMPHY_BM;
   9285 				break;
   9286 			case MII_MODEL_xxMARVELL_E1111:
   9287 			case MII_MODEL_xxMARVELL_I347:
   9288 			case MII_MODEL_xxMARVELL_E1512:
   9289 			case MII_MODEL_xxMARVELL_E1340M:
   9290 			case MII_MODEL_xxMARVELL_E1543:
   9291 				new_phytype = WMPHY_M88;
   9292 				break;
   9293 			case MII_MODEL_xxMARVELL_I82563:
   9294 				new_phytype = WMPHY_GG82563;
   9295 				break;
   9296 			default:
   9297 				break;
   9298 			}
   9299 			break;
   9300 		case MII_OUI_INTEL:
   9301 			switch (phy_model) {
   9302 			case MII_MODEL_INTEL_I82577:
   9303 				new_phytype = WMPHY_82577;
   9304 				break;
   9305 			case MII_MODEL_INTEL_I82579:
   9306 				new_phytype = WMPHY_82579;
   9307 				break;
   9308 			case MII_MODEL_INTEL_I217:
   9309 				new_phytype = WMPHY_I217;
   9310 				break;
   9311 			case MII_MODEL_INTEL_I82580:
   9312 			case MII_MODEL_INTEL_I350:
   9313 				new_phytype = WMPHY_82580;
   9314 				break;
   9315 			default:
   9316 				break;
   9317 			}
   9318 			break;
   9319 		case MII_OUI_yyINTEL:
   9320 			switch (phy_model) {
   9321 			case MII_MODEL_yyINTEL_I82562G:
   9322 			case MII_MODEL_yyINTEL_I82562EM:
   9323 			case MII_MODEL_yyINTEL_I82562ET:
   9324 				new_phytype = WMPHY_IFE;
   9325 				break;
   9326 			case MII_MODEL_yyINTEL_IGP01E1000:
   9327 				new_phytype = WMPHY_IGP;
   9328 				break;
   9329 			case MII_MODEL_yyINTEL_I82566:
   9330 				new_phytype = WMPHY_IGP_3;
   9331 				break;
   9332 			default:
   9333 				break;
   9334 			}
   9335 			break;
   9336 		default:
   9337 			break;
   9338 		}
   9339 		if (new_phytype == WMPHY_UNKNOWN)
   9340 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
   9341 			    __func__);
   9342 
   9343 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9344 		    && (sc->sc_phytype != new_phytype)) {
    9345 			aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9346 			    "was incorrect. PHY type from PHY ID = %u\n",
   9347 			    sc->sc_phytype, new_phytype);
   9348 		}
   9349 	}
   9350 
   9351 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
   9352 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
   9353 		/* SGMII */
   9354 		new_readreg = wm_sgmii_readreg;
   9355 		new_writereg = wm_sgmii_writereg;
    9356 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   9357 		/* BM2 (phyaddr == 1) */
   9358 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9359 		    && (new_phytype != WMPHY_BM)
   9360 		    && (new_phytype != WMPHY_UNKNOWN))
   9361 			doubt_phytype = new_phytype;
   9362 		new_phytype = WMPHY_BM;
   9363 		new_readreg = wm_gmii_bm_readreg;
   9364 		new_writereg = wm_gmii_bm_writereg;
   9365 	} else if (sc->sc_type >= WM_T_PCH) {
   9366 		/* All PCH* use _hv_ */
   9367 		new_readreg = wm_gmii_hv_readreg;
   9368 		new_writereg = wm_gmii_hv_writereg;
   9369 	} else if (sc->sc_type >= WM_T_ICH8) {
   9370 		/* non-82567 ICH8, 9 and 10 */
   9371 		new_readreg = wm_gmii_i82544_readreg;
   9372 		new_writereg = wm_gmii_i82544_writereg;
   9373 	} else if (sc->sc_type >= WM_T_80003) {
   9374 		/* 80003 */
   9375 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9376 		    && (new_phytype != WMPHY_GG82563)
   9377 		    && (new_phytype != WMPHY_UNKNOWN))
   9378 			doubt_phytype = new_phytype;
   9379 		new_phytype = WMPHY_GG82563;
   9380 		new_readreg = wm_gmii_i80003_readreg;
   9381 		new_writereg = wm_gmii_i80003_writereg;
   9382 	} else if (sc->sc_type >= WM_T_I210) {
   9383 		/* I210 and I211 */
   9384 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
   9385 		    && (new_phytype != WMPHY_I210)
   9386 		    && (new_phytype != WMPHY_UNKNOWN))
   9387 			doubt_phytype = new_phytype;
   9388 		new_phytype = WMPHY_I210;
   9389 		new_readreg = wm_gmii_gs40g_readreg;
   9390 		new_writereg = wm_gmii_gs40g_writereg;
   9391 	} else if (sc->sc_type >= WM_T_82580) {
   9392 		/* 82580, I350 and I354 */
   9393 		new_readreg = wm_gmii_82580_readreg;
   9394 		new_writereg = wm_gmii_82580_writereg;
   9395 	} else if (sc->sc_type >= WM_T_82544) {
   9396 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
   9397 		new_readreg = wm_gmii_i82544_readreg;
   9398 		new_writereg = wm_gmii_i82544_writereg;
   9399 	} else {
   9400 		new_readreg = wm_gmii_i82543_readreg;
   9401 		new_writereg = wm_gmii_i82543_writereg;
   9402 	}
   9403 
   9404 	if (new_phytype == WMPHY_BM) {
   9405 		/* All BM use _bm_ */
   9406 		new_readreg = wm_gmii_bm_readreg;
   9407 		new_writereg = wm_gmii_bm_writereg;
   9408 	}
   9409 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   9410 		/* All PCH* use _hv_ */
   9411 		new_readreg = wm_gmii_hv_readreg;
   9412 		new_writereg = wm_gmii_hv_writereg;
   9413 	}
   9414 
    9415 	/* Diagnostic output */
   9416 	if (doubt_phytype != WMPHY_UNKNOWN)
   9417 		aprint_error_dev(dev, "Assumed new PHY type was "
   9418 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
   9419 		    new_phytype);
   9420 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
    9421 	    && (sc->sc_phytype != new_phytype))
    9422 		aprint_error_dev(dev, "Previously assumed PHY type(%u) "
    9423 		    "was incorrect. New PHY type = %u\n",
   9424 		    sc->sc_phytype, new_phytype);
   9425 
   9426 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
   9427 		aprint_error_dev(dev, "PHY type is still unknown.\n");
   9428 
   9429 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
   9430 		aprint_error_dev(dev, "Previously assumed PHY read/write "
   9431 		    "function was incorrect.\n");
   9432 
   9433 	/* Update now */
   9434 	sc->sc_phytype = new_phytype;
   9435 	mii->mii_readreg = new_readreg;
   9436 	mii->mii_writereg = new_writereg;
   9437 }
   9438 
   9439 /*
   9440  * wm_get_phy_id_82575:
   9441  *
   9442  * Return PHY ID. Return -1 if it failed.
   9443  */
   9444 static int
   9445 wm_get_phy_id_82575(struct wm_softc *sc)
   9446 {
   9447 	uint32_t reg;
   9448 	int phyid = -1;
   9449 
   9450 	/* XXX */
   9451 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   9452 		return -1;
   9453 
   9454 	if (wm_sgmii_uses_mdio(sc)) {
   9455 		switch (sc->sc_type) {
   9456 		case WM_T_82575:
   9457 		case WM_T_82576:
   9458 			reg = CSR_READ(sc, WMREG_MDIC);
   9459 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   9460 			break;
   9461 		case WM_T_82580:
   9462 		case WM_T_I350:
   9463 		case WM_T_I354:
   9464 		case WM_T_I210:
   9465 		case WM_T_I211:
   9466 			reg = CSR_READ(sc, WMREG_MDICNFG);
   9467 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   9468 			break;
   9469 		default:
   9470 			return -1;
   9471 		}
   9472 	}
   9473 
   9474 	return phyid;
   9475 }
   9476 
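/*
 * Illustrative sketch, not driver code: wm_get_phy_id_82575() above pulls
 * the PHY address out of MDIC/MDICNFG with the usual mask-then-shift field
 * extraction.  The generic form, for a register field described by a mask
 * and a shift:
 */
#if 0
static inline uint32_t
ex_get_field(uint32_t reg, uint32_t mask, unsigned shift)
{

	return (reg & mask) >> shift;
}
#endif
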
   9477 
   9478 /*
   9479  * wm_gmii_mediainit:
   9480  *
   9481  *	Initialize media for use on 1000BASE-T devices.
   9482  */
   9483 static void
   9484 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   9485 {
   9486 	device_t dev = sc->sc_dev;
   9487 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9488 	struct mii_data *mii = &sc->sc_mii;
   9489 	uint32_t reg;
   9490 
   9491 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9492 		device_xname(sc->sc_dev), __func__));
   9493 
   9494 	/* We have GMII. */
   9495 	sc->sc_flags |= WM_F_HAS_MII;
   9496 
   9497 	if (sc->sc_type == WM_T_80003)
    9498 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
   9499 	else
   9500 		sc->sc_tipg = TIPG_1000T_DFLT;
   9501 
   9502 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   9503 	if ((sc->sc_type == WM_T_82580)
   9504 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   9505 	    || (sc->sc_type == WM_T_I211)) {
   9506 		reg = CSR_READ(sc, WMREG_PHPM);
   9507 		reg &= ~PHPM_GO_LINK_D;
   9508 		CSR_WRITE(sc, WMREG_PHPM, reg);
   9509 	}
   9510 
   9511 	/*
   9512 	 * Let the chip set speed/duplex on its own based on
   9513 	 * signals from the PHY.
   9514 	 * XXXbouyer - I'm not sure this is right for the 80003,
   9515 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   9516 	 */
   9517 	sc->sc_ctrl |= CTRL_SLU;
   9518 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9519 
   9520 	/* Initialize our media structures and probe the GMII. */
   9521 	mii->mii_ifp = ifp;
   9522 
   9523 	mii->mii_statchg = wm_gmii_statchg;
   9524 
   9525 	/* get PHY control from SMBus to PCIe */
   9526 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   9527 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   9528 		wm_smbustopci(sc);
   9529 
   9530 	wm_gmii_reset(sc);
   9531 
   9532 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9533 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   9534 	    wm_gmii_mediastatus);
   9535 
   9536 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   9537 	    || (sc->sc_type == WM_T_82580)
   9538 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   9539 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   9540 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   9541 			/* Attach only one port */
   9542 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   9543 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9544 		} else {
   9545 			int i, id;
   9546 			uint32_t ctrl_ext;
   9547 
   9548 			id = wm_get_phy_id_82575(sc);
   9549 			if (id != -1) {
   9550 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   9551 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   9552 			}
   9553 			if ((id == -1)
   9554 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
    9555 				/* Power on the SGMII PHY if it is disabled. */
   9556 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9557 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    9558 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   9559 				CSR_WRITE_FLUSH(sc);
   9560 				delay(300*1000); /* XXX too long */
   9561 
    9562 				/* Try PHY addresses 1 through 7 */
   9563 				for (i = 1; i < 8; i++)
   9564 					mii_attach(sc->sc_dev, &sc->sc_mii,
   9565 					    0xffffffff, i, MII_OFFSET_ANY,
   9566 					    MIIF_DOPAUSE);
   9567 
   9568 				/* restore previous sfp cage power state */
   9569 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9570 			}
   9571 		}
   9572 	} else {
   9573 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9574 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9575 	}
   9576 
   9577 	/*
    9578 	 * If the MAC is a PCH2 or PCH_LPT and no MII PHY was detected,
    9579 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
   9580 	 */
   9581 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   9582 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   9583 		wm_set_mdio_slow_mode_hv(sc);
   9584 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9585 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9586 	}
   9587 
   9588 	/*
   9589 	 * (For ICH8 variants)
    9590 	 * If PHY detection failed, use the BM read/write functions and retry.
   9591 	 */
   9592 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9593 		/* If the probe failed, retry with the *_bm_* functions */
    9594 		aprint_verbose_dev(dev, "Assumed PHY access function "
    9595 		    "(type = %d) may be incorrect. Using BM and retrying.\n",
   9596 		    sc->sc_phytype);
   9597 		sc->sc_phytype = WMPHY_BM;
   9598 		mii->mii_readreg = wm_gmii_bm_readreg;
   9599 		mii->mii_writereg = wm_gmii_bm_writereg;
   9600 
   9601 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   9602 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   9603 	}
   9604 
   9605 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    9606 		/* No PHY was found */
   9607 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   9608 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   9609 		sc->sc_phytype = WMPHY_NONE;
   9610 	} else {
   9611 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
   9612 
   9613 		/*
    9614 		 * PHY found! Check the PHY type again with a second call
    9615 		 * to wm_gmii_setup_phytype().
   9616 		 */
   9617 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
   9618 		    child->mii_mpd_model);
   9619 
   9620 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   9621 	}
   9622 }
   9623 
   9624 /*
   9625  * wm_gmii_mediachange:	[ifmedia interface function]
   9626  *
   9627  *	Set hardware to newly-selected media on a 1000BASE-T device.
   9628  */
   9629 static int
   9630 wm_gmii_mediachange(struct ifnet *ifp)
   9631 {
   9632 	struct wm_softc *sc = ifp->if_softc;
   9633 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9634 	int rc;
   9635 
   9636 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   9637 		device_xname(sc->sc_dev), __func__));
   9638 	if ((ifp->if_flags & IFF_UP) == 0)
   9639 		return 0;
   9640 
   9641 	/* Disable D0 LPLU. */
   9642 	wm_lplu_d0_disable(sc);
   9643 
   9644 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   9645 	sc->sc_ctrl |= CTRL_SLU;
   9646 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9647 	    || (sc->sc_type > WM_T_82543)) {
   9648 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   9649 	} else {
   9650 		sc->sc_ctrl &= ~CTRL_ASDE;
   9651 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   9652 		if (ife->ifm_media & IFM_FDX)
   9653 			sc->sc_ctrl |= CTRL_FD;
   9654 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   9655 		case IFM_10_T:
   9656 			sc->sc_ctrl |= CTRL_SPEED_10;
   9657 			break;
   9658 		case IFM_100_TX:
   9659 			sc->sc_ctrl |= CTRL_SPEED_100;
   9660 			break;
   9661 		case IFM_1000_T:
   9662 			sc->sc_ctrl |= CTRL_SPEED_1000;
   9663 			break;
   9664 		default:
   9665 			panic("wm_gmii_mediachange: bad media 0x%x",
   9666 			    ife->ifm_media);
   9667 		}
   9668 	}
   9669 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9670 	CSR_WRITE_FLUSH(sc);
   9671 	if (sc->sc_type <= WM_T_82543)
   9672 		wm_gmii_reset(sc);
   9673 
   9674 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   9675 		return 0;
   9676 	return rc;
   9677 }
   9678 
   9679 /*
   9680  * wm_gmii_mediastatus:	[ifmedia interface function]
   9681  *
   9682  *	Get the current interface media status on a 1000BASE-T device.
   9683  */
   9684 static void
   9685 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9686 {
   9687 	struct wm_softc *sc = ifp->if_softc;
   9688 
   9689 	ether_mediastatus(ifp, ifmr);
   9690 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9691 	    | sc->sc_flowflags;
   9692 }
   9693 
   9694 #define	MDI_IO		CTRL_SWDPIN(2)
   9695 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   9696 #define	MDI_CLK		CTRL_SWDPIN(3)
   9697 
   9698 static void
   9699 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   9700 {
   9701 	uint32_t i, v;
   9702 
   9703 	v = CSR_READ(sc, WMREG_CTRL);
   9704 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9705 	v |= MDI_DIR | CTRL_SWDPIO(3);
   9706 
   9707 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   9708 		if (data & i)
   9709 			v |= MDI_IO;
   9710 		else
   9711 			v &= ~MDI_IO;
   9712 		CSR_WRITE(sc, WMREG_CTRL, v);
   9713 		CSR_WRITE_FLUSH(sc);
   9714 		delay(10);
   9715 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9716 		CSR_WRITE_FLUSH(sc);
   9717 		delay(10);
   9718 		CSR_WRITE(sc, WMREG_CTRL, v);
   9719 		CSR_WRITE_FLUSH(sc);
   9720 		delay(10);
   9721 	}
   9722 }
   9723 
   9724 static uint32_t
   9725 wm_i82543_mii_recvbits(struct wm_softc *sc)
   9726 {
   9727 	uint32_t v, i, data = 0;
   9728 
   9729 	v = CSR_READ(sc, WMREG_CTRL);
   9730 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   9731 	v |= CTRL_SWDPIO(3);
   9732 
   9733 	CSR_WRITE(sc, WMREG_CTRL, v);
   9734 	CSR_WRITE_FLUSH(sc);
   9735 	delay(10);
   9736 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9737 	CSR_WRITE_FLUSH(sc);
   9738 	delay(10);
   9739 	CSR_WRITE(sc, WMREG_CTRL, v);
   9740 	CSR_WRITE_FLUSH(sc);
   9741 	delay(10);
   9742 
   9743 	for (i = 0; i < 16; i++) {
   9744 		data <<= 1;
   9745 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9746 		CSR_WRITE_FLUSH(sc);
   9747 		delay(10);
   9748 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   9749 			data |= 1;
   9750 		CSR_WRITE(sc, WMREG_CTRL, v);
   9751 		CSR_WRITE_FLUSH(sc);
   9752 		delay(10);
   9753 	}
   9754 
   9755 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   9756 	CSR_WRITE_FLUSH(sc);
   9757 	delay(10);
   9758 	CSR_WRITE(sc, WMREG_CTRL, v);
   9759 	CSR_WRITE_FLUSH(sc);
   9760 	delay(10);
   9761 
   9762 	return data;
   9763 }
   9764 
   9765 #undef MDI_IO
   9766 #undef MDI_DIR
   9767 #undef MDI_CLK
   9768 
   9769 /*
   9770  * wm_gmii_i82543_readreg:	[mii interface function]
   9771  *
   9772  *	Read a PHY register on the GMII (i82543 version).
   9773  */
   9774 static int
   9775 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
   9776 {
   9777 	struct wm_softc *sc = device_private(dev);
   9778 	int rv;
   9779 
   9780 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9781 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   9782 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   9783 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   9784 
   9785 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   9786 	    device_xname(dev), phy, reg, rv));
   9787 
   9788 	return rv;
   9789 }
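
/*
 * Illustrative sketch (not compiled): how the 14-bit command sent by
 * wm_gmii_i82543_readreg() above decomposes into the IEEE 802.3
 * clause 22 frame fields.  The shifts mirror the expression above;
 * the helper name is made up for illustration only.
 */
#if 0
static uint32_t
wm_mii_c22_read_cmd(int phy, int reg)
{

	/* ST (start) | OP (read) | PHYAD (5 bits) | REGAD (5 bits) */
	return (MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
	    (phy << 5) | reg;
}
#endif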
   9790 
   9791 /*
   9792  * wm_gmii_i82543_writereg:	[mii interface function]
   9793  *
   9794  *	Write a PHY register on the GMII (i82543 version).
   9795  */
   9796 static void
   9797 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
   9798 {
   9799 	struct wm_softc *sc = device_private(dev);
   9800 
   9801 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   9802 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   9803 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   9804 	    (MII_COMMAND_START << 30), 32);
   9805 }
   9806 
   9807 /*
   9808  * wm_gmii_mdic_readreg:	[mii interface function]
   9809  *
   9810  *	Read a PHY register on the GMII.
   9811  */
   9812 static int
   9813 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
   9814 {
   9815 	struct wm_softc *sc = device_private(dev);
   9816 	uint32_t mdic = 0;
   9817 	int i, rv;
   9818 
   9819 	if (reg > MII_ADDRMASK) {
   9820 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9821 		    __func__, sc->sc_phytype, reg);
   9822 		reg &= MII_ADDRMASK;
   9823 	}
   9824 
   9825 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   9826 	    MDIC_REGADD(reg));
   9827 
   9828 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9829 		mdic = CSR_READ(sc, WMREG_MDIC);
   9830 		if (mdic & MDIC_READY)
   9831 			break;
   9832 		delay(50);
   9833 	}
   9834 
   9835 	if ((mdic & MDIC_READY) == 0) {
   9836 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   9837 		    device_xname(dev), phy, reg);
   9838 		rv = 0;
   9839 	} else if (mdic & MDIC_E) {
   9840 #if 0 /* This is normal if no PHY is present. */
   9841 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   9842 		    device_xname(dev), phy, reg);
   9843 #endif
   9844 		rv = 0;
   9845 	} else {
   9846 		rv = MDIC_DATA(mdic);
   9847 		if (rv == 0xffff)
   9848 			rv = 0;
   9849 	}
   9850 
   9851 	return rv;
   9852 }
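
/*
 * Illustrative sketch (not compiled): the MDIC completion poll shared
 * by the read and write paths.  MDIC_READY signals completion and
 * MDIC_E an error; the constants are the ones used above, while the
 * helper name and the ETIMEDOUT return value are assumptions made for
 * illustration.
 */
#if 0
static int
wm_mdic_wait_ready(struct wm_softc *sc, uint32_t *mdicp)
{
	int i;

	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
		*mdicp = CSR_READ(sc, WMREG_MDIC);
		if ((*mdicp & MDIC_READY) != 0)
			return 0;
		delay(50);
	}

	return ETIMEDOUT;	/* the callers log and return 0 data */
}
#endif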
   9853 
   9854 /*
   9855  * wm_gmii_mdic_writereg:	[mii interface function]
   9856  *
   9857  *	Write a PHY register on the GMII.
   9858  */
   9859 static void
   9860 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
   9861 {
   9862 	struct wm_softc *sc = device_private(dev);
   9863 	uint32_t mdic = 0;
   9864 	int i;
   9865 
   9866 	if (reg > MII_ADDRMASK) {
   9867 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   9868 		    __func__, sc->sc_phytype, reg);
   9869 		reg &= MII_ADDRMASK;
   9870 	}
   9871 
   9872 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   9873 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   9874 
   9875 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   9876 		mdic = CSR_READ(sc, WMREG_MDIC);
   9877 		if (mdic & MDIC_READY)
   9878 			break;
   9879 		delay(50);
   9880 	}
   9881 
   9882 	if ((mdic & MDIC_READY) == 0)
   9883 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   9884 		    device_xname(dev), phy, reg);
   9885 	else if (mdic & MDIC_E)
   9886 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   9887 		    device_xname(dev), phy, reg);
   9888 }
   9889 
   9890 /*
   9891  * wm_gmii_i82544_readreg:	[mii interface function]
   9892  *
   9893  *	Read a PHY register on the GMII.
   9894  */
   9895 static int
   9896 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
   9897 {
   9898 	struct wm_softc *sc = device_private(dev);
   9899 	int rv;
   9900 
   9901 	if (sc->phy.acquire(sc)) {
   9902 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9903 		return 0;
   9904 	}
   9905 
   9906 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9907 		switch (sc->sc_phytype) {
   9908 		case WMPHY_IGP:
   9909 		case WMPHY_IGP_2:
   9910 		case WMPHY_IGP_3:
   9911 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9912 			break;
   9913 		default:
   9914 #ifdef WM_DEBUG
   9915 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9916 			    __func__, sc->sc_phytype, reg);
   9917 #endif
   9918 			break;
   9919 		}
   9920 	}
   9921 
   9922 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   9923 	sc->phy.release(sc);
   9924 
   9925 	return rv;
   9926 }
   9927 
   9928 /*
   9929  * wm_gmii_i82544_writereg:	[mii interface function]
   9930  *
   9931  *	Write a PHY register on the GMII.
   9932  */
   9933 static void
   9934 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
   9935 {
   9936 	struct wm_softc *sc = device_private(dev);
   9937 
   9938 	if (sc->phy.acquire(sc)) {
   9939 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9940 		return;
   9941 	}
   9942 
   9943 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   9944 		switch (sc->sc_phytype) {
   9945 		case WMPHY_IGP:
   9946 		case WMPHY_IGP_2:
   9947 		case WMPHY_IGP_3:
   9948 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
   9949 			break;
   9950 		default:
   9951 #ifdef WM_DEBUG
    9952 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
   9953 			    __func__, sc->sc_phytype, reg);
   9954 #endif
   9955 			break;
   9956 		}
   9957 	}
   9958 
   9959 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   9960 	sc->phy.release(sc);
   9961 }
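
/*
 * Illustrative sketch (not compiled): the register number encoding
 * consumed by the two functions above on IGP PHYs.  "reg" carries the
 * page in its upper bits and the in-page offset in its low five bits;
 * the whole value is written to MII_IGPHY_PAGE_SELECT and only the
 * low bits select the register itself.  The page and offset values
 * are assumptions for illustration.
 */
#if 0
	int reg = (2 << 5) | 0x11;	/* page 2, offset 0x11 */

	wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT, reg);
	val = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
#endif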
   9962 
   9963 /*
   9964  * wm_gmii_i80003_readreg:	[mii interface function]
   9965  *
    9966  *	Read a PHY register on the Kumeran interface (80003).
    9967  * This could be handled by the PHY layer if we didn't have to lock the
    9968  * resource ...
   9969  */
   9970 static int
   9971 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
   9972 {
   9973 	struct wm_softc *sc = device_private(dev);
   9974 	int page_select, temp;
   9975 	int rv;
   9976 
    9977 	if (phy != 1) /* only one PHY on the Kumeran bus */
   9978 		return 0;
   9979 
   9980 	if (sc->phy.acquire(sc)) {
   9981 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   9982 		return 0;
   9983 	}
   9984 
   9985 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   9986 		page_select = GG82563_PHY_PAGE_SELECT;
   9987 	else {
   9988 		/*
   9989 		 * Use Alternative Page Select register to access registers
   9990 		 * 30 and 31.
   9991 		 */
   9992 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   9993 	}
   9994 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   9995 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   9996 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   9997 		/*
    9998 		 * Wait another 200us to work around a bug in the ready
    9999 		 * bit of the MDIC register.
   10000 		 */
   10001 		delay(200);
   10002 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10003 			device_printf(dev, "%s failed\n", __func__);
   10004 			rv = 0; /* XXX */
   10005 			goto out;
   10006 		}
   10007 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10008 		delay(200);
   10009 	} else
   10010 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10011 
   10012 out:
   10013 	sc->phy.release(sc);
   10014 	return rv;
   10015 }
   10016 
   10017 /*
   10018  * wm_gmii_i80003_writereg:	[mii interface function]
   10019  *
    10020  *	Write a PHY register on the Kumeran interface (80003).
    10021  * This could be handled by the PHY layer if we didn't have to lock the
    10022  * resource ...
   10023  */
   10024 static void
   10025 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
   10026 {
   10027 	struct wm_softc *sc = device_private(dev);
   10028 	int page_select, temp;
   10029 
    10030 	if (phy != 1) /* only one PHY on the Kumeran bus */
   10031 		return;
   10032 
   10033 	if (sc->phy.acquire(sc)) {
   10034 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10035 		return;
   10036 	}
   10037 
   10038 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
   10039 		page_select = GG82563_PHY_PAGE_SELECT;
   10040 	else {
   10041 		/*
   10042 		 * Use Alternative Page Select register to access registers
   10043 		 * 30 and 31.
   10044 		 */
   10045 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
   10046 	}
   10047 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
   10048 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
   10049 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
   10050 		/*
    10051 		 * Wait another 200us to work around a bug in the ready
    10052 		 * bit of the MDIC register.
   10053 		 */
   10054 		delay(200);
   10055 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
   10056 			device_printf(dev, "%s failed\n", __func__);
   10057 			goto out;
   10058 		}
   10059 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10060 		delay(200);
   10061 	} else
   10062 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10063 
   10064 out:
   10065 	sc->phy.release(sc);
   10066 }
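
/*
 * Illustrative sketch (not compiled): GG82563 register number
 * encoding used by the two functions above.  The page lives above
 * GG82563_PAGE_SHIFT; in-page registers 30 and 31 have to go through
 * the alternative page select register because the ordinary one is
 * itself shadowed in that range.  The page number is an assumption
 * for illustration.
 */
#if 0
	int reg = (193 << GG82563_PAGE_SHIFT) | 30;	/* page 193, reg 30 */
	int page_select = ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
	    ? GG82563_PHY_PAGE_SELECT : GG82563_PHY_PAGE_SELECT_ALT;
#endif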
   10067 
   10068 /*
   10069  * wm_gmii_bm_readreg:	[mii interface function]
   10070  *
    10071  *	Read a PHY register on the BM PHY.
    10072  * This could be handled by the PHY layer if we didn't have to lock the
    10073  * resource ...
   10074  */
   10075 static int
   10076 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
   10077 {
   10078 	struct wm_softc *sc = device_private(dev);
   10079 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10080 	uint16_t val;
   10081 	int rv;
   10082 
   10083 	if (sc->phy.acquire(sc)) {
   10084 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10085 		return 0;
   10086 	}
   10087 
   10088 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10089 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10090 		    || (reg == 31)) ? 1 : phy;
   10091 	/* Page 800 works differently than the rest so it has its own func */
   10092 	if (page == BM_WUC_PAGE) {
   10093 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10094 		rv = val;
   10095 		goto release;
   10096 	}
   10097 
   10098 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10099 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10100 		    && (sc->sc_type != WM_T_82583))
   10101 			wm_gmii_mdic_writereg(dev, phy,
   10102 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10103 		else
   10104 			wm_gmii_mdic_writereg(dev, phy,
   10105 			    BME1000_PHY_PAGE_SELECT, page);
   10106 	}
   10107 
   10108 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
   10109 
   10110 release:
   10111 	sc->phy.release(sc);
   10112 	return rv;
   10113 }
   10114 
   10115 /*
   10116  * wm_gmii_bm_writereg:	[mii interface function]
   10117  *
    10118  *	Write a PHY register on the BM PHY.
    10119  * This could be handled by the PHY layer if we didn't have to lock the
    10120  * resource ...
   10121  */
   10122 static void
   10123 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
   10124 {
   10125 	struct wm_softc *sc = device_private(dev);
   10126 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
   10127 
   10128 	if (sc->phy.acquire(sc)) {
   10129 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10130 		return;
   10131 	}
   10132 
   10133 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
   10134 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
   10135 		    || (reg == 31)) ? 1 : phy;
   10136 	/* Page 800 works differently than the rest so it has its own func */
   10137 	if (page == BM_WUC_PAGE) {
   10138 		uint16_t tmp;
   10139 
   10140 		tmp = val;
   10141 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10142 		goto release;
   10143 	}
   10144 
   10145 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   10146 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
   10147 		    && (sc->sc_type != WM_T_82583))
   10148 			wm_gmii_mdic_writereg(dev, phy,
   10149 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
   10150 		else
   10151 			wm_gmii_mdic_writereg(dev, phy,
   10152 			    BME1000_PHY_PAGE_SELECT, page);
   10153 	}
   10154 
   10155 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
   10156 
   10157 release:
   10158 	sc->phy.release(sc);
   10159 }
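
/*
 * Illustrative sketch (not compiled): BM register number encoding
 * used by the two functions above.  BME1000_PAGE_SHIFT splits "reg"
 * into a page and an in-page offset; pages from 768 up (and a couple
 * of special registers) are only reachable through PHY address 1.
 * The page and offset values are assumptions for illustration.
 */
#if 0
	int reg = (769 << BME1000_PAGE_SHIFT) | 17;	/* page 769, reg 17 */
	uint16_t page = reg >> BME1000_PAGE_SHIFT;	/* -> 769 */
	int offset = reg & MII_ADDRMASK;		/* -> 17 */
#endif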
   10160 
   10161 static void
    10162 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, uint16_t *val, int rd)
   10163 {
   10164 	struct wm_softc *sc = device_private(dev);
   10165 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   10166 	uint16_t wuce, reg;
   10167 
   10168 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10169 		device_xname(dev), __func__));
   10170 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   10171 	if (sc->sc_type == WM_T_PCH) {
    10172 		/* XXX The e1000 driver does nothing here... why? */
   10173 	}
   10174 
   10175 	/*
   10176 	 * 1) Enable PHY wakeup register first.
   10177 	 * See e1000_enable_phy_wakeup_reg_access_bm().
   10178 	 */
   10179 
   10180 	/* Set page 769 */
   10181 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10182 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10183 
   10184 	/* Read WUCE and save it */
   10185 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
   10186 
   10187 	reg = wuce | BM_WUC_ENABLE_BIT;
   10188 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
   10189 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
   10190 
   10191 	/* Select page 800 */
   10192 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10193 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   10194 
   10195 	/*
   10196 	 * 2) Access PHY wakeup register.
   10197 	 * See e1000_access_phy_wakeup_reg_bm.
   10198 	 */
   10199 
   10200 	/* Write page 800 */
   10201 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   10202 
   10203 	if (rd)
   10204 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
   10205 	else
   10206 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
   10207 
   10208 	/*
   10209 	 * 3) Disable PHY wakeup register.
   10210 	 * See e1000_disable_phy_wakeup_reg_access_bm().
   10211 	 */
   10212 	/* Set page 769 */
   10213 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10214 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   10215 
   10216 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
   10217 }
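
/*
 * Illustrative sketch (not compiled): how a caller reaches a register
 * on the wakeup page through the helper above.  Registers whose page
 * decodes to BM_WUC_PAGE are dispatched here by the BM and HV access
 * functions; the register number and the bit below are hypothetical.
 */
#if 0
	uint16_t wuc;
	int reg = BM_PHY_REG(BM_WUC_PAGE, 1);		/* hypothetical */

	wm_access_phy_wakeup_reg_bm(dev, reg, &wuc, 1);	/* read */
	wuc |= __BIT(0);				/* hypothetical bit */
	wm_access_phy_wakeup_reg_bm(dev, reg, &wuc, 0);	/* write back */
#endif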
   10218 
   10219 /*
   10220  * wm_gmii_hv_readreg:	[mii interface function]
   10221  *
    10222  *	Read a PHY register on the HV (PCH family) PHY.
    10223  * This could be handled by the PHY layer if we didn't have to lock the
    10224  * resource ...
   10225  */
   10226 static int
   10227 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
   10228 {
   10229 	struct wm_softc *sc = device_private(dev);
   10230 	int rv;
   10231 
   10232 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10233 		device_xname(dev), __func__));
   10234 	if (sc->phy.acquire(sc)) {
   10235 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10236 		return 0;
   10237 	}
   10238 
   10239 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
   10240 	sc->phy.release(sc);
   10241 	return rv;
   10242 }
   10243 
   10244 static int
   10245 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
   10246 {
   10247 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10248 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10249 	uint16_t val;
   10250 	int rv;
   10251 
   10252 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10253 
   10254 	/* Page 800 works differently than the rest so it has its own func */
   10255 	if (page == BM_WUC_PAGE) {
   10256 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
   10257 		return val;
   10258 	}
   10259 
   10260 	/*
    10261 	 * Pages lower than 768 work differently than the rest, so they
    10262 	 * would need their own function (not implemented).
   10263 	 */
   10264 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10265 		printf("gmii_hv_readreg!!!\n");
   10266 		return 0;
   10267 	}
   10268 
   10269 	/*
   10270 	 * XXX I21[789] documents say that the SMBus Address register is at
   10271 	 * PHY address 01, Page 0 (not 768), Register 26.
   10272 	 */
   10273 	if (page == HV_INTC_FC_PAGE_START)
   10274 		page = 0;
   10275 
   10276 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10277 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10278 		    page << BME1000_PAGE_SHIFT);
   10279 	}
   10280 
   10281 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
   10282 	return rv;
   10283 }
   10284 
   10285 /*
   10286  * wm_gmii_hv_writereg:	[mii interface function]
   10287  *
    10288  *	Write a PHY register on the HV (PCH family) PHY.
    10289  * This could be handled by the PHY layer if we didn't have to lock the
    10290  * resource ...
   10291  */
   10292 static void
   10293 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
   10294 {
   10295 	struct wm_softc *sc = device_private(dev);
   10296 
   10297 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
   10298 		device_xname(dev), __func__));
   10299 
   10300 	if (sc->phy.acquire(sc)) {
   10301 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10302 		return;
   10303 	}
   10304 
   10305 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
   10306 	sc->phy.release(sc);
   10307 }
   10308 
   10309 static void
   10310 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
   10311 {
   10312 	struct wm_softc *sc = device_private(dev);
   10313 	uint16_t page = BM_PHY_REG_PAGE(reg);
   10314 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   10315 
   10316 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
   10317 
   10318 	/* Page 800 works differently than the rest so it has its own func */
   10319 	if (page == BM_WUC_PAGE) {
   10320 		uint16_t tmp;
   10321 
   10322 		tmp = val;
   10323 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
   10324 		return;
   10325 	}
   10326 
   10327 	/*
    10328 	 * Pages lower than 768 work differently than the rest, so they
    10329 	 * would need their own function (not implemented).
   10330 	 */
   10331 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   10332 		printf("gmii_hv_writereg!!!\n");
   10333 		return;
   10334 	}
   10335 
   10336 	{
   10337 		/*
   10338 		 * XXX I21[789] documents say that the SMBus Address register
   10339 		 * is at PHY address 01, Page 0 (not 768), Register 26.
   10340 		 */
   10341 		if (page == HV_INTC_FC_PAGE_START)
   10342 			page = 0;
   10343 
   10344 		/*
   10345 		 * XXX Workaround MDIO accesses being disabled after entering
   10346 		 * IEEE Power Down (whenever bit 11 of the PHY control
   10347 		 * register is set)
   10348 		 */
   10349 		if (sc->sc_phytype == WMPHY_82578) {
   10350 			struct mii_softc *child;
   10351 
   10352 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   10353 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
   10354 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
   10355 			    && ((val & (1 << 11)) != 0)) {
   10356 				printf("XXX need workaround\n");
   10357 			}
   10358 		}
   10359 
   10360 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   10361 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
   10362 			    page << BME1000_PAGE_SHIFT);
   10363 		}
   10364 	}
   10365 
   10366 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
   10367 }
   10368 
   10369 /*
   10370  * wm_gmii_82580_readreg:	[mii interface function]
   10371  *
   10372  *	Read a PHY register on the 82580 and I350.
   10373  * This could be handled by the PHY layer if we didn't have to lock the
    10374  * resource ...
   10375  */
   10376 static int
   10377 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
   10378 {
   10379 	struct wm_softc *sc = device_private(dev);
   10380 	int rv;
   10381 
   10382 	if (sc->phy.acquire(sc) != 0) {
   10383 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10384 		return 0;
   10385 	}
   10386 
   10387 #ifdef DIAGNOSTIC
   10388 	if (reg > MII_ADDRMASK) {
   10389 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10390 		    __func__, sc->sc_phytype, reg);
   10391 		reg &= MII_ADDRMASK;
   10392 	}
   10393 #endif
   10394 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
   10395 
   10396 	sc->phy.release(sc);
   10397 	return rv;
   10398 }
   10399 
   10400 /*
   10401  * wm_gmii_82580_writereg:	[mii interface function]
   10402  *
   10403  *	Write a PHY register on the 82580 and I350.
   10404  * This could be handled by the PHY layer if we didn't have to lock the
    10405  * resource ...
   10406  */
   10407 static void
   10408 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
   10409 {
   10410 	struct wm_softc *sc = device_private(dev);
   10411 
   10412 	if (sc->phy.acquire(sc) != 0) {
   10413 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10414 		return;
   10415 	}
   10416 
   10417 #ifdef DIAGNOSTIC
   10418 	if (reg > MII_ADDRMASK) {
   10419 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
   10420 		    __func__, sc->sc_phytype, reg);
   10421 		reg &= MII_ADDRMASK;
   10422 	}
   10423 #endif
   10424 	wm_gmii_mdic_writereg(dev, phy, reg, val);
   10425 
   10426 	sc->phy.release(sc);
   10427 }
   10428 
   10429 /*
   10430  * wm_gmii_gs40g_readreg:	[mii interface function]
   10431  *
    10432  *	Read a PHY register on the I210 and I211.
    10433  * This could be handled by the PHY layer if we didn't have to lock the
    10434  * resource ...
   10435  */
   10436 static int
   10437 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
   10438 {
   10439 	struct wm_softc *sc = device_private(dev);
   10440 	int page, offset;
   10441 	int rv;
   10442 
   10443 	/* Acquire semaphore */
   10444 	if (sc->phy.acquire(sc)) {
   10445 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10446 		return 0;
   10447 	}
   10448 
   10449 	/* Page select */
   10450 	page = reg >> GS40G_PAGE_SHIFT;
   10451 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10452 
   10453 	/* Read reg */
   10454 	offset = reg & GS40G_OFFSET_MASK;
   10455 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
   10456 
   10457 	sc->phy.release(sc);
   10458 	return rv;
   10459 }
   10460 
   10461 /*
   10462  * wm_gmii_gs40g_writereg:	[mii interface function]
   10463  *
   10464  *	Write a PHY register on the I210 and I211.
   10465  * This could be handled by the PHY layer if we didn't have to lock the
    10466  * resource ...
   10467  */
   10468 static void
   10469 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
   10470 {
   10471 	struct wm_softc *sc = device_private(dev);
   10472 	int page, offset;
   10473 
   10474 	/* Acquire semaphore */
   10475 	if (sc->phy.acquire(sc)) {
   10476 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10477 		return;
   10478 	}
   10479 
   10480 	/* Page select */
   10481 	page = reg >> GS40G_PAGE_SHIFT;
   10482 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
   10483 
   10484 	/* Write reg */
   10485 	offset = reg & GS40G_OFFSET_MASK;
   10486 	wm_gmii_mdic_writereg(dev, phy, offset, val);
   10487 
   10488 	/* Release semaphore */
   10489 	sc->phy.release(sc);
   10490 }
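
/*
 * Illustrative sketch (not compiled): GS40G register number encoding
 * used by the two functions above.  The page and offset values are
 * assumptions for illustration.
 */
#if 0
	int reg = (2 << GS40G_PAGE_SHIFT) | 21;	/* page 2, offset 21 */
	int page = reg >> GS40G_PAGE_SHIFT;	/* -> 2 */
	int offset = reg & GS40G_OFFSET_MASK;	/* -> 21 */
#endif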
   10491 
   10492 /*
   10493  * wm_gmii_statchg:	[mii interface function]
   10494  *
   10495  *	Callback from MII layer when media changes.
   10496  */
   10497 static void
   10498 wm_gmii_statchg(struct ifnet *ifp)
   10499 {
   10500 	struct wm_softc *sc = ifp->if_softc;
   10501 	struct mii_data *mii = &sc->sc_mii;
   10502 
   10503 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   10504 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10505 	sc->sc_fcrtl &= ~FCRTL_XONE;
   10506 
   10507 	/*
   10508 	 * Get flow control negotiation result.
   10509 	 */
   10510 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   10511 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   10512 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   10513 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   10514 	}
   10515 
   10516 	if (sc->sc_flowflags & IFM_FLOW) {
   10517 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   10518 			sc->sc_ctrl |= CTRL_TFCE;
   10519 			sc->sc_fcrtl |= FCRTL_XONE;
   10520 		}
   10521 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   10522 			sc->sc_ctrl |= CTRL_RFCE;
   10523 	}
   10524 
   10525 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   10526 		DPRINTF(WM_DEBUG_LINK,
   10527 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   10528 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10529 	} else {
   10530 		DPRINTF(WM_DEBUG_LINK,
   10531 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   10532 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10533 	}
   10534 
   10535 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10536 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10537 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   10538 						 : WMREG_FCRTL, sc->sc_fcrtl);
   10539 	if (sc->sc_type == WM_T_80003) {
   10540 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   10541 		case IFM_1000_T:
   10542 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10543 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   10544 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   10545 			break;
   10546 		default:
   10547 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   10548 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   10549 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   10550 			break;
   10551 		}
   10552 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   10553 	}
   10554 }
   10555 
    10556 /* Kumeran related (80003, ICH* and PCH*) */
   10557 
   10558 /*
   10559  * wm_kmrn_readreg:
   10560  *
    10561  *	Read a Kumeran register
   10562  */
   10563 static int
   10564 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
   10565 {
   10566 	int rv;
   10567 
   10568 	if (sc->sc_type == WM_T_80003)
   10569 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10570 	else
   10571 		rv = sc->phy.acquire(sc);
   10572 	if (rv != 0) {
   10573 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10574 		    __func__);
   10575 		return rv;
   10576 	}
   10577 
   10578 	rv = wm_kmrn_readreg_locked(sc, reg, val);
   10579 
   10580 	if (sc->sc_type == WM_T_80003)
   10581 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10582 	else
   10583 		sc->phy.release(sc);
   10584 
   10585 	return rv;
   10586 }
   10587 
   10588 static int
   10589 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
   10590 {
   10591 
   10592 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10593 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   10594 	    KUMCTRLSTA_REN);
   10595 	CSR_WRITE_FLUSH(sc);
   10596 	delay(2);
   10597 
   10598 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   10599 
   10600 	return 0;
   10601 }
   10602 
   10603 /*
   10604  * wm_kmrn_writereg:
   10605  *
    10606  *	Write a Kumeran register
   10607  */
   10608 static int
   10609 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
   10610 {
   10611 	int rv;
   10612 
   10613 	if (sc->sc_type == WM_T_80003)
   10614 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10615 	else
   10616 		rv = sc->phy.acquire(sc);
   10617 	if (rv != 0) {
   10618 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
   10619 		    __func__);
   10620 		return rv;
   10621 	}
   10622 
   10623 	rv = wm_kmrn_writereg_locked(sc, reg, val);
   10624 
   10625 	if (sc->sc_type == WM_T_80003)
   10626 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   10627 	else
   10628 		sc->phy.release(sc);
   10629 
   10630 	return rv;
   10631 }
   10632 
   10633 static int
   10634 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
   10635 {
   10636 
   10637 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   10638 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
   10639 
   10640 	return 0;
   10641 }
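
/*
 * Illustrative sketch (not compiled): a read-modify-write of a
 * Kumeran register through the helpers above.  The offset and bit
 * names are taken as assumptions for illustration; error handling is
 * elided.
 */
#if 0
	uint16_t kval;

	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, &kval) == 0) {
		kval |= KUMCTRLSTA_INB_CTRL_DIS_PADDING;
		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL, kval);
	}
#endif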
   10642 
   10643 /* SGMII related */
   10644 
   10645 /*
   10646  * wm_sgmii_uses_mdio
   10647  *
   10648  * Check whether the transaction is to the internal PHY or the external
   10649  * MDIO interface. Return true if it's MDIO.
   10650  */
   10651 static bool
   10652 wm_sgmii_uses_mdio(struct wm_softc *sc)
   10653 {
   10654 	uint32_t reg;
   10655 	bool ismdio = false;
   10656 
   10657 	switch (sc->sc_type) {
   10658 	case WM_T_82575:
   10659 	case WM_T_82576:
   10660 		reg = CSR_READ(sc, WMREG_MDIC);
   10661 		ismdio = ((reg & MDIC_DEST) != 0);
   10662 		break;
   10663 	case WM_T_82580:
   10664 	case WM_T_I350:
   10665 	case WM_T_I354:
   10666 	case WM_T_I210:
   10667 	case WM_T_I211:
   10668 		reg = CSR_READ(sc, WMREG_MDICNFG);
   10669 		ismdio = ((reg & MDICNFG_DEST) != 0);
   10670 		break;
   10671 	default:
   10672 		break;
   10673 	}
   10674 
   10675 	return ismdio;
   10676 }
   10677 
   10678 /*
   10679  * wm_sgmii_readreg:	[mii interface function]
   10680  *
   10681  *	Read a PHY register on the SGMII
   10682  * This could be handled by the PHY layer if we didn't have to lock the
    10683  * resource ...
   10684  */
   10685 static int
   10686 wm_sgmii_readreg(device_t dev, int phy, int reg)
   10687 {
   10688 	struct wm_softc *sc = device_private(dev);
   10689 	uint32_t i2ccmd;
   10690 	int i, rv;
   10691 
   10692 	if (sc->phy.acquire(sc)) {
   10693 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10694 		return 0;
   10695 	}
   10696 
   10697 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10698 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10699 	    | I2CCMD_OPCODE_READ;
   10700 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10701 
   10702 	/* Poll the ready bit */
   10703 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10704 		delay(50);
   10705 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10706 		if (i2ccmd & I2CCMD_READY)
   10707 			break;
   10708 	}
   10709 	if ((i2ccmd & I2CCMD_READY) == 0)
   10710 		device_printf(dev, "I2CCMD Read did not complete\n");
   10711 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10712 		device_printf(dev, "I2CCMD Error bit set\n");
   10713 
   10714 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   10715 
   10716 	sc->phy.release(sc);
   10717 	return rv;
   10718 }
   10719 
   10720 /*
   10721  * wm_sgmii_writereg:	[mii interface function]
   10722  *
   10723  *	Write a PHY register on the SGMII.
   10724  * This could be handled by the PHY layer if we didn't have to lock the
    10725  * resource ...
   10726  */
   10727 static void
   10728 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
   10729 {
   10730 	struct wm_softc *sc = device_private(dev);
   10731 	uint32_t i2ccmd;
   10732 	int i;
   10733 	int val_swapped;
   10734 
   10735 	if (sc->phy.acquire(sc) != 0) {
   10736 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
   10737 		return;
   10738 	}
   10739 	/* Swap the data bytes for the I2C interface */
   10740 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   10741 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   10742 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   10743 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   10744 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   10745 
   10746 	/* Poll the ready bit */
   10747 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   10748 		delay(50);
   10749 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   10750 		if (i2ccmd & I2CCMD_READY)
   10751 			break;
   10752 	}
   10753 	if ((i2ccmd & I2CCMD_READY) == 0)
   10754 		device_printf(dev, "I2CCMD Write did not complete\n");
   10755 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   10756 		device_printf(dev, "I2CCMD Error bit set\n");
   10757 
   10758 	sc->phy.release(sc);
   10759 }
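
/*
 * Illustrative sketch (not compiled): the byte swap performed by both
 * I2CCMD paths above.  The I2C interface presents the 16-bit PHY
 * register most significant byte first, while the driver works with
 * host-order values, so the two bytes are exchanged in each
 * direction.  The helper name is made up for illustration.
 */
#if 0
static uint16_t
wm_i2ccmd_swap(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif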
   10760 
   10761 /* TBI related */
   10762 
   10763 /*
   10764  * wm_tbi_mediainit:
   10765  *
   10766  *	Initialize media for use on 1000BASE-X devices.
   10767  */
   10768 static void
   10769 wm_tbi_mediainit(struct wm_softc *sc)
   10770 {
   10771 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   10772 	const char *sep = "";
   10773 
   10774 	if (sc->sc_type < WM_T_82543)
   10775 		sc->sc_tipg = TIPG_WM_DFLT;
   10776 	else
   10777 		sc->sc_tipg = TIPG_LG_DFLT;
   10778 
   10779 	sc->sc_tbi_serdes_anegticks = 5;
   10780 
   10781 	/* Initialize our media structures */
   10782 	sc->sc_mii.mii_ifp = ifp;
   10783 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   10784 
   10785 	if ((sc->sc_type >= WM_T_82575)
   10786 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   10787 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10788 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   10789 	else
   10790 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   10791 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   10792 
   10793 	/*
   10794 	 * SWD Pins:
   10795 	 *
   10796 	 *	0 = Link LED (output)
   10797 	 *	1 = Loss Of Signal (input)
   10798 	 */
   10799 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   10800 
   10801 	/* XXX Perhaps this is only for TBI */
   10802 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   10803 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   10804 
   10805 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   10806 		sc->sc_ctrl &= ~CTRL_LRST;
   10807 
   10808 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10809 
   10810 #define	ADD(ss, mm, dd)							\
   10811 do {									\
   10812 	aprint_normal("%s%s", sep, ss);					\
   10813 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   10814 	sep = ", ";							\
   10815 } while (/*CONSTCOND*/0)
   10816 
   10817 	aprint_normal_dev(sc->sc_dev, "");
   10818 
   10819 	if (sc->sc_type == WM_T_I354) {
   10820 		uint32_t status;
   10821 
   10822 		status = CSR_READ(sc, WMREG_STATUS);
   10823 		if (((status & STATUS_2P5_SKU) != 0)
   10824 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   10825 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
   10826 		} else
   10827 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
   10828 	} else if (sc->sc_type == WM_T_82545) {
   10829 		/* Only 82545 is LX (XXX except SFP) */
   10830 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   10831 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   10832 	} else {
   10833 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   10834 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   10835 	}
   10836 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   10837 	aprint_normal("\n");
   10838 
   10839 #undef ADD
   10840 
   10841 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   10842 }
   10843 
   10844 /*
   10845  * wm_tbi_mediachange:	[ifmedia interface function]
   10846  *
   10847  *	Set hardware to newly-selected media on a 1000BASE-X device.
   10848  */
   10849 static int
   10850 wm_tbi_mediachange(struct ifnet *ifp)
   10851 {
   10852 	struct wm_softc *sc = ifp->if_softc;
   10853 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10854 	uint32_t status;
   10855 	int i;
   10856 
   10857 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   10858 		/* XXX need some work for >= 82571 and < 82575 */
   10859 		if (sc->sc_type < WM_T_82575)
   10860 			return 0;
   10861 	}
   10862 
   10863 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   10864 	    || (sc->sc_type >= WM_T_82575))
   10865 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   10866 
   10867 	sc->sc_ctrl &= ~CTRL_LRST;
   10868 	sc->sc_txcw = TXCW_ANE;
   10869 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   10870 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   10871 	else if (ife->ifm_media & IFM_FDX)
   10872 		sc->sc_txcw |= TXCW_FD;
   10873 	else
   10874 		sc->sc_txcw |= TXCW_HD;
   10875 
   10876 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   10877 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   10878 
   10879 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   10880 		    device_xname(sc->sc_dev), sc->sc_txcw));
   10881 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   10882 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   10883 	CSR_WRITE_FLUSH(sc);
   10884 	delay(1000);
   10885 
   10886 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   10887 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   10888 
   10889 	/*
    10890 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
    10891 	 * optics detect a signal; on older chips a signal reads as 0.
   10892 	 */
   10893 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   10894 		/* Have signal; wait for the link to come up. */
   10895 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   10896 			delay(10000);
   10897 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   10898 				break;
   10899 		}
   10900 
   10901 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   10902 			    device_xname(sc->sc_dev),i));
   10903 
   10904 		status = CSR_READ(sc, WMREG_STATUS);
   10905 		DPRINTF(WM_DEBUG_LINK,
   10906 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   10907 			device_xname(sc->sc_dev),status, STATUS_LU));
   10908 		if (status & STATUS_LU) {
   10909 			/* Link is up. */
   10910 			DPRINTF(WM_DEBUG_LINK,
   10911 			    ("%s: LINK: set media -> link up %s\n",
   10912 			    device_xname(sc->sc_dev),
   10913 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   10914 
   10915 			/*
    10916 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    10917 			 * automatically, so re-read it into sc->sc_ctrl.
   10918 			 */
   10919 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   10920 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   10921 			sc->sc_fcrtl &= ~FCRTL_XONE;
   10922 			if (status & STATUS_FD)
   10923 				sc->sc_tctl |=
   10924 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   10925 			else
   10926 				sc->sc_tctl |=
   10927 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   10928 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   10929 				sc->sc_fcrtl |= FCRTL_XONE;
   10930 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   10931 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   10932 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   10933 				      sc->sc_fcrtl);
   10934 			sc->sc_tbi_linkup = 1;
   10935 		} else {
   10936 			if (i == WM_LINKUP_TIMEOUT)
   10937 				wm_check_for_link(sc);
   10938 			/* Link is down. */
   10939 			DPRINTF(WM_DEBUG_LINK,
   10940 			    ("%s: LINK: set media -> link down\n",
   10941 			    device_xname(sc->sc_dev)));
   10942 			sc->sc_tbi_linkup = 0;
   10943 		}
   10944 	} else {
   10945 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   10946 		    device_xname(sc->sc_dev)));
   10947 		sc->sc_tbi_linkup = 0;
   10948 	}
   10949 
   10950 	wm_tbi_serdes_set_linkled(sc);
   10951 
   10952 	return 0;
   10953 }
   10954 
   10955 /*
   10956  * wm_tbi_mediastatus:	[ifmedia interface function]
   10957  *
   10958  *	Get the current interface media status on a 1000BASE-X device.
   10959  */
   10960 static void
   10961 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   10962 {
   10963 	struct wm_softc *sc = ifp->if_softc;
   10964 	uint32_t ctrl, status;
   10965 
   10966 	ifmr->ifm_status = IFM_AVALID;
   10967 	ifmr->ifm_active = IFM_ETHER;
   10968 
   10969 	status = CSR_READ(sc, WMREG_STATUS);
   10970 	if ((status & STATUS_LU) == 0) {
   10971 		ifmr->ifm_active |= IFM_NONE;
   10972 		return;
   10973 	}
   10974 
   10975 	ifmr->ifm_status |= IFM_ACTIVE;
   10976 	/* Only 82545 is LX */
   10977 	if (sc->sc_type == WM_T_82545)
   10978 		ifmr->ifm_active |= IFM_1000_LX;
   10979 	else
   10980 		ifmr->ifm_active |= IFM_1000_SX;
   10981 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   10982 		ifmr->ifm_active |= IFM_FDX;
   10983 	else
   10984 		ifmr->ifm_active |= IFM_HDX;
   10985 	ctrl = CSR_READ(sc, WMREG_CTRL);
   10986 	if (ctrl & CTRL_RFCE)
   10987 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   10988 	if (ctrl & CTRL_TFCE)
   10989 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   10990 }
   10991 
   10992 /* XXX TBI only */
   10993 static int
   10994 wm_check_for_link(struct wm_softc *sc)
   10995 {
   10996 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   10997 	uint32_t rxcw;
   10998 	uint32_t ctrl;
   10999 	uint32_t status;
   11000 	uint32_t sig;
   11001 
   11002 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   11003 		/* XXX need some work for >= 82571 */
   11004 		if (sc->sc_type >= WM_T_82571) {
   11005 			sc->sc_tbi_linkup = 1;
   11006 			return 0;
   11007 		}
   11008 	}
   11009 
   11010 	rxcw = CSR_READ(sc, WMREG_RXCW);
   11011 	ctrl = CSR_READ(sc, WMREG_CTRL);
   11012 	status = CSR_READ(sc, WMREG_STATUS);
   11013 
   11014 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   11015 
   11016 	DPRINTF(WM_DEBUG_LINK,
   11017 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   11018 		device_xname(sc->sc_dev), __func__,
   11019 		((ctrl & CTRL_SWDPIN(1)) == sig),
   11020 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   11021 
   11022 	/*
   11023 	 * SWDPIN   LU RXCW
   11024 	 *      0    0    0
   11025 	 *      0    0    1	(should not happen)
   11026 	 *      0    1    0	(should not happen)
   11027 	 *      0    1    1	(should not happen)
    11028 	 *      1    0    0	Disable autonegotiation and force link up
    11029 	 *      1    0    1	Got /C/ ordered sets but no link yet
    11030 	 *      1    1    0	(link up)
    11031 	 *      1    1    1	If IFM_AUTO, restart autonegotiation
   11032 	 *
   11033 	 */
   11034 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11035 	    && ((status & STATUS_LU) == 0)
   11036 	    && ((rxcw & RXCW_C) == 0)) {
   11037 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   11038 			__func__));
   11039 		sc->sc_tbi_linkup = 0;
   11040 		/* Disable auto-negotiation in the TXCW register */
   11041 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   11042 
   11043 		/*
   11044 		 * Force link-up and also force full-duplex.
   11045 		 *
    11046 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
    11047 		 * automatically, so keep sc->sc_ctrl in sync.
   11048 		 */
   11049 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   11050 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11051 	} else if (((status & STATUS_LU) != 0)
   11052 	    && ((rxcw & RXCW_C) != 0)
   11053 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   11054 		sc->sc_tbi_linkup = 1;
   11055 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   11056 			__func__));
   11057 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11058 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   11059 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   11060 	    && ((rxcw & RXCW_C) != 0)) {
   11061 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   11062 	} else {
   11063 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   11064 			status));
   11065 	}
   11066 
   11067 	return 0;
   11068 }
   11069 
   11070 /*
   11071  * wm_tbi_tick:
   11072  *
   11073  *	Check the link on TBI devices.
   11074  *	This function acts as mii_tick().
   11075  */
   11076 static void
   11077 wm_tbi_tick(struct wm_softc *sc)
   11078 {
   11079 	struct mii_data *mii = &sc->sc_mii;
   11080 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11081 	uint32_t status;
   11082 
   11083 	KASSERT(WM_CORE_LOCKED(sc));
   11084 
   11085 	status = CSR_READ(sc, WMREG_STATUS);
   11086 
   11087 	/* XXX is this needed? */
   11088 	(void)CSR_READ(sc, WMREG_RXCW);
   11089 	(void)CSR_READ(sc, WMREG_CTRL);
   11090 
   11091 	/* set link status */
   11092 	if ((status & STATUS_LU) == 0) {
   11093 		DPRINTF(WM_DEBUG_LINK,
   11094 		    ("%s: LINK: checklink -> down\n",
   11095 			device_xname(sc->sc_dev)));
   11096 		sc->sc_tbi_linkup = 0;
   11097 	} else if (sc->sc_tbi_linkup == 0) {
   11098 		DPRINTF(WM_DEBUG_LINK,
   11099 		    ("%s: LINK: checklink -> up %s\n",
   11100 			device_xname(sc->sc_dev),
   11101 			(status & STATUS_FD) ? "FDX" : "HDX"));
   11102 		sc->sc_tbi_linkup = 1;
   11103 		sc->sc_tbi_serdes_ticks = 0;
   11104 	}
   11105 
   11106 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   11107 		goto setled;
   11108 
   11109 	if ((status & STATUS_LU) == 0) {
   11110 		sc->sc_tbi_linkup = 0;
   11111 		/* If the timer expired, retry autonegotiation */
   11112 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11113 		    && (++sc->sc_tbi_serdes_ticks
   11114 			>= sc->sc_tbi_serdes_anegticks)) {
   11115 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11116 			sc->sc_tbi_serdes_ticks = 0;
   11117 			/*
   11118 			 * Reset the link, and let autonegotiation do
   11119 			 * its thing
   11120 			 */
   11121 			sc->sc_ctrl |= CTRL_LRST;
   11122 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11123 			CSR_WRITE_FLUSH(sc);
   11124 			delay(1000);
   11125 			sc->sc_ctrl &= ~CTRL_LRST;
   11126 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11127 			CSR_WRITE_FLUSH(sc);
   11128 			delay(1000);
   11129 			CSR_WRITE(sc, WMREG_TXCW,
   11130 			    sc->sc_txcw & ~TXCW_ANE);
   11131 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   11132 		}
   11133 	}
   11134 
   11135 setled:
   11136 	wm_tbi_serdes_set_linkled(sc);
   11137 }
   11138 
   11139 /* SERDES related */
   11140 static void
   11141 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   11142 {
   11143 	uint32_t reg;
   11144 
   11145 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   11146 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   11147 		return;
   11148 
   11149 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   11150 	reg |= PCS_CFG_PCS_EN;
   11151 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   11152 
   11153 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11154 	reg &= ~CTRL_EXT_SWDPIN(3);
   11155 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11156 	CSR_WRITE_FLUSH(sc);
   11157 }
   11158 
   11159 static int
   11160 wm_serdes_mediachange(struct ifnet *ifp)
   11161 {
   11162 	struct wm_softc *sc = ifp->if_softc;
   11163 	bool pcs_autoneg = true; /* XXX */
   11164 	uint32_t ctrl_ext, pcs_lctl, reg;
   11165 
   11166 	/* XXX Currently, this function is not called on 8257[12] */
   11167 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   11168 	    || (sc->sc_type >= WM_T_82575))
   11169 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   11170 
   11171 	wm_serdes_power_up_link_82575(sc);
   11172 
   11173 	sc->sc_ctrl |= CTRL_SLU;
   11174 
   11175 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   11176 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   11177 
   11178 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11179 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   11180 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   11181 	case CTRL_EXT_LINK_MODE_SGMII:
   11182 		pcs_autoneg = true;
   11183 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   11184 		break;
   11185 	case CTRL_EXT_LINK_MODE_1000KX:
   11186 		pcs_autoneg = false;
   11187 		/* FALLTHROUGH */
   11188 	default:
   11189 		if ((sc->sc_type == WM_T_82575)
   11190 		    || (sc->sc_type == WM_T_82576)) {
   11191 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   11192 				pcs_autoneg = false;
   11193 		}
   11194 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   11195 		    | CTRL_FRCFDX;
   11196 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   11197 	}
   11198 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11199 
   11200 	if (pcs_autoneg) {
   11201 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   11202 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   11203 
   11204 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   11205 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   11206 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   11207 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   11208 	} else
   11209 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   11210 
   11211 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    11212 
   11214 	return 0;
   11215 }
   11216 
   11217 static void
   11218 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   11219 {
   11220 	struct wm_softc *sc = ifp->if_softc;
   11221 	struct mii_data *mii = &sc->sc_mii;
   11222 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   11223 	uint32_t pcs_adv, pcs_lpab, reg;
   11224 
   11225 	ifmr->ifm_status = IFM_AVALID;
   11226 	ifmr->ifm_active = IFM_ETHER;
   11227 
   11228 	/* Check PCS */
   11229 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11230 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   11231 		ifmr->ifm_active |= IFM_NONE;
   11232 		sc->sc_tbi_linkup = 0;
   11233 		goto setled;
   11234 	}
   11235 
   11236 	sc->sc_tbi_linkup = 1;
   11237 	ifmr->ifm_status |= IFM_ACTIVE;
   11238 	if (sc->sc_type == WM_T_I354) {
   11239 		uint32_t status;
   11240 
   11241 		status = CSR_READ(sc, WMREG_STATUS);
   11242 		if (((status & STATUS_2P5_SKU) != 0)
   11243 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
   11244 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
   11245 		} else
   11246 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
   11247 	} else {
   11248 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
   11249 		case PCS_LSTS_SPEED_10:
   11250 			ifmr->ifm_active |= IFM_10_T; /* XXX */
   11251 			break;
   11252 		case PCS_LSTS_SPEED_100:
   11253 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
   11254 			break;
   11255 		case PCS_LSTS_SPEED_1000:
   11256 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11257 			break;
   11258 		default:
   11259 			device_printf(sc->sc_dev, "Unknown speed\n");
   11260 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   11261 			break;
   11262 		}
   11263 	}
   11264 	if ((reg & PCS_LSTS_FDX) != 0)
   11265 		ifmr->ifm_active |= IFM_FDX;
   11266 	else
   11267 		ifmr->ifm_active |= IFM_HDX;
   11268 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   11269 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   11270 		/* Check flow */
   11271 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11272 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   11273 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   11274 			goto setled;
   11275 		}
   11276 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   11277 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   11278 		DPRINTF(WM_DEBUG_LINK,
   11279 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   11280 		if ((pcs_adv & TXCW_SYM_PAUSE)
   11281 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   11282 			mii->mii_media_active |= IFM_FLOW
   11283 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   11284 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   11285 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11286 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   11287 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11288 			mii->mii_media_active |= IFM_FLOW
   11289 			    | IFM_ETH_TXPAUSE;
   11290 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   11291 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   11292 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   11293 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   11294 			mii->mii_media_active |= IFM_FLOW
   11295 			    | IFM_ETH_RXPAUSE;
   11296 		}
   11297 	}
   11298 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   11299 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   11300 setled:
   11301 	wm_tbi_serdes_set_linkled(sc);
   11302 }
   11303 
   11304 /*
   11305  * wm_serdes_tick:
   11306  *
   11307  *	Check the link on serdes devices.
   11308  */
   11309 static void
   11310 wm_serdes_tick(struct wm_softc *sc)
   11311 {
   11312 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   11313 	struct mii_data *mii = &sc->sc_mii;
   11314 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   11315 	uint32_t reg;
   11316 
   11317 	KASSERT(WM_CORE_LOCKED(sc));
   11318 
   11319 	mii->mii_media_status = IFM_AVALID;
   11320 	mii->mii_media_active = IFM_ETHER;
   11321 
   11322 	/* Check PCS */
   11323 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   11324 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   11325 		mii->mii_media_status |= IFM_ACTIVE;
   11326 		sc->sc_tbi_linkup = 1;
   11327 		sc->sc_tbi_serdes_ticks = 0;
   11328 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   11329 		if ((reg & PCS_LSTS_FDX) != 0)
   11330 			mii->mii_media_active |= IFM_FDX;
   11331 		else
   11332 			mii->mii_media_active |= IFM_HDX;
   11333 	} else {
   11334 		mii->mii_media_status |= IFM_NONE;
   11335 		sc->sc_tbi_linkup = 0;
   11336 		/* If the timer expired, retry autonegotiation */
   11337 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   11338 		    && (++sc->sc_tbi_serdes_ticks
   11339 			>= sc->sc_tbi_serdes_anegticks)) {
   11340 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   11341 			sc->sc_tbi_serdes_ticks = 0;
   11342 			/* XXX */
   11343 			wm_serdes_mediachange(ifp);
   11344 		}
   11345 	}
   11346 
   11347 	wm_tbi_serdes_set_linkled(sc);
   11348 }
   11349 
   11350 /* SFP related */
   11351 
   11352 static int
   11353 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   11354 {
   11355 	uint32_t i2ccmd;
   11356 	int i;
   11357 
   11358 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   11359 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   11360 
   11361 	/* Poll the ready bit */
   11362 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   11363 		delay(50);
   11364 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   11365 		if (i2ccmd & I2CCMD_READY)
   11366 			break;
   11367 	}
   11368 	if ((i2ccmd & I2CCMD_READY) == 0)
   11369 		return -1;
   11370 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   11371 		return -1;
   11372 
   11373 	*data = i2ccmd & 0x00ff;
   11374 
   11375 	return 0;
   11376 }
   11377 
   11378 static uint32_t
   11379 wm_sfp_get_media_type(struct wm_softc *sc)
   11380 {
   11381 	uint32_t ctrl_ext;
   11382 	uint8_t val = 0;
   11383 	int timeout = 3;
   11384 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   11385 	int rv = -1;
   11386 
   11387 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   11388 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   11389 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   11390 	CSR_WRITE_FLUSH(sc);
   11391 
   11392 	/* Read SFP module data */
   11393 	while (timeout) {
   11394 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   11395 		if (rv == 0)
   11396 			break;
   11397 		delay(100*1000); /* XXX too big */
   11398 		timeout--;
   11399 	}
   11400 	if (rv != 0)
   11401 		goto out;
   11402 	switch (val) {
   11403 	case SFF_SFP_ID_SFF:
   11404 		aprint_normal_dev(sc->sc_dev,
   11405 		    "Module/Connector soldered to board\n");
   11406 		break;
   11407 	case SFF_SFP_ID_SFP:
   11408 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   11409 		break;
   11410 	case SFF_SFP_ID_UNKNOWN:
   11411 		goto out;
   11412 	default:
   11413 		break;
   11414 	}
   11415 
   11416 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   11417 	if (rv != 0) {
   11418 		goto out;
   11419 	}
   11420 
   11421 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   11422 		mediatype = WM_MEDIATYPE_SERDES;
    11423 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
    11424 		sc->sc_flags |= WM_F_SGMII;
    11425 		mediatype = WM_MEDIATYPE_COPPER;
    11426 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   11427 		sc->sc_flags |= WM_F_SGMII;
   11428 		mediatype = WM_MEDIATYPE_SERDES;
   11429 	}
   11430 
   11431 out:
   11432 	/* Restore I2C interface setting */
   11433 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   11434 
   11435 	return mediatype;
   11436 }
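
          /*
           * Worked example for the decode above (a sketch; the byte layout
           * is the usual SFF-8472 one and is an assumption, not something
           * this driver defines): identifier byte 0x03 means a pluggable
           * SFP, and the Ethernet compliance byte then selects the media.
           * The SX/LX bits map to SERDES, the 1000BASE-T bit to copper
           * behind SGMII, and the 100BASE-FX bit to SERDES with SGMII.
           */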
   11437 
   11438 /*
   11439  * NVM related.
    11440  * Microwire, SPI (with or without EERD) and Flash.
   11441  */
   11442 
   11443 /* Both spi and uwire */
   11444 
   11445 /*
   11446  * wm_eeprom_sendbits:
   11447  *
   11448  *	Send a series of bits to the EEPROM.
   11449  */
   11450 static void
   11451 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   11452 {
   11453 	uint32_t reg;
   11454 	int x;
   11455 
   11456 	reg = CSR_READ(sc, WMREG_EECD);
   11457 
   11458 	for (x = nbits; x > 0; x--) {
   11459 		if (bits & (1U << (x - 1)))
   11460 			reg |= EECD_DI;
   11461 		else
   11462 			reg &= ~EECD_DI;
   11463 		CSR_WRITE(sc, WMREG_EECD, reg);
   11464 		CSR_WRITE_FLUSH(sc);
   11465 		delay(2);
   11466 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11467 		CSR_WRITE_FLUSH(sc);
   11468 		delay(2);
   11469 		CSR_WRITE(sc, WMREG_EECD, reg);
   11470 		CSR_WRITE_FLUSH(sc);
   11471 		delay(2);
   11472 	}
   11473 }
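
          /*
           * One bit time in the loop above, sketched (the 2us figure comes
           * from the delay() calls here, not from a datasheet):
           *
           *	DI:  set up while SK is low
           *	SK:  __/~~\__   raised for 2us, then lowered again
           *
           * wm_eeprom_recvbits() below mirrors this and samples DO after
           * the rising edge of SK.
           */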
   11474 
   11475 /*
   11476  * wm_eeprom_recvbits:
   11477  *
   11478  *	Receive a series of bits from the EEPROM.
   11479  */
   11480 static void
   11481 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   11482 {
   11483 	uint32_t reg, val;
   11484 	int x;
   11485 
   11486 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   11487 
   11488 	val = 0;
   11489 	for (x = nbits; x > 0; x--) {
   11490 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   11491 		CSR_WRITE_FLUSH(sc);
   11492 		delay(2);
   11493 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   11494 			val |= (1U << (x - 1));
   11495 		CSR_WRITE(sc, WMREG_EECD, reg);
   11496 		CSR_WRITE_FLUSH(sc);
   11497 		delay(2);
   11498 	}
   11499 	*valp = val;
   11500 }
   11501 
   11502 /* Microwire */
   11503 
   11504 /*
   11505  * wm_nvm_read_uwire:
   11506  *
   11507  *	Read a word from the EEPROM using the MicroWire protocol.
   11508  */
   11509 static int
   11510 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11511 {
   11512 	uint32_t reg, val;
   11513 	int i;
   11514 
   11515 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11516 		device_xname(sc->sc_dev), __func__));
   11517 
   11518 	if (sc->nvm.acquire(sc) != 0)
   11519 		return -1;
   11520 
   11521 	for (i = 0; i < wordcnt; i++) {
   11522 		/* Clear SK and DI. */
   11523 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   11524 		CSR_WRITE(sc, WMREG_EECD, reg);
   11525 
   11526 		/*
   11527 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   11528 		 * and Xen.
   11529 		 *
    11530 		 * We use this workaround only for the 82540 because
    11531 		 * qemu's e1000 acts as an 82540.
   11532 		 */
   11533 		if (sc->sc_type == WM_T_82540) {
   11534 			reg |= EECD_SK;
   11535 			CSR_WRITE(sc, WMREG_EECD, reg);
   11536 			reg &= ~EECD_SK;
   11537 			CSR_WRITE(sc, WMREG_EECD, reg);
   11538 			CSR_WRITE_FLUSH(sc);
   11539 			delay(2);
   11540 		}
   11541 		/* XXX: end of workaround */
   11542 
   11543 		/* Set CHIP SELECT. */
   11544 		reg |= EECD_CS;
   11545 		CSR_WRITE(sc, WMREG_EECD, reg);
   11546 		CSR_WRITE_FLUSH(sc);
   11547 		delay(2);
   11548 
   11549 		/* Shift in the READ command. */
   11550 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   11551 
   11552 		/* Shift in address. */
   11553 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   11554 
   11555 		/* Shift out the data. */
   11556 		wm_eeprom_recvbits(sc, &val, 16);
   11557 		data[i] = val & 0xffff;
   11558 
   11559 		/* Clear CHIP SELECT. */
   11560 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   11561 		CSR_WRITE(sc, WMREG_EECD, reg);
   11562 		CSR_WRITE_FLUSH(sc);
   11563 		delay(2);
   11564 	}
   11565 
   11566 	sc->nvm.release(sc);
   11567 	return 0;
   11568 }
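
          /*
           * The whole Microwire READ transaction above, in order (a summary
           * of the code, with the conventional 93Cxx "1 1 0" read opcode
           * assumed for UWIRE_OPC_READ):
           *
           *	CS high -> shift out 3 opcode bits -> shift out
           *	sc_nvm_addrbits address bits -> shift in 16 data bits
           *	-> CS low
           */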
   11569 
   11570 /* SPI */
   11571 
   11572 /*
   11573  * Set SPI and FLASH related information from the EECD register.
   11574  * For 82541 and 82547, the word size is taken from EEPROM.
   11575  */
   11576 static int
   11577 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   11578 {
   11579 	int size;
   11580 	uint32_t reg;
   11581 	uint16_t data;
   11582 
   11583 	reg = CSR_READ(sc, WMREG_EECD);
   11584 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   11585 
   11586 	/* Read the size of NVM from EECD by default */
   11587 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11588 	switch (sc->sc_type) {
   11589 	case WM_T_82541:
   11590 	case WM_T_82541_2:
   11591 	case WM_T_82547:
   11592 	case WM_T_82547_2:
   11593 		/* Set dummy value to access EEPROM */
   11594 		sc->sc_nvm_wordsize = 64;
   11595 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   11596 		reg = data;
   11597 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   11598 		if (size == 0)
   11599 			size = 6; /* 64 word size */
   11600 		else
   11601 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   11602 		break;
   11603 	case WM_T_80003:
   11604 	case WM_T_82571:
   11605 	case WM_T_82572:
   11606 	case WM_T_82573: /* SPI case */
   11607 	case WM_T_82574: /* SPI case */
   11608 	case WM_T_82583: /* SPI case */
   11609 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11610 		if (size > 14)
   11611 			size = 14;
   11612 		break;
   11613 	case WM_T_82575:
   11614 	case WM_T_82576:
   11615 	case WM_T_82580:
   11616 	case WM_T_I350:
   11617 	case WM_T_I354:
   11618 	case WM_T_I210:
   11619 	case WM_T_I211:
   11620 		size += NVM_WORD_SIZE_BASE_SHIFT;
   11621 		if (size > 15)
   11622 			size = 15;
   11623 		break;
   11624 	default:
   11625 		aprint_error_dev(sc->sc_dev,
   11626 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   11627 		return -1;
   11628 		break;
   11629 	}
   11630 
   11631 	sc->sc_nvm_wordsize = 1 << size;
   11632 
   11633 	return 0;
   11634 }
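
          /*
           * Worked example for the size computation above: if the EECD size
           * field of an 82575 reads 2 (an illustrative value), then
           * size = 2 + NVM_WORD_SIZE_BASE_SHIFT and sc_nvm_wordsize
           * = 1 << size, i.e. 256 words when the base shift is 6.
           */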
   11635 
   11636 /*
   11637  * wm_nvm_ready_spi:
   11638  *
   11639  *	Wait for a SPI EEPROM to be ready for commands.
   11640  */
   11641 static int
   11642 wm_nvm_ready_spi(struct wm_softc *sc)
   11643 {
   11644 	uint32_t val;
   11645 	int usec;
   11646 
   11647 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11648 		device_xname(sc->sc_dev), __func__));
   11649 
   11650 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   11651 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   11652 		wm_eeprom_recvbits(sc, &val, 8);
   11653 		if ((val & SPI_SR_RDY) == 0)
   11654 			break;
   11655 	}
   11656 	if (usec >= SPI_MAX_RETRIES) {
    11657 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   11658 		return -1;
   11659 	}
   11660 	return 0;
   11661 }
   11662 
   11663 /*
   11664  * wm_nvm_read_spi:
   11665  *
    11666  *	Read a word from the EEPROM using the SPI protocol.
   11667  */
   11668 static int
   11669 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11670 {
   11671 	uint32_t reg, val;
   11672 	int i;
   11673 	uint8_t opc;
   11674 	int rv = 0;
   11675 
   11676 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11677 		device_xname(sc->sc_dev), __func__));
   11678 
   11679 	if (sc->nvm.acquire(sc) != 0)
   11680 		return -1;
   11681 
   11682 	/* Clear SK and CS. */
   11683 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   11684 	CSR_WRITE(sc, WMREG_EECD, reg);
   11685 	CSR_WRITE_FLUSH(sc);
   11686 	delay(2);
   11687 
   11688 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
   11689 		goto out;
   11690 
   11691 	/* Toggle CS to flush commands. */
   11692 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   11693 	CSR_WRITE_FLUSH(sc);
   11694 	delay(2);
   11695 	CSR_WRITE(sc, WMREG_EECD, reg);
   11696 	CSR_WRITE_FLUSH(sc);
   11697 	delay(2);
   11698 
   11699 	opc = SPI_OPC_READ;
   11700 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   11701 		opc |= SPI_OPC_A8;
   11702 
   11703 	wm_eeprom_sendbits(sc, opc, 8);
   11704 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   11705 
   11706 	for (i = 0; i < wordcnt; i++) {
   11707 		wm_eeprom_recvbits(sc, &val, 16);
   11708 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   11709 	}
   11710 
   11711 	/* Raise CS and clear SK. */
   11712 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   11713 	CSR_WRITE(sc, WMREG_EECD, reg);
   11714 	CSR_WRITE_FLUSH(sc);
   11715 	delay(2);
   11716 
   11717 out:
   11718 	sc->nvm.release(sc);
   11719 	return rv;
   11720 }
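
          /*
           * Address example for the A8 handling above: with 8 address bits,
           * word 128 becomes byte address 128 << 1 = 0x100; bit 8 does not
           * fit in the 8-bit address field, so it travels in the opcode as
           * SPI_OPC_A8.  The received bytes arrive MSB first and are
           * swapped into the expected word order in the loop.
           */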
   11721 
    11722 /* Reading with the EERD register */
   11723 
   11724 static int
   11725 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   11726 {
   11727 	uint32_t attempts = 100000;
   11728 	uint32_t i, reg = 0;
   11729 	int32_t done = -1;
   11730 
   11731 	for (i = 0; i < attempts; i++) {
   11732 		reg = CSR_READ(sc, rw);
   11733 
   11734 		if (reg & EERD_DONE) {
   11735 			done = 0;
   11736 			break;
   11737 		}
   11738 		delay(5);
   11739 	}
   11740 
   11741 	return done;
   11742 }
   11743 
   11744 static int
   11745 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   11746     uint16_t *data)
   11747 {
   11748 	int i, eerd = 0;
   11749 	int rv = 0;
   11750 
   11751 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11752 		device_xname(sc->sc_dev), __func__));
   11753 
   11754 	if (sc->nvm.acquire(sc) != 0)
   11755 		return -1;
   11756 
   11757 	for (i = 0; i < wordcnt; i++) {
   11758 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   11759 		CSR_WRITE(sc, WMREG_EERD, eerd);
   11760 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   11761 		if (rv != 0) {
   11762 			aprint_error_dev(sc->sc_dev, "EERD polling failed\n");
   11763 			break;
   11764 		}
   11765 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   11766 	}
   11767 
   11768 	sc->nvm.release(sc);
   11769 	return rv;
   11770 }
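
          /*
           * The EERD handshake above, step by step (just a restatement of
           * the loop): write (word << EERD_ADDR_SHIFT) | EERD_START, poll
           * until EERD_DONE is set, then take the 16 data bits above
           * EERD_DATA_SHIFT.  No EECD bit-banging is needed on parts with
           * a working EERD register.
           */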
   11771 
   11772 /* Flash */
   11773 
   11774 static int
   11775 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   11776 {
   11777 	uint32_t eecd;
   11778 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   11779 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   11780 	uint8_t sig_byte = 0;
   11781 
   11782 	switch (sc->sc_type) {
   11783 	case WM_T_PCH_SPT:
   11784 		/*
   11785 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   11786 		 * sector valid bits from the NVM.
   11787 		 */
   11788 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
   11789 		if ((*bank == 0) || (*bank == 1)) {
   11790 			aprint_error_dev(sc->sc_dev,
   11791 			    "%s: no valid NVM bank present (%u)\n", __func__,
   11792 				*bank);
   11793 			return -1;
   11794 		} else {
   11795 			*bank = *bank - 2;
   11796 			return 0;
   11797 		}
   11798 	case WM_T_ICH8:
   11799 	case WM_T_ICH9:
   11800 		eecd = CSR_READ(sc, WMREG_EECD);
   11801 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   11802 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   11803 			return 0;
   11804 		}
   11805 		/* FALLTHROUGH */
   11806 	default:
   11807 		/* Default to 0 */
   11808 		*bank = 0;
   11809 
   11810 		/* Check bank 0 */
   11811 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   11812 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11813 			*bank = 0;
   11814 			return 0;
   11815 		}
   11816 
   11817 		/* Check bank 1 */
   11818 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   11819 		    &sig_byte);
   11820 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   11821 			*bank = 1;
   11822 			return 0;
   11823 		}
   11824 	}
   11825 
   11826 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   11827 		device_xname(sc->sc_dev)));
   11828 	return -1;
   11829 }
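
          /*
           * Signature layout behind the checks above: each flash bank holds
           * one NVM image, and the high byte of word ICH_NVM_SIG_WORD
           * (hence "* 2 + 1") carries the signature.  Bank 1 starts
           * sc_ich8_flash_bank_size words further in, so its signature byte
           * is read at act_offset + bank1_offset.
           */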
   11830 
   11831 /******************************************************************************
   11832  * This function does initial flash setup so that a new read/write/erase cycle
   11833  * can be started.
   11834  *
   11835  * sc - The pointer to the hw structure
   11836  ****************************************************************************/
   11837 static int32_t
   11838 wm_ich8_cycle_init(struct wm_softc *sc)
   11839 {
   11840 	uint16_t hsfsts;
   11841 	int32_t error = 1;
   11842 	int32_t i     = 0;
   11843 
   11844 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11845 
    11846 	/* Check the Flash Descriptor Valid bit in HW status */
   11847 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   11848 		return error;
   11849 	}
   11850 
   11851 	/* Clear FCERR in Hw status by writing 1 */
   11852 	/* Clear DAEL in Hw status by writing a 1 */
   11853 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   11854 
   11855 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11856 
    11857 	/*
    11858 	 * Either we should have a hardware SPI cycle-in-progress bit to
    11859 	 * check against before starting a new cycle, or the FDONE bit
    11860 	 * should be changed in the hardware so that it reads 1 after a
    11861 	 * hardware reset; it could then be used to tell whether a cycle
    11862 	 * is in progress or has completed.  We should also have some
    11863 	 * software semaphore mechanism guarding FDONE or the in-progress
    11864 	 * bit so that accesses by two threads are serialized, and so
    11865 	 * that two threads cannot start a cycle at the same time.
    11866 	 */
   11867 
   11868 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11869 		/*
   11870 		 * There is no cycle running at present, so we can start a
   11871 		 * cycle
   11872 		 */
   11873 
   11874 		/* Begin by setting Flash Cycle Done. */
   11875 		hsfsts |= HSFSTS_DONE;
   11876 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11877 		error = 0;
   11878 	} else {
    11879 		/*
    11880 		 * Otherwise, poll for some time so the current cycle has
    11881 		 * a chance to end before giving up.
    11882 		 */
   11883 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   11884 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11885 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   11886 				error = 0;
   11887 				break;
   11888 			}
   11889 			delay(1);
   11890 		}
   11891 		if (error == 0) {
    11892 			/*
    11893 			 * The previous cycle finished while we polled;
    11894 			 * now set the Flash Cycle Done.
    11895 			 */
   11896 			hsfsts |= HSFSTS_DONE;
   11897 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   11898 		}
   11899 	}
   11900 	return error;
   11901 }
   11902 
   11903 /******************************************************************************
   11904  * This function starts a flash cycle and waits for its completion
   11905  *
   11906  * sc - The pointer to the hw structure
   11907  ****************************************************************************/
   11908 static int32_t
   11909 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   11910 {
   11911 	uint16_t hsflctl;
   11912 	uint16_t hsfsts;
   11913 	int32_t error = 1;
   11914 	uint32_t i = 0;
   11915 
   11916 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   11917 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   11918 	hsflctl |= HSFCTL_GO;
   11919 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11920 
   11921 	/* Wait till FDONE bit is set to 1 */
   11922 	do {
   11923 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   11924 		if (hsfsts & HSFSTS_DONE)
   11925 			break;
   11926 		delay(1);
   11927 		i++;
   11928 	} while (i < timeout);
   11929 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
   11930 		error = 0;
   11931 
   11932 	return error;
   11933 }
   11934 
   11935 /******************************************************************************
   11936  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   11937  *
   11938  * sc - The pointer to the hw structure
   11939  * index - The index of the byte or word to read.
   11940  * size - Size of data to read, 1=byte 2=word, 4=dword
   11941  * data - Pointer to the word to store the value read.
   11942  *****************************************************************************/
   11943 static int32_t
   11944 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   11945     uint32_t size, uint32_t *data)
   11946 {
   11947 	uint16_t hsfsts;
   11948 	uint16_t hsflctl;
   11949 	uint32_t flash_linear_address;
   11950 	uint32_t flash_data = 0;
   11951 	int32_t error = 1;
   11952 	int32_t count = 0;
   11953 
    11954 	if (size < 1 || size > 4 || data == NULL ||
   11955 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   11956 		return error;
   11957 
   11958 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   11959 	    sc->sc_ich8_flash_base;
   11960 
   11961 	do {
   11962 		delay(1);
   11963 		/* Steps */
   11964 		error = wm_ich8_cycle_init(sc);
   11965 		if (error)
   11966 			break;
   11967 
   11968 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    11969 		/* The byte count field encodes the size minus one. */
   11970 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   11971 		    & HSFCTL_BCOUNT_MASK;
   11972 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   11973 		if (sc->sc_type == WM_T_PCH_SPT) {
   11974 			/*
   11975 			 * In SPT, This register is in Lan memory space, not
   11976 			 * flash. Therefore, only 32 bit access is supported.
   11977 			 */
   11978 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   11979 			    (uint32_t)hsflctl);
   11980 		} else
   11981 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   11982 
   11983 		/*
   11984 		 * Write the last 24 bits of index into Flash Linear address
   11985 		 * field in Flash Address
   11986 		 */
   11987 		/* TODO: TBD maybe check the index against the size of flash */
   11988 
   11989 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   11990 
   11991 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   11992 
    11993 		/*
    11994 		 * If FCERR is set, clear it and retry the whole sequence
    11995 		 * a few more times; otherwise read the data out of the
    11996 		 * Flash Data0 register, least significant byte first
    11997 		 * (little-endian byte order).
    11998 		 */
   11999 		if (error == 0) {
   12000 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   12001 			if (size == 1)
   12002 				*data = (uint8_t)(flash_data & 0x000000FF);
   12003 			else if (size == 2)
   12004 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   12005 			else if (size == 4)
   12006 				*data = (uint32_t)flash_data;
   12007 			break;
   12008 		} else {
   12009 			/*
   12010 			 * If we've gotten here, then things are probably
   12011 			 * completely hosed, but if the error condition is
   12012 			 * detected, it won't hurt to give it another try...
   12013 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   12014 			 */
   12015 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   12016 			if (hsfsts & HSFSTS_ERR) {
   12017 				/* Repeat for some time before giving up. */
   12018 				continue;
   12019 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   12020 				break;
   12021 		}
   12022 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   12023 
   12024 	return error;
   12025 }
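
          /*
           * The read cycle above in one line: wm_ich8_cycle_init() clears
           * FCERR/DAEL and sets FDONE, HSFCTL is programmed with the byte
           * count and ICH_CYCLE_READ, the linear address goes into FADDR,
           * wm_ich8_flash_cycle() sets HSFCTL_GO and polls FDONE, and on
           * success 1, 2 or 4 bytes are taken from FDATA0.
           */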
   12026 
   12027 /******************************************************************************
   12028  * Reads a single byte from the NVM using the ICH8 flash access registers.
   12029  *
   12030  * sc - pointer to wm_hw structure
   12031  * index - The index of the byte to read.
   12032  * data - Pointer to a byte to store the value read.
   12033  *****************************************************************************/
   12034 static int32_t
   12035 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   12036 {
   12037 	int32_t status;
   12038 	uint32_t word = 0;
   12039 
   12040 	status = wm_read_ich8_data(sc, index, 1, &word);
   12041 	if (status == 0)
   12042 		*data = (uint8_t)word;
   12043 	else
   12044 		*data = 0;
   12045 
   12046 	return status;
   12047 }
   12048 
   12049 /******************************************************************************
   12050  * Reads a word from the NVM using the ICH8 flash access registers.
   12051  *
   12052  * sc - pointer to wm_hw structure
   12053  * index - The starting byte index of the word to read.
   12054  * data - Pointer to a word to store the value read.
   12055  *****************************************************************************/
   12056 static int32_t
   12057 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   12058 {
   12059 	int32_t status;
   12060 	uint32_t word = 0;
   12061 
   12062 	status = wm_read_ich8_data(sc, index, 2, &word);
   12063 	if (status == 0)
   12064 		*data = (uint16_t)word;
   12065 	else
   12066 		*data = 0;
   12067 
   12068 	return status;
   12069 }
   12070 
   12071 /******************************************************************************
   12072  * Reads a dword from the NVM using the ICH8 flash access registers.
   12073  *
   12074  * sc - pointer to wm_hw structure
   12075  * index - The starting byte index of the word to read.
   12076  * data - Pointer to a word to store the value read.
   12077  *****************************************************************************/
   12078 static int32_t
   12079 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   12080 {
   12081 	int32_t status;
   12082 
   12083 	status = wm_read_ich8_data(sc, index, 4, data);
   12084 	return status;
   12085 }
   12086 
   12087 /******************************************************************************
   12088  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   12089  * register.
   12090  *
   12091  * sc - Struct containing variables accessed by shared code
   12092  * offset - offset of word in the EEPROM to read
   12093  * data - word read from the EEPROM
   12094  * words - number of words to read
   12095  *****************************************************************************/
   12096 static int
   12097 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12098 {
   12099 	int32_t  rv = 0;
   12100 	uint32_t flash_bank = 0;
   12101 	uint32_t act_offset = 0;
   12102 	uint32_t bank_offset = 0;
   12103 	uint16_t word = 0;
   12104 	uint16_t i = 0;
   12105 
   12106 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12107 		device_xname(sc->sc_dev), __func__));
   12108 
   12109 	if (sc->nvm.acquire(sc) != 0)
   12110 		return -1;
   12111 
   12112 	/*
   12113 	 * We need to know which is the valid flash bank.  In the event
   12114 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12115 	 * managing flash_bank.  So it cannot be trusted and needs
   12116 	 * to be updated with each read.
   12117 	 */
   12118 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12119 	if (rv) {
   12120 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12121 			device_xname(sc->sc_dev)));
   12122 		flash_bank = 0;
   12123 	}
   12124 
   12125 	/*
   12126 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12127 	 * size
   12128 	 */
   12129 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12130 
   12131 	for (i = 0; i < words; i++) {
   12132 		/* The NVM part needs a byte offset, hence * 2 */
   12133 		act_offset = bank_offset + ((offset + i) * 2);
   12134 		rv = wm_read_ich8_word(sc, act_offset, &word);
   12135 		if (rv) {
   12136 			aprint_error_dev(sc->sc_dev,
   12137 			    "%s: failed to read NVM\n", __func__);
   12138 			break;
   12139 		}
   12140 		data[i] = word;
   12141 	}
   12142 
   12143 	sc->nvm.release(sc);
   12144 	return rv;
   12145 }
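
          /*
           * Offset arithmetic example for the loop above: with
           * flash_bank = 1, a bank size of 0x1000 words (illustrative only)
           * and offset = 0x10, word 0 is read from byte
           * 0x1000 * 2 + 0x10 * 2 = 0x2020 within the flash area.
           */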
   12146 
   12147 /******************************************************************************
   12148  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   12149  * register.
   12150  *
   12151  * sc - Struct containing variables accessed by shared code
   12152  * offset - offset of word in the EEPROM to read
   12153  * data - word read from the EEPROM
   12154  * words - number of words to read
   12155  *****************************************************************************/
   12156 static int
   12157 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12158 {
   12159 	int32_t  rv = 0;
   12160 	uint32_t flash_bank = 0;
   12161 	uint32_t act_offset = 0;
   12162 	uint32_t bank_offset = 0;
   12163 	uint32_t dword = 0;
   12164 	uint16_t i = 0;
   12165 
   12166 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12167 		device_xname(sc->sc_dev), __func__));
   12168 
   12169 	if (sc->nvm.acquire(sc) != 0)
   12170 		return -1;
   12171 
   12172 	/*
   12173 	 * We need to know which is the valid flash bank.  In the event
   12174 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   12175 	 * managing flash_bank.  So it cannot be trusted and needs
   12176 	 * to be updated with each read.
   12177 	 */
   12178 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   12179 	if (rv) {
   12180 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   12181 			device_xname(sc->sc_dev)));
   12182 		flash_bank = 0;
   12183 	}
   12184 
   12185 	/*
   12186 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   12187 	 * size
   12188 	 */
   12189 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   12190 
   12191 	for (i = 0; i < words; i++) {
   12192 		/* The NVM part needs a byte offset, hence * 2 */
   12193 		act_offset = bank_offset + ((offset + i) * 2);
   12194 		/* but we must read dword aligned, so mask ... */
   12195 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   12196 		if (rv) {
   12197 			aprint_error_dev(sc->sc_dev,
   12198 			    "%s: failed to read NVM\n", __func__);
   12199 			break;
   12200 		}
   12201 		/* ... and pick out low or high word */
   12202 		if ((act_offset & 0x2) == 0)
   12203 			data[i] = (uint16_t)(dword & 0xFFFF);
   12204 		else
   12205 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   12206 	}
   12207 
   12208 	sc->nvm.release(sc);
   12209 	return rv;
   12210 }
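
          /*
           * Word-picking example for the SPT path: act_offset = 0x2022 is
           * fetched as the dword at 0x2020, and because bit 1 of act_offset
           * is set, the upper 16 bits of that dword are the word we want.
           */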
   12211 
   12212 /* iNVM */
   12213 
   12214 static int
   12215 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   12216 {
   12217 	int32_t  rv = 0;
   12218 	uint32_t invm_dword;
   12219 	uint16_t i;
   12220 	uint8_t record_type, word_address;
   12221 
   12222 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12223 		device_xname(sc->sc_dev), __func__));
   12224 
   12225 	for (i = 0; i < INVM_SIZE; i++) {
   12226 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   12227 		/* Get record type */
   12228 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   12229 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   12230 			break;
   12231 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   12232 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   12233 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   12234 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   12235 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   12236 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   12237 			if (word_address == address) {
   12238 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   12239 				rv = 0;
   12240 				break;
   12241 			}
   12242 		}
   12243 	}
   12244 
   12245 	return rv;
   12246 }
   12247 
   12248 static int
   12249 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   12250 {
   12251 	int rv = 0;
   12252 	int i;
   12253 
   12254 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12255 		device_xname(sc->sc_dev), __func__));
   12256 
   12257 	if (sc->nvm.acquire(sc) != 0)
   12258 		return -1;
   12259 
   12260 	for (i = 0; i < words; i++) {
   12261 		switch (offset + i) {
   12262 		case NVM_OFF_MACADDR:
   12263 		case NVM_OFF_MACADDR1:
   12264 		case NVM_OFF_MACADDR2:
   12265 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   12266 			if (rv != 0) {
   12267 				data[i] = 0xffff;
   12268 				rv = -1;
   12269 			}
   12270 			break;
   12271 		case NVM_OFF_CFG2:
   12272 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12273 			if (rv != 0) {
   12274 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   12275 				rv = 0;
   12276 			}
   12277 			break;
   12278 		case NVM_OFF_CFG4:
   12279 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12280 			if (rv != 0) {
   12281 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   12282 				rv = 0;
   12283 			}
   12284 			break;
   12285 		case NVM_OFF_LED_1_CFG:
   12286 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12287 			if (rv != 0) {
   12288 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   12289 				rv = 0;
   12290 			}
   12291 			break;
   12292 		case NVM_OFF_LED_0_2_CFG:
   12293 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12294 			if (rv != 0) {
   12295 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   12296 				rv = 0;
   12297 			}
   12298 			break;
   12299 		case NVM_OFF_ID_LED_SETTINGS:
   12300 			rv = wm_nvm_read_word_invm(sc, offset, data);
   12301 			if (rv != 0) {
   12302 				*data = ID_LED_RESERVED_FFFF;
   12303 				rv = 0;
   12304 			}
   12305 			break;
   12306 		default:
   12307 			DPRINTF(WM_DEBUG_NVM,
   12308 			    ("NVM word 0x%02x is not mapped.\n", offset));
   12309 			*data = NVM_RESERVED_WORD;
   12310 			break;
   12311 		}
   12312 	}
   12313 
   12314 	sc->nvm.release(sc);
   12315 	return rv;
   12316 }
   12317 
    12318 /* NVM locking, type detection, checksum validation, version and read */
   12319 
   12320 static int
   12321 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   12322 {
   12323 	uint32_t eecd = 0;
   12324 
   12325 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   12326 	    || sc->sc_type == WM_T_82583) {
   12327 		eecd = CSR_READ(sc, WMREG_EECD);
   12328 
   12329 		/* Isolate bits 15 & 16 */
   12330 		eecd = ((eecd >> 15) & 0x03);
   12331 
   12332 		/* If both bits are set, device is Flash type */
   12333 		if (eecd == 0x03)
   12334 			return 0;
   12335 	}
   12336 	return 1;
   12337 }
   12338 
   12339 static int
   12340 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   12341 {
   12342 	uint32_t eec;
   12343 
   12344 	eec = CSR_READ(sc, WMREG_EEC);
   12345 	if ((eec & EEC_FLASH_DETECTED) != 0)
   12346 		return 1;
   12347 
   12348 	return 0;
   12349 }
   12350 
   12351 /*
   12352  * wm_nvm_validate_checksum
   12353  *
   12354  * The checksum is defined as the sum of the first 64 (16 bit) words.
   12355  */
   12356 static int
   12357 wm_nvm_validate_checksum(struct wm_softc *sc)
   12358 {
   12359 	uint16_t checksum;
   12360 	uint16_t eeprom_data;
   12361 #ifdef WM_DEBUG
   12362 	uint16_t csum_wordaddr, valid_checksum;
   12363 #endif
   12364 	int i;
   12365 
   12366 	checksum = 0;
   12367 
   12368 	/* Don't check for I211 */
   12369 	if (sc->sc_type == WM_T_I211)
   12370 		return 0;
   12371 
   12372 #ifdef WM_DEBUG
   12373 	if (sc->sc_type == WM_T_PCH_LPT) {
   12374 		csum_wordaddr = NVM_OFF_COMPAT;
   12375 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   12376 	} else {
   12377 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   12378 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   12379 	}
   12380 
   12381 	/* Dump EEPROM image for debug */
   12382 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12383 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12384 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   12385 		/* XXX PCH_SPT? */
   12386 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   12387 		if ((eeprom_data & valid_checksum) == 0) {
   12388 			DPRINTF(WM_DEBUG_NVM,
   12389 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   12390 				device_xname(sc->sc_dev), eeprom_data,
   12391 				    valid_checksum));
   12392 		}
   12393 	}
   12394 
   12395 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   12396 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   12397 		for (i = 0; i < NVM_SIZE; i++) {
   12398 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12399 				printf("XXXX ");
   12400 			else
   12401 				printf("%04hx ", eeprom_data);
   12402 			if (i % 8 == 7)
   12403 				printf("\n");
   12404 		}
   12405 	}
   12406 
   12407 #endif /* WM_DEBUG */
   12408 
   12409 	for (i = 0; i < NVM_SIZE; i++) {
   12410 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   12411 			return 1;
   12412 		checksum += eeprom_data;
   12413 	}
   12414 
   12415 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   12416 #ifdef WM_DEBUG
   12417 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   12418 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   12419 #endif
   12420 	}
   12421 
   12422 	return 0;
   12423 }
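
          /*
           * Checksum example: if the first 63 words sum to 0x1234
           * (mod 2^16), a correctly programmed image stores 0xa886 in the
           * checksum word so that the total of all 64 words is NVM_CHECKSUM
           * (0xbaba by the usual Intel convention).  Note that this
           * function only warns under WM_DEBUG; a mismatch is not treated
           * as fatal.
           */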
   12424 
   12425 static void
   12426 wm_nvm_version_invm(struct wm_softc *sc)
   12427 {
   12428 	uint32_t dword;
   12429 
    12430 	/*
    12431 	 * Linux's code to decode the version is very strange, so we don't
    12432 	 * follow that algorithm and simply use word 61 as the
    12433 	 * documentation describes.  Perhaps it's not perfect, though...
    12434 	 *
    12435 	 * Example:
    12436 	 *
    12437 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
    12438 	 */
   12439 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   12440 	dword = __SHIFTOUT(dword, INVM_VER_1);
   12441 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   12442 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   12443 }
   12444 
   12445 static void
   12446 wm_nvm_version(struct wm_softc *sc)
   12447 {
   12448 	uint16_t major, minor, build, patch;
   12449 	uint16_t uid0, uid1;
   12450 	uint16_t nvm_data;
   12451 	uint16_t off;
   12452 	bool check_version = false;
   12453 	bool check_optionrom = false;
   12454 	bool have_build = false;
   12455 	bool have_uid = true;
   12456 
   12457 	/*
   12458 	 * Version format:
   12459 	 *
   12460 	 * XYYZ
   12461 	 * X0YZ
   12462 	 * X0YY
   12463 	 *
   12464 	 * Example:
   12465 	 *
   12466 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   12467 	 *	82571	0x50a6	5.10.6?
   12468 	 *	82572	0x506a	5.6.10?
   12469 	 *	82572EI	0x5069	5.6.9?
   12470 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   12471 	 *		0x2013	2.1.3?
    12472 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   12473 	 */
   12474 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   12475 	switch (sc->sc_type) {
   12476 	case WM_T_82571:
   12477 	case WM_T_82572:
   12478 	case WM_T_82574:
   12479 	case WM_T_82583:
   12480 		check_version = true;
   12481 		check_optionrom = true;
   12482 		have_build = true;
   12483 		break;
   12484 	case WM_T_82575:
   12485 	case WM_T_82576:
   12486 	case WM_T_82580:
   12487 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   12488 			check_version = true;
   12489 		break;
   12490 	case WM_T_I211:
   12491 		wm_nvm_version_invm(sc);
   12492 		have_uid = false;
   12493 		goto printver;
   12494 	case WM_T_I210:
   12495 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   12496 			wm_nvm_version_invm(sc);
   12497 			have_uid = false;
   12498 			goto printver;
   12499 		}
   12500 		/* FALLTHROUGH */
   12501 	case WM_T_I350:
   12502 	case WM_T_I354:
   12503 		check_version = true;
   12504 		check_optionrom = true;
   12505 		break;
   12506 	default:
   12507 		return;
   12508 	}
   12509 	if (check_version) {
   12510 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   12511 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   12512 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   12513 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   12514 			build = nvm_data & NVM_BUILD_MASK;
   12515 			have_build = true;
   12516 		} else
   12517 			minor = nvm_data & 0x00ff;
   12518 
    12519 		/* Convert BCD-style minor (e.g. 0x0a) to decimal (10) */
    12520 		minor = (minor / 16) * 10 + (minor % 16);
   12521 		sc->sc_nvm_ver_major = major;
   12522 		sc->sc_nvm_ver_minor = minor;
   12523 
   12524 printver:
   12525 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   12526 		    sc->sc_nvm_ver_minor);
   12527 		if (have_build) {
   12528 			sc->sc_nvm_ver_build = build;
   12529 			aprint_verbose(".%d", build);
   12530 		}
   12531 	}
   12532 	if (check_optionrom) {
   12533 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   12534 		/* Option ROM Version */
   12535 		if ((off != 0x0000) && (off != 0xffff)) {
   12536 			off += NVM_COMBO_VER_OFF;
   12537 			wm_nvm_read(sc, off + 1, 1, &uid1);
   12538 			wm_nvm_read(sc, off, 1, &uid0);
   12539 			if ((uid0 != 0) && (uid0 != 0xffff)
   12540 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   12541 				/* 16bits */
   12542 				major = uid0 >> 8;
   12543 				build = (uid0 << 8) | (uid1 >> 8);
   12544 				patch = uid1 & 0x00ff;
   12545 				aprint_verbose(", option ROM Version %d.%d.%d",
   12546 				    major, build, patch);
   12547 			}
   12548 		}
   12549 	}
   12550 
   12551 	if (have_uid) {
   12552 		wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   12553 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   12554 	}
   12555 }
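
          /*
           * Worked decode for the 82571 example above: nvm_data = 0x50a2
           * gives major = 5, minor = 0x0a and build = 2; the decimal
           * conversion turns minor 0x0a into 10, which is how "5.10.2?"
           * is obtained.
           */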
   12556 
   12557 /*
   12558  * wm_nvm_read:
   12559  *
   12560  *	Read data from the serial EEPROM.
   12561  */
   12562 static int
   12563 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   12564 {
   12565 	int rv;
   12566 
   12567 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   12568 		device_xname(sc->sc_dev), __func__));
   12569 
   12570 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   12571 		return -1;
   12572 
   12573 	rv = sc->nvm.read(sc, word, wordcnt, data);
   12574 
   12575 	return rv;
   12576 }
   12577 
   12578 /*
   12579  * Hardware semaphores.
    12580  * Very complex...
   12581  */
   12582 
   12583 static int
   12584 wm_get_null(struct wm_softc *sc)
   12585 {
   12586 
   12587 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12588 		device_xname(sc->sc_dev), __func__));
   12589 	return 0;
   12590 }
   12591 
   12592 static void
   12593 wm_put_null(struct wm_softc *sc)
   12594 {
   12595 
   12596 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12597 		device_xname(sc->sc_dev), __func__));
   12598 	return;
   12599 }
   12600 
   12601 static int
   12602 wm_get_eecd(struct wm_softc *sc)
   12603 {
   12604 	uint32_t reg;
   12605 	int x;
   12606 
   12607 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12608 		device_xname(sc->sc_dev), __func__));
   12609 
   12610 	reg = CSR_READ(sc, WMREG_EECD);
   12611 
   12612 	/* Request EEPROM access. */
   12613 	reg |= EECD_EE_REQ;
   12614 	CSR_WRITE(sc, WMREG_EECD, reg);
   12615 
    12616 	/* ...and wait for it to be granted. */
   12617 	for (x = 0; x < 1000; x++) {
   12618 		reg = CSR_READ(sc, WMREG_EECD);
   12619 		if (reg & EECD_EE_GNT)
   12620 			break;
   12621 		delay(5);
   12622 	}
   12623 	if ((reg & EECD_EE_GNT) == 0) {
   12624 		aprint_error_dev(sc->sc_dev,
   12625 		    "could not acquire EEPROM GNT\n");
   12626 		reg &= ~EECD_EE_REQ;
   12627 		CSR_WRITE(sc, WMREG_EECD, reg);
   12628 		return -1;
   12629 	}
   12630 
   12631 	return 0;
   12632 }
   12633 
   12634 static void
   12635 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
   12636 {
   12637 
   12638 	*eecd |= EECD_SK;
   12639 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12640 	CSR_WRITE_FLUSH(sc);
   12641 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12642 		delay(1);
   12643 	else
   12644 		delay(50);
   12645 }
   12646 
   12647 static void
   12648 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
   12649 {
   12650 
   12651 	*eecd &= ~EECD_SK;
   12652 	CSR_WRITE(sc, WMREG_EECD, *eecd);
   12653 	CSR_WRITE_FLUSH(sc);
   12654 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
   12655 		delay(1);
   12656 	else
   12657 		delay(50);
   12658 }
   12659 
   12660 static void
   12661 wm_put_eecd(struct wm_softc *sc)
   12662 {
   12663 	uint32_t reg;
   12664 
   12665 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12666 		device_xname(sc->sc_dev), __func__));
   12667 
   12668 	/* Stop nvm */
   12669 	reg = CSR_READ(sc, WMREG_EECD);
   12670 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
   12671 		/* Pull CS high */
   12672 		reg |= EECD_CS;
   12673 		wm_nvm_eec_clock_lower(sc, &reg);
   12674 	} else {
   12675 		/* CS on Microwire is active-high */
   12676 		reg &= ~(EECD_CS | EECD_DI);
   12677 		CSR_WRITE(sc, WMREG_EECD, reg);
   12678 		wm_nvm_eec_clock_raise(sc, &reg);
   12679 		wm_nvm_eec_clock_lower(sc, &reg);
   12680 	}
   12681 
   12682 	reg = CSR_READ(sc, WMREG_EECD);
   12683 	reg &= ~EECD_EE_REQ;
   12684 	CSR_WRITE(sc, WMREG_EECD, reg);
   12685 
   12686 	return;
   12687 }
   12688 
   12689 /*
   12690  * Get hardware semaphore.
   12691  * Same as e1000_get_hw_semaphore_generic()
   12692  */
   12693 static int
   12694 wm_get_swsm_semaphore(struct wm_softc *sc)
   12695 {
   12696 	int32_t timeout;
   12697 	uint32_t swsm;
   12698 
   12699 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12700 		device_xname(sc->sc_dev), __func__));
   12701 	KASSERT(sc->sc_nvm_wordsize > 0);
   12702 
   12703 	/* Get the SW semaphore. */
   12704 	timeout = sc->sc_nvm_wordsize + 1;
   12705 	while (timeout) {
   12706 		swsm = CSR_READ(sc, WMREG_SWSM);
   12707 
   12708 		if ((swsm & SWSM_SMBI) == 0)
   12709 			break;
   12710 
   12711 		delay(50);
   12712 		timeout--;
   12713 	}
   12714 
   12715 	if (timeout == 0) {
   12716 		aprint_error_dev(sc->sc_dev,
   12717 		    "could not acquire SWSM SMBI\n");
   12718 		return 1;
   12719 	}
   12720 
   12721 	/* Get the FW semaphore. */
   12722 	timeout = sc->sc_nvm_wordsize + 1;
   12723 	while (timeout) {
   12724 		swsm = CSR_READ(sc, WMREG_SWSM);
   12725 		swsm |= SWSM_SWESMBI;
   12726 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   12727 		/* If we managed to set the bit we got the semaphore. */
   12728 		swsm = CSR_READ(sc, WMREG_SWSM);
   12729 		if (swsm & SWSM_SWESMBI)
   12730 			break;
   12731 
   12732 		delay(50);
   12733 		timeout--;
   12734 	}
   12735 
   12736 	if (timeout == 0) {
   12737 		aprint_error_dev(sc->sc_dev,
   12738 		    "could not acquire SWSM SWESMBI\n");
   12739 		/* Release semaphores */
   12740 		wm_put_swsm_semaphore(sc);
   12741 		return 1;
   12742 	}
   12743 	return 0;
   12744 }
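
          /*
           * The two-stage acquisition above, in short: SMBI arbitrates
           * among software agents and is ours when it reads back 0;
           * SWESMBI arbitrates with firmware and is ours when it can still
           * be read back as 1 after we set it.  Both must be held before
           * touching shared NVM/PHY resources.
           */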
   12745 
   12746 /*
   12747  * Put hardware semaphore.
   12748  * Same as e1000_put_hw_semaphore_generic()
   12749  */
   12750 static void
   12751 wm_put_swsm_semaphore(struct wm_softc *sc)
   12752 {
   12753 	uint32_t swsm;
   12754 
   12755 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12756 		device_xname(sc->sc_dev), __func__));
   12757 
   12758 	swsm = CSR_READ(sc, WMREG_SWSM);
   12759 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   12760 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   12761 }
   12762 
   12763 /*
   12764  * Get SW/FW semaphore.
   12765  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
   12766  */
   12767 static int
   12768 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12769 {
   12770 	uint32_t swfw_sync;
   12771 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   12772 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    12773 	int i, timeout;
   12774 
   12775 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12776 		device_xname(sc->sc_dev), __func__));
   12777 
   12778 	if (sc->sc_type == WM_T_80003)
   12779 		timeout = 50;
   12780 	else
   12781 		timeout = 200;
   12782 
    12783 	for (i = 0; i < timeout; i++) {
   12784 		if (wm_get_swsm_semaphore(sc)) {
   12785 			aprint_error_dev(sc->sc_dev,
   12786 			    "%s: failed to get semaphore\n",
   12787 			    __func__);
   12788 			return 1;
   12789 		}
   12790 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12791 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   12792 			swfw_sync |= swmask;
   12793 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12794 			wm_put_swsm_semaphore(sc);
   12795 			return 0;
   12796 		}
   12797 		wm_put_swsm_semaphore(sc);
   12798 		delay(5000);
   12799 	}
   12800 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   12801 	    device_xname(sc->sc_dev), mask, swfw_sync);
   12802 	return 1;
   12803 }
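
          /*
           * SW_FW_SYNC layout sketch: for a resource mask m, software
           * claims m << SWFW_SOFT_SHIFT and firmware claims
           * m << SWFW_FIRM_SHIFT.  The resource is free only when both
           * bits are clear, and we take it by setting the software bit
           * while holding the SWSM semaphore.
           */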
   12804 
   12805 static void
   12806 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   12807 {
   12808 	uint32_t swfw_sync;
   12809 
   12810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12811 		device_xname(sc->sc_dev), __func__));
   12812 
   12813 	while (wm_get_swsm_semaphore(sc) != 0)
   12814 		continue;
   12815 
   12816 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   12817 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   12818 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   12819 
   12820 	wm_put_swsm_semaphore(sc);
   12821 }
   12822 
   12823 static int
   12824 wm_get_nvm_80003(struct wm_softc *sc)
   12825 {
   12826 	int rv;
   12827 
   12828 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
   12829 		device_xname(sc->sc_dev), __func__));
   12830 
   12831 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
   12832 		aprint_error_dev(sc->sc_dev,
   12833 		    "%s: failed to get semaphore(SWFW)\n",
   12834 		    __func__);
   12835 		return rv;
   12836 	}
   12837 
   12838 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12839 	    && (rv = wm_get_eecd(sc)) != 0) {
   12840 		aprint_error_dev(sc->sc_dev,
   12841 		    "%s: failed to get semaphore(EECD)\n",
   12842 		    __func__);
   12843 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12844 		return rv;
   12845 	}
   12846 
   12847 	return 0;
   12848 }
   12849 
   12850 static void
   12851 wm_put_nvm_80003(struct wm_softc *sc)
   12852 {
   12853 
   12854 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12855 		device_xname(sc->sc_dev), __func__));
   12856 
   12857 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12858 		wm_put_eecd(sc);
   12859 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   12860 }
   12861 
   12862 static int
   12863 wm_get_nvm_82571(struct wm_softc *sc)
   12864 {
   12865 	int rv;
   12866 
   12867 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12868 		device_xname(sc->sc_dev), __func__));
   12869 
   12870 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
   12871 		return rv;
   12872 
   12873 	switch (sc->sc_type) {
   12874 	case WM_T_82573:
   12875 		break;
   12876 	default:
   12877 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12878 			rv = wm_get_eecd(sc);
   12879 		break;
   12880 	}
   12881 
   12882 	if (rv != 0) {
   12883 		aprint_error_dev(sc->sc_dev,
   12884 		    "%s: failed to get semaphore\n",
   12885 		    __func__);
   12886 		wm_put_swsm_semaphore(sc);
   12887 	}
   12888 
   12889 	return rv;
   12890 }
   12891 
   12892 static void
   12893 wm_put_nvm_82571(struct wm_softc *sc)
   12894 {
   12895 
   12896 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12897 		device_xname(sc->sc_dev), __func__));
   12898 
   12899 	switch (sc->sc_type) {
   12900 	case WM_T_82573:
   12901 		break;
   12902 	default:
   12903 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
   12904 			wm_put_eecd(sc);
   12905 		break;
   12906 	}
   12907 
   12908 	wm_put_swsm_semaphore(sc);
   12909 }
   12910 
   12911 static int
   12912 wm_get_phy_82575(struct wm_softc *sc)
   12913 {
   12914 
   12915 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12916 		device_xname(sc->sc_dev), __func__));
   12917 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12918 }
   12919 
   12920 static void
   12921 wm_put_phy_82575(struct wm_softc *sc)
   12922 {
   12923 
   12924 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12925 		device_xname(sc->sc_dev), __func__));
   12926 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   12927 }
   12928 
   12929 static int
   12930 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   12931 {
   12932 	uint32_t ext_ctrl;
    12933 	int timeout;
   12934 
   12935 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12936 		device_xname(sc->sc_dev), __func__));
   12937 
   12938 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12939 	for (timeout = 0; timeout < 200; timeout++) {
   12940 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12941 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12942 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12943 
   12944 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12945 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12946 			return 0;
   12947 		delay(5000);
   12948 	}
   12949 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   12950 	    device_xname(sc->sc_dev), ext_ctrl);
   12951 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12952 	return 1;
   12953 }
   12954 
   12955 static void
   12956 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   12957 {
   12958 	uint32_t ext_ctrl;
   12959 
   12960 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12961 		device_xname(sc->sc_dev), __func__));
   12962 
   12963 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12964 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12965 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12966 
   12967 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   12968 }
   12969 
   12970 static int
   12971 wm_get_swflag_ich8lan(struct wm_softc *sc)
   12972 {
   12973 	uint32_t ext_ctrl;
   12974 	int timeout;
   12975 
   12976 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   12977 		device_xname(sc->sc_dev), __func__));
   12978 	mutex_enter(sc->sc_ich_phymtx);
   12979 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   12980 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12981 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   12982 			break;
   12983 		delay(1000);
   12984 	}
   12985 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   12986 		printf("%s: SW has already locked the resource\n",
   12987 		    device_xname(sc->sc_dev));
   12988 		goto out;
   12989 	}
   12990 
   12991 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   12992 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   12993 	for (timeout = 0; timeout < 1000; timeout++) {
   12994 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   12995 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   12996 			break;
   12997 		delay(1000);
   12998 	}
   12999 	if (timeout >= 1000) {
   13000 		printf("%s: failed to acquire semaphore\n",
   13001 		    device_xname(sc->sc_dev));
   13002 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13003 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13004 		goto out;
   13005 	}
   13006 	return 0;
   13007 
   13008 out:
   13009 	mutex_exit(sc->sc_ich_phymtx);
   13010 	return 1;
   13011 }
   13012 
   13013 static void
   13014 wm_put_swflag_ich8lan(struct wm_softc *sc)
   13015 {
   13016 	uint32_t ext_ctrl;
   13017 
   13018 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13019 		device_xname(sc->sc_dev), __func__));
   13020 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   13021 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   13022 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13023 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   13024 	} else {
   13025 		printf("%s: Semaphore unexpectedly released\n",
   13026 		    device_xname(sc->sc_dev));
   13027 	}
   13028 
   13029 	mutex_exit(sc->sc_ich_phymtx);
   13030 }
   13031 
   13032 static int
   13033 wm_get_nvm_ich8lan(struct wm_softc *sc)
   13034 {
   13035 
   13036 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13037 		device_xname(sc->sc_dev), __func__));
   13038 	mutex_enter(sc->sc_ich_nvmmtx);
   13039 
   13040 	return 0;
   13041 }
   13042 
   13043 static void
   13044 wm_put_nvm_ich8lan(struct wm_softc *sc)
   13045 {
   13046 
   13047 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13048 		device_xname(sc->sc_dev), __func__));
   13049 	mutex_exit(sc->sc_ich_nvmmtx);
   13050 }
   13051 
   13052 static int
   13053 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   13054 {
   13055 	int i = 0;
   13056 	uint32_t reg;
   13057 
   13058 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13059 		device_xname(sc->sc_dev), __func__));
   13060 
   13061 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
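	/*
	 * Keep writing the SW ownership bit until it reads back as set,
	 * retrying every 2ms up to WM_MDIO_OWNERSHIP_TIMEOUT times.
	 */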
   13062 	do {
   13063 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   13064 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   13065 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13066 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   13067 			break;
   13068 		delay(2*1000);
   13069 		i++;
   13070 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   13071 
   13072 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   13073 		wm_put_hw_semaphore_82573(sc);
   13074 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   13075 		    device_xname(sc->sc_dev));
   13076 		return -1;
   13077 	}
   13078 
   13079 	return 0;
   13080 }
   13081 
   13082 static void
   13083 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   13084 {
   13085 	uint32_t reg;
   13086 
   13087 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13088 		device_xname(sc->sc_dev), __func__));
   13089 
   13090 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13091 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   13092 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13093 }
   13094 
   13095 /*
   13096  * Management mode and power management related subroutines.
   13097  * BMC, AMT, suspend/resume and EEE.
   13098  */
   13099 
   13100 #ifdef WM_WOL
   13101 static int
   13102 wm_check_mng_mode(struct wm_softc *sc)
   13103 {
   13104 	int rv;
   13105 
   13106 	switch (sc->sc_type) {
   13107 	case WM_T_ICH8:
   13108 	case WM_T_ICH9:
   13109 	case WM_T_ICH10:
   13110 	case WM_T_PCH:
   13111 	case WM_T_PCH2:
   13112 	case WM_T_PCH_LPT:
   13113 	case WM_T_PCH_SPT:
   13114 		rv = wm_check_mng_mode_ich8lan(sc);
   13115 		break;
   13116 	case WM_T_82574:
   13117 	case WM_T_82583:
   13118 		rv = wm_check_mng_mode_82574(sc);
   13119 		break;
   13120 	case WM_T_82571:
   13121 	case WM_T_82572:
   13122 	case WM_T_82573:
   13123 	case WM_T_80003:
   13124 		rv = wm_check_mng_mode_generic(sc);
   13125 		break;
   13126 	default:
		/* Nothing to do */
   13128 		rv = 0;
   13129 		break;
   13130 	}
   13131 
   13132 	return rv;
   13133 }
   13134 
   13135 static int
   13136 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   13137 {
   13138 	uint32_t fwsm;
   13139 
   13140 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13141 
   13142 	if (((fwsm & FWSM_FW_VALID) != 0)
   13143 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13144 		return 1;
   13145 
   13146 	return 0;
   13147 }
   13148 
   13149 static int
   13150 wm_check_mng_mode_82574(struct wm_softc *sc)
   13151 {
   13152 	uint16_t data;
   13153 
   13154 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13155 
   13156 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   13157 		return 1;
   13158 
   13159 	return 0;
   13160 }
   13161 
   13162 static int
   13163 wm_check_mng_mode_generic(struct wm_softc *sc)
   13164 {
   13165 	uint32_t fwsm;
   13166 
   13167 	fwsm = CSR_READ(sc, WMREG_FWSM);
   13168 
   13169 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   13170 		return 1;
   13171 
   13172 	return 0;
   13173 }
   13174 #endif /* WM_WOL */
   13175 
   13176 static int
   13177 wm_enable_mng_pass_thru(struct wm_softc *sc)
   13178 {
   13179 	uint32_t manc, fwsm, factps;
   13180 
   13181 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   13182 		return 0;
   13183 
   13184 	manc = CSR_READ(sc, WMREG_MANC);
   13185 
   13186 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   13187 		device_xname(sc->sc_dev), manc));
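	/*
	 * Pass-through requires TCO receive to be enabled, plus one of:
	 * the ARC subsystem reporting iAMT mode, the 82574/82583 NVM
	 * selecting pass-through mode (each with the manageability clock
	 * not gated in FACTPS), or, on other chips, SMBus enabled and
	 * ASF disabled in MANC.
	 */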
   13188 	if ((manc & MANC_RECV_TCO_EN) == 0)
   13189 		return 0;
   13190 
   13191 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   13192 		fwsm = CSR_READ(sc, WMREG_FWSM);
   13193 		factps = CSR_READ(sc, WMREG_FACTPS);
   13194 		if (((factps & FACTPS_MNGCG) == 0)
   13195 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   13196 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   13198 		uint16_t data;
   13199 
   13200 		factps = CSR_READ(sc, WMREG_FACTPS);
   13201 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   13202 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   13203 			device_xname(sc->sc_dev), factps, data));
   13204 		if (((factps & FACTPS_MNGCG) == 0)
   13205 		    && ((data & NVM_CFG2_MNGM_MASK)
   13206 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   13207 			return 1;
   13208 	} else if (((manc & MANC_SMBUS_EN) != 0)
   13209 	    && ((manc & MANC_ASF_EN) == 0))
   13210 		return 1;
   13211 
   13212 	return 0;
   13213 }
   13214 
   13215 static bool
   13216 wm_phy_resetisblocked(struct wm_softc *sc)
   13217 {
   13218 	bool blocked = false;
   13219 	uint32_t reg;
   13220 	int i = 0;
   13221 
   13222 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13223 		device_xname(sc->sc_dev), __func__));
   13224 
   13225 	switch (sc->sc_type) {
   13226 	case WM_T_ICH8:
   13227 	case WM_T_ICH9:
   13228 	case WM_T_ICH10:
   13229 	case WM_T_PCH:
   13230 	case WM_T_PCH2:
   13231 	case WM_T_PCH_LPT:
   13232 	case WM_T_PCH_SPT:
   13233 		do {
   13234 			reg = CSR_READ(sc, WMREG_FWSM);
   13235 			if ((reg & FWSM_RSPCIPHY) == 0) {
   13236 				blocked = true;
   13237 				delay(10*1000);
   13238 				continue;
   13239 			}
   13240 			blocked = false;
   13241 		} while (blocked && (i++ < 30));
   13242 		return blocked;
   13244 	case WM_T_82571:
   13245 	case WM_T_82572:
   13246 	case WM_T_82573:
   13247 	case WM_T_82574:
   13248 	case WM_T_82583:
   13249 	case WM_T_80003:
   13250 		reg = CSR_READ(sc, WMREG_MANC);
   13251 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   13252 			return true;
   13253 		else
   13254 			return false;
   13256 	default:
   13257 		/* no problem */
   13258 		break;
   13259 	}
   13260 
   13261 	return false;
   13262 }
   13263 
   13264 static void
   13265 wm_get_hw_control(struct wm_softc *sc)
   13266 {
   13267 	uint32_t reg;
   13268 
   13269 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13270 		device_xname(sc->sc_dev), __func__));
   13271 
   13272 	if (sc->sc_type == WM_T_82573) {
   13273 		reg = CSR_READ(sc, WMREG_SWSM);
   13274 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   13275 	} else if (sc->sc_type >= WM_T_82571) {
   13276 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13277 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   13278 	}
   13279 }
   13280 
   13281 static void
   13282 wm_release_hw_control(struct wm_softc *sc)
   13283 {
   13284 	uint32_t reg;
   13285 
   13286 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   13287 		device_xname(sc->sc_dev), __func__));
   13288 
   13289 	if (sc->sc_type == WM_T_82573) {
   13290 		reg = CSR_READ(sc, WMREG_SWSM);
   13291 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
   13292 	} else if (sc->sc_type >= WM_T_82571) {
   13293 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13294 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   13295 	}
   13296 }
   13297 
   13298 static void
   13299 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   13300 {
   13301 	uint32_t reg;
   13302 
   13303 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13304 		device_xname(sc->sc_dev), __func__));
   13305 
   13306 	if (sc->sc_type < WM_T_PCH2)
   13307 		return;
   13308 
   13309 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   13310 
   13311 	if (gate)
   13312 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   13313 	else
   13314 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   13315 
   13316 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   13317 }
   13318 
   13319 static void
   13320 wm_smbustopci(struct wm_softc *sc)
   13321 {
   13322 	uint32_t fwsm, reg;
   13323 	int rv = 0;
   13324 
   13325 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13326 		device_xname(sc->sc_dev), __func__));
   13327 
   13328 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   13329 	wm_gate_hw_phy_config_ich8lan(sc, true);
   13330 
   13331 	/* Disable ULP */
   13332 	wm_ulp_disable(sc);
   13333 
   13334 	/* Acquire PHY semaphore */
   13335 	sc->phy.acquire(sc);
   13336 
   13337 	fwsm = CSR_READ(sc, WMREG_FWSM);
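	/*
	 * Check whether the PHY responds on MDIO; if it doesn't, force
	 * SMBus mode in the MAC, toggle LANPHYPC to power-cycle the PHY
	 * and test again before giving up.
	 */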
   13338 	switch (sc->sc_type) {
   13339 	case WM_T_PCH_LPT:
   13340 	case WM_T_PCH_SPT:
   13341 		if (wm_phy_is_accessible_pchlan(sc))
   13342 			break;
   13343 
   13344 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13345 		reg |= CTRL_EXT_FORCE_SMBUS;
   13346 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13347 #if 0
   13348 		/* XXX Isn't this required??? */
   13349 		CSR_WRITE_FLUSH(sc);
   13350 #endif
   13351 		delay(50 * 1000);
   13352 		/* FALLTHROUGH */
   13353 	case WM_T_PCH2:
   13354 		if (wm_phy_is_accessible_pchlan(sc) == true)
   13355 			break;
   13356 		/* FALLTHROUGH */
   13357 	case WM_T_PCH:
   13358 		if (sc->sc_type == WM_T_PCH)
   13359 			if ((fwsm & FWSM_FW_VALID) != 0)
   13360 				break;
   13361 
   13362 		if (wm_phy_resetisblocked(sc) == true) {
   13363 			printf("XXX reset is blocked(3)\n");
   13364 			break;
   13365 		}
   13366 
   13367 		wm_toggle_lanphypc_pch_lpt(sc);
   13368 
   13369 		if (sc->sc_type >= WM_T_PCH_LPT) {
   13370 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13371 				break;
   13372 
   13373 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13374 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   13375 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13376 
   13377 			if (wm_phy_is_accessible_pchlan(sc) == true)
   13378 				break;
   13379 			rv = -1;
   13380 		}
   13381 		break;
   13382 	default:
   13383 		break;
   13384 	}
   13385 
   13386 	/* Release semaphore */
   13387 	sc->phy.release(sc);
   13388 
   13389 	if (rv == 0) {
   13390 		if (wm_phy_resetisblocked(sc)) {
   13391 			printf("XXX reset is blocked(4)\n");
   13392 			goto out;
   13393 		}
   13394 		wm_reset_phy(sc);
   13395 		if (wm_phy_resetisblocked(sc))
   13396 			printf("XXX reset is blocked(4)\n");
   13397 	}
   13398 
   13399 out:
   13400 	/*
   13401 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   13402 	 */
   13403 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
   13404 		delay(10*1000);
   13405 		wm_gate_hw_phy_config_ich8lan(sc, false);
   13406 	}
   13407 }
   13408 
   13409 static void
   13410 wm_init_manageability(struct wm_softc *sc)
   13411 {
   13412 
   13413 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13414 		device_xname(sc->sc_dev), __func__));
   13415 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13416 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   13417 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13418 
   13419 		/* Disable hardware interception of ARP */
   13420 		manc &= ~MANC_ARP_EN;
   13421 
   13422 		/* Enable receiving management packets to the host */
   13423 		if (sc->sc_type >= WM_T_82571) {
   13424 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   13426 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   13427 		}
   13428 
   13429 		CSR_WRITE(sc, WMREG_MANC, manc);
   13430 	}
   13431 }
   13432 
   13433 static void
   13434 wm_release_manageability(struct wm_softc *sc)
   13435 {
   13436 
   13437 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   13438 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   13439 
   13440 		manc |= MANC_ARP_EN;
   13441 		if (sc->sc_type >= WM_T_82571)
   13442 			manc &= ~MANC_EN_MNG2HOST;
   13443 
   13444 		CSR_WRITE(sc, WMREG_MANC, manc);
   13445 	}
   13446 }
   13447 
   13448 static void
   13449 wm_get_wakeup(struct wm_softc *sc)
   13450 {
   13451 
   13452 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   13453 	switch (sc->sc_type) {
   13454 	case WM_T_82573:
   13455 	case WM_T_82583:
   13456 		sc->sc_flags |= WM_F_HAS_AMT;
   13457 		/* FALLTHROUGH */
   13458 	case WM_T_80003:
   13459 	case WM_T_82575:
   13460 	case WM_T_82576:
   13461 	case WM_T_82580:
   13462 	case WM_T_I350:
   13463 	case WM_T_I354:
   13464 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   13465 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   13466 		/* FALLTHROUGH */
   13467 	case WM_T_82541:
   13468 	case WM_T_82541_2:
   13469 	case WM_T_82547:
   13470 	case WM_T_82547_2:
   13471 	case WM_T_82571:
   13472 	case WM_T_82572:
   13473 	case WM_T_82574:
   13474 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13475 		break;
   13476 	case WM_T_ICH8:
   13477 	case WM_T_ICH9:
   13478 	case WM_T_ICH10:
   13479 	case WM_T_PCH:
   13480 	case WM_T_PCH2:
   13481 	case WM_T_PCH_LPT:
   13482 	case WM_T_PCH_SPT:
   13483 		sc->sc_flags |= WM_F_HAS_AMT;
   13484 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   13485 		break;
   13486 	default:
   13487 		break;
   13488 	}
   13489 
   13490 	/* 1: HAS_MANAGE */
   13491 	if (wm_enable_mng_pass_thru(sc) != 0)
   13492 		sc->sc_flags |= WM_F_HAS_MANAGE;
   13493 
   13494 	/*
	 * Note that the WOL flags are set after the EEPROM stuff has been
	 * reset.
   13497 	 */
   13498 }
   13499 
   13500 /*
   13501  * Unconfigure Ultra Low Power mode.
   13502  * Only for I217 and newer (see below).
   13503  */
   13504 static void
   13505 wm_ulp_disable(struct wm_softc *sc)
   13506 {
   13507 	uint32_t reg;
   13508 	int i = 0;
   13509 
   13510 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13511 		device_xname(sc->sc_dev), __func__));
   13512 	/* Exclude old devices */
   13513 	if ((sc->sc_type < WM_T_PCH_LPT)
   13514 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
   13515 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
   13516 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
   13517 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
   13518 		return;
   13519 
   13520 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
   13521 		/* Request ME un-configure ULP mode in the PHY */
   13522 		reg = CSR_READ(sc, WMREG_H2ME);
   13523 		reg &= ~H2ME_ULP;
   13524 		reg |= H2ME_ENFORCE_SETTINGS;
   13525 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13526 
   13527 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
   13528 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
   13529 			if (i++ == 30) {
				printf("%s: %s timed out\n",
				    device_xname(sc->sc_dev), __func__);
   13531 				return;
   13532 			}
   13533 			delay(10 * 1000);
   13534 		}
   13535 		reg = CSR_READ(sc, WMREG_H2ME);
   13536 		reg &= ~H2ME_ENFORCE_SETTINGS;
   13537 		CSR_WRITE(sc, WMREG_H2ME, reg);
   13538 
   13539 		return;
   13540 	}
   13541 
   13542 	/* Acquire semaphore */
   13543 	sc->phy.acquire(sc);
   13544 
   13545 	/* Toggle LANPHYPC */
   13546 	wm_toggle_lanphypc_pch_lpt(sc);
   13547 
   13548 	/* Unforce SMBus mode in PHY */
   13549 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13550 	if (reg == 0x0000 || reg == 0xffff) {
   13551 		uint32_t reg2;
   13552 
   13553 		printf("%s: Force SMBus first.\n", __func__);
   13554 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
   13555 		reg2 |= CTRL_EXT_FORCE_SMBUS;
   13556 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
   13557 		delay(50 * 1000);
   13558 
   13559 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
   13560 	}
   13561 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   13562 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
   13563 
   13564 	/* Unforce SMBus mode in MAC */
   13565 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13566 	reg &= ~CTRL_EXT_FORCE_SMBUS;
   13567 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13568 
   13569 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
   13570 	reg |= HV_PM_CTRL_K1_ENA;
   13571 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
   13572 
   13573 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
   13574 	reg &= ~(I218_ULP_CONFIG1_IND
   13575 	    | I218_ULP_CONFIG1_STICKY_ULP
   13576 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
   13577 	    | I218_ULP_CONFIG1_WOL_HOST
   13578 	    | I218_ULP_CONFIG1_INBAND_EXIT
   13579 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
   13580 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
   13581 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
   13582 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13583 	reg |= I218_ULP_CONFIG1_START;
   13584 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
   13585 
   13586 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   13587 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
   13588 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   13589 
   13590 	/* Release semaphore */
   13591 	sc->phy.release(sc);
   13592 	wm_gmii_reset(sc);
   13593 	delay(50 * 1000);
   13594 }
   13595 
   13596 /* WOL in the newer chipset interfaces (pchlan) */
   13597 static void
   13598 wm_enable_phy_wakeup(struct wm_softc *sc)
   13599 {
   13600 #if 0
   13601 	uint16_t preg;
   13602 
   13603 	/* Copy MAC RARs to PHY RARs */
   13604 
   13605 	/* Copy MAC MTA to PHY MTA */
   13606 
   13607 	/* Configure PHY Rx Control register */
   13608 
   13609 	/* Enable PHY wakeup in MAC register */
   13610 
   13611 	/* Configure and enable PHY wakeup in PHY registers */
   13612 
   13613 	/* Activate PHY wakeup */
   13614 
   13615 	/* XXX */
   13616 #endif
   13617 }
   13618 
   13619 /* Power down workaround on D3 */
   13620 static void
   13621 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   13622 {
   13623 	uint32_t reg;
   13624 	int i;
   13625 
   13626 	for (i = 0; i < 2; i++) {
   13627 		/* Disable link */
   13628 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13629 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13630 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13631 
   13632 		/*
   13633 		 * Call gig speed drop workaround on Gig disable before
   13634 		 * accessing any PHY registers
   13635 		 */
   13636 		if (sc->sc_type == WM_T_ICH8)
   13637 			wm_gig_downshift_workaround_ich8lan(sc);
   13638 
   13639 		/* Write VR power-down enable */
   13640 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13641 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13642 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   13643 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   13644 
   13645 		/* Read it back and test */
   13646 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   13647 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   13648 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   13649 			break;
   13650 
   13651 		/* Issue PHY reset and repeat at most one more time */
   13652 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   13653 	}
   13654 }
   13655 
   13656 static void
   13657 wm_enable_wakeup(struct wm_softc *sc)
   13658 {
   13659 	uint32_t reg, pmreg;
   13660 	pcireg_t pmode;
   13661 
   13662 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13663 		device_xname(sc->sc_dev), __func__));
   13664 
   13665 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   13666 		&pmreg, NULL) == 0)
   13667 		return;
   13668 
   13669 	/* Advertise the wakeup capability */
   13670 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   13671 	    | CTRL_SWDPIN(3));
   13672 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   13673 
   13674 	/* ICH workaround */
   13675 	switch (sc->sc_type) {
   13676 	case WM_T_ICH8:
   13677 	case WM_T_ICH9:
   13678 	case WM_T_ICH10:
   13679 	case WM_T_PCH:
   13680 	case WM_T_PCH2:
   13681 	case WM_T_PCH_LPT:
   13682 	case WM_T_PCH_SPT:
   13683 		/* Disable gig during WOL */
   13684 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13685 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   13686 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13687 		if (sc->sc_type == WM_T_PCH)
   13688 			wm_gmii_reset(sc);
   13689 
   13690 		/* Power down workaround */
   13691 		if (sc->sc_phytype == WMPHY_82577) {
   13692 			struct mii_softc *child;
   13693 
   13694 			/* Assume that the PHY is copper */
   13695 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13696 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
   13697 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   13698 				    (768 << 5) | 25, 0x0444); /* magic num */
   13699 		}
   13700 		break;
   13701 	default:
   13702 		break;
   13703 	}
   13704 
   13705 	/* Keep the laser running on fiber adapters */
   13706 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   13707 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   13708 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   13709 		reg |= CTRL_EXT_SWDPIN(3);
   13710 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   13711 	}
   13712 
   13713 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   13714 #if 0	/* for the multicast packet */
   13715 	reg |= WUFC_MC;
   13716 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   13717 #endif
   13718 
   13719 	if (sc->sc_type >= WM_T_PCH)
   13720 		wm_enable_phy_wakeup(sc);
   13721 	else {
   13722 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
   13723 		CSR_WRITE(sc, WMREG_WUFC, reg);
   13724 	}
   13725 
   13726 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   13727 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   13728 		|| (sc->sc_type == WM_T_PCH2))
   13729 		    && (sc->sc_phytype == WMPHY_IGP_3))
   13730 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   13731 
   13732 	/* Request PME */
   13733 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   13734 #if 0
   13735 	/* Disable WOL */
   13736 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   13737 #else
   13738 	/* For WOL */
   13739 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   13740 #endif
   13741 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   13742 }
   13743 
   13744 /* LPLU */
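/*
 * LPLU (Low Power Link Up) lets the PHY negotiate down to the lowest
 * usable speed to save power; clearing the D0 variant keeps full link
 * speed available while the device is active.
 */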
   13745 
   13746 static void
   13747 wm_lplu_d0_disable(struct wm_softc *sc)
   13748 {
   13749 	struct mii_data *mii = &sc->sc_mii;
   13750 	uint32_t reg;
   13751 
   13752 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13753 		device_xname(sc->sc_dev), __func__));
   13754 
   13755 	if (sc->sc_phytype == WMPHY_IFE)
   13756 		return;
   13757 
   13758 	switch (sc->sc_type) {
   13759 	case WM_T_82571:
   13760 	case WM_T_82572:
   13761 	case WM_T_82573:
   13762 	case WM_T_82575:
   13763 	case WM_T_82576:
   13764 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
   13765 		reg &= ~PMR_D0_LPLU;
   13766 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
   13767 		break;
   13768 	case WM_T_82580:
   13769 	case WM_T_I350:
   13770 	case WM_T_I210:
   13771 	case WM_T_I211:
   13772 		reg = CSR_READ(sc, WMREG_PHPM);
   13773 		reg &= ~PHPM_D0A_LPLU;
   13774 		CSR_WRITE(sc, WMREG_PHPM, reg);
   13775 		break;
   13776 	case WM_T_82574:
   13777 	case WM_T_82583:
   13778 	case WM_T_ICH8:
   13779 	case WM_T_ICH9:
   13780 	case WM_T_ICH10:
   13781 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13782 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   13783 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13784 		CSR_WRITE_FLUSH(sc);
   13785 		break;
   13786 	case WM_T_PCH:
   13787 	case WM_T_PCH2:
   13788 	case WM_T_PCH_LPT:
   13789 	case WM_T_PCH_SPT:
   13790 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   13791 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   13792 		if (wm_phy_resetisblocked(sc) == false)
   13793 			reg |= HV_OEM_BITS_ANEGNOW;
   13794 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   13795 		break;
   13796 	default:
   13797 		break;
   13798 	}
   13799 }
   13800 
   13801 /* EEE */
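/*
 * EEE (Energy Efficient Ethernet, IEEE 802.3az): IPCNFG advertises EEE
 * at 1000/100Mbps during autonegotiation and EEER enables Tx/Rx LPI
 * (plus flow control for LPI) once the link partner agrees.
 */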
   13802 
   13803 static void
   13804 wm_set_eee_i350(struct wm_softc *sc)
   13805 {
   13806 	uint32_t ipcnfg, eeer;
   13807 
   13808 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   13809 	eeer = CSR_READ(sc, WMREG_EEER);
   13810 
   13811 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   13812 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13813 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13814 		    | EEER_LPI_FC);
   13815 	} else {
   13816 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   13817 		ipcnfg &= ~IPCNFG_10BASE_TE;
   13818 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   13819 		    | EEER_LPI_FC);
   13820 	}
   13821 
   13822 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   13823 	CSR_WRITE(sc, WMREG_EEER, eeer);
   13824 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   13825 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   13826 }
   13827 
   13828 /*
   13829  * Workarounds (mainly PHY related).
   13830  * Basically, PHY's workarounds are in the PHY drivers.
   13831  */
   13832 
   13833 /* Work-around for 82566 Kumeran PCS lock loss */
   13834 static void
   13835 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   13836 {
   13837 	struct mii_data *mii = &sc->sc_mii;
   13838 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
   13839 	int i;
   13840 	int reg;
   13841 
   13842 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13843 		device_xname(sc->sc_dev), __func__));
   13844 
   13845 	/* If the link is not up, do nothing */
   13846 	if ((status & STATUS_LU) == 0)
   13847 		return;
   13848 
	/* Nothing to do if the link speed is other than 1Gbps */
   13850 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
   13851 		return;
   13852 
   13853 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13854 	for (i = 0; i < 10; i++) {
   13855 		/* read twice */
   13856 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13857 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   13858 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   13859 			goto out;	/* GOOD! */
   13860 
   13861 		/* Reset the PHY */
   13862 		wm_reset_phy(sc);
   13863 		delay(5*1000);
   13864 	}
   13865 
   13866 	/* Disable GigE link negotiation */
   13867 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   13868 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   13869 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   13870 
   13871 	/*
   13872 	 * Call gig speed drop workaround on Gig disable before accessing
   13873 	 * any PHY registers.
   13874 	 */
   13875 	wm_gig_downshift_workaround_ich8lan(sc);
   13876 
   13877 out:
   13878 	return;
   13879 }
   13880 
   13881 /* WOL from S5 stops working */
   13882 static void
   13883 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   13884 {
   13885 	uint16_t kmreg;
   13886 
   13887 	/* Only for igp3 */
   13888 	if (sc->sc_phytype == WMPHY_IGP_3) {
   13889 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
   13890 			return;
   13891 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
   13892 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
   13893 			return;
   13894 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
   13895 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
   13896 	}
   13897 }
   13898 
   13899 /*
   13900  * Workaround for pch's PHYs
   13901  * XXX should be moved to new PHY driver?
   13902  */
   13903 static void
   13904 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   13905 {
   13906 
   13907 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13908 		device_xname(sc->sc_dev), __func__));
   13909 	KASSERT(sc->sc_type == WM_T_PCH);
   13910 
   13911 	if (sc->sc_phytype == WMPHY_82577)
   13912 		wm_set_mdio_slow_mode_hv(sc);
   13913 
   13914 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   13915 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
   13917 
   13918 	/* 82578 */
   13919 	if (sc->sc_phytype == WMPHY_82578) {
   13920 		struct mii_softc *child;
   13921 
   13922 		/*
   13923 		 * Return registers to default by doing a soft reset then
   13924 		 * writing 0x3140 to the control register
   13925 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   13926 		 */
   13927 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   13928 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   13929 			PHY_RESET(child);
   13930 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   13931 			    0x3140);
   13932 		}
   13933 	}
   13934 
   13935 	/* Select page 0 */
   13936 	sc->phy.acquire(sc);
   13937 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   13938 	sc->phy.release(sc);
   13939 
   13940 	/*
   13941 	 * Configure the K1 Si workaround during phy reset assuming there is
   13942 	 * link so that it disables K1 if link is in 1Gbps.
   13943 	 */
   13944 	wm_k1_gig_workaround_hv(sc, 1);
   13945 }
   13946 
   13947 static void
   13948 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   13949 {
   13950 
   13951 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13952 		device_xname(sc->sc_dev), __func__));
   13953 	KASSERT(sc->sc_type == WM_T_PCH2);
   13954 
   13955 	wm_set_mdio_slow_mode_hv(sc);
   13956 }
   13957 
   13958 static int
   13959 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   13960 {
   13961 	int k1_enable = sc->sc_nvm_k1_enabled;
   13962 
   13963 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   13964 		device_xname(sc->sc_dev), __func__));
   13965 
   13966 	if (sc->phy.acquire(sc) != 0)
   13967 		return -1;
   13968 
   13969 	if (link) {
   13970 		k1_enable = 0;
   13971 
   13972 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
   13974 	} else {
   13975 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
   13977 	}
   13978 
   13979 	wm_configure_k1_ich8lan(sc, k1_enable);
   13980 	sc->phy.release(sc);
   13981 
   13982 	return 0;
   13983 }
   13984 
   13985 static void
   13986 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   13987 {
   13988 	uint32_t reg;
   13989 
   13990 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   13991 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   13992 	    reg | HV_KMRN_MDIO_SLOW);
   13993 }
   13994 
   13995 static void
   13996 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   13997 {
   13998 	uint32_t ctrl, ctrl_ext, tmp;
   13999 	uint16_t kmreg;
   14000 	int rv;
   14001 
   14002 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
   14003 	if (rv != 0)
   14004 		return;
   14005 
   14006 	if (k1_enable)
   14007 		kmreg |= KUMCTRLSTA_K1_ENABLE;
   14008 	else
   14009 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
   14010 
   14011 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
   14012 	if (rv != 0)
   14013 		return;
   14014 
   14015 	delay(20);
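
	/*
	 * With the new K1 configuration written, briefly force the MAC
	 * speed setting (CTRL_FRCSPD plus CTRL_EXT_SPD_BYPS) and then
	 * restore the original values; this mirrors the sequence other
	 * e1000-family drivers use to make the K1 change take effect.
	 */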
   14016 
   14017 	ctrl = CSR_READ(sc, WMREG_CTRL);
   14018 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   14019 
   14020 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   14021 	tmp |= CTRL_FRCSPD;
   14022 
   14023 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   14024 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   14025 	CSR_WRITE_FLUSH(sc);
   14026 	delay(20);
   14027 
   14028 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   14029 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   14030 	CSR_WRITE_FLUSH(sc);
   14031 	delay(20);
   14032 
   14033 	return;
   14034 }
   14035 
   14036 /* special case - for 82575 - need to do manual init ... */
   14037 static void
   14038 wm_reset_init_script_82575(struct wm_softc *sc)
   14039 {
   14040 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
   14043 	 */
   14044 
   14045 	/* SerDes configuration via SERDESCTRL */
   14046 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   14047 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   14048 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   14049 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   14050 
   14051 	/* CCM configuration via CCMCTL register */
   14052 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   14053 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   14054 
   14055 	/* PCIe lanes configuration */
   14056 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   14057 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   14058 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   14059 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   14060 
   14061 	/* PCIe PLL Configuration */
   14062 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   14063 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   14064 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   14065 }
   14066 
   14067 static void
   14068 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   14069 {
   14070 	uint32_t reg;
   14071 	uint16_t nvmword;
   14072 	int rv;
   14073 
   14074 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   14075 		return;
   14076 
   14077 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   14078 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   14079 	if (rv != 0) {
   14080 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   14081 		    __func__);
   14082 		return;
   14083 	}
   14084 
   14085 	reg = CSR_READ(sc, WMREG_MDICNFG);
   14086 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   14087 		reg |= MDICNFG_DEST;
   14088 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   14089 		reg |= MDICNFG_COM_MDIO;
   14090 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14091 }
   14092 
   14093 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
   14094 
   14095 static bool
   14096 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
   14097 {
   14098 	int i;
   14099 	uint32_t reg;
   14100 	uint16_t id1, id2;
   14101 
   14102 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14103 		device_xname(sc->sc_dev), __func__));
   14104 	id1 = id2 = 0xffff;
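	/*
	 * An ID of all zeros or all ones means the PHY did not respond
	 * on MDIO; retry the read once before falling back to the
	 * recovery paths below.
	 */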
   14105 	for (i = 0; i < 2; i++) {
   14106 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
   14107 		if (MII_INVALIDID(id1))
   14108 			continue;
   14109 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
   14110 		if (MII_INVALIDID(id2))
   14111 			continue;
   14112 		break;
   14113 	}
	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
		goto out;
   14117 
   14118 	if (sc->sc_type < WM_T_PCH_LPT) {
   14119 		sc->phy.release(sc);
   14120 		wm_set_mdio_slow_mode_hv(sc);
   14121 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
   14122 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
   14123 		sc->phy.acquire(sc);
   14124 	}
   14125 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
   14126 		printf("XXX return with false\n");
   14127 		return false;
   14128 	}
   14129 out:
   14130 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   14131 		/* Only unforce SMBus if ME is not active */
   14132 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
   14133 			/* Unforce SMBus mode in PHY */
   14134 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
   14135 			    CV_SMB_CTRL);
   14136 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
   14137 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
   14138 			    CV_SMB_CTRL, reg);
   14139 
   14140 			/* Unforce SMBus mode in MAC */
   14141 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14142 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   14143 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14144 		}
   14145 	}
   14146 	return true;
   14147 }
   14148 
   14149 static void
   14150 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
   14151 {
   14152 	uint32_t reg;
   14153 	int i;
   14154 
   14155 	/* Set PHY Config Counter to 50msec */
   14156 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
   14157 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
   14158 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
   14159 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
   14160 
   14161 	/* Toggle LANPHYPC */
   14162 	reg = CSR_READ(sc, WMREG_CTRL);
   14163 	reg |= CTRL_LANPHYPC_OVERRIDE;
   14164 	reg &= ~CTRL_LANPHYPC_VALUE;
   14165 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14166 	CSR_WRITE_FLUSH(sc);
   14167 	delay(1000);
   14168 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
   14169 	CSR_WRITE(sc, WMREG_CTRL, reg);
   14170 	CSR_WRITE_FLUSH(sc);
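
	/*
	 * Wait for the PHY to come back up: pre-LPT parts simply sleep
	 * 50ms, newer ones poll CTRL_EXT_LPCD every 5ms (up to 20 tries)
	 * and then allow another 30ms for the PHY to settle.
	 */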
   14171 
   14172 	if (sc->sc_type < WM_T_PCH_LPT)
   14173 		delay(50 * 1000);
   14174 	else {
   14175 		i = 20;
   14176 
   14177 		do {
   14178 			delay(5 * 1000);
   14179 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
   14180 		    && i--);
   14181 
   14182 		delay(30 * 1000);
   14183 	}
   14184 }
   14185 
   14186 static int
   14187 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
   14188 {
   14189 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
   14190 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
   14191 	uint32_t rxa;
   14192 	uint16_t scale = 0, lat_enc = 0;
   14193 	int32_t obff_hwm = 0;
   14194 	int64_t lat_ns, value;
   14195 
   14196 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14197 		device_xname(sc->sc_dev), __func__));
   14198 
   14199 	if (link) {
   14200 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
   14201 		uint32_t status;
   14202 		uint16_t speed;
   14203 		pcireg_t preg;
   14204 
   14205 		status = CSR_READ(sc, WMREG_STATUS);
   14206 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
   14207 		case STATUS_SPEED_10:
   14208 			speed = 10;
   14209 			break;
   14210 		case STATUS_SPEED_100:
   14211 			speed = 100;
   14212 			break;
   14213 		case STATUS_SPEED_1000:
   14214 			speed = 1000;
   14215 			break;
   14216 		default:
   14217 			device_printf(sc->sc_dev, "Unknown speed "
   14218 			    "(status = %08x)\n", status);
   14219 			return -1;
   14220 		}
   14221 
   14222 		/* Rx Packet Buffer Allocation size (KB) */
   14223 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
   14224 
   14225 		/*
   14226 		 * Determine the maximum latency tolerated by the device.
   14227 		 *
   14228 		 * Per the PCIe spec, the tolerated latencies are encoded as
   14229 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
   14230 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
   14231 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
   14232 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
   14233 		 */
   14234 		lat_ns = ((int64_t)rxa * 1024 -
   14235 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
   14236 			+ ETHER_HDR_LEN))) * 8 * 1000;
   14237 		if (lat_ns < 0)
   14238 			lat_ns = 0;
   14239 		else
   14240 			lat_ns /= speed;
   14241 		value = lat_ns;
   14242 
   14243 		while (value > LTRV_VALUE) {
			scale++;
   14245 			value = howmany(value, __BIT(5));
   14246 		}
   14247 		if (scale > LTRV_SCALE_MAX) {
   14248 			printf("%s: Invalid LTR latency scale %d\n",
   14249 			    device_xname(sc->sc_dev), scale);
   14250 			return -1;
   14251 		}
   14252 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
   14253 
   14254 		/* Determine the maximum latency tolerated by the platform */
   14255 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14256 		    WM_PCI_LTR_CAP_LPT);
   14257 		max_snoop = preg & 0xffff;
   14258 		max_nosnoop = preg >> 16;
   14259 
   14260 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
   14261 
   14262 		if (lat_enc > max_ltr_enc) {
   14263 			lat_enc = max_ltr_enc;
   14264 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
   14265 			    * PCI_LTR_SCALETONS(
   14266 				    __SHIFTOUT(lat_enc,
   14267 					PCI_LTR_MAXSNOOPLAT_SCALE));
   14268 		}
   14269 
   14270 		if (lat_ns) {
   14271 			lat_ns *= speed * 1000;
   14272 			lat_ns /= 8;
   14273 			lat_ns /= 1000000000;
   14274 			obff_hwm = (int32_t)(rxa - lat_ns);
   14275 		}
   14276 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
			device_printf(sc->sc_dev, "Invalid high water mark %d"
			    " (rxa = %d, lat_ns = %d)\n",
   14279 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
   14280 			return -1;
   14281 		}
   14282 	}
   14283 	/* Snoop and No-Snoop latencies the same */
   14284 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
   14285 	CSR_WRITE(sc, WMREG_LTRV, reg);
   14286 
   14287 	/* Set OBFF high water mark */
   14288 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
   14289 	reg |= obff_hwm;
   14290 	CSR_WRITE(sc, WMREG_SVT, reg);
   14291 
   14292 	/* Enable OBFF */
   14293 	reg = CSR_READ(sc, WMREG_SVCR);
   14294 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
   14295 	CSR_WRITE(sc, WMREG_SVCR, reg);
   14296 
   14297 	return 0;
   14298 }
   14299 
   14300 /*
   14301  * I210 Errata 25 and I211 Errata 10
   14302  * Slow System Clock.
   14303  */
   14304 static void
   14305 wm_pll_workaround_i210(struct wm_softc *sc)
   14306 {
   14307 	uint32_t mdicnfg, wuc;
   14308 	uint32_t reg;
   14309 	pcireg_t pcireg;
   14310 	uint32_t pmreg;
   14311 	uint16_t nvmword, tmp_nvmword;
   14312 	int phyval;
   14313 	bool wa_done = false;
   14314 	int i;
   14315 
   14316 	/* Save WUC and MDICNFG registers */
   14317 	wuc = CSR_READ(sc, WMREG_WUC);
   14318 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   14319 
   14320 	reg = mdicnfg & ~MDICNFG_DEST;
   14321 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   14322 
   14323 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   14324 		nvmword = INVM_DEFAULT_AL;
   14325 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   14326 
   14327 	/* Get Power Management cap offset */
   14328 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   14329 		&pmreg, NULL) == 0)
   14330 		return;
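	/*
	 * Check whether the PHY PLL came up unconfigured; if so, patch
	 * the iNVM autoload word via EEARBC_I210, bounce the function
	 * through D3hot and back to D0, and re-check, up to
	 * WM_MAX_PLL_TRIES times.
	 */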
   14331 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   14332 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   14333 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   14334 
   14335 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   14336 			break; /* OK */
   14337 		}
   14338 
   14339 		wa_done = true;
   14340 		/* Directly reset the internal PHY */
   14341 		reg = CSR_READ(sc, WMREG_CTRL);
   14342 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   14343 
   14344 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   14345 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   14346 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   14347 
   14348 		CSR_WRITE(sc, WMREG_WUC, 0);
   14349 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   14350 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14351 
   14352 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   14353 		    pmreg + PCI_PMCSR);
   14354 		pcireg |= PCI_PMCSR_STATE_D3;
   14355 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14356 		    pmreg + PCI_PMCSR, pcireg);
   14357 		delay(1000);
   14358 		pcireg &= ~PCI_PMCSR_STATE_D3;
   14359 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   14360 		    pmreg + PCI_PMCSR, pcireg);
   14361 
   14362 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   14363 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   14364 
   14365 		/* Restore WUC register */
   14366 		CSR_WRITE(sc, WMREG_WUC, wuc);
   14367 	}
   14368 
   14369 	/* Restore MDICNFG setting */
   14370 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   14371 	if (wa_done)
   14372 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   14373 }
   14374 
   14375 static void
   14376 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
   14377 {
   14378 	uint32_t reg;
   14379 
   14380 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   14381 		device_xname(sc->sc_dev), __func__));
   14382 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
   14383 
   14384 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
   14385 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
   14386 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
   14387 
   14388 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
   14389 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
   14390 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
   14391 }
   14392