/*	$NetBSD: if_wm.c,v 1.430 2016/10/28 05:21:48 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.430 2016/10/28 05:21:48 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupt vectors this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256
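
/*
 * Illustrative sketch (not part of the driver): because txq_ndesc and
 * txq_num are powers of two, the WM_NEXTTX()/WM_NEXTTXS() macros above
 * can wrap a ring index with a cheap AND mask instead of a modulo.
 * The KASSERTs below spell out the wrap-around behaviour; "txq" is
 * assumed to be a fully initialized queue.
 */
#if 0
static void
wm_ring_wrap_example(struct wm_txqueue *txq)
{

	/* The last descriptor and the last job index both wrap to 0. */
	KASSERT(WM_NEXTTX(txq, WM_NTXDESC(txq) - 1) == 0);
	KASSERT(WM_NEXTTXS(txq, WM_TXQUEUELEN(txq) - 1) == 0);
}
#endif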

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */
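
/*
 * Illustrative sketch (not part of the driver): what the token-pasting
 * macros above produce.  For WM_Q_EVCNT_DEFINE(txq, txdw) the two
 * members below are generated; the name buffer is sized from the
 * literal string "qname##XX##evname", which is long enough for names
 * of the form "txqNNtxdw".  WM_Q_INTR_EVCNT_ATTACH(txq, txdw, q, 2,
 * xname) then formats the name "txq02txdw" into that buffer and
 * attaches the counter with evcnt_attach_dynamic(9).
 */
#if 0
	char txq_txdw_evcnt_name[sizeof("qname##XX##evname")];
	struct evcnt txq_ev_txdw;
#endif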

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs.  This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
};
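
/*
 * Illustrative sketch (not part of the driver): the acquire/release
 * hooks are meant to bracket PHY register access.  The 0-on-success
 * convention and the error handling here are assumptions for the sake
 * of the example, not a quote of the real callers.
 */
#if 0
static int
wm_phyop_example(struct wm_softc *sc)
{

	if (sc->phy.acquire(sc) != 0)
		return EBUSY;		/* semaphore not obtained */
	/* ... access PHY registers here ... */
	sc->phy.release(sc);
	return 0;
}
#endif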

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
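
/*
 * Illustrative sketch (not part of the driver): the tail-pointer idiom
 * above appends mbufs to the receive chain in O(1).  After RESET,
 * rxq_tailp points at rxq_head, so the first LINK sets the head; each
 * later LINK extends the previous mbuf's m_next.
 */
#if 0
	WM_RXCHAIN_RESET(rxq);
	WM_RXCHAIN_LINK(rxq, m1);	/* rxq_head == m1 */
	WM_RXCHAIN_LINK(rxq, m2);	/* m1->m_next == m2, rxq_tail == m2 */
#endif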

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
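
/*
 * Illustrative sketch (not part of the driver): CSR_WRITE_FLUSH()
 * reads STATUS back so that posted PCI writes actually reach the
 * device before the driver proceeds.  The register and delay below
 * are placeholders, not a real initialization sequence.
 */
#if 0
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE_FLUSH(sc);		/* force the posted write out */
	delay(10);			/* then give the hardware time */
#endif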

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
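
/*
 * Illustrative sketch (not part of the driver): on a 64-bit
 * bus_addr_t the _LO/_HI macros split a descriptor-ring address for
 * base-address registers that take two 32-bit halves; on a 32-bit
 * bus_addr_t the high half is simply 0.  "tdbal_reg"/"tdbah_reg" are
 * hypothetical stand-ins for the chip's TDBAL/TDBAH register offsets.
 */
#if 0
	CSR_WRITE(sc, tdbal_reg, WM_CDTXADDR_LO(txq, 0));
	CSR_WRITE(sc, tdbah_reg, WM_CDTXADDR_HI(txq, 0));
#endif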

/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_turnon(struct wm_softc *);
static void	wm_turnoff(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detecting NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Most PHY workarounds live in the PHY drivers themselves.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
   1351 
   1352 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1353 	  "I210 Gigabit Ethernet (FLASH less)",
   1354 	  WM_T_I210,		WMP_F_SERDES },
   1355 
   1356 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1357 	  "I210 Gigabit Ethernet (SGMII)",
   1358 	  WM_T_I210,		WMP_F_COPPER },
   1359 
   1360 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1361 	  "I211 Ethernet (COPPER)",
   1362 	  WM_T_I211,		WMP_F_COPPER },
   1363 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1364 	  "I217 V Ethernet Connection",
   1365 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1367 	  "I217 LM Ethernet Connection",
   1368 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1369 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1370 	  "I218 V Ethernet Connection",
   1371 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1372 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1373 	  "I218 V Ethernet Connection",
   1374 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1375 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1376 	  "I218 V Ethernet Connection",
   1377 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1378 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1379 	  "I218 LM Ethernet Connection",
   1380 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1381 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1382 	  "I218 LM Ethernet Connection",
   1383 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1384 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1385 	  "I218 LM Ethernet Connection",
   1386 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1387 #if 0
   1388 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
   1389 	  "I219 V Ethernet Connection",
   1390 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1391 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
   1392 	  "I219 V Ethernet Connection",
   1393 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1394 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
   1395 	  "I219 V Ethernet Connection",
   1396 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1397 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
   1398 	  "I219 V Ethernet Connection",
   1399 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1400 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
   1401 	  "I219 LM Ethernet Connection",
   1402 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1403 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
   1404 	  "I219 LM Ethernet Connection",
   1405 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1406 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
   1407 	  "I219 LM Ethernet Connection",
   1408 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1409 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
   1410 	  "I219 LM Ethernet Connection",
   1411 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1412 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
   1413 	  "I219 LM Ethernet Connection",
   1414 	  WM_T_PCH_SPT,		WMP_F_COPPER },
   1415 #endif
   1416 	{ 0,			0,
   1417 	  NULL,
   1418 	  0,			0 },
   1419 };
   1420 
   1421 /*
    1422  * Register read/write functions
    1423  * other than CSR_{READ|WRITE}().
   1424  */
   1425 
   1426 #if 0 /* Not currently used */
   1427 static inline uint32_t
   1428 wm_io_read(struct wm_softc *sc, int reg)
   1429 {
   1430 
   1431 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1432 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1433 }
   1434 #endif
   1435 
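         /*
          * Indirect I/O-space register access: as the bus_space calls
          * below imply, the I/O BAR apparently exposes a two-register
          * window, with the target register offset written at offset 0
          * and the data then transferred at offset 4.
          */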
   1436 static inline void
   1437 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1438 {
   1439 
   1440 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1441 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1442 }
   1443 
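         /*
          * Write an 8-bit value to one of the 82575's indirect controller
          * registers: pack the data and the register offset into a single
          * control word, then poll (up to SCTL_CTL_POLL_TIMEOUT times,
          * 5us apart) until the chip reports SCTL_CTL_READY.
          */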
   1444 static inline void
   1445 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1446     uint32_t data)
   1447 {
   1448 	uint32_t regval;
   1449 	int i;
   1450 
   1451 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1452 
   1453 	CSR_WRITE(sc, reg, regval);
   1454 
   1455 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1456 		delay(5);
   1457 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1458 			break;
   1459 	}
   1460 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1461 		aprint_error("%s: WARNING:"
   1462 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1463 		    device_xname(sc->sc_dev), reg);
   1464 	}
   1465 }
   1466 
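         /*
          * Store a DMA address in a descriptor as two little-endian
          * 32-bit halves; the high half is used only when bus addresses
          * are 64 bits wide.
          */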
   1467 static inline void
   1468 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1469 {
   1470 	wa->wa_low = htole32(v & 0xffffffffU);
   1471 	if (sizeof(bus_addr_t) == 8)
   1472 		wa->wa_high = htole32((uint64_t) v >> 32);
   1473 	else
   1474 		wa->wa_high = 0;
   1475 }
   1476 
   1477 /*
   1478  * Descriptor sync/init functions.
   1479  */
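         /*
          * Sync "num" TX descriptors starting at "start"; when the range
          * wraps past the end of the ring, the bus_dmamap_sync is split
          * into two calls.
          */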
   1480 static inline void
   1481 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1482 {
   1483 	struct wm_softc *sc = txq->txq_sc;
   1484 
   1485 	/* If it will wrap around, sync to the end of the ring. */
   1486 	if ((start + num) > WM_NTXDESC(txq)) {
   1487 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1488 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
   1489 		    (WM_NTXDESC(txq) - start), ops);
   1490 		num -= (WM_NTXDESC(txq) - start);
   1491 		start = 0;
   1492 	}
   1493 
   1494 	/* Now sync whatever is left. */
   1495 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1496 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
   1497 }
   1498 
   1499 static inline void
   1500 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1501 {
   1502 	struct wm_softc *sc = rxq->rxq_sc;
   1503 
   1504 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1505 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1506 }
   1507 
   1508 static inline void
   1509 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1510 {
   1511 	struct wm_softc *sc = rxq->rxq_sc;
   1512 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1513 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1514 	struct mbuf *m = rxs->rxs_mbuf;
   1515 
   1516 	/*
   1517 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1518 	 * so that the payload after the Ethernet header is aligned
   1519 	 * to a 4-byte boundary.
    1520 	 *
   1521 	 * XXX BRAINDAMAGE ALERT!
   1522 	 * The stupid chip uses the same size for every buffer, which
   1523 	 * is set in the Receive Control register.  We are using the 2K
   1524 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1525 	 * reason, we can't "scoot" packets longer than the standard
   1526 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1527 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1528 	 * the upper layer copy the headers.
   1529 	 */
   1530 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1531 
   1532 	wm_set_dma_addr(&rxd->wrx_addr,
   1533 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1534 	rxd->wrx_len = 0;
   1535 	rxd->wrx_cksum = 0;
   1536 	rxd->wrx_status = 0;
   1537 	rxd->wrx_errors = 0;
   1538 	rxd->wrx_special = 0;
   1539 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   1540 
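         	/*
          	 * Tell the chip this descriptor is available again by
          	 * advancing the ring tail to this slot.
          	 */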
   1541 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1542 }
   1543 
   1544 /*
   1545  * Device driver interface functions and commonly used functions.
   1546  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1547  */
   1548 
   1549 /* Lookup supported device table */
   1550 static const struct wm_product *
   1551 wm_lookup(const struct pci_attach_args *pa)
   1552 {
   1553 	const struct wm_product *wmp;
   1554 
   1555 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1556 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1557 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1558 			return wmp;
   1559 	}
   1560 	return NULL;
   1561 }
   1562 
   1563 /* The match function (ca_match) */
   1564 static int
   1565 wm_match(device_t parent, cfdata_t cf, void *aux)
   1566 {
   1567 	struct pci_attach_args *pa = aux;
   1568 
   1569 	if (wm_lookup(pa) != NULL)
   1570 		return 1;
   1571 
   1572 	return 0;
   1573 }
   1574 
   1575 /* The attach function (ca_attach) */
   1576 static void
   1577 wm_attach(device_t parent, device_t self, void *aux)
   1578 {
   1579 	struct wm_softc *sc = device_private(self);
   1580 	struct pci_attach_args *pa = aux;
   1581 	prop_dictionary_t dict;
   1582 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1583 	pci_chipset_tag_t pc = pa->pa_pc;
   1584 	int counts[PCI_INTR_TYPE_SIZE];
   1585 	pci_intr_type_t max_type;
   1586 	const char *eetype, *xname;
   1587 	bus_space_tag_t memt;
   1588 	bus_space_handle_t memh;
   1589 	bus_size_t memsize;
   1590 	int memh_valid;
   1591 	int i, error;
   1592 	const struct wm_product *wmp;
   1593 	prop_data_t ea;
   1594 	prop_number_t pn;
   1595 	uint8_t enaddr[ETHER_ADDR_LEN];
   1596 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1597 	pcireg_t preg, memtype;
   1598 	uint16_t eeprom_data, apme_mask;
   1599 	bool force_clear_smbi;
   1600 	uint32_t link_mode;
   1601 	uint32_t reg;
   1602 
   1603 	sc->sc_dev = self;
   1604 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1605 	sc->sc_core_stopping = false;
   1606 
   1607 	wmp = wm_lookup(pa);
   1608 #ifdef DIAGNOSTIC
   1609 	if (wmp == NULL) {
   1610 		printf("\n");
   1611 		panic("wm_attach: impossible");
   1612 	}
   1613 #endif
   1614 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1615 
   1616 	sc->sc_pc = pa->pa_pc;
   1617 	sc->sc_pcitag = pa->pa_tag;
   1618 
   1619 	if (pci_dma64_available(pa))
   1620 		sc->sc_dmat = pa->pa_dmat64;
   1621 	else
   1622 		sc->sc_dmat = pa->pa_dmat;
   1623 
   1624 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1625 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
   1626 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1627 
   1628 	sc->sc_type = wmp->wmp_type;
   1629 
   1630 	/* Set default function pointers */
   1631 	sc->phy.acquire = wm_get_null;
   1632 	sc->phy.release = wm_put_null;
   1633 
   1634 	if (sc->sc_type < WM_T_82543) {
   1635 		if (sc->sc_rev < 2) {
   1636 			aprint_error_dev(sc->sc_dev,
   1637 			    "i82542 must be at least rev. 2\n");
   1638 			return;
   1639 		}
   1640 		if (sc->sc_rev < 3)
   1641 			sc->sc_type = WM_T_82542_2_0;
   1642 	}
   1643 
   1644 	/*
   1645 	 * Disable MSI for Errata:
   1646 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1647 	 *
   1648 	 *  82544: Errata 25
   1649 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1650 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1651 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1652 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1653 	 *
   1654 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1655 	 *
   1656 	 *  82571 & 82572: Errata 63
   1657 	 */
   1658 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1659 	    || (sc->sc_type == WM_T_82572))
   1660 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1661 
   1662 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1663 	    || (sc->sc_type == WM_T_82580)
   1664 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1665 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1666 		sc->sc_flags |= WM_F_NEWQUEUE;
   1667 
   1668 	/* Set device properties (mactype) */
   1669 	dict = device_properties(sc->sc_dev);
   1670 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1671 
   1672 	/*
    1673 	 * Map the device.  All devices support memory-mapped access,
   1674 	 * and it is really required for normal operation.
   1675 	 */
   1676 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1677 	switch (memtype) {
   1678 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1679 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1680 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1681 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1682 		break;
   1683 	default:
   1684 		memh_valid = 0;
   1685 		break;
   1686 	}
   1687 
   1688 	if (memh_valid) {
   1689 		sc->sc_st = memt;
   1690 		sc->sc_sh = memh;
   1691 		sc->sc_ss = memsize;
   1692 	} else {
   1693 		aprint_error_dev(sc->sc_dev,
   1694 		    "unable to map device registers\n");
   1695 		return;
   1696 	}
   1697 
   1698 	/*
   1699 	 * In addition, i82544 and later support I/O mapped indirect
   1700 	 * register access.  It is not desirable (nor supported in
   1701 	 * this driver) to use it for normal operation, though it is
   1702 	 * required to work around bugs in some chip versions.
   1703 	 */
   1704 	if (sc->sc_type >= WM_T_82544) {
   1705 		/* First we have to find the I/O BAR. */
   1706 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1707 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1708 			if (memtype == PCI_MAPREG_TYPE_IO)
   1709 				break;
   1710 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1711 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1712 				i += 4;	/* skip high bits, too */
   1713 		}
   1714 		if (i < PCI_MAPREG_END) {
   1715 			/*
    1716 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1717 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
    1718 			 * That is no problem, because newer chips don't
    1719 			 * have this bug.
    1720 			 *
    1721 			 * The i8254x apparently doesn't respond when the
    1722 			 * I/O BAR is 0, which looks somewhat like it hasn't
    1723 			 * been configured.
   1724 			 */
   1725 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1726 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1727 				aprint_error_dev(sc->sc_dev,
   1728 				    "WARNING: I/O BAR at zero.\n");
   1729 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1730 					0, &sc->sc_iot, &sc->sc_ioh,
   1731 					NULL, &sc->sc_ios) == 0) {
   1732 				sc->sc_flags |= WM_F_IOH_VALID;
   1733 			} else {
   1734 				aprint_error_dev(sc->sc_dev,
   1735 				    "WARNING: unable to map I/O space\n");
   1736 			}
   1737 		}
   1738 
   1739 	}
   1740 
   1741 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1742 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1743 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1744 	if (sc->sc_type < WM_T_82542_2_1)
   1745 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1746 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1747 
   1748 	/* power up chip */
   1749 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1750 	    NULL)) && error != EOPNOTSUPP) {
   1751 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1752 		return;
   1753 	}
   1754 
   1755 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1756 
   1757 	/* Allocation settings */
   1758 	max_type = PCI_INTR_TYPE_MSIX;
   1759 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
   1760 	counts[PCI_INTR_TYPE_MSI] = 1;
   1761 	counts[PCI_INTR_TYPE_INTX] = 1;
   1762 
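         	/*
          	 * Try MSI-X first (one vector per queue plus, apparently,
          	 * one for link and other events); if its setup fails,
          	 * release the vectors and retry with a single MSI, and
          	 * finally with a single INTx line.
          	 */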
   1763 alloc_retry:
   1764 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1765 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1766 		return;
   1767 	}
   1768 
   1769 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1770 		error = wm_setup_msix(sc);
   1771 		if (error) {
   1772 			pci_intr_release(pc, sc->sc_intrs,
   1773 			    counts[PCI_INTR_TYPE_MSIX]);
   1774 
   1775 			/* Setup for MSI: Disable MSI-X */
   1776 			max_type = PCI_INTR_TYPE_MSI;
   1777 			counts[PCI_INTR_TYPE_MSI] = 1;
   1778 			counts[PCI_INTR_TYPE_INTX] = 1;
   1779 			goto alloc_retry;
   1780 		}
    1781 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1782 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1783 		error = wm_setup_legacy(sc);
   1784 		if (error) {
   1785 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1786 			    counts[PCI_INTR_TYPE_MSI]);
   1787 
   1788 			/* The next try is for INTx: Disable MSI */
   1789 			max_type = PCI_INTR_TYPE_INTX;
   1790 			counts[PCI_INTR_TYPE_INTX] = 1;
   1791 			goto alloc_retry;
   1792 		}
   1793 	} else {
   1794 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1795 		error = wm_setup_legacy(sc);
   1796 		if (error) {
   1797 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1798 			    counts[PCI_INTR_TYPE_INTX]);
   1799 			return;
   1800 		}
   1801 	}
   1802 
   1803 	/*
   1804 	 * Check the function ID (unit number of the chip).
   1805 	 */
   1806 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1807 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1808 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1809 	    || (sc->sc_type == WM_T_82580)
   1810 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1811 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1812 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1813 	else
   1814 		sc->sc_funcid = 0;
   1815 
   1816 	/*
   1817 	 * Determine a few things about the bus we're connected to.
   1818 	 */
   1819 	if (sc->sc_type < WM_T_82543) {
   1820 		/* We don't really know the bus characteristics here. */
   1821 		sc->sc_bus_speed = 33;
   1822 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1823 		/*
    1824 		 * CSA (Communication Streaming Architecture) is about as
    1825 		 * fast as a 32-bit 66MHz PCI bus.
   1826 		 */
   1827 		sc->sc_flags |= WM_F_CSA;
   1828 		sc->sc_bus_speed = 66;
   1829 		aprint_verbose_dev(sc->sc_dev,
   1830 		    "Communication Streaming Architecture\n");
   1831 		if (sc->sc_type == WM_T_82547) {
   1832 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1833 			callout_setfunc(&sc->sc_txfifo_ch,
   1834 					wm_82547_txfifo_stall, sc);
   1835 			aprint_verbose_dev(sc->sc_dev,
   1836 			    "using 82547 Tx FIFO stall work-around\n");
   1837 		}
   1838 	} else if (sc->sc_type >= WM_T_82571) {
   1839 		sc->sc_flags |= WM_F_PCIE;
   1840 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1841 		    && (sc->sc_type != WM_T_ICH10)
   1842 		    && (sc->sc_type != WM_T_PCH)
   1843 		    && (sc->sc_type != WM_T_PCH2)
   1844 		    && (sc->sc_type != WM_T_PCH_LPT)
   1845 		    && (sc->sc_type != WM_T_PCH_SPT)) {
   1846 			/* ICH* and PCH* have no PCIe capability registers */
   1847 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1848 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1849 				NULL) == 0)
   1850 				aprint_error_dev(sc->sc_dev,
   1851 				    "unable to find PCIe capability\n");
   1852 		}
   1853 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1854 	} else {
   1855 		reg = CSR_READ(sc, WMREG_STATUS);
   1856 		if (reg & STATUS_BUS64)
   1857 			sc->sc_flags |= WM_F_BUS64;
   1858 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1859 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1860 
   1861 			sc->sc_flags |= WM_F_PCIX;
   1862 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1863 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1864 				aprint_error_dev(sc->sc_dev,
   1865 				    "unable to find PCIX capability\n");
   1866 			else if (sc->sc_type != WM_T_82545_3 &&
   1867 				 sc->sc_type != WM_T_82546_3) {
   1868 				/*
   1869 				 * Work around a problem caused by the BIOS
   1870 				 * setting the max memory read byte count
   1871 				 * incorrectly.
   1872 				 */
   1873 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1874 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1875 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1876 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1877 
   1878 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1879 				    PCIX_CMD_BYTECNT_SHIFT;
   1880 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1881 				    PCIX_STATUS_MAXB_SHIFT;
   1882 				if (bytecnt > maxb) {
   1883 					aprint_verbose_dev(sc->sc_dev,
   1884 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1885 					    512 << bytecnt, 512 << maxb);
   1886 					pcix_cmd = (pcix_cmd &
   1887 					    ~PCIX_CMD_BYTECNT_MASK) |
   1888 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1889 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1890 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1891 					    pcix_cmd);
   1892 				}
   1893 			}
   1894 		}
   1895 		/*
   1896 		 * The quad port adapter is special; it has a PCIX-PCIX
   1897 		 * bridge on the board, and can run the secondary bus at
   1898 		 * a higher speed.
   1899 		 */
   1900 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1901 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1902 								      : 66;
   1903 		} else if (sc->sc_flags & WM_F_PCIX) {
   1904 			switch (reg & STATUS_PCIXSPD_MASK) {
   1905 			case STATUS_PCIXSPD_50_66:
   1906 				sc->sc_bus_speed = 66;
   1907 				break;
   1908 			case STATUS_PCIXSPD_66_100:
   1909 				sc->sc_bus_speed = 100;
   1910 				break;
   1911 			case STATUS_PCIXSPD_100_133:
   1912 				sc->sc_bus_speed = 133;
   1913 				break;
   1914 			default:
   1915 				aprint_error_dev(sc->sc_dev,
   1916 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1917 				    reg & STATUS_PCIXSPD_MASK);
   1918 				sc->sc_bus_speed = 66;
   1919 				break;
   1920 			}
   1921 		} else
   1922 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1923 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1924 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1925 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1926 	}
   1927 
   1928 	/* clear interesting stat counters */
   1929 	CSR_READ(sc, WMREG_COLC);
   1930 	CSR_READ(sc, WMREG_RXERRC);
   1931 
   1932 	/* get PHY control from SMBus to PCIe */
   1933 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1934 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   1935 		wm_smbustopci(sc);
   1936 
   1937 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
   1938 	    || (sc->sc_type >= WM_T_ICH8))
   1939 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1940 	if (sc->sc_type >= WM_T_ICH8)
   1941 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   1942 
   1943 	/* Set PHY, NVM mutex related stuff */
   1944 	switch (sc->sc_type) {
   1945 	case WM_T_82542_2_0:
   1946 	case WM_T_82542_2_1:
   1947 	case WM_T_82543:
   1948 	case WM_T_82544:
   1949 		/* Microwire */
   1950 		sc->sc_nvm_wordsize = 64;
   1951 		sc->sc_nvm_addrbits = 6;
   1952 		break;
   1953 	case WM_T_82540:
   1954 	case WM_T_82545:
   1955 	case WM_T_82545_3:
   1956 	case WM_T_82546:
   1957 	case WM_T_82546_3:
   1958 		/* Microwire */
   1959 		reg = CSR_READ(sc, WMREG_EECD);
   1960 		if (reg & EECD_EE_SIZE) {
   1961 			sc->sc_nvm_wordsize = 256;
   1962 			sc->sc_nvm_addrbits = 8;
   1963 		} else {
   1964 			sc->sc_nvm_wordsize = 64;
   1965 			sc->sc_nvm_addrbits = 6;
   1966 		}
   1967 		sc->sc_flags |= WM_F_LOCK_EECD;
   1968 		break;
   1969 	case WM_T_82541:
   1970 	case WM_T_82541_2:
   1971 	case WM_T_82547:
   1972 	case WM_T_82547_2:
   1973 		sc->sc_flags |= WM_F_LOCK_EECD;
   1974 		reg = CSR_READ(sc, WMREG_EECD);
   1975 		if (reg & EECD_EE_TYPE) {
   1976 			/* SPI */
   1977 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1978 			wm_nvm_set_addrbits_size_eecd(sc);
   1979 		} else {
   1980 			/* Microwire */
   1981 			if ((reg & EECD_EE_ABITS) != 0) {
   1982 				sc->sc_nvm_wordsize = 256;
   1983 				sc->sc_nvm_addrbits = 8;
   1984 			} else {
   1985 				sc->sc_nvm_wordsize = 64;
   1986 				sc->sc_nvm_addrbits = 6;
   1987 			}
   1988 		}
   1989 		break;
   1990 	case WM_T_82571:
   1991 	case WM_T_82572:
   1992 		/* SPI */
   1993 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1994 		wm_nvm_set_addrbits_size_eecd(sc);
   1995 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1996 		sc->phy.acquire = wm_get_swsm_semaphore;
   1997 		sc->phy.release = wm_put_swsm_semaphore;
   1998 		break;
   1999 	case WM_T_82573:
   2000 	case WM_T_82574:
   2001 	case WM_T_82583:
   2002 		if (sc->sc_type == WM_T_82573) {
   2003 			sc->sc_flags |= WM_F_LOCK_SWSM;
   2004 			sc->phy.acquire = wm_get_swsm_semaphore;
   2005 			sc->phy.release = wm_put_swsm_semaphore;
   2006 		} else {
   2007 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
   2008 			/* Both PHY and NVM use the same semaphore. */
   2009 			sc->phy.acquire
   2010 			    = wm_get_swfwhw_semaphore;
   2011 			sc->phy.release
   2012 			    = wm_put_swfwhw_semaphore;
   2013 		}
   2014 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   2015 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   2016 			sc->sc_nvm_wordsize = 2048;
   2017 		} else {
   2018 			/* SPI */
   2019 			sc->sc_flags |= WM_F_EEPROM_SPI;
   2020 			wm_nvm_set_addrbits_size_eecd(sc);
   2021 		}
   2022 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2023 		break;
   2024 	case WM_T_82575:
   2025 	case WM_T_82576:
   2026 	case WM_T_82580:
   2027 	case WM_T_I350:
   2028 	case WM_T_I354:
   2029 	case WM_T_80003:
   2030 		/* SPI */
   2031 		sc->sc_flags |= WM_F_EEPROM_SPI;
   2032 		wm_nvm_set_addrbits_size_eecd(sc);
   2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   2034 		    | WM_F_LOCK_SWSM;
   2035 		sc->phy.acquire = wm_get_phy_82575;
   2036 		sc->phy.release = wm_put_phy_82575;
   2037 		break;
   2038 	case WM_T_ICH8:
   2039 	case WM_T_ICH9:
   2040 	case WM_T_ICH10:
   2041 	case WM_T_PCH:
   2042 	case WM_T_PCH2:
   2043 	case WM_T_PCH_LPT:
   2044 		/* FLASH */
   2045 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2046 		sc->sc_nvm_wordsize = 2048;
   2047 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
   2048 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   2049 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   2050 			aprint_error_dev(sc->sc_dev,
   2051 			    "can't map FLASH registers\n");
   2052 			goto out;
   2053 		}
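         		/*
          		 * A sketch of the GFPREG decoding done below: the low
          		 * bits give the first sector of the NVM region and
          		 * bits 16 and up the last one; the difference, scaled
          		 * by the sector size, is split over two flash banks
          		 * and converted from bytes to 16-bit words.
          		 */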
   2054 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   2055 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   2056 		    ICH_FLASH_SECTOR_SIZE;
   2057 		sc->sc_ich8_flash_bank_size =
   2058 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   2059 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
   2060 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   2061 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   2062 		sc->sc_flashreg_offset = 0;
   2063 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2064 		sc->phy.release = wm_put_swflag_ich8lan;
   2065 		break;
   2066 	case WM_T_PCH_SPT:
   2067 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
   2068 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   2069 		sc->sc_flasht = sc->sc_st;
   2070 		sc->sc_flashh = sc->sc_sh;
   2071 		sc->sc_ich8_flash_base = 0;
   2072 		sc->sc_nvm_wordsize =
   2073 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
   2074 			* NVM_SIZE_MULTIPLIER;
    2075 		/* It is the size in bytes; we want words */
   2076 		sc->sc_nvm_wordsize /= 2;
   2077 		/* assume 2 banks */
   2078 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
   2079 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
   2080 		sc->phy.acquire = wm_get_swflag_ich8lan;
   2081 		sc->phy.release = wm_put_swflag_ich8lan;
   2082 		break;
   2083 	case WM_T_I210:
   2084 	case WM_T_I211:
   2085 		if (wm_nvm_get_flash_presence_i210(sc)) {
   2086 			wm_nvm_set_addrbits_size_eecd(sc);
   2087 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   2088 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   2089 		} else {
   2090 			sc->sc_nvm_wordsize = INVM_SIZE;
   2091 			sc->sc_flags |= WM_F_EEPROM_INVM;
   2092 		}
   2093 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
   2094 		sc->phy.acquire = wm_get_phy_82575;
   2095 		sc->phy.release = wm_put_phy_82575;
   2096 		break;
   2097 	default:
   2098 		break;
   2099 	}
   2100 
   2101 	/* Reset the chip to a known state. */
   2102 	wm_reset(sc);
   2103 
   2104 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   2105 	switch (sc->sc_type) {
   2106 	case WM_T_82571:
   2107 	case WM_T_82572:
   2108 		reg = CSR_READ(sc, WMREG_SWSM2);
   2109 		if ((reg & SWSM2_LOCK) == 0) {
   2110 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   2111 			force_clear_smbi = true;
   2112 		} else
   2113 			force_clear_smbi = false;
   2114 		break;
   2115 	case WM_T_82573:
   2116 	case WM_T_82574:
   2117 	case WM_T_82583:
   2118 		force_clear_smbi = true;
   2119 		break;
   2120 	default:
   2121 		force_clear_smbi = false;
   2122 		break;
   2123 	}
   2124 	if (force_clear_smbi) {
   2125 		reg = CSR_READ(sc, WMREG_SWSM);
   2126 		if ((reg & SWSM_SMBI) != 0)
   2127 			aprint_error_dev(sc->sc_dev,
   2128 			    "Please update the Bootagent\n");
   2129 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   2130 	}
   2131 
   2132 	/*
    2133 	 * Defer printing the EEPROM type until after verifying the checksum.
   2134 	 * This allows the EEPROM type to be printed correctly in the case
   2135 	 * that no EEPROM is attached.
   2136 	 */
   2137 	/*
   2138 	 * Validate the EEPROM checksum. If the checksum fails, flag
   2139 	 * this for later, so we can fail future reads from the EEPROM.
   2140 	 */
   2141 	if (wm_nvm_validate_checksum(sc)) {
   2142 		/*
    2143 		 * Check again, because some PCI-e parts fail the
    2144 		 * first check due to the link being in a sleep state.
   2145 		 */
   2146 		if (wm_nvm_validate_checksum(sc))
   2147 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   2148 	}
   2149 
   2150 	/* Set device properties (macflags) */
   2151 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   2152 
   2153 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   2154 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   2155 	else {
   2156 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   2157 		    sc->sc_nvm_wordsize);
   2158 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   2159 			aprint_verbose("iNVM");
   2160 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   2161 			aprint_verbose("FLASH(HW)");
   2162 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   2163 			aprint_verbose("FLASH");
   2164 		else {
   2165 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   2166 				eetype = "SPI";
   2167 			else
   2168 				eetype = "MicroWire";
   2169 			aprint_verbose("(%d address bits) %s EEPROM",
   2170 			    sc->sc_nvm_addrbits, eetype);
   2171 		}
   2172 	}
   2173 	wm_nvm_version(sc);
   2174 	aprint_verbose("\n");
   2175 
   2176 	/* Check for I21[01] PLL workaround */
   2177 	if (sc->sc_type == WM_T_I210)
   2178 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2179 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2180 		/* NVM image release 3.25 has a workaround */
   2181 		if ((sc->sc_nvm_ver_major < 3)
   2182 		    || ((sc->sc_nvm_ver_major == 3)
   2183 			&& (sc->sc_nvm_ver_minor < 25))) {
   2184 			aprint_verbose_dev(sc->sc_dev,
   2185 			    "ROM image version %d.%d is older than 3.25\n",
   2186 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2187 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2188 		}
   2189 	}
   2190 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2191 		wm_pll_workaround_i210(sc);
   2192 
   2193 	wm_get_wakeup(sc);
   2194 	switch (sc->sc_type) {
   2195 	case WM_T_82571:
   2196 	case WM_T_82572:
   2197 	case WM_T_82573:
   2198 	case WM_T_82574:
   2199 	case WM_T_82583:
   2200 	case WM_T_80003:
   2201 	case WM_T_ICH8:
   2202 	case WM_T_ICH9:
   2203 	case WM_T_ICH10:
   2204 	case WM_T_PCH:
   2205 	case WM_T_PCH2:
   2206 	case WM_T_PCH_LPT:
   2207 	case WM_T_PCH_SPT:
   2208 		/* Non-AMT based hardware can now take control from firmware */
   2209 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2210 			wm_get_hw_control(sc);
   2211 		break;
   2212 	default:
   2213 		break;
   2214 	}
   2215 
   2216 	/*
    2217 	 * Read the Ethernet address from the EEPROM, unless it was
    2218 	 * first found in the device properties.
   2219 	 */
   2220 	ea = prop_dictionary_get(dict, "mac-address");
   2221 	if (ea != NULL) {
   2222 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2223 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2224 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2225 	} else {
   2226 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2227 			aprint_error_dev(sc->sc_dev,
   2228 			    "unable to read Ethernet address\n");
   2229 			goto out;
   2230 		}
   2231 	}
   2232 
   2233 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2234 	    ether_sprintf(enaddr));
   2235 
   2236 	/*
   2237 	 * Read the config info from the EEPROM, and set up various
   2238 	 * bits in the control registers based on their contents.
   2239 	 */
   2240 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2241 	if (pn != NULL) {
   2242 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2243 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2244 	} else {
   2245 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2246 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2247 			goto out;
   2248 		}
   2249 	}
   2250 
   2251 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2252 	if (pn != NULL) {
   2253 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2254 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2255 	} else {
   2256 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2257 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2258 			goto out;
   2259 		}
   2260 	}
   2261 
   2262 	/* check for WM_F_WOL */
   2263 	switch (sc->sc_type) {
   2264 	case WM_T_82542_2_0:
   2265 	case WM_T_82542_2_1:
   2266 	case WM_T_82543:
   2267 		/* dummy? */
   2268 		eeprom_data = 0;
   2269 		apme_mask = NVM_CFG3_APME;
   2270 		break;
   2271 	case WM_T_82544:
   2272 		apme_mask = NVM_CFG2_82544_APM_EN;
   2273 		eeprom_data = cfg2;
   2274 		break;
   2275 	case WM_T_82546:
   2276 	case WM_T_82546_3:
   2277 	case WM_T_82571:
   2278 	case WM_T_82572:
   2279 	case WM_T_82573:
   2280 	case WM_T_82574:
   2281 	case WM_T_82583:
   2282 	case WM_T_80003:
   2283 	default:
   2284 		apme_mask = NVM_CFG3_APME;
   2285 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2286 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2287 		break;
   2288 	case WM_T_82575:
   2289 	case WM_T_82576:
   2290 	case WM_T_82580:
   2291 	case WM_T_I350:
   2292 	case WM_T_I354: /* XXX ok? */
   2293 	case WM_T_ICH8:
   2294 	case WM_T_ICH9:
   2295 	case WM_T_ICH10:
   2296 	case WM_T_PCH:
   2297 	case WM_T_PCH2:
   2298 	case WM_T_PCH_LPT:
   2299 	case WM_T_PCH_SPT:
   2300 		/* XXX The funcid should be checked on some devices */
   2301 		apme_mask = WUC_APME;
   2302 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2303 		break;
   2304 	}
   2305 
    2306 	/* Check for the WM_F_WOL flag now that the EEPROM values are read */
   2307 	if ((eeprom_data & apme_mask) != 0)
   2308 		sc->sc_flags |= WM_F_WOL;
   2309 #ifdef WM_DEBUG
   2310 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2311 		printf("WOL\n");
   2312 #endif
   2313 
   2314 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2315 		/* Check NVM for autonegotiation */
   2316 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2317 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2318 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2319 		}
   2320 	}
   2321 
   2322 	/*
    2323 	 * XXX need special handling for some multiple-port cards
    2324 	 * to disable a particular port.
   2325 	 */
   2326 
   2327 	if (sc->sc_type >= WM_T_82544) {
   2328 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2329 		if (pn != NULL) {
   2330 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2331 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2332 		} else {
   2333 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2334 				aprint_error_dev(sc->sc_dev,
   2335 				    "unable to read SWDPIN\n");
   2336 				goto out;
   2337 			}
   2338 		}
   2339 	}
   2340 
   2341 	if (cfg1 & NVM_CFG1_ILOS)
   2342 		sc->sc_ctrl |= CTRL_ILOS;
   2343 
   2344 	/*
   2345 	 * XXX
    2346 	 * This code isn't correct because pins 2 and 3 are located
    2347 	 * in different positions on newer chips. Check all datasheets.
    2348 	 *
    2349 	 * Until this problem is resolved, apply this only up to the 82580.
   2350 	 */
   2351 	if (sc->sc_type <= WM_T_82580) {
   2352 		if (sc->sc_type >= WM_T_82544) {
   2353 			sc->sc_ctrl |=
   2354 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2355 			    CTRL_SWDPIO_SHIFT;
   2356 			sc->sc_ctrl |=
   2357 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2358 			    CTRL_SWDPINS_SHIFT;
   2359 		} else {
   2360 			sc->sc_ctrl |=
   2361 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2362 			    CTRL_SWDPIO_SHIFT;
   2363 		}
   2364 	}
   2365 
    2366 	/* XXX Needed for chips other than the 82580 too? */
   2367 	if (sc->sc_type == WM_T_82580) {
   2368 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2369 		if (nvmword & __BIT(13))
   2370 			sc->sc_ctrl |= CTRL_ILOS;
   2371 	}
   2372 
   2373 #if 0
   2374 	if (sc->sc_type >= WM_T_82544) {
   2375 		if (cfg1 & NVM_CFG1_IPS0)
   2376 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2377 		if (cfg1 & NVM_CFG1_IPS1)
   2378 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2379 		sc->sc_ctrl_ext |=
   2380 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2381 		    CTRL_EXT_SWDPIO_SHIFT;
   2382 		sc->sc_ctrl_ext |=
   2383 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2384 		    CTRL_EXT_SWDPINS_SHIFT;
   2385 	} else {
   2386 		sc->sc_ctrl_ext |=
   2387 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2388 		    CTRL_EXT_SWDPIO_SHIFT;
   2389 	}
   2390 #endif
   2391 
   2392 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2393 #if 0
   2394 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2395 #endif
   2396 
   2397 	if (sc->sc_type == WM_T_PCH) {
   2398 		uint16_t val;
   2399 
   2400 		/* Save the NVM K1 bit setting */
   2401 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2402 
   2403 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2404 			sc->sc_nvm_k1_enabled = 1;
   2405 		else
   2406 			sc->sc_nvm_k1_enabled = 0;
   2407 	}
   2408 
   2409 	/*
    2410 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2411 	 * media structures accordingly.
   2412 	 */
   2413 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2414 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2415 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2416 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
   2417 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2418 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2419 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2420 	} else if (sc->sc_type < WM_T_82543 ||
   2421 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2422 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2423 			aprint_error_dev(sc->sc_dev,
   2424 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2425 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2426 		}
   2427 		wm_tbi_mediainit(sc);
   2428 	} else {
   2429 		switch (sc->sc_type) {
   2430 		case WM_T_82575:
   2431 		case WM_T_82576:
   2432 		case WM_T_82580:
   2433 		case WM_T_I350:
   2434 		case WM_T_I354:
   2435 		case WM_T_I210:
   2436 		case WM_T_I211:
   2437 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2438 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2439 			switch (link_mode) {
   2440 			case CTRL_EXT_LINK_MODE_1000KX:
   2441 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2442 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2443 				break;
   2444 			case CTRL_EXT_LINK_MODE_SGMII:
   2445 				if (wm_sgmii_uses_mdio(sc)) {
   2446 					aprint_verbose_dev(sc->sc_dev,
   2447 					    "SGMII(MDIO)\n");
   2448 					sc->sc_flags |= WM_F_SGMII;
   2449 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2450 					break;
   2451 				}
   2452 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2453 				/*FALLTHROUGH*/
   2454 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2455 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2456 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2457 					if (link_mode
   2458 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2459 						sc->sc_mediatype
   2460 						    = WM_MEDIATYPE_COPPER;
   2461 						sc->sc_flags |= WM_F_SGMII;
   2462 					} else {
   2463 						sc->sc_mediatype
   2464 						    = WM_MEDIATYPE_SERDES;
   2465 						aprint_verbose_dev(sc->sc_dev,
   2466 						    "SERDES\n");
   2467 					}
   2468 					break;
   2469 				}
   2470 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2471 					aprint_verbose_dev(sc->sc_dev,
   2472 					    "SERDES\n");
   2473 
   2474 				/* Change current link mode setting */
   2475 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2476 				switch (sc->sc_mediatype) {
   2477 				case WM_MEDIATYPE_COPPER:
   2478 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2479 					break;
   2480 				case WM_MEDIATYPE_SERDES:
   2481 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2482 					break;
   2483 				default:
   2484 					break;
   2485 				}
   2486 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2487 				break;
   2488 			case CTRL_EXT_LINK_MODE_GMII:
   2489 			default:
   2490 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2491 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2492 				break;
   2493 			}
   2494 
    2496 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2497 				reg |= CTRL_EXT_I2C_ENA;
    2498 			else
    2499 				reg &= ~CTRL_EXT_I2C_ENA;
   2500 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2501 
   2502 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2503 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2504 			else
   2505 				wm_tbi_mediainit(sc);
   2506 			break;
   2507 		default:
   2508 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2509 				aprint_error_dev(sc->sc_dev,
   2510 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2511 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2512 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2513 		}
   2514 	}
   2515 
   2516 	ifp = &sc->sc_ethercom.ec_if;
   2517 	xname = device_xname(sc->sc_dev);
   2518 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2519 	ifp->if_softc = sc;
   2520 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2521 	ifp->if_extflags = IFEF_START_MPSAFE;
   2522 	ifp->if_ioctl = wm_ioctl;
   2523 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   2524 		ifp->if_start = wm_nq_start;
   2525 		if (sc->sc_nqueues > 1)
   2526 			ifp->if_transmit = wm_nq_transmit;
   2527 	} else
   2528 		ifp->if_start = wm_start;
   2529 	ifp->if_watchdog = wm_watchdog;
   2530 	ifp->if_init = wm_init;
   2531 	ifp->if_stop = wm_stop;
   2532 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2533 	IFQ_SET_READY(&ifp->if_snd);
   2534 
   2535 	/* Check for jumbo frame */
   2536 	switch (sc->sc_type) {
   2537 	case WM_T_82573:
   2538 		/* XXX limited to 9234 if ASPM is disabled */
   2539 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2540 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2541 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2542 		break;
   2543 	case WM_T_82571:
   2544 	case WM_T_82572:
   2545 	case WM_T_82574:
   2546 	case WM_T_82575:
   2547 	case WM_T_82576:
   2548 	case WM_T_82580:
   2549 	case WM_T_I350:
   2550 	case WM_T_I354: /* XXXX ok? */
   2551 	case WM_T_I210:
   2552 	case WM_T_I211:
   2553 	case WM_T_80003:
   2554 	case WM_T_ICH9:
   2555 	case WM_T_ICH10:
   2556 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2557 	case WM_T_PCH_LPT:
   2558 	case WM_T_PCH_SPT:
   2559 		/* XXX limited to 9234 */
   2560 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2561 		break;
   2562 	case WM_T_PCH:
   2563 		/* XXX limited to 4096 */
   2564 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2565 		break;
   2566 	case WM_T_82542_2_0:
   2567 	case WM_T_82542_2_1:
   2568 	case WM_T_82583:
   2569 	case WM_T_ICH8:
   2570 		/* No support for jumbo frame */
   2571 		break;
   2572 	default:
   2573 		/* ETHER_MAX_LEN_JUMBO */
   2574 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2575 		break;
   2576 	}
   2577 
    2578 	/* If we're an i82543 or greater, we can support VLANs. */
   2579 	if (sc->sc_type >= WM_T_82543)
   2580 		sc->sc_ethercom.ec_capabilities |=
   2581 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2582 
   2583 	/*
    2584 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2585 	 * on i82543 and later.
   2586 	 */
   2587 	if (sc->sc_type >= WM_T_82543) {
   2588 		ifp->if_capabilities |=
   2589 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2590 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2591 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2592 		    IFCAP_CSUM_TCPv6_Tx |
   2593 		    IFCAP_CSUM_UDPv6_Tx;
   2594 	}
   2595 
   2596 	/*
    2597 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
   2598 	 *
   2599 	 *	82541GI (8086:1076) ... no
   2600 	 *	82572EI (8086:10b9) ... yes
   2601 	 */
   2602 	if (sc->sc_type >= WM_T_82571) {
   2603 		ifp->if_capabilities |=
   2604 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2605 	}
   2606 
   2607 	/*
    2608 	 * If we're an i82544 or greater (except the i82547), we can do
   2609 	 * TCP segmentation offload.
   2610 	 */
   2611 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2612 		ifp->if_capabilities |= IFCAP_TSOv4;
   2613 	}
   2614 
   2615 	if (sc->sc_type >= WM_T_82571) {
   2616 		ifp->if_capabilities |= IFCAP_TSOv6;
   2617 	}
   2618 
   2619 #ifdef WM_MPSAFE
   2620 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2621 #else
   2622 	sc->sc_core_lock = NULL;
   2623 #endif
   2624 
   2625 	/* Attach the interface. */
   2626 	if_initialize(ifp);
   2627 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
   2628 	ether_ifattach(ifp, enaddr);
   2629 	if_register(ifp);
   2630 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2631 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2632 			  RND_FLAG_DEFAULT);
   2633 
   2634 #ifdef WM_EVENT_COUNTERS
   2635 	/* Attach event counters. */
   2636 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2637 	    NULL, xname, "linkintr");
   2638 
   2639 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2640 	    NULL, xname, "tx_xoff");
   2641 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2642 	    NULL, xname, "tx_xon");
   2643 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2644 	    NULL, xname, "rx_xoff");
   2645 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2646 	    NULL, xname, "rx_xon");
   2647 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2648 	    NULL, xname, "rx_macctl");
   2649 #endif /* WM_EVENT_COUNTERS */
   2650 
   2651 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2652 		pmf_class_network_register(self, ifp);
   2653 	else
   2654 		aprint_error_dev(self, "couldn't establish power handler\n");
   2655 
   2656 	sc->sc_flags |= WM_F_ATTACHED;
   2657  out:
   2658 	return;
   2659 }
   2660 
   2661 /* The detach function (ca_detach) */
   2662 static int
   2663 wm_detach(device_t self, int flags __unused)
   2664 {
   2665 	struct wm_softc *sc = device_private(self);
   2666 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2667 	int i;
   2668 
   2669 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2670 		return 0;
   2671 
   2672 	/* Stop the interface. Callouts are stopped in it. */
   2673 	wm_stop(ifp, 1);
   2674 
   2675 	pmf_device_deregister(self);
   2676 
   2677 	/* Tell the firmware about the release */
   2678 	WM_CORE_LOCK(sc);
   2679 	wm_release_manageability(sc);
   2680 	wm_release_hw_control(sc);
   2681 	WM_CORE_UNLOCK(sc);
   2682 
   2683 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2684 
   2685 	/* Delete all remaining media. */
   2686 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2687 
   2688 	ether_ifdetach(ifp);
   2689 	if_detach(ifp);
   2690 	if_percpuq_destroy(sc->sc_ipq);
   2691 
   2692 	/* Unload RX dmamaps and free mbufs */
   2693 	for (i = 0; i < sc->sc_nqueues; i++) {
   2694 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   2695 		mutex_enter(rxq->rxq_lock);
   2696 		wm_rxdrain(rxq);
   2697 		mutex_exit(rxq->rxq_lock);
   2698 	}
   2699 	/* Must unlock here */
   2700 
   2701 	/* Disestablish the interrupt handler */
   2702 	for (i = 0; i < sc->sc_nintrs; i++) {
   2703 		if (sc->sc_ihs[i] != NULL) {
   2704 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2705 			sc->sc_ihs[i] = NULL;
   2706 		}
   2707 	}
   2708 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2709 
   2710 	wm_free_txrx_queues(sc);
   2711 
   2712 	/* Unmap the registers */
   2713 	if (sc->sc_ss) {
   2714 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2715 		sc->sc_ss = 0;
   2716 	}
   2717 	if (sc->sc_ios) {
   2718 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2719 		sc->sc_ios = 0;
   2720 	}
   2721 	if (sc->sc_flashs) {
   2722 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2723 		sc->sc_flashs = 0;
   2724 	}
   2725 
   2726 	if (sc->sc_core_lock)
   2727 		mutex_obj_free(sc->sc_core_lock);
   2728 	if (sc->sc_ich_phymtx)
   2729 		mutex_obj_free(sc->sc_ich_phymtx);
   2730 	if (sc->sc_ich_nvmmtx)
   2731 		mutex_obj_free(sc->sc_ich_nvmmtx);
   2732 
   2733 	return 0;
   2734 }
   2735 
   2736 static bool
   2737 wm_suspend(device_t self, const pmf_qual_t *qual)
   2738 {
   2739 	struct wm_softc *sc = device_private(self);
   2740 
   2741 	wm_release_manageability(sc);
   2742 	wm_release_hw_control(sc);
   2743 #ifdef WM_WOL
   2744 	wm_enable_wakeup(sc);
   2745 #endif
   2746 
   2747 	return true;
   2748 }
   2749 
   2750 static bool
   2751 wm_resume(device_t self, const pmf_qual_t *qual)
   2752 {
   2753 	struct wm_softc *sc = device_private(self);
   2754 
   2755 	wm_init_manageability(sc);
   2756 
   2757 	return true;
   2758 }
   2759 
   2760 /*
   2761  * wm_watchdog:		[ifnet interface function]
   2762  *
   2763  *	Watchdog timer handler.
   2764  */
   2765 static void
   2766 wm_watchdog(struct ifnet *ifp)
   2767 {
   2768 	int qid;
   2769 	struct wm_softc *sc = ifp->if_softc;
   2770 
   2771 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
   2772 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
   2773 
   2774 		wm_watchdog_txq(ifp, txq);
   2775 	}
   2776 
   2777 	/* Reset the interface. */
   2778 	(void) wm_init(ifp);
   2779 
   2780 	/*
    2781 	 * There is still some upper-layer processing which calls
    2782 	 * ifp->if_start(), e.g. ALTQ.
   2783 	 */
   2784 	/* Try to get more packets going. */
   2785 	ifp->if_start(ifp);
   2786 }
   2787 
   2788 static void
   2789 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
   2790 {
   2791 	struct wm_softc *sc = ifp->if_softc;
   2792 
   2793 	/*
   2794 	 * Since we're using delayed interrupts, sweep up
   2795 	 * before we report an error.
   2796 	 */
   2797 	mutex_enter(txq->txq_lock);
   2798 	wm_txeof(sc, txq);
   2799 	mutex_exit(txq->txq_lock);
   2800 
   2801 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2802 #ifdef WM_DEBUG
   2803 		int i, j;
   2804 		struct wm_txsoft *txs;
   2805 #endif
   2806 		log(LOG_ERR,
   2807 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2808 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2809 		    txq->txq_next);
   2810 		ifp->if_oerrors++;
   2811 #ifdef WM_DEBUG
    2812 		for (i = txq->txq_sdirty; i != txq->txq_snext;
    2813 		    i = WM_NEXTTXS(txq, i)) {
    2814 			txs = &txq->txq_soft[i];
    2815 			printf("txs %d tx %d -> %d\n",
    2816 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
    2817 			for (j = txs->txs_firstdesc; ;
    2818 			    j = WM_NEXTTX(txq, j)) {
    2819 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
    2820 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
    2821 				printf("\t %#08x%08x\n",
    2822 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
    2823 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
    2824 				if (j == txs->txs_lastdesc)
    2825 					break;
    2826 			}
    2827 		}
   2828 #endif
   2829 	}
   2830 }
   2831 
   2832 /*
   2833  * wm_tick:
   2834  *
   2835  *	One second timer, used to check link status, sweep up
   2836  *	completed transmit jobs, etc.
   2837  */
   2838 static void
   2839 wm_tick(void *arg)
   2840 {
   2841 	struct wm_softc *sc = arg;
   2842 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2843 #ifndef WM_MPSAFE
   2844 	int s = splnet();
   2845 #endif
   2846 
   2847 	WM_CORE_LOCK(sc);
   2848 
   2849 	if (sc->sc_core_stopping)
   2850 		goto out;
   2851 
   2852 	if (sc->sc_type >= WM_T_82542_2_1) {
   2853 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2854 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2855 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2856 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2857 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2858 	}
   2859 
   2860 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2861 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2862 	    + CSR_READ(sc, WMREG_CRCERRS)
   2863 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2864 	    + CSR_READ(sc, WMREG_SYMERRC)
   2865 	    + CSR_READ(sc, WMREG_RXERRC)
   2866 	    + CSR_READ(sc, WMREG_SEC)
   2867 	    + CSR_READ(sc, WMREG_CEXTERR)
   2868 	    + CSR_READ(sc, WMREG_RLEC);
   2869 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2870 
   2871 	if (sc->sc_flags & WM_F_HAS_MII)
   2872 		mii_tick(&sc->sc_mii);
   2873 	else if ((sc->sc_type >= WM_T_82575)
   2874 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2875 		wm_serdes_tick(sc);
   2876 	else
   2877 		wm_tbi_tick(sc);
   2878 
   2879 out:
   2880 	WM_CORE_UNLOCK(sc);
   2881 #ifndef WM_MPSAFE
   2882 	splx(s);
   2883 #endif
   2884 
   2885 	if (!sc->sc_core_stopping)
   2886 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2887 }
   2888 
   2889 static int
   2890 wm_ifflags_cb(struct ethercom *ec)
   2891 {
   2892 	struct ifnet *ifp = &ec->ec_if;
   2893 	struct wm_softc *sc = ifp->if_softc;
   2894 	int rc = 0;
   2895 
   2896 	WM_CORE_LOCK(sc);
   2897 
   2898 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2899 	sc->sc_if_flags = ifp->if_flags;
   2900 
   2901 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
   2902 		rc = ENETRESET;
   2903 		goto out;
   2904 	}
   2905 
   2906 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2907 		wm_set_filter(sc);
   2908 
   2909 	wm_set_vlan(sc);
   2910 
   2911 out:
   2912 	WM_CORE_UNLOCK(sc);
   2913 
   2914 	return rc;
   2915 }
   2916 
   2917 /*
   2918  * wm_ioctl:		[ifnet interface function]
   2919  *
   2920  *	Handle control requests from the operator.
   2921  */
   2922 static int
   2923 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2924 {
   2925 	struct wm_softc *sc = ifp->if_softc;
   2926 	struct ifreq *ifr = (struct ifreq *) data;
   2927 	struct ifaddr *ifa = (struct ifaddr *)data;
   2928 	struct sockaddr_dl *sdl;
   2929 	int s, error;
   2930 
   2931 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   2932 		device_xname(sc->sc_dev), __func__));
   2933 
   2934 #ifndef WM_MPSAFE
   2935 	s = splnet();
   2936 #endif
   2937 	switch (cmd) {
   2938 	case SIOCSIFMEDIA:
   2939 	case SIOCGIFMEDIA:
   2940 		WM_CORE_LOCK(sc);
   2941 		/* Flow control requires full-duplex mode. */
   2942 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2943 		    (ifr->ifr_media & IFM_FDX) == 0)
   2944 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2945 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2946 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2947 				/* We can do both TXPAUSE and RXPAUSE. */
   2948 				ifr->ifr_media |=
   2949 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2950 			}
   2951 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2952 		}
   2953 		WM_CORE_UNLOCK(sc);
   2954 #ifdef WM_MPSAFE
   2955 		s = splnet();
   2956 #endif
   2957 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2958 #ifdef WM_MPSAFE
   2959 		splx(s);
   2960 #endif
   2961 		break;
   2962 	case SIOCINITIFADDR:
   2963 		WM_CORE_LOCK(sc);
   2964 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2965 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2966 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2967 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
    2968 			/* Unicast address is the first RAL entry */
   2969 			wm_set_filter(sc);
   2970 			error = 0;
   2971 			WM_CORE_UNLOCK(sc);
   2972 			break;
   2973 		}
   2974 		WM_CORE_UNLOCK(sc);
   2975 		/*FALLTHROUGH*/
   2976 	default:
   2977 #ifdef WM_MPSAFE
   2978 		s = splnet();
   2979 #endif
   2980 		/* It may call wm_start, so unlock here */
   2981 		error = ether_ioctl(ifp, cmd, data);
   2982 #ifdef WM_MPSAFE
   2983 		splx(s);
   2984 #endif
   2985 		if (error != ENETRESET)
   2986 			break;
   2987 
   2988 		error = 0;
   2989 
   2990 		if (cmd == SIOCSIFCAP) {
   2991 			error = (*ifp->if_init)(ifp);
   2992 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2993 			;
   2994 		else if (ifp->if_flags & IFF_RUNNING) {
   2995 			/*
   2996 			 * Multicast list has changed; set the hardware filter
   2997 			 * accordingly.
   2998 			 */
   2999 			WM_CORE_LOCK(sc);
   3000 			wm_set_filter(sc);
   3001 			WM_CORE_UNLOCK(sc);
   3002 		}
   3003 		break;
   3004 	}
   3005 
   3006 #ifndef WM_MPSAFE
   3007 	splx(s);
   3008 #endif
   3009 	return error;
   3010 }
   3011 
   3012 /* MAC address related */
   3013 
   3014 /*
    3015  * Get the offset of the MAC address and return it.
    3016  * If an error occurs, return 0.
   3017  */
   3018 static uint16_t
   3019 wm_check_alt_mac_addr(struct wm_softc *sc)
   3020 {
   3021 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3022 	uint16_t offset = NVM_OFF_MACADDR;
   3023 
   3024 	/* Try to read alternative MAC address pointer */
   3025 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   3026 		return 0;
   3027 
    3028 	/* Check whether the pointer is valid. */
   3029 	if ((offset == 0x0000) || (offset == 0xffff))
   3030 		return 0;
   3031 
   3032 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   3033 	/*
    3034 	 * Check whether the alternative MAC address is valid.
    3035 	 * Some cards have a non-0xffff pointer but don't actually
    3036 	 * use an alternative MAC address.
    3037 	 *
    3038 	 * A valid unicast address must have the multicast (I/G) bit clear.
   3039 	 */
   3040 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
    3041 		if ((myea[0] & 0x01) == 0)
   3042 			return offset; /* Found */
   3043 
   3044 	/* Not found */
   3045 	return 0;
   3046 }
   3047 
   3048 static int
   3049 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   3050 {
   3051 	uint16_t myea[ETHER_ADDR_LEN / 2];
   3052 	uint16_t offset = NVM_OFF_MACADDR;
   3053 	int do_invert = 0;
   3054 
   3055 	switch (sc->sc_type) {
   3056 	case WM_T_82580:
   3057 	case WM_T_I350:
   3058 	case WM_T_I354:
   3059 		/* EEPROM Top Level Partitioning */
   3060 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   3061 		break;
   3062 	case WM_T_82571:
   3063 	case WM_T_82575:
   3064 	case WM_T_82576:
   3065 	case WM_T_80003:
   3066 	case WM_T_I210:
   3067 	case WM_T_I211:
   3068 		offset = wm_check_alt_mac_addr(sc);
   3069 		if (offset == 0)
   3070 			if ((sc->sc_funcid & 0x01) == 1)
   3071 				do_invert = 1;
   3072 		break;
   3073 	default:
   3074 		if ((sc->sc_funcid & 0x01) == 1)
   3075 			do_invert = 1;
   3076 		break;
   3077 	}
   3078 
   3079 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
   3080 		goto bad;
   3081 
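	/*
	 * NVM words are stored little-endian; e.g. a (hypothetical)
	 * address 00:11:22:33:44:55 reads back as myea[] =
	 * { 0x1100, 0x3322, 0x5544 } and is unpacked below.
	 */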
   3082 	enaddr[0] = myea[0] & 0xff;
   3083 	enaddr[1] = myea[0] >> 8;
   3084 	enaddr[2] = myea[1] & 0xff;
   3085 	enaddr[3] = myea[1] >> 8;
   3086 	enaddr[4] = myea[2] & 0xff;
   3087 	enaddr[5] = myea[2] >> 8;
   3088 
   3089 	/*
   3090 	 * Toggle the LSB of the MAC address on the second port
   3091 	 * of some dual port cards.
   3092 	 */
   3093 	if (do_invert != 0)
   3094 		enaddr[5] ^= 1;
   3095 
   3096 	return 0;
   3097 
   3098  bad:
   3099 	return -1;
   3100 }
   3101 
   3102 /*
   3103  * wm_set_ral:
   3104  *
    3105  *	Set an entry in the receive address list.
   3106  */
   3107 static void
   3108 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   3109 {
   3110 	uint32_t ral_lo, ral_hi;
   3111 
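	/*
	 * The address is split across a register pair; e.g. a
	 * (hypothetical) address 00:11:22:33:44:55 packs into
	 * ral_lo = 0x33221100 and ral_hi = 0x00005544 before
	 * RAL_AV marks the entry valid.
	 */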
   3112 	if (enaddr != NULL) {
   3113 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   3114 		    (enaddr[3] << 24);
   3115 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   3116 		ral_hi |= RAL_AV;
   3117 	} else {
   3118 		ral_lo = 0;
   3119 		ral_hi = 0;
   3120 	}
   3121 
   3122 	if (sc->sc_type >= WM_T_82544) {
   3123 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   3124 		    ral_lo);
   3125 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   3126 		    ral_hi);
   3127 	} else {
   3128 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   3129 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   3130 	}
   3131 }
   3132 
   3133 /*
   3134  * wm_mchash:
   3135  *
   3136  *	Compute the hash of the multicast address for the 4096-bit
   3137  *	multicast filter.
   3138  */
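/*
 * Example (hypothetical, sc_mchash_type == 0, non-ICH/PCH part): the
 * address 01:00:5e:00:00:01 hashes to (0x00 >> 4) | (0x01 << 4) = 0x010,
 * which wm_set_filter() maps to MTA register 0 (hash >> 5), bit 16
 * (hash & 0x1f).
 */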
   3139 static uint32_t
   3140 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   3141 {
   3142 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   3143 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   3144 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3145 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3146 	uint32_t hash;
   3147 
   3148 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3149 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3150 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3151 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   3152 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3153 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3154 		return (hash & 0x3ff);
   3155 	}
   3156 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3157 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3158 
   3159 	return (hash & 0xfff);
   3160 }
   3161 
   3162 /*
   3163  * wm_set_filter:
   3164  *
   3165  *	Set up the receive filter.
   3166  */
   3167 static void
   3168 wm_set_filter(struct wm_softc *sc)
   3169 {
   3170 	struct ethercom *ec = &sc->sc_ethercom;
   3171 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3172 	struct ether_multi *enm;
   3173 	struct ether_multistep step;
   3174 	bus_addr_t mta_reg;
   3175 	uint32_t hash, reg, bit;
   3176 	int i, size, ralmax;
   3177 
   3178 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3179 		device_xname(sc->sc_dev), __func__));
   3180 
   3181 	if (sc->sc_type >= WM_T_82544)
   3182 		mta_reg = WMREG_CORDOVA_MTA;
   3183 	else
   3184 		mta_reg = WMREG_MTA;
   3185 
   3186 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3187 
   3188 	if (ifp->if_flags & IFF_BROADCAST)
   3189 		sc->sc_rctl |= RCTL_BAM;
   3190 	if (ifp->if_flags & IFF_PROMISC) {
   3191 		sc->sc_rctl |= RCTL_UPE;
   3192 		goto allmulti;
   3193 	}
   3194 
   3195 	/*
   3196 	 * Set the station address in the first RAL slot, and
   3197 	 * clear the remaining slots.
   3198 	 */
   3199 	if (sc->sc_type == WM_T_ICH8)
    3200 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3201 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3202 	    || (sc->sc_type == WM_T_PCH))
   3203 		size = WM_RAL_TABSIZE_ICH8;
   3204 	else if (sc->sc_type == WM_T_PCH2)
   3205 		size = WM_RAL_TABSIZE_PCH2;
    3206 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
   3207 		size = WM_RAL_TABSIZE_PCH_LPT;
   3208 	else if (sc->sc_type == WM_T_82575)
   3209 		size = WM_RAL_TABSIZE_82575;
   3210 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3211 		size = WM_RAL_TABSIZE_82576;
   3212 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3213 		size = WM_RAL_TABSIZE_I350;
   3214 	else
   3215 		size = WM_RAL_TABSIZE;
   3216 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3217 
   3218 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
   3219 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
   3220 		switch (i) {
   3221 		case 0:
   3222 			/* We can use all entries */
   3223 			ralmax = size;
   3224 			break;
   3225 		case 1:
   3226 			/* Only RAR[0] */
   3227 			ralmax = 1;
   3228 			break;
   3229 		default:
   3230 			/* available SHRA + RAR[0] */
   3231 			ralmax = i + 1;
   3232 		}
   3233 	} else
   3234 		ralmax = size;
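	/*
	 * Entries at or above ralmax may be locked by the management
	 * firmware (see FWSM_WLOCK_MAC above), so only clear below it.
	 */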
   3235 	for (i = 1; i < size; i++) {
   3236 		if (i < ralmax)
   3237 			wm_set_ral(sc, NULL, i);
   3238 	}
   3239 
   3240 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3241 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3242 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   3243 	    || (sc->sc_type == WM_T_PCH_SPT))
   3244 		size = WM_ICH8_MC_TABSIZE;
   3245 	else
   3246 		size = WM_MC_TABSIZE;
   3247 	/* Clear out the multicast table. */
   3248 	for (i = 0; i < size; i++)
   3249 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3250 
   3251 	ETHER_FIRST_MULTI(step, ec, enm);
   3252 	while (enm != NULL) {
   3253 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3254 			/*
   3255 			 * We must listen to a range of multicast addresses.
   3256 			 * For now, just accept all multicasts, rather than
   3257 			 * trying to set only those filter bits needed to match
   3258 			 * the range.  (At this time, the only use of address
   3259 			 * ranges is for IP multicast routing, for which the
   3260 			 * range is big enough to require all bits set.)
   3261 			 */
   3262 			goto allmulti;
   3263 		}
   3264 
   3265 		hash = wm_mchash(sc, enm->enm_addrlo);
   3266 
   3267 		reg = (hash >> 5);
   3268 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3269 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3270 		    || (sc->sc_type == WM_T_PCH2)
   3271 		    || (sc->sc_type == WM_T_PCH_LPT)
   3272 		    || (sc->sc_type == WM_T_PCH_SPT))
   3273 			reg &= 0x1f;
   3274 		else
   3275 			reg &= 0x7f;
   3276 		bit = hash & 0x1f;
   3277 
   3278 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3279 		hash |= 1U << bit;
   3280 
   3281 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3282 			/*
    3283 			 * 82544 Errata 9: Certain registers cannot be written
   3284 			 * with particular alignments in PCI-X bus operation
   3285 			 * (FCAH, MTA and VFTA).
   3286 			 */
   3287 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3288 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3289 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3290 		} else
   3291 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3292 
   3293 		ETHER_NEXT_MULTI(step, enm);
   3294 	}
   3295 
   3296 	ifp->if_flags &= ~IFF_ALLMULTI;
   3297 	goto setit;
   3298 
   3299  allmulti:
   3300 	ifp->if_flags |= IFF_ALLMULTI;
   3301 	sc->sc_rctl |= RCTL_MPE;
   3302 
   3303  setit:
   3304 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3305 }
   3306 
   3307 /* Reset and init related */
   3308 
   3309 static void
   3310 wm_set_vlan(struct wm_softc *sc)
   3311 {
   3312 
   3313 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3314 		device_xname(sc->sc_dev), __func__));
   3315 
   3316 	/* Deal with VLAN enables. */
   3317 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3318 		sc->sc_ctrl |= CTRL_VME;
   3319 	else
   3320 		sc->sc_ctrl &= ~CTRL_VME;
   3321 
   3322 	/* Write the control registers. */
   3323 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3324 }
   3325 
   3326 static void
   3327 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3328 {
   3329 	uint32_t gcr;
   3330 	pcireg_t ctrl2;
   3331 
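	/*
	 * The timeout is set through one of two mechanisms: GCR
	 * (10 ms) on devices without the v2 capability, or the PCIe
	 * DCSR2 config register (16 ms) on devices that have it.
	 */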
   3332 	gcr = CSR_READ(sc, WMREG_GCR);
   3333 
   3334 	/* Only take action if timeout value is defaulted to 0 */
   3335 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3336 		goto out;
   3337 
   3338 	if ((gcr & GCR_CAP_VER2) == 0) {
   3339 		gcr |= GCR_CMPL_TMOUT_10MS;
   3340 		goto out;
   3341 	}
   3342 
   3343 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3344 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3345 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3346 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3347 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3348 
   3349 out:
   3350 	/* Disable completion timeout resend */
   3351 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3352 
   3353 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3354 }
   3355 
   3356 void
   3357 wm_get_auto_rd_done(struct wm_softc *sc)
   3358 {
   3359 	int i;
   3360 
   3361 	/* wait for eeprom to reload */
   3362 	switch (sc->sc_type) {
   3363 	case WM_T_82571:
   3364 	case WM_T_82572:
   3365 	case WM_T_82573:
   3366 	case WM_T_82574:
   3367 	case WM_T_82583:
   3368 	case WM_T_82575:
   3369 	case WM_T_82576:
   3370 	case WM_T_82580:
   3371 	case WM_T_I350:
   3372 	case WM_T_I354:
   3373 	case WM_T_I210:
   3374 	case WM_T_I211:
   3375 	case WM_T_80003:
   3376 	case WM_T_ICH8:
   3377 	case WM_T_ICH9:
   3378 		for (i = 0; i < 10; i++) {
   3379 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3380 				break;
   3381 			delay(1000);
   3382 		}
   3383 		if (i == 10) {
   3384 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3385 			    "complete\n", device_xname(sc->sc_dev));
   3386 		}
   3387 		break;
   3388 	default:
   3389 		break;
   3390 	}
   3391 }
   3392 
   3393 void
   3394 wm_lan_init_done(struct wm_softc *sc)
   3395 {
   3396 	uint32_t reg = 0;
   3397 	int i;
   3398 
   3399 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3400 		device_xname(sc->sc_dev), __func__));
   3401 
   3402 	/* Wait for eeprom to reload */
   3403 	switch (sc->sc_type) {
   3404 	case WM_T_ICH10:
   3405 	case WM_T_PCH:
   3406 	case WM_T_PCH2:
   3407 	case WM_T_PCH_LPT:
   3408 	case WM_T_PCH_SPT:
   3409 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3410 			reg = CSR_READ(sc, WMREG_STATUS);
   3411 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3412 				break;
   3413 			delay(100);
   3414 		}
   3415 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3416 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3417 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3418 		}
   3419 		break;
   3420 	default:
   3421 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3422 		    __func__);
   3423 		break;
   3424 	}
   3425 
   3426 	reg &= ~STATUS_LAN_INIT_DONE;
   3427 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3428 }
   3429 
   3430 void
   3431 wm_get_cfg_done(struct wm_softc *sc)
   3432 {
   3433 	int mask;
   3434 	uint32_t reg;
   3435 	int i;
   3436 
   3437 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3438 		device_xname(sc->sc_dev), __func__));
   3439 
   3440 	/* Wait for eeprom to reload */
   3441 	switch (sc->sc_type) {
   3442 	case WM_T_82542_2_0:
   3443 	case WM_T_82542_2_1:
   3444 		/* null */
   3445 		break;
   3446 	case WM_T_82543:
   3447 	case WM_T_82544:
   3448 	case WM_T_82540:
   3449 	case WM_T_82545:
   3450 	case WM_T_82545_3:
   3451 	case WM_T_82546:
   3452 	case WM_T_82546_3:
   3453 	case WM_T_82541:
   3454 	case WM_T_82541_2:
   3455 	case WM_T_82547:
   3456 	case WM_T_82547_2:
   3457 	case WM_T_82573:
   3458 	case WM_T_82574:
   3459 	case WM_T_82583:
   3460 		/* generic */
   3461 		delay(10*1000);
   3462 		break;
   3463 	case WM_T_80003:
   3464 	case WM_T_82571:
   3465 	case WM_T_82572:
   3466 	case WM_T_82575:
   3467 	case WM_T_82576:
   3468 	case WM_T_82580:
   3469 	case WM_T_I350:
   3470 	case WM_T_I354:
   3471 	case WM_T_I210:
   3472 	case WM_T_I211:
   3473 		if (sc->sc_type == WM_T_82571) {
			/* All 82571 ports share the function 0 bit */
   3475 			mask = EEMNGCTL_CFGDONE_0;
   3476 		} else
   3477 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3478 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3479 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3480 				break;
   3481 			delay(1000);
   3482 		}
   3483 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3484 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3485 				device_xname(sc->sc_dev), __func__));
   3486 		}
   3487 		break;
   3488 	case WM_T_ICH8:
   3489 	case WM_T_ICH9:
   3490 	case WM_T_ICH10:
   3491 	case WM_T_PCH:
   3492 	case WM_T_PCH2:
   3493 	case WM_T_PCH_LPT:
   3494 	case WM_T_PCH_SPT:
   3495 		delay(10*1000);
   3496 		if (sc->sc_type >= WM_T_ICH10)
   3497 			wm_lan_init_done(sc);
   3498 		else
   3499 			wm_get_auto_rd_done(sc);
   3500 
   3501 		reg = CSR_READ(sc, WMREG_STATUS);
   3502 		if ((reg & STATUS_PHYRA) != 0)
   3503 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3504 		break;
   3505 	default:
   3506 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3507 		    __func__);
   3508 		break;
   3509 	}
   3510 }
   3511 
   3512 /* Init hardware bits */
   3513 void
   3514 wm_initialize_hardware_bits(struct wm_softc *sc)
   3515 {
   3516 	uint32_t tarc0, tarc1, reg;
   3517 
   3518 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3519 		device_xname(sc->sc_dev), __func__));
   3520 
   3521 	/* For 82571 variant, 80003 and ICHs */
   3522 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3523 	    || (sc->sc_type >= WM_T_80003)) {
   3524 
   3525 		/* Transmit Descriptor Control 0 */
   3526 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3527 		reg |= TXDCTL_COUNT_DESC;
   3528 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3529 
   3530 		/* Transmit Descriptor Control 1 */
   3531 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3532 		reg |= TXDCTL_COUNT_DESC;
   3533 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3534 
   3535 		/* TARC0 */
   3536 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3537 		switch (sc->sc_type) {
   3538 		case WM_T_82571:
   3539 		case WM_T_82572:
   3540 		case WM_T_82573:
   3541 		case WM_T_82574:
   3542 		case WM_T_82583:
   3543 		case WM_T_80003:
   3544 			/* Clear bits 30..27 */
   3545 			tarc0 &= ~__BITS(30, 27);
   3546 			break;
   3547 		default:
   3548 			break;
   3549 		}
   3550 
   3551 		switch (sc->sc_type) {
   3552 		case WM_T_82571:
   3553 		case WM_T_82572:
   3554 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3555 
   3556 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3557 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3558 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3559 			/* 8257[12] Errata No.7 */
    3560 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3561 
   3562 			/* TARC1 bit 28 */
   3563 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3564 				tarc1 &= ~__BIT(28);
   3565 			else
   3566 				tarc1 |= __BIT(28);
   3567 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3568 
   3569 			/*
   3570 			 * 8257[12] Errata No.13
    3571 			 * Disable Dynamic Clock Gating.
   3572 			 */
   3573 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3574 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3575 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3576 			break;
   3577 		case WM_T_82573:
   3578 		case WM_T_82574:
   3579 		case WM_T_82583:
   3580 			if ((sc->sc_type == WM_T_82574)
   3581 			    || (sc->sc_type == WM_T_82583))
   3582 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3583 
   3584 			/* Extended Device Control */
   3585 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3586 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3587 			reg |= __BIT(22);	/* Set bit 22 */
   3588 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3589 
   3590 			/* Device Control */
   3591 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3592 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3593 
   3594 			/* PCIe Control Register */
   3595 			/*
   3596 			 * 82573 Errata (unknown).
   3597 			 *
   3598 			 * 82574 Errata 25 and 82583 Errata 12
   3599 			 * "Dropped Rx Packets":
    3600 			 *   NVM image version 2.1.4 and newer do not have this bug.
   3601 			 */
   3602 			reg = CSR_READ(sc, WMREG_GCR);
   3603 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3604 			CSR_WRITE(sc, WMREG_GCR, reg);
   3605 
   3606 			if ((sc->sc_type == WM_T_82574)
   3607 			    || (sc->sc_type == WM_T_82583)) {
   3608 				/*
   3609 				 * Document says this bit must be set for
   3610 				 * proper operation.
   3611 				 */
   3612 				reg = CSR_READ(sc, WMREG_GCR);
   3613 				reg |= __BIT(22);
   3614 				CSR_WRITE(sc, WMREG_GCR, reg);
   3615 
   3616 				/*
    3617 				 * Apply a workaround for the hardware errata
    3618 				 * documented in the errata docs.  It fixes an
    3619 				 * issue where some error-prone or unreliable
    3620 				 * PCIe completions occur, particularly with
    3621 				 * ASPM enabled.  Without the fix, the issue
    3622 				 * can cause Tx timeouts.
   3623 				 */
   3624 				reg = CSR_READ(sc, WMREG_GCR2);
   3625 				reg |= __BIT(0);
   3626 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3627 			}
   3628 			break;
   3629 		case WM_T_80003:
   3630 			/* TARC0 */
   3631 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3632 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3633 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3634 
   3635 			/* TARC1 bit 28 */
   3636 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3637 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3638 				tarc1 &= ~__BIT(28);
   3639 			else
   3640 				tarc1 |= __BIT(28);
   3641 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3642 			break;
   3643 		case WM_T_ICH8:
   3644 		case WM_T_ICH9:
   3645 		case WM_T_ICH10:
   3646 		case WM_T_PCH:
   3647 		case WM_T_PCH2:
   3648 		case WM_T_PCH_LPT:
   3649 		case WM_T_PCH_SPT:
   3650 			/* TARC0 */
   3651 			if ((sc->sc_type == WM_T_ICH8)
   3652 			    || (sc->sc_type == WM_T_PCH_SPT)) {
   3653 				/* Set TARC0 bits 29 and 28 */
   3654 				tarc0 |= __BITS(29, 28);
   3655 			}
   3656 			/* Set TARC0 bits 23,24,26,27 */
   3657 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3658 
   3659 			/* CTRL_EXT */
   3660 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3661 			reg |= __BIT(22);	/* Set bit 22 */
   3662 			/*
   3663 			 * Enable PHY low-power state when MAC is at D3
   3664 			 * w/o WoL
   3665 			 */
   3666 			if (sc->sc_type >= WM_T_PCH)
   3667 				reg |= CTRL_EXT_PHYPDEN;
   3668 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3669 
   3670 			/* TARC1 */
   3671 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3672 			/* bit 28 */
   3673 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3674 				tarc1 &= ~__BIT(28);
   3675 			else
   3676 				tarc1 |= __BIT(28);
   3677 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3678 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3679 
   3680 			/* Device Status */
   3681 			if (sc->sc_type == WM_T_ICH8) {
   3682 				reg = CSR_READ(sc, WMREG_STATUS);
   3683 				reg &= ~__BIT(31);
   3684 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3685 
   3686 			}
   3687 
   3688 			/* IOSFPC */
   3689 			if (sc->sc_type == WM_T_PCH_SPT) {
   3690 				reg = CSR_READ(sc, WMREG_IOSFPC);
    3691 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
   3692 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
   3693 			}
   3694 			/*
    3695 			 * To work around a descriptor data corruption issue
    3696 			 * with NFSv2 UDP traffic, just disable the NFS
    3697 			 * filtering capability.
   3698 			 */
   3699 			reg = CSR_READ(sc, WMREG_RFCTL);
   3700 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3701 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3702 			break;
   3703 		default:
   3704 			break;
   3705 		}
   3706 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3707 
   3708 		/*
   3709 		 * 8257[12] Errata No.52 and some others.
   3710 		 * Avoid RSS Hash Value bug.
   3711 		 */
   3712 		switch (sc->sc_type) {
   3713 		case WM_T_82571:
   3714 		case WM_T_82572:
   3715 		case WM_T_82573:
   3716 		case WM_T_80003:
   3717 		case WM_T_ICH8:
   3718 			reg = CSR_READ(sc, WMREG_RFCTL);
   3719 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3720 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3721 			break;
   3722 		default:
   3723 			break;
   3724 		}
   3725 	}
   3726 }
   3727 
   3728 static uint32_t
   3729 wm_rxpbs_adjust_82580(uint32_t val)
   3730 {
   3731 	uint32_t rv = 0;
   3732 
   3733 	if (val < __arraycount(wm_82580_rxpbs_table))
   3734 		rv = wm_82580_rxpbs_table[val];
   3735 
   3736 	return rv;
   3737 }
   3738 
   3739 /*
   3740  * wm_reset:
   3741  *
   3742  *	Reset the i82542 chip.
   3743  */
   3744 static void
   3745 wm_reset(struct wm_softc *sc)
   3746 {
   3747 	int phy_reset = 0;
   3748 	int i, error = 0;
   3749 	uint32_t reg;
   3750 
   3751 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   3752 		device_xname(sc->sc_dev), __func__));
   3753 	KASSERT(sc->sc_type != 0);
   3754 
   3755 	/*
   3756 	 * Allocate on-chip memory according to the MTU size.
   3757 	 * The Packet Buffer Allocation register must be written
   3758 	 * before the chip is reset.
   3759 	 */
   3760 	switch (sc->sc_type) {
   3761 	case WM_T_82547:
   3762 	case WM_T_82547_2:
   3763 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3764 		    PBA_22K : PBA_30K;
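		/*
		 * e.g. with the default MTU, sc_pba = PBA_30K leaves
		 * 30 KB of packet buffer for Rx and the remaining
		 * 10 KB of the 40 KB total for the Tx FIFO below.
		 */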
   3765 		for (i = 0; i < sc->sc_nqueues; i++) {
   3766 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   3767 			txq->txq_fifo_head = 0;
   3768 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3769 			txq->txq_fifo_size =
   3770 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3771 			txq->txq_fifo_stall = 0;
   3772 		}
   3773 		break;
   3774 	case WM_T_82571:
   3775 	case WM_T_82572:
    3776 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3777 	case WM_T_80003:
   3778 		sc->sc_pba = PBA_32K;
   3779 		break;
   3780 	case WM_T_82573:
   3781 		sc->sc_pba = PBA_12K;
   3782 		break;
   3783 	case WM_T_82574:
   3784 	case WM_T_82583:
   3785 		sc->sc_pba = PBA_20K;
   3786 		break;
   3787 	case WM_T_82576:
   3788 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3789 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3790 		break;
   3791 	case WM_T_82580:
   3792 	case WM_T_I350:
   3793 	case WM_T_I354:
   3794 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3795 		break;
   3796 	case WM_T_I210:
   3797 	case WM_T_I211:
   3798 		sc->sc_pba = PBA_34K;
   3799 		break;
   3800 	case WM_T_ICH8:
   3801 		/* Workaround for a bit corruption issue in FIFO memory */
   3802 		sc->sc_pba = PBA_8K;
   3803 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3804 		break;
   3805 	case WM_T_ICH9:
   3806 	case WM_T_ICH10:
   3807 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3808 		    PBA_14K : PBA_10K;
   3809 		break;
   3810 	case WM_T_PCH:
   3811 	case WM_T_PCH2:
   3812 	case WM_T_PCH_LPT:
   3813 	case WM_T_PCH_SPT:
   3814 		sc->sc_pba = PBA_26K;
   3815 		break;
   3816 	default:
   3817 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3818 		    PBA_40K : PBA_48K;
   3819 		break;
   3820 	}
   3821 	/*
    3822 	 * Only old or non-multiqueue devices have the PBA register.
   3823 	 * XXX Need special handling for 82575.
   3824 	 */
   3825 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3826 	    || (sc->sc_type == WM_T_82575))
   3827 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3828 
   3829 	/* Prevent the PCI-E bus from sticking */
   3830 	if (sc->sc_flags & WM_F_PCIE) {
   3831 		int timeout = 800;
   3832 
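		/*
		 * CTRL_GIO_M_DIS blocks new master requests; the loop
		 * below then waits up to 80 ms (800 * 100 us) for
		 * outstanding requests to drain.
		 */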
   3833 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3834 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3835 
   3836 		while (timeout--) {
   3837 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3838 			    == 0)
   3839 				break;
   3840 			delay(100);
   3841 		}
   3842 	}
   3843 
   3844 	/* Set the completion timeout for interface */
   3845 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3846 	    || (sc->sc_type == WM_T_82580)
   3847 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3848 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3849 		wm_set_pcie_completion_timeout(sc);
   3850 
   3851 	/* Clear interrupt */
   3852 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3853 	if (sc->sc_nintrs > 1) {
   3854 		if (sc->sc_type != WM_T_82574) {
   3855 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3856 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3857 		} else {
   3858 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3859 		}
   3860 	}
   3861 
   3862 	/* Stop the transmit and receive processes. */
   3863 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3864 	sc->sc_rctl &= ~RCTL_EN;
   3865 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3866 	CSR_WRITE_FLUSH(sc);
   3867 
   3868 	/* XXX set_tbi_sbp_82543() */
   3869 
   3870 	delay(10*1000);
   3871 
   3872 	/* Must acquire the MDIO ownership before MAC reset */
   3873 	switch (sc->sc_type) {
   3874 	case WM_T_82573:
   3875 	case WM_T_82574:
   3876 	case WM_T_82583:
   3877 		error = wm_get_hw_semaphore_82573(sc);
   3878 		break;
   3879 	default:
   3880 		break;
   3881 	}
   3882 
   3883 	/*
   3884 	 * 82541 Errata 29? & 82547 Errata 28?
   3885 	 * See also the description about PHY_RST bit in CTRL register
   3886 	 * in 8254x_GBe_SDM.pdf.
   3887 	 */
   3888 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3889 		CSR_WRITE(sc, WMREG_CTRL,
   3890 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3891 		CSR_WRITE_FLUSH(sc);
   3892 		delay(5000);
   3893 	}
   3894 
   3895 	switch (sc->sc_type) {
   3896 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3897 	case WM_T_82541:
   3898 	case WM_T_82541_2:
   3899 	case WM_T_82547:
   3900 	case WM_T_82547_2:
   3901 		/*
   3902 		 * On some chipsets, a reset through a memory-mapped write
   3903 		 * cycle can cause the chip to reset before completing the
   3904 		 * write cycle.  This causes major headache that can be
   3905 		 * avoided by issuing the reset via indirect register writes
   3906 		 * through I/O space.
   3907 		 *
   3908 		 * So, if we successfully mapped the I/O BAR at attach time,
   3909 		 * use that.  Otherwise, try our luck with a memory-mapped
   3910 		 * reset.
   3911 		 */
   3912 		if (sc->sc_flags & WM_F_IOH_VALID)
   3913 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3914 		else
   3915 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3916 		break;
   3917 	case WM_T_82545_3:
   3918 	case WM_T_82546_3:
   3919 		/* Use the shadow control register on these chips. */
   3920 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3921 		break;
   3922 	case WM_T_80003:
   3923 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3924 		sc->phy.acquire(sc);
   3925 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3926 		sc->phy.release(sc);
   3927 		break;
   3928 	case WM_T_ICH8:
   3929 	case WM_T_ICH9:
   3930 	case WM_T_ICH10:
   3931 	case WM_T_PCH:
   3932 	case WM_T_PCH2:
   3933 	case WM_T_PCH_LPT:
   3934 	case WM_T_PCH_SPT:
   3935 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3936 		if (wm_phy_resetisblocked(sc) == false) {
   3937 			/*
   3938 			 * Gate automatic PHY configuration by hardware on
   3939 			 * non-managed 82579
   3940 			 */
   3941 			if ((sc->sc_type == WM_T_PCH2)
   3942 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3943 				== 0))
   3944 				wm_gate_hw_phy_config_ich8lan(sc, true);
   3945 
   3946 			reg |= CTRL_PHY_RESET;
   3947 			phy_reset = 1;
   3948 		} else
   3949 			printf("XXX reset is blocked!!!\n");
   3950 		sc->phy.acquire(sc);
   3951 		CSR_WRITE(sc, WMREG_CTRL, reg);
		/* Don't insert a completion barrier during reset */
   3953 		delay(20*1000);
   3954 		mutex_exit(sc->sc_ich_phymtx);
   3955 		break;
   3956 	case WM_T_82580:
   3957 	case WM_T_I350:
   3958 	case WM_T_I354:
   3959 	case WM_T_I210:
   3960 	case WM_T_I211:
   3961 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3962 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3963 			CSR_WRITE_FLUSH(sc);
   3964 		delay(5000);
   3965 		break;
   3966 	case WM_T_82542_2_0:
   3967 	case WM_T_82542_2_1:
   3968 	case WM_T_82543:
   3969 	case WM_T_82540:
   3970 	case WM_T_82545:
   3971 	case WM_T_82546:
   3972 	case WM_T_82571:
   3973 	case WM_T_82572:
   3974 	case WM_T_82573:
   3975 	case WM_T_82574:
   3976 	case WM_T_82575:
   3977 	case WM_T_82576:
   3978 	case WM_T_82583:
   3979 	default:
   3980 		/* Everything else can safely use the documented method. */
   3981 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3982 		break;
   3983 	}
   3984 
   3985 	/* Must release the MDIO ownership after MAC reset */
   3986 	switch (sc->sc_type) {
   3987 	case WM_T_82573:
   3988 	case WM_T_82574:
   3989 	case WM_T_82583:
   3990 		if (error == 0)
   3991 			wm_put_hw_semaphore_82573(sc);
   3992 		break;
   3993 	default:
   3994 		break;
   3995 	}
   3996 
   3997 	if (phy_reset != 0) {
   3998 		wm_get_cfg_done(sc);
   3999 		delay(10 * 1000);
   4000 		if (sc->sc_type >= WM_T_PCH) {
   4001 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
   4002 			    BM_PORT_GEN_CFG);
   4003 			reg &= ~BM_WUC_HOST_WU_BIT;
   4004 			wm_gmii_hv_writereg(sc->sc_dev, 2,
   4005 			    BM_PORT_GEN_CFG, reg);
   4006 		}
   4007 	}
   4008 
   4009 	/* reload EEPROM */
   4010 	switch (sc->sc_type) {
   4011 	case WM_T_82542_2_0:
   4012 	case WM_T_82542_2_1:
   4013 	case WM_T_82543:
   4014 	case WM_T_82544:
   4015 		delay(10);
   4016 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4017 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4018 		CSR_WRITE_FLUSH(sc);
   4019 		delay(2000);
   4020 		break;
   4021 	case WM_T_82540:
   4022 	case WM_T_82545:
   4023 	case WM_T_82545_3:
   4024 	case WM_T_82546:
   4025 	case WM_T_82546_3:
   4026 		delay(5*1000);
   4027 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4028 		break;
   4029 	case WM_T_82541:
   4030 	case WM_T_82541_2:
   4031 	case WM_T_82547:
   4032 	case WM_T_82547_2:
   4033 		delay(20000);
   4034 		/* XXX Disable HW ARPs on ASF enabled adapters */
   4035 		break;
   4036 	case WM_T_82571:
   4037 	case WM_T_82572:
   4038 	case WM_T_82573:
   4039 	case WM_T_82574:
   4040 	case WM_T_82583:
   4041 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   4042 			delay(10);
   4043 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   4044 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4045 			CSR_WRITE_FLUSH(sc);
   4046 		}
   4047 		/* check EECD_EE_AUTORD */
   4048 		wm_get_auto_rd_done(sc);
   4049 		/*
    4050 		 * PHY configuration from the NVM starts only after
    4051 		 * EECD_EE_AUTORD is set.
   4052 		 */
   4053 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   4054 		    || (sc->sc_type == WM_T_82583))
   4055 			delay(25*1000);
   4056 		break;
   4057 	case WM_T_82575:
   4058 	case WM_T_82576:
   4059 	case WM_T_82580:
   4060 	case WM_T_I350:
   4061 	case WM_T_I354:
   4062 	case WM_T_I210:
   4063 	case WM_T_I211:
   4064 	case WM_T_80003:
   4065 		/* check EECD_EE_AUTORD */
   4066 		wm_get_auto_rd_done(sc);
   4067 		break;
   4068 	case WM_T_ICH8:
   4069 	case WM_T_ICH9:
   4070 	case WM_T_ICH10:
   4071 	case WM_T_PCH:
   4072 	case WM_T_PCH2:
   4073 	case WM_T_PCH_LPT:
   4074 	case WM_T_PCH_SPT:
   4075 		break;
   4076 	default:
   4077 		panic("%s: unknown type\n", __func__);
   4078 	}
   4079 
   4080 	/* Check whether EEPROM is present or not */
   4081 	switch (sc->sc_type) {
   4082 	case WM_T_82575:
   4083 	case WM_T_82576:
   4084 	case WM_T_82580:
   4085 	case WM_T_I350:
   4086 	case WM_T_I354:
   4087 	case WM_T_ICH8:
   4088 	case WM_T_ICH9:
   4089 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   4090 			/* Not found */
   4091 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   4092 			if (sc->sc_type == WM_T_82575)
   4093 				wm_reset_init_script_82575(sc);
   4094 		}
   4095 		break;
   4096 	default:
   4097 		break;
   4098 	}
   4099 
   4100 	if ((sc->sc_type == WM_T_82580)
   4101 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   4102 		/* clear global device reset status bit */
   4103 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   4104 	}
   4105 
   4106 	/* Clear any pending interrupt events. */
   4107 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4108 	reg = CSR_READ(sc, WMREG_ICR);
   4109 	if (sc->sc_nintrs > 1) {
   4110 		if (sc->sc_type != WM_T_82574) {
   4111 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   4112 			CSR_WRITE(sc, WMREG_EIAC, 0);
   4113 		} else
   4114 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   4115 	}
   4116 
   4117 	/* reload sc_ctrl */
   4118 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   4119 
   4120 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   4121 		wm_set_eee_i350(sc);
   4122 
   4123 	/* dummy read from WUC */
   4124 	if (sc->sc_type == WM_T_PCH)
   4125 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   4126 	/*
   4127 	 * For PCH, this write will make sure that any noise will be detected
   4128 	 * as a CRC error and be dropped rather than show up as a bad packet
   4129 	 * to the DMA engine
   4130 	 */
   4131 	if (sc->sc_type == WM_T_PCH)
   4132 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   4133 
   4134 	if (sc->sc_type >= WM_T_82544)
   4135 		CSR_WRITE(sc, WMREG_WUC, 0);
   4136 
   4137 	wm_reset_mdicnfg_82580(sc);
   4138 
   4139 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   4140 		wm_pll_workaround_i210(sc);
   4141 }
   4142 
   4143 /*
   4144  * wm_add_rxbuf:
   4145  *
    4146  *	Add a receive buffer to the indicated descriptor.
   4147  */
   4148 static int
   4149 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   4150 {
   4151 	struct wm_softc *sc = rxq->rxq_sc;
   4152 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   4153 	struct mbuf *m;
   4154 	int error;
   4155 
   4156 	KASSERT(mutex_owned(rxq->rxq_lock));
   4157 
   4158 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   4159 	if (m == NULL)
   4160 		return ENOBUFS;
   4161 
   4162 	MCLGET(m, M_DONTWAIT);
   4163 	if ((m->m_flags & M_EXT) == 0) {
   4164 		m_freem(m);
   4165 		return ENOBUFS;
   4166 	}
   4167 
   4168 	if (rxs->rxs_mbuf != NULL)
   4169 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4170 
   4171 	rxs->rxs_mbuf = m;
   4172 
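	/*
	 * Hand the entire cluster (m_ext.ext_size, normally MCLBYTES)
	 * to the DMA map so the chip can use the full buffer.
	 */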
   4173 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   4174 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   4175 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
   4176 	if (error) {
   4177 		/* XXX XXX XXX */
   4178 		aprint_error_dev(sc->sc_dev,
   4179 		    "unable to load rx DMA map %d, error = %d\n",
   4180 		    idx, error);
   4181 		panic("wm_add_rxbuf");
   4182 	}
   4183 
   4184 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   4185 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   4186 
   4187 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4188 		if ((sc->sc_rctl & RCTL_EN) != 0)
   4189 			wm_init_rxdesc(rxq, idx);
   4190 	} else
   4191 		wm_init_rxdesc(rxq, idx);
   4192 
   4193 	return 0;
   4194 }
   4195 
   4196 /*
   4197  * wm_rxdrain:
   4198  *
   4199  *	Drain the receive queue.
   4200  */
   4201 static void
   4202 wm_rxdrain(struct wm_rxqueue *rxq)
   4203 {
   4204 	struct wm_softc *sc = rxq->rxq_sc;
   4205 	struct wm_rxsoft *rxs;
   4206 	int i;
   4207 
   4208 	KASSERT(mutex_owned(rxq->rxq_lock));
   4209 
   4210 	for (i = 0; i < WM_NRXDESC; i++) {
   4211 		rxs = &rxq->rxq_soft[i];
   4212 		if (rxs->rxs_mbuf != NULL) {
   4213 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   4214 			m_freem(rxs->rxs_mbuf);
   4215 			rxs->rxs_mbuf = NULL;
   4216 		}
   4217 	}
   4218 }
   4219 
   4220 
   4221 /*
   4222  * XXX copy from FreeBSD's sys/net/rss_config.c
   4223  */
   4224 /*
   4225  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4226  * effectiveness may be limited by algorithm choice and available entropy
   4227  * during the boot.
   4228  *
   4229  * XXXRW: And that we don't randomize it yet!
   4230  *
   4231  * This is the default Microsoft RSS specification key which is also
   4232  * the Chelsio T5 firmware default key.
   4233  */
   4234 #define RSS_KEYSIZE 40
   4235 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4236 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4237 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4238 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4239 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4240 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4241 };
   4242 
   4243 /*
   4244  * Caller must pass an array of size sizeof(rss_key).
   4245  *
   4246  * XXX
    4247  * As if_ixgbe may also use this function, it should not be an
    4248  * if_wm specific function.
   4249  */
   4250 static void
   4251 wm_rss_getkey(uint8_t *key)
   4252 {
   4253 
   4254 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4255 }
   4256 
   4257 /*
    4258  * Set up registers for RSS.
    4259  *
    4260  * XXX VMDq is not supported yet.
   4261  */
   4262 static void
   4263 wm_init_rss(struct wm_softc *sc)
   4264 {
   4265 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4266 	int i;
   4267 
   4268 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4269 
   4270 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4271 		int qid, reta_ent;
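	/*
	 * Fill the redirection table round-robin; e.g. with
	 * sc_nqueues == 4 the entries cycle 0,1,2,3,0,1,...,
	 * spreading RSS hash buckets evenly across the queues.
	 */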
   4272 
   4273 		qid  = i % sc->sc_nqueues;
		switch (sc->sc_type) {
   4275 		case WM_T_82574:
   4276 			reta_ent = __SHIFTIN(qid,
   4277 			    RETA_ENT_QINDEX_MASK_82574);
   4278 			break;
   4279 		case WM_T_82575:
   4280 			reta_ent = __SHIFTIN(qid,
   4281 			    RETA_ENT_QINDEX1_MASK_82575);
   4282 			break;
   4283 		default:
   4284 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4285 			break;
   4286 		}
   4287 
   4288 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4289 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4290 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4291 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4292 	}
   4293 
   4294 	wm_rss_getkey((uint8_t *)rss_key);
   4295 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4296 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4297 
   4298 	if (sc->sc_type == WM_T_82574)
   4299 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4300 	else
   4301 		mrqc = MRQC_ENABLE_RSS_MQ;
   4302 
    4303 	/*
    4304 	 * XXX The same as FreeBSD igb.
    4305 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4306 	 */
   4307 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4308 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4309 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4310 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4311 
   4312 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4313 }
   4314 
   4315 /*
    4316  * Adjust the number of TX and RX queues which the system actually uses.
    4317  *
    4318  * The numbers are affected by the following parameters:
    4319  *     - The number of hardware queues
   4320  *     - The number of MSI-X vectors (= "nvectors" argument)
   4321  *     - ncpu
   4322  */
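/*
 * For example (hypothetical configuration): an 82576 (16 hardware queues)
 * probed with nvectors == 5 on an 8-CPU machine ends up with
 * sc_nqueues == 4, since one vector is reserved for the link interrupt
 * and only nvectors - 1 remain for the Tx/Rx queue pairs.
 */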
   4323 static void
   4324 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4325 {
   4326 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
   4327 
   4328 	if (nvectors < 2) {
   4329 		sc->sc_nqueues = 1;
   4330 		return;
   4331 	}
   4332 
	switch (sc->sc_type) {
   4334 	case WM_T_82572:
   4335 		hw_ntxqueues = 2;
   4336 		hw_nrxqueues = 2;
   4337 		break;
   4338 	case WM_T_82574:
   4339 		hw_ntxqueues = 2;
   4340 		hw_nrxqueues = 2;
   4341 		break;
   4342 	case WM_T_82575:
   4343 		hw_ntxqueues = 4;
   4344 		hw_nrxqueues = 4;
   4345 		break;
   4346 	case WM_T_82576:
   4347 		hw_ntxqueues = 16;
   4348 		hw_nrxqueues = 16;
   4349 		break;
   4350 	case WM_T_82580:
   4351 	case WM_T_I350:
   4352 	case WM_T_I354:
   4353 		hw_ntxqueues = 8;
   4354 		hw_nrxqueues = 8;
   4355 		break;
   4356 	case WM_T_I210:
   4357 		hw_ntxqueues = 4;
   4358 		hw_nrxqueues = 4;
   4359 		break;
   4360 	case WM_T_I211:
   4361 		hw_ntxqueues = 2;
   4362 		hw_nrxqueues = 2;
   4363 		break;
   4364 		/*
    4365 		 * As the Ethernet controllers below do not support MSI-X,
    4366 		 * this driver does not use multiqueue for them:
   4367 		 *     - WM_T_80003
   4368 		 *     - WM_T_ICH8
   4369 		 *     - WM_T_ICH9
   4370 		 *     - WM_T_ICH10
   4371 		 *     - WM_T_PCH
   4372 		 *     - WM_T_PCH2
   4373 		 *     - WM_T_PCH_LPT
   4374 		 */
   4375 	default:
   4376 		hw_ntxqueues = 1;
   4377 		hw_nrxqueues = 1;
   4378 		break;
   4379 	}
   4380 
   4381 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
   4382 
   4383 	/*
    4384 	 * As using more queues than MSI-X vectors cannot improve scaling,
    4385 	 * we limit the number of queues actually used.
   4386 	 */
   4387 	if (nvectors < hw_nqueues + 1) {
   4388 		sc->sc_nqueues = nvectors - 1;
   4389 	} else {
   4390 		sc->sc_nqueues = hw_nqueues;
   4391 	}
   4392 
   4393 	/*
    4394 	 * As using more queues than CPUs cannot improve scaling, we
    4395 	 * limit the number of queues actually used.
   4396 	 */
   4397 	if (ncpu < sc->sc_nqueues)
   4398 		sc->sc_nqueues = ncpu;
   4399 }
   4400 
   4401 /*
   4402  * Both single interrupt MSI and INTx can use this function.
   4403  */
   4404 static int
   4405 wm_setup_legacy(struct wm_softc *sc)
   4406 {
   4407 	pci_chipset_tag_t pc = sc->sc_pc;
   4408 	const char *intrstr = NULL;
   4409 	char intrbuf[PCI_INTRSTR_LEN];
   4410 	int error;
   4411 
   4412 	error = wm_alloc_txrx_queues(sc);
   4413 	if (error) {
   4414 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4415 		    error);
   4416 		return ENOMEM;
   4417 	}
   4418 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4419 	    sizeof(intrbuf));
   4420 #ifdef WM_MPSAFE
   4421 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4422 #endif
   4423 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4424 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4425 	if (sc->sc_ihs[0] == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4427 		    (pci_intr_type(pc, sc->sc_intrs[0])
   4428 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4429 		return ENOMEM;
   4430 	}
   4431 
   4432 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4433 	sc->sc_nintrs = 1;
   4434 	return 0;
   4435 }
   4436 
   4437 static int
   4438 wm_setup_msix(struct wm_softc *sc)
   4439 {
   4440 	void *vih;
   4441 	kcpuset_t *affinity;
   4442 	int qidx, error, intr_idx, txrx_established;
   4443 	pci_chipset_tag_t pc = sc->sc_pc;
   4444 	const char *intrstr = NULL;
   4445 	char intrbuf[PCI_INTRSTR_LEN];
   4446 	char intr_xname[INTRDEVNAMEBUF];
   4447 
   4448 	if (sc->sc_nqueues < ncpu) {
   4449 		/*
   4450 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
    4451 		 * interrupts starts from CPU#1.
   4452 		 */
   4453 		sc->sc_affinity_offset = 1;
   4454 	} else {
   4455 		/*
    4456 		 * In this case, this device uses all CPUs. For readability,
    4457 		 * we match the affinity cpu_index to the MSI-X vector number.
   4458 		 */
   4459 		sc->sc_affinity_offset = 0;
   4460 	}
   4461 
   4462 	error = wm_alloc_txrx_queues(sc);
   4463 	if (error) {
   4464 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4465 		    error);
   4466 		return ENOMEM;
   4467 	}
   4468 
   4469 	kcpuset_create(&affinity, false);
   4470 	intr_idx = 0;
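	/*
	 * Vector layout: vectors 0 .. sc_nqueues - 1 serve the
	 * per-queue Tx/Rx handlers and vector sc_nqueues serves the
	 * link interrupt, hence sc_nintrs = sc_nqueues + 1 below.
	 */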
   4471 
   4472 	/*
   4473 	 * TX and RX
   4474 	 */
   4475 	txrx_established = 0;
   4476 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   4477 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4478 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
   4479 
   4480 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4481 		    sizeof(intrbuf));
   4482 #ifdef WM_MPSAFE
   4483 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4484 		    PCI_INTR_MPSAFE, true);
   4485 #endif
   4486 		memset(intr_xname, 0, sizeof(intr_xname));
   4487 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
   4488 		    device_xname(sc->sc_dev), qidx);
   4489 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4490 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
   4491 		if (vih == NULL) {
   4492 			aprint_error_dev(sc->sc_dev,
   4493 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
   4494 			    intrstr ? " at " : "",
   4495 			    intrstr ? intrstr : "");
   4496 
   4497 			goto fail;
   4498 		}
   4499 		kcpuset_zero(affinity);
   4500 		/* Round-robin affinity */
   4501 		kcpuset_set(affinity, affinity_to);
   4502 		error = interrupt_distribute(vih, affinity, NULL);
   4503 		if (error == 0) {
   4504 			aprint_normal_dev(sc->sc_dev,
   4505 			    "for TX and RX interrupting at %s affinity to %u\n",
   4506 			    intrstr, affinity_to);
   4507 		} else {
   4508 			aprint_normal_dev(sc->sc_dev,
   4509 			    "for TX and RX interrupting at %s\n", intrstr);
   4510 		}
   4511 		sc->sc_ihs[intr_idx] = vih;
		wmq->wmq_id = qidx;
   4513 		wmq->wmq_intr_idx = intr_idx;
   4514 
   4515 		txrx_established++;
   4516 		intr_idx++;
   4517 	}
   4518 
   4519 	/*
   4520 	 * LINK
   4521 	 */
   4522 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4523 	    sizeof(intrbuf));
   4524 #ifdef WM_MPSAFE
   4525 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
   4526 #endif
   4527 	memset(intr_xname, 0, sizeof(intr_xname));
   4528 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4529 	    device_xname(sc->sc_dev));
   4530 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4531 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4532 	if (vih == NULL) {
   4533 		aprint_error_dev(sc->sc_dev,
   4534 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4535 		    intrstr ? " at " : "",
   4536 		    intrstr ? intrstr : "");
   4537 
   4538 		goto fail;
   4539 	}
	/* Keep the default affinity for the LINK interrupt */
   4541 	aprint_normal_dev(sc->sc_dev,
   4542 	    "for LINK interrupting at %s\n", intrstr);
   4543 	sc->sc_ihs[intr_idx] = vih;
   4544 	sc->sc_link_intr_idx = intr_idx;
   4545 
   4546 	sc->sc_nintrs = sc->sc_nqueues + 1;
   4547 	kcpuset_destroy(affinity);
   4548 	return 0;
   4549 
   4550  fail:
   4551 	for (qidx = 0; qidx < txrx_established; qidx++) {
   4552 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   4553 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
   4554 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
   4555 	}
   4556 
   4557 	kcpuset_destroy(affinity);
   4558 	return ENOMEM;
   4559 }
   4560 
   4561 static void
   4562 wm_turnon(struct wm_softc *sc)
   4563 {
   4564 	int i;
   4565 
	for (i = 0; i < sc->sc_nqueues; i++) {
   4567 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4568 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4569 
   4570 		mutex_enter(txq->txq_lock);
   4571 		txq->txq_stopping = false;
   4572 		mutex_exit(txq->txq_lock);
   4573 
   4574 		mutex_enter(rxq->rxq_lock);
   4575 		rxq->rxq_stopping = false;
   4576 		mutex_exit(rxq->rxq_lock);
   4577 	}
   4578 
   4579 	WM_CORE_LOCK(sc);
   4580 	sc->sc_core_stopping = false;
   4581 	WM_CORE_UNLOCK(sc);
   4582 }
   4583 
   4584 static void
   4585 wm_turnoff(struct wm_softc *sc)
   4586 {
   4587 	int i;
   4588 
   4589 	WM_CORE_LOCK(sc);
   4590 	sc->sc_core_stopping = true;
   4591 	WM_CORE_UNLOCK(sc);
   4592 
	for (i = 0; i < sc->sc_nqueues; i++) {
   4594 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   4595 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   4596 
   4597 		mutex_enter(rxq->rxq_lock);
   4598 		rxq->rxq_stopping = true;
   4599 		mutex_exit(rxq->rxq_lock);
   4600 
   4601 		mutex_enter(txq->txq_lock);
   4602 		txq->txq_stopping = true;
   4603 		mutex_exit(txq->txq_lock);
   4604 	}
   4605 }
   4606 
   4607 /*
   4608  * wm_init:		[ifnet interface function]
   4609  *
   4610  *	Initialize the interface.
   4611  */
   4612 static int
   4613 wm_init(struct ifnet *ifp)
   4614 {
   4615 	struct wm_softc *sc = ifp->if_softc;
   4616 	int ret;
   4617 
   4618 	WM_CORE_LOCK(sc);
   4619 	ret = wm_init_locked(ifp);
   4620 	WM_CORE_UNLOCK(sc);
   4621 
   4622 	return ret;
   4623 }
   4624 
   4625 static int
   4626 wm_init_locked(struct ifnet *ifp)
   4627 {
   4628 	struct wm_softc *sc = ifp->if_softc;
   4629 	int i, j, trynum, error = 0;
   4630 	uint32_t reg;
   4631 
   4632 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   4633 		device_xname(sc->sc_dev), __func__));
   4634 	KASSERT(WM_CORE_LOCKED(sc));
   4635 
   4636 	/*
    4637 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4638 	 * There is a small but measurable benefit to avoiding the adjustment
   4639 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4640 	 * on such platforms.  One possibility is that the DMA itself is
   4641 	 * slightly more efficient if the front of the entire packet (instead
   4642 	 * of the front of the headers) is aligned.
   4643 	 *
   4644 	 * Note we must always set align_tweak to 0 if we are using
   4645 	 * jumbo frames.
   4646 	 */
   4647 #ifdef __NO_STRICT_ALIGNMENT
   4648 	sc->sc_align_tweak = 0;
   4649 #else
   4650 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4651 		sc->sc_align_tweak = 0;
   4652 	else
   4653 		sc->sc_align_tweak = 2;
   4654 #endif /* __NO_STRICT_ALIGNMENT */
   4655 
   4656 	/* Cancel any pending I/O. */
   4657 	wm_stop_locked(ifp, 0);
   4658 
   4659 	/* update statistics before reset */
   4660 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4661 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4662 
   4663 	/* Reset the chip to a known state. */
   4664 	wm_reset(sc);
   4665 
   4666 	switch (sc->sc_type) {
   4667 	case WM_T_82571:
   4668 	case WM_T_82572:
   4669 	case WM_T_82573:
   4670 	case WM_T_82574:
   4671 	case WM_T_82583:
   4672 	case WM_T_80003:
   4673 	case WM_T_ICH8:
   4674 	case WM_T_ICH9:
   4675 	case WM_T_ICH10:
   4676 	case WM_T_PCH:
   4677 	case WM_T_PCH2:
   4678 	case WM_T_PCH_LPT:
   4679 	case WM_T_PCH_SPT:
    4680 		/* AMT-based hardware can now take control from firmware */
   4681 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4682 			wm_get_hw_control(sc);
   4683 		break;
   4684 	default:
   4685 		break;
   4686 	}
   4687 
   4688 	/* Init hardware bits */
   4689 	wm_initialize_hardware_bits(sc);
   4690 
   4691 	/* Reset the PHY. */
   4692 	if (sc->sc_flags & WM_F_HAS_MII)
   4693 		wm_gmii_reset(sc);
   4694 
   4695 	/* Calculate (E)ITR value */
   4696 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4697 		sc->sc_itr = 450;	/* For EITR */
   4698 	} else if (sc->sc_type >= WM_T_82543) {
   4699 		/*
   4700 		 * Set up the interrupt throttling register (units of 256ns)
   4701 		 * Note that a footnote in Intel's documentation says this
   4702 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4703 		 * or 10Mbit mode.  Empirically, this also appears to hold
    4704 		 * for the 1024ns units of the other
   4705 		 * interrupt-related timer registers -- so, really, we ought
   4706 		 * to divide this value by 4 when the link speed is low.
   4707 		 *
   4708 		 * XXX implement this division at link speed change!
   4709 		 */
   4710 
   4711 		/*
   4712 		 * For N interrupts/sec, set this value to:
   4713 		 * 1000000000 / (N * 256).  Note that we set the
   4714 		 * absolute and packet timer values to this value
   4715 		 * divided by 4 to get "simple timer" behavior.
   4716 		 */
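         		/* e.g. sc_itr = 1500: 1000000000 / (1500 * 256) ~= 2604. */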
   4717 
   4718 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4719 	}
   4720 
   4721 	error = wm_init_txrx_queues(sc);
   4722 	if (error)
   4723 		goto out;
   4724 
   4725 	/*
   4726 	 * Clear out the VLAN table -- we don't use it (yet).
   4727 	 */
   4728 	CSR_WRITE(sc, WMREG_VET, 0);
   4729 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4730 		trynum = 10; /* Due to hw errata */
   4731 	else
   4732 		trynum = 1;
   4733 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4734 		for (j = 0; j < trynum; j++)
   4735 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4736 
   4737 	/*
   4738 	 * Set up flow-control parameters.
   4739 	 *
   4740 	 * XXX Values could probably stand some tuning.
   4741 	 */
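         	/*
         	 * FCAL/FCAH hold the 802.3x PAUSE destination address
         	 * (01:80:c2:00:00:01) and FCT the PAUSE Ethertype (0x8808).
         	 */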
   4742 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4743 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4744 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
   4745 	    && (sc->sc_type != WM_T_PCH_SPT)) {
   4746 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4747 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4748 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4749 	}
   4750 
   4751 	sc->sc_fcrtl = FCRTL_DFLT;
   4752 	if (sc->sc_type < WM_T_82543) {
   4753 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4754 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4755 	} else {
   4756 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4757 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4758 	}
   4759 
   4760 	if (sc->sc_type == WM_T_80003)
   4761 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4762 	else
   4763 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4764 
    4765 	/* Set up VLAN handling; this writes the control register. */
   4766 	wm_set_vlan(sc);
   4767 
   4768 	if (sc->sc_flags & WM_F_HAS_MII) {
   4769 		int val;
   4770 
   4771 		switch (sc->sc_type) {
   4772 		case WM_T_80003:
   4773 		case WM_T_ICH8:
   4774 		case WM_T_ICH9:
   4775 		case WM_T_ICH10:
   4776 		case WM_T_PCH:
   4777 		case WM_T_PCH2:
   4778 		case WM_T_PCH_LPT:
   4779 		case WM_T_PCH_SPT:
   4780 			/*
   4781 			 * Set the mac to wait the maximum time between each
   4782 			 * iteration and increase the max iterations when
   4783 			 * polling the phy; this fixes erroneous timeouts at
   4784 			 * 10Mbps.
   4785 			 */
   4786 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4787 			    0xFFFF);
   4788 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
   4789 			val |= 0x3F;
   4790 			wm_kmrn_writereg(sc,
   4791 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4792 			break;
   4793 		default:
   4794 			break;
   4795 		}
   4796 
   4797 		if (sc->sc_type == WM_T_80003) {
   4798 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4799 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4800 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4801 
   4802 			/* Bypass RX and TX FIFO's */
   4803 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4804 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4805 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4806 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4807 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4808 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4809 		}
   4810 	}
   4811 #if 0
   4812 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4813 #endif
   4814 
   4815 	/* Set up checksum offload parameters. */
   4816 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4817 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4818 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4819 		reg |= RXCSUM_IPOFL;
   4820 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4821 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4822 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4823 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4824 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4825 
   4826 	/* Set up MSI-X */
   4827 	if (sc->sc_nintrs > 1) {
   4828 		uint32_t ivar;
   4829 		struct wm_queue *wmq;
   4830 		int qid, qintr_idx;
   4831 
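         		/*
         		 * Bind each queue's TX/RX events to its MSI-X vector.
         		 * 82575 uses the MSIXBM bitmap registers, 82574 a single
         		 * packed IVAR, and 82576 and later per-queue IVARs.
         		 */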
   4832 		if (sc->sc_type == WM_T_82575) {
   4833 			/* Interrupt control */
   4834 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4835 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4836 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4837 
   4838 			/* TX and RX */
   4839 			for (i = 0; i < sc->sc_nqueues; i++) {
   4840 				wmq = &sc->sc_queue[i];
   4841 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
   4842 				    EITR_TX_QUEUE(wmq->wmq_id)
   4843 				    | EITR_RX_QUEUE(wmq->wmq_id));
   4844 			}
   4845 			/* Link status */
   4846 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4847 			    EITR_OTHER);
   4848 		} else if (sc->sc_type == WM_T_82574) {
   4849 			/* Interrupt control */
   4850 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4851 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4852 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4853 
   4854 			ivar = 0;
   4855 			/* TX and RX */
   4856 			for (i = 0; i < sc->sc_nqueues; i++) {
   4857 				wmq = &sc->sc_queue[i];
   4858 				qid = wmq->wmq_id;
   4859 				qintr_idx = wmq->wmq_intr_idx;
   4860 
   4861 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4862 				    IVAR_TX_MASK_Q_82574(qid));
   4863 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
   4864 				    IVAR_RX_MASK_Q_82574(qid));
   4865 			}
   4866 			/* Link status */
   4867 			ivar |= __SHIFTIN((IVAR_VALID_82574
   4868 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
   4869 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4870 		} else {
   4871 			/* Interrupt control */
   4872 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
   4873 			    | GPIE_EIAME | GPIE_PBA);
   4874 
   4875 			switch (sc->sc_type) {
   4876 			case WM_T_82580:
   4877 			case WM_T_I350:
   4878 			case WM_T_I354:
   4879 			case WM_T_I210:
   4880 			case WM_T_I211:
   4881 				/* TX and RX */
   4882 				for (i = 0; i < sc->sc_nqueues; i++) {
   4883 					wmq = &sc->sc_queue[i];
   4884 					qid = wmq->wmq_id;
   4885 					qintr_idx = wmq->wmq_intr_idx;
   4886 
   4887 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4888 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4889 					ivar |= __SHIFTIN((qintr_idx
   4890 						| IVAR_VALID),
   4891 					    IVAR_TX_MASK_Q(qid));
   4892 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4893 					ivar |= __SHIFTIN((qintr_idx
   4894 						| IVAR_VALID),
   4895 					    IVAR_RX_MASK_Q(qid));
   4896 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4897 				}
   4898 				break;
   4899 			case WM_T_82576:
   4900 				/* TX and RX */
   4901 				for (i = 0; i < sc->sc_nqueues; i++) {
   4902 					wmq = &sc->sc_queue[i];
   4903 					qid = wmq->wmq_id;
   4904 					qintr_idx = wmq->wmq_intr_idx;
   4905 
   4906 					ivar = CSR_READ(sc,
   4907 					    WMREG_IVAR_Q_82576(qid));
   4908 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4909 					ivar |= __SHIFTIN((qintr_idx
   4910 						| IVAR_VALID),
   4911 					    IVAR_TX_MASK_Q_82576(qid));
   4912 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4913 					ivar |= __SHIFTIN((qintr_idx
   4914 						| IVAR_VALID),
   4915 					    IVAR_RX_MASK_Q_82576(qid));
   4916 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
   4917 					    ivar);
   4918 				}
   4919 				break;
   4920 			default:
   4921 				break;
   4922 			}
   4923 
   4924 			/* Link status */
   4925 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4926 			    IVAR_MISC_OTHER);
   4927 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4928 		}
   4929 
   4930 		if (sc->sc_nqueues > 1) {
   4931 			wm_init_rss(sc);
   4932 
   4933 			/*
    4934 			 * NOTE: Receive Full-Packet Checksum Offload
    4935 			 * is mutually exclusive with Multiqueue; however,
    4936 			 * this does not affect TCP/IP checksum offload,
    4937 			 * which still works.
   4938 			*/
   4939 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4940 			reg |= RXCSUM_PCSD;
   4941 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4942 		}
   4943 	}
   4944 
   4945 	/* Set up the interrupt registers. */
   4946 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4947 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4948 	    ICR_RXO | ICR_RXT0;
   4949 	if (sc->sc_nintrs > 1) {
   4950 		uint32_t mask;
   4951 		struct wm_queue *wmq;
   4952 
   4953 		switch (sc->sc_type) {
   4954 		case WM_T_82574:
   4955 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4956 			    WMREG_EIAC_82574_MSIX_MASK);
   4957 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4958 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4959 			break;
   4960 		default:
   4961 			if (sc->sc_type == WM_T_82575) {
   4962 				mask = 0;
   4963 				for (i = 0; i < sc->sc_nqueues; i++) {
   4964 					wmq = &sc->sc_queue[i];
   4965 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
   4966 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
   4967 				}
   4968 				mask |= EITR_OTHER;
   4969 			} else {
   4970 				mask = 0;
   4971 				for (i = 0; i < sc->sc_nqueues; i++) {
   4972 					wmq = &sc->sc_queue[i];
   4973 					mask |= 1 << wmq->wmq_intr_idx;
   4974 				}
   4975 				mask |= 1 << sc->sc_link_intr_idx;
   4976 			}
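         			/*
         			 * On these MACs, EIAC selects the vectors
         			 * auto-cleared on interrupt, EIAM the ones
         			 * auto-masked, and EIMS enables them; LSC
         			 * stays on the legacy IMS path.
         			 */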
   4977 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4978 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4979 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4980 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4981 			break;
   4982 		}
   4983 	} else
   4984 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4985 
   4986 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4987 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4988 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
   4989 	    || (sc->sc_type == WM_T_PCH_SPT)) {
   4990 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4991 		reg |= KABGTXD_BGSQLBIAS;
   4992 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4993 	}
   4994 
   4995 	/* Set up the inter-packet gap. */
   4996 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4997 
   4998 	if (sc->sc_type >= WM_T_82543) {
   4999 		/*
    5000 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
    5001 		 * the multiqueue function with MSI-X.
   5002 		 */
   5003 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5004 			int qidx;
   5005 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5006 				struct wm_queue *wmq = &sc->sc_queue[qidx];
   5007 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
   5008 				    sc->sc_itr);
   5009 			}
   5010 			/*
    5011 			 * Link interrupts occur much less frequently than
    5012 			 * TX and RX interrupts, so we don't tune the
    5013 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    5014 			 * FreeBSD's if_igb does.
   5015 			 */
   5016 		} else
   5017 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   5018 	}
   5019 
   5020 	/* Set the VLAN ethernetype. */
   5021 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   5022 
   5023 	/*
   5024 	 * Set up the transmit control register; we start out with
    5025 	 * a collision distance suitable for FDX, but update it when
   5026 	 * we resolve the media type.
   5027 	 */
   5028 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   5029 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   5030 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   5031 	if (sc->sc_type >= WM_T_82571)
   5032 		sc->sc_tctl |= TCTL_MULR;
   5033 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   5034 
   5035 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5036 		/* Write TDT after TCTL.EN is set. See the document. */
   5037 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   5038 	}
   5039 
   5040 	if (sc->sc_type == WM_T_80003) {
   5041 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   5042 		reg &= ~TCTL_EXT_GCEX_MASK;
   5043 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   5044 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   5045 	}
   5046 
   5047 	/* Set the media. */
   5048 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   5049 		goto out;
   5050 
   5051 	/* Configure for OS presence */
   5052 	wm_init_manageability(sc);
   5053 
   5054 	/*
   5055 	 * Set up the receive control register; we actually program
   5056 	 * the register when we set the receive filter.  Use multicast
   5057 	 * address offset type 0.
   5058 	 *
   5059 	 * Only the i82544 has the ability to strip the incoming
   5060 	 * CRC, so we don't enable that feature.
   5061 	 */
   5062 	sc->sc_mchash_type = 0;
   5063 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   5064 	    | RCTL_MO(sc->sc_mchash_type);
   5065 
   5066 	/*
   5067 	 * The I350 has a bug where it always strips the CRC whether
    5068 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
   5069 	 */
   5070 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   5071 	    || (sc->sc_type == WM_T_I210))
   5072 		sc->sc_rctl |= RCTL_SECRC;
   5073 
   5074 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   5075 	    && (ifp->if_mtu > ETHERMTU)) {
   5076 		sc->sc_rctl |= RCTL_LPE;
   5077 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5078 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   5079 	}
   5080 
   5081 	if (MCLBYTES == 2048) {
   5082 		sc->sc_rctl |= RCTL_2k;
   5083 	} else {
   5084 		if (sc->sc_type >= WM_T_82543) {
   5085 			switch (MCLBYTES) {
   5086 			case 4096:
   5087 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   5088 				break;
   5089 			case 8192:
   5090 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   5091 				break;
   5092 			case 16384:
   5093 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   5094 				break;
   5095 			default:
   5096 				panic("wm_init: MCLBYTES %d unsupported",
   5097 				    MCLBYTES);
   5098 				break;
   5099 			}
    5100 		} else
         			panic("wm_init: i82542 requires MCLBYTES = 2048");
   5101 	}
   5102 
   5103 	/* Set the receive filter. */
   5104 	wm_set_filter(sc);
   5105 
   5106 	/* Enable ECC */
   5107 	switch (sc->sc_type) {
   5108 	case WM_T_82571:
   5109 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   5110 		reg |= PBA_ECC_CORR_EN;
   5111 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   5112 		break;
   5113 	case WM_T_PCH_LPT:
   5114 	case WM_T_PCH_SPT:
   5115 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   5116 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   5117 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   5118 
   5119 		reg = CSR_READ(sc, WMREG_CTRL);
   5120 		reg |= CTRL_MEHE;
   5121 		CSR_WRITE(sc, WMREG_CTRL, reg);
   5122 		break;
   5123 	default:
   5124 		break;
   5125 	}
   5126 
    5127 	/* On 82575 and later, set RDT only if RX is enabled */
   5128 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5129 		int qidx;
   5130 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5131 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
   5132 			for (i = 0; i < WM_NRXDESC; i++) {
   5133 				mutex_enter(rxq->rxq_lock);
   5134 				wm_init_rxdesc(rxq, i);
   5135 				mutex_exit(rxq->rxq_lock);
   5136 
   5137 			}
   5138 		}
   5139 	}
   5140 
   5141 	wm_turnon(sc);
   5142 
   5143 	/* Start the one second link check clock. */
   5144 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   5145 
   5146 	/* ...all done! */
   5147 	ifp->if_flags |= IFF_RUNNING;
   5148 	ifp->if_flags &= ~IFF_OACTIVE;
   5149 
   5150  out:
   5151 	sc->sc_if_flags = ifp->if_flags;
   5152 	if (error)
   5153 		log(LOG_ERR, "%s: interface not running\n",
   5154 		    device_xname(sc->sc_dev));
   5155 	return error;
   5156 }
   5157 
   5158 /*
   5159  * wm_stop:		[ifnet interface function]
   5160  *
   5161  *	Stop transmission on the interface.
   5162  */
   5163 static void
   5164 wm_stop(struct ifnet *ifp, int disable)
   5165 {
   5166 	struct wm_softc *sc = ifp->if_softc;
   5167 
   5168 	WM_CORE_LOCK(sc);
   5169 	wm_stop_locked(ifp, disable);
   5170 	WM_CORE_UNLOCK(sc);
   5171 }
   5172 
   5173 static void
   5174 wm_stop_locked(struct ifnet *ifp, int disable)
   5175 {
   5176 	struct wm_softc *sc = ifp->if_softc;
   5177 	struct wm_txsoft *txs;
   5178 	int i, qidx;
   5179 
   5180 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5181 		device_xname(sc->sc_dev), __func__));
   5182 	KASSERT(WM_CORE_LOCKED(sc));
   5183 
   5184 	wm_turnoff(sc);
   5185 
   5186 	/* Stop the one second clock. */
   5187 	callout_stop(&sc->sc_tick_ch);
   5188 
   5189 	/* Stop the 82547 Tx FIFO stall check timer. */
   5190 	if (sc->sc_type == WM_T_82547)
   5191 		callout_stop(&sc->sc_txfifo_ch);
   5192 
   5193 	if (sc->sc_flags & WM_F_HAS_MII) {
   5194 		/* Down the MII. */
   5195 		mii_down(&sc->sc_mii);
   5196 	} else {
   5197 #if 0
   5198 		/* Should we clear PHY's status properly? */
   5199 		wm_reset(sc);
   5200 #endif
   5201 	}
   5202 
   5203 	/* Stop the transmit and receive processes. */
   5204 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5205 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5206 	sc->sc_rctl &= ~RCTL_EN;
   5207 
   5208 	/*
   5209 	 * Clear the interrupt mask to ensure the device cannot assert its
   5210 	 * interrupt line.
   5211 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5212 	 * service any currently pending or shared interrupt.
   5213 	 */
   5214 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5215 	sc->sc_icr = 0;
   5216 	if (sc->sc_nintrs > 1) {
   5217 		if (sc->sc_type != WM_T_82574) {
   5218 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5219 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5220 		} else
   5221 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5222 	}
   5223 
   5224 	/* Release any queued transmit buffers. */
   5225 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
   5226 		struct wm_queue *wmq = &sc->sc_queue[qidx];
   5227 		struct wm_txqueue *txq = &wmq->wmq_txq;
   5228 		mutex_enter(txq->txq_lock);
   5229 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5230 			txs = &txq->txq_soft[i];
   5231 			if (txs->txs_mbuf != NULL) {
   5232 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
   5233 				m_freem(txs->txs_mbuf);
   5234 				txs->txs_mbuf = NULL;
   5235 			}
   5236 		}
   5237 		if (sc->sc_type == WM_T_PCH_SPT) {
   5238 			pcireg_t preg;
   5239 			uint32_t reg;
   5240 			int nexttx;
   5241 
   5242 			/* First, disable MULR fix in FEXTNVM11 */
   5243 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
   5244 			reg |= FEXTNVM11_DIS_MULRFIX;
   5245 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
   5246 
   5247 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5248 			    WM_PCI_DESCRING_STATUS);
   5249 			reg = CSR_READ(sc, WMREG_TDLEN(0));
   5250 			printf("XXX RST: FLUSH = %08x, len = %u\n",
   5251 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
   5252 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
   5253 			    && (reg != 0)) {
   5254 				/* TX */
   5255 				printf("XXX need TX flush (reg = %08x)\n",
   5256 				    preg);
   5257 				wm_init_tx_descs(sc, txq);
   5258 				wm_init_tx_regs(sc, wmq, txq);
   5259 				nexttx = txq->txq_next;
   5260 				wm_set_dma_addr(
   5261 					&txq->txq_descs[nexttx].wtx_addr,
   5262 					WM_CDTXADDR(txq, nexttx));
   5263 				txq->txq_descs[nexttx].wtx_cmdlen
   5264 				    = htole32(WTX_CMD_IFCS | 512);
   5265 				wm_cdtxsync(txq, nexttx, 1,
   5266 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
   5267 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
   5268 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
   5269 				CSR_WRITE_FLUSH(sc);
   5270 				delay(250);
   5271 				CSR_WRITE(sc, WMREG_TCTL, 0);
   5272 			}
   5273 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   5274 			    WM_PCI_DESCRING_STATUS);
   5275 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
   5276 				/* RX */
   5277 				printf("XXX need RX flush\n");
   5278 			}
   5279 		}
   5280 		mutex_exit(txq->txq_lock);
   5281 	}
   5282 
   5283 	/* Mark the interface as down and cancel the watchdog timer. */
   5284 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5285 	ifp->if_timer = 0;
   5286 
   5287 	if (disable) {
   5288 		for (i = 0; i < sc->sc_nqueues; i++) {
   5289 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5290 			mutex_enter(rxq->rxq_lock);
   5291 			wm_rxdrain(rxq);
   5292 			mutex_exit(rxq->rxq_lock);
   5293 		}
   5294 	}
   5295 
   5296 #if 0 /* notyet */
   5297 	if (sc->sc_type >= WM_T_82544)
   5298 		CSR_WRITE(sc, WMREG_WUC, 0);
   5299 #endif
   5300 }
   5301 
   5302 static void
   5303 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5304 {
   5305 	struct mbuf *m;
   5306 	int i;
   5307 
   5308 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5309 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5310 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5311 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5312 		    m->m_data, m->m_len, m->m_flags);
   5313 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5314 	    i, i == 1 ? "" : "s");
   5315 }
   5316 
   5317 /*
   5318  * wm_82547_txfifo_stall:
   5319  *
   5320  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5321  *	reset the FIFO pointers, and restart packet transmission.
   5322  */
   5323 static void
   5324 wm_82547_txfifo_stall(void *arg)
   5325 {
   5326 	struct wm_softc *sc = arg;
   5327 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5328 
   5329 	mutex_enter(txq->txq_lock);
   5330 
   5331 	if (txq->txq_stopping)
   5332 		goto out;
   5333 
   5334 	if (txq->txq_fifo_stall) {
   5335 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5336 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5337 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5338 			/*
   5339 			 * Packets have drained.  Stop transmitter, reset
   5340 			 * FIFO pointers, restart transmitter, and kick
   5341 			 * the packet queue.
   5342 			 */
   5343 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5344 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5345 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5346 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5347 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5348 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5349 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5350 			CSR_WRITE_FLUSH(sc);
   5351 
   5352 			txq->txq_fifo_head = 0;
   5353 			txq->txq_fifo_stall = 0;
   5354 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5355 		} else {
   5356 			/*
   5357 			 * Still waiting for packets to drain; try again in
   5358 			 * another tick.
   5359 			 */
   5360 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5361 		}
   5362 	}
   5363 
   5364 out:
   5365 	mutex_exit(txq->txq_lock);
   5366 }
   5367 
   5368 /*
   5369  * wm_82547_txfifo_bugchk:
   5370  *
   5371  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5372  *	prevent enqueueing a packet that would wrap around the end
    5373  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5374  *
   5375  *	We do this by checking the amount of space before the end
   5376  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5377  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5378  *	the internal FIFO pointers to the beginning, and restart
   5379  *	transmission on the interface.
   5380  */
   5381 #define	WM_FIFO_HDR		0x10
   5382 #define	WM_82547_PAD_LEN	0x3e0
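        /*
         * Each packet consumes a 16-byte (WM_FIFO_HDR) FIFO header and its
         * length is rounded up to that granularity; WM_82547_PAD_LEN
         * (0x3e0 = 992 bytes) is the extra margin applied in the wrap
         * check below.
         */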
   5383 static int
   5384 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5385 {
   5386 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   5387 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5388 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5389 
   5390 	/* Just return if already stalled. */
   5391 	if (txq->txq_fifo_stall)
   5392 		return 1;
   5393 
   5394 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5395 		/* Stall only occurs in half-duplex mode. */
   5396 		goto send_packet;
   5397 	}
   5398 
   5399 	if (len >= WM_82547_PAD_LEN + space) {
   5400 		txq->txq_fifo_stall = 1;
   5401 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5402 		return 1;
   5403 	}
   5404 
   5405  send_packet:
   5406 	txq->txq_fifo_head += len;
   5407 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5408 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5409 
   5410 	return 0;
   5411 }
   5412 
   5413 static int
   5414 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5415 {
   5416 	int error;
   5417 
   5418 	/*
   5419 	 * Allocate the control data structures, and create and load the
   5420 	 * DMA map for it.
   5421 	 *
   5422 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5423 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5424 	 * both sets within the same 4G segment.
   5425 	 */
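         	/*
         	 * The 4G constraint is enforced by the 0x100000000ULL boundary
         	 * argument passed to bus_dmamem_alloc() below.
         	 */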
   5426 	if (sc->sc_type < WM_T_82544)
   5427 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5428 	else
   5429 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5430 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5431 		txq->txq_descsize = sizeof(nq_txdesc_t);
   5432 	else
   5433 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
   5434 
   5435 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
   5436 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
   5437 		    1, &txq->txq_desc_rseg, 0)) != 0) {
   5438 		aprint_error_dev(sc->sc_dev,
   5439 		    "unable to allocate TX control data, error = %d\n",
   5440 		    error);
   5441 		goto fail_0;
   5442 	}
   5443 
   5444 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5445 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
   5446 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5447 		aprint_error_dev(sc->sc_dev,
   5448 		    "unable to map TX control data, error = %d\n", error);
   5449 		goto fail_1;
   5450 	}
   5451 
   5452 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
   5453 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5454 		aprint_error_dev(sc->sc_dev,
   5455 		    "unable to create TX control data DMA map, error = %d\n",
   5456 		    error);
   5457 		goto fail_2;
   5458 	}
   5459 
   5460 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5461 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
   5462 		aprint_error_dev(sc->sc_dev,
   5463 		    "unable to load TX control data DMA map, error = %d\n",
   5464 		    error);
   5465 		goto fail_3;
   5466 	}
   5467 
   5468 	return 0;
   5469 
   5470  fail_3:
   5471 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5472  fail_2:
   5473 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5474 	    WM_TXDESCS_SIZE(txq));
   5475  fail_1:
   5476 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5477  fail_0:
   5478 	return error;
   5479 }
   5480 
   5481 static void
   5482 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5483 {
   5484 
   5485 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5486 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5487 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5488 	    WM_TXDESCS_SIZE(txq));
   5489 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5490 }
   5491 
   5492 static int
   5493 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5494 {
   5495 	int error;
   5496 
   5497 	/*
   5498 	 * Allocate the control data structures, and create and load the
   5499 	 * DMA map for it.
   5500 	 *
   5501 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5502 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5503 	 * both sets within the same 4G segment.
   5504 	 */
   5505 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5506 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
   5507 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
   5508 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
   5509 		aprint_error_dev(sc->sc_dev,
   5510 		    "unable to allocate RX control data, error = %d\n",
   5511 		    error);
   5512 		goto fail_0;
   5513 	}
   5514 
   5515 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5516 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5517 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5518 		aprint_error_dev(sc->sc_dev,
   5519 		    "unable to map RX control data, error = %d\n", error);
   5520 		goto fail_1;
   5521 	}
   5522 
   5523 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5524 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5525 		aprint_error_dev(sc->sc_dev,
   5526 		    "unable to create RX control data DMA map, error = %d\n",
   5527 		    error);
   5528 		goto fail_2;
   5529 	}
   5530 
   5531 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5532 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5533 		aprint_error_dev(sc->sc_dev,
   5534 		    "unable to load RX control data DMA map, error = %d\n",
   5535 		    error);
   5536 		goto fail_3;
   5537 	}
   5538 
   5539 	return 0;
   5540 
   5541  fail_3:
   5542 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5543  fail_2:
   5544 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5545 	    rxq->rxq_desc_size);
   5546  fail_1:
   5547 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5548  fail_0:
   5549 	return error;
   5550 }
   5551 
   5552 static void
   5553 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5554 {
   5555 
   5556 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5557 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5558 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5559 	    rxq->rxq_desc_size);
   5560 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5561 }
   5562 
   5563 
   5564 static int
   5565 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5566 {
   5567 	int i, error;
   5568 
   5569 	/* Create the transmit buffer DMA maps. */
   5570 	WM_TXQUEUELEN(txq) =
   5571 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5572 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5573 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5574 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5575 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5576 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5577 			aprint_error_dev(sc->sc_dev,
   5578 			    "unable to create Tx DMA map %d, error = %d\n",
   5579 			    i, error);
   5580 			goto fail;
   5581 		}
   5582 	}
   5583 
   5584 	return 0;
   5585 
   5586  fail:
   5587 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5588 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5589 			bus_dmamap_destroy(sc->sc_dmat,
   5590 			    txq->txq_soft[i].txs_dmamap);
   5591 	}
   5592 	return error;
   5593 }
   5594 
   5595 static void
   5596 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5597 {
   5598 	int i;
   5599 
   5600 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5601 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5602 			bus_dmamap_destroy(sc->sc_dmat,
   5603 			    txq->txq_soft[i].txs_dmamap);
   5604 	}
   5605 }
   5606 
   5607 static int
   5608 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5609 {
   5610 	int i, error;
   5611 
   5612 	/* Create the receive buffer DMA maps. */
   5613 	for (i = 0; i < WM_NRXDESC; i++) {
   5614 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5615 			    MCLBYTES, 0, 0,
   5616 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5617 			aprint_error_dev(sc->sc_dev,
   5618 			    "unable to create Rx DMA map %d error = %d\n",
   5619 			    i, error);
   5620 			goto fail;
   5621 		}
   5622 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5623 	}
   5624 
   5625 	return 0;
   5626 
   5627  fail:
   5628 	for (i = 0; i < WM_NRXDESC; i++) {
   5629 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5630 			bus_dmamap_destroy(sc->sc_dmat,
   5631 			    rxq->rxq_soft[i].rxs_dmamap);
   5632 	}
   5633 	return error;
   5634 }
   5635 
   5636 static void
   5637 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5638 {
   5639 	int i;
   5640 
   5641 	for (i = 0; i < WM_NRXDESC; i++) {
   5642 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5643 			bus_dmamap_destroy(sc->sc_dmat,
   5644 			    rxq->rxq_soft[i].rxs_dmamap);
   5645 	}
   5646 }
   5647 
   5648 /*
    5649  * wm_alloc_txrx_queues:
   5650  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5651  */
   5652 static int
   5653 wm_alloc_txrx_queues(struct wm_softc *sc)
   5654 {
   5655 	int i, error, tx_done, rx_done;
   5656 
   5657 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
   5658 	    KM_SLEEP);
   5659 	if (sc->sc_queue == NULL) {
    5660 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
   5661 		error = ENOMEM;
   5662 		goto fail_0;
   5663 	}
   5664 
   5665 	/*
   5666 	 * For transmission
   5667 	 */
   5668 	error = 0;
   5669 	tx_done = 0;
   5670 	for (i = 0; i < sc->sc_nqueues; i++) {
   5671 #ifdef WM_EVENT_COUNTERS
   5672 		int j;
   5673 		const char *xname;
   5674 #endif
   5675 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5676 		txq->txq_sc = sc;
   5677 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5678 
   5679 		error = wm_alloc_tx_descs(sc, txq);
   5680 		if (error)
   5681 			break;
   5682 		error = wm_alloc_tx_buffer(sc, txq);
   5683 		if (error) {
   5684 			wm_free_tx_descs(sc, txq);
   5685 			break;
   5686 		}
   5687 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
   5688 		if (txq->txq_interq == NULL) {
   5689 			wm_free_tx_descs(sc, txq);
   5690 			wm_free_tx_buffer(sc, txq);
   5691 			error = ENOMEM;
   5692 			break;
   5693 		}
   5694 
   5695 #ifdef WM_EVENT_COUNTERS
   5696 		xname = device_xname(sc->sc_dev);
   5697 
   5698 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
   5699 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
   5700 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
   5701 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
   5702 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
   5703 
   5704 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
   5705 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
   5706 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
   5707 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
   5708 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
   5709 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
   5710 
   5711 		for (j = 0; j < WM_NTXSEGS; j++) {
   5712 			snprintf(txq->txq_txseg_evcnt_names[j],
    5713 			    sizeof(txq->txq_txseg_evcnt_names[j]),
         			    "txq%02dtxseg%d", i, j);
    5714 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
         			    EVCNT_TYPE_MISC, NULL, xname,
         			    txq->txq_txseg_evcnt_names[j]);
   5716 		}
   5717 
   5718 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
   5719 
   5720 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
   5721 #endif /* WM_EVENT_COUNTERS */
   5722 
   5723 		tx_done++;
   5724 	}
   5725 	if (error)
   5726 		goto fail_1;
   5727 
   5728 	/*
    5729 	 * For receive
   5730 	 */
   5731 	error = 0;
   5732 	rx_done = 0;
   5733 	for (i = 0; i < sc->sc_nqueues; i++) {
   5734 #ifdef WM_EVENT_COUNTERS
   5735 		const char *xname;
   5736 #endif
   5737 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5738 		rxq->rxq_sc = sc;
   5739 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5740 
   5741 		error = wm_alloc_rx_descs(sc, rxq);
   5742 		if (error)
   5743 			break;
   5744 
   5745 		error = wm_alloc_rx_buffer(sc, rxq);
   5746 		if (error) {
   5747 			wm_free_rx_descs(sc, rxq);
   5748 			break;
   5749 		}
   5750 
   5751 #ifdef WM_EVENT_COUNTERS
   5752 		xname = device_xname(sc->sc_dev);
   5753 
   5754 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
   5755 
   5756 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
   5757 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
   5758 #endif /* WM_EVENT_COUNTERS */
   5759 
   5760 		rx_done++;
   5761 	}
   5762 	if (error)
   5763 		goto fail_2;
   5764 
   5765 	return 0;
   5766 
   5767  fail_2:
   5768 	for (i = 0; i < rx_done; i++) {
   5769 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5770 		wm_free_rx_buffer(sc, rxq);
   5771 		wm_free_rx_descs(sc, rxq);
   5772 		if (rxq->rxq_lock)
   5773 			mutex_obj_free(rxq->rxq_lock);
   5774 	}
   5775  fail_1:
   5776 	for (i = 0; i < tx_done; i++) {
   5777 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5778 		pcq_destroy(txq->txq_interq);
   5779 		wm_free_tx_buffer(sc, txq);
   5780 		wm_free_tx_descs(sc, txq);
   5781 		if (txq->txq_lock)
   5782 			mutex_obj_free(txq->txq_lock);
   5783 	}
   5784 
   5785 	kmem_free(sc->sc_queue,
   5786 	    sizeof(struct wm_queue) * sc->sc_nqueues);
   5787  fail_0:
   5788 	return error;
   5789 }
   5790 
   5791 /*
    5792  * wm_free_txrx_queues:
   5793  *	Free {tx,rx}descs and {tx,rx} buffers
   5794  */
   5795 static void
   5796 wm_free_txrx_queues(struct wm_softc *sc)
   5797 {
   5798 	int i;
   5799 
   5800 	for (i = 0; i < sc->sc_nqueues; i++) {
   5801 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
   5802 		wm_free_rx_buffer(sc, rxq);
   5803 		wm_free_rx_descs(sc, rxq);
   5804 		if (rxq->rxq_lock)
   5805 			mutex_obj_free(rxq->rxq_lock);
   5806 	}
   5807 
   5808 	for (i = 0; i < sc->sc_nqueues; i++) {
   5809 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
   5810 		wm_free_tx_buffer(sc, txq);
   5811 		wm_free_tx_descs(sc, txq);
   5812 		if (txq->txq_lock)
   5813 			mutex_obj_free(txq->txq_lock);
   5814 	}
   5815 
   5816 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
   5817 }
   5818 
   5819 static void
   5820 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5821 {
   5822 
   5823 	KASSERT(mutex_owned(txq->txq_lock));
   5824 
   5825 	/* Initialize the transmit descriptor ring. */
   5826 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
   5827 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5828 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   5829 	txq->txq_free = WM_NTXDESC(txq);
   5830 	txq->txq_next = 0;
   5831 }
   5832 
   5833 static void
   5834 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5835     struct wm_txqueue *txq)
   5836 {
   5837 
   5838 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   5839 		device_xname(sc->sc_dev), __func__));
   5840 	KASSERT(mutex_owned(txq->txq_lock));
   5841 
   5842 	if (sc->sc_type < WM_T_82543) {
   5843 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5844 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5845 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
   5846 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5847 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5848 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5849 	} else {
   5850 		int qid = wmq->wmq_id;
   5851 
   5852 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5853 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5854 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
   5855 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5856 
   5857 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5858 			/*
   5859 			 * Don't write TDT before TCTL.EN is set.
   5860 			 * See the document.
   5861 			 */
   5862 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5863 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5864 			    | TXDCTL_WTHRESH(0));
   5865 		else {
   5866 			/* ITR / 4 */
   5867 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5868 			if (sc->sc_type >= WM_T_82540) {
   5869 				/* should be same */
   5870 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5871 			}
   5872 
   5873 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5874 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5875 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5876 		}
   5877 	}
   5878 }
   5879 
   5880 static void
   5881 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5882 {
   5883 	int i;
   5884 
   5885 	KASSERT(mutex_owned(txq->txq_lock));
   5886 
   5887 	/* Initialize the transmit job descriptors. */
   5888 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5889 		txq->txq_soft[i].txs_mbuf = NULL;
   5890 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5891 	txq->txq_snext = 0;
   5892 	txq->txq_sdirty = 0;
   5893 }
   5894 
   5895 static void
   5896 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   5897     struct wm_txqueue *txq)
   5898 {
   5899 
   5900 	KASSERT(mutex_owned(txq->txq_lock));
   5901 
   5902 	/*
   5903 	 * Set up some register offsets that are different between
   5904 	 * the i82542 and the i82543 and later chips.
   5905 	 */
   5906 	if (sc->sc_type < WM_T_82543)
   5907 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5908 	else
   5909 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
   5910 
   5911 	wm_init_tx_descs(sc, txq);
   5912 	wm_init_tx_regs(sc, wmq, txq);
   5913 	wm_init_tx_buffer(sc, txq);
   5914 }
   5915 
   5916 static void
   5917 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
   5918     struct wm_rxqueue *rxq)
   5919 {
   5920 
   5921 	KASSERT(mutex_owned(rxq->rxq_lock));
   5922 
   5923 	/*
   5924 	 * Initialize the receive descriptor and receive job
   5925 	 * descriptor rings.
   5926 	 */
   5927 	if (sc->sc_type < WM_T_82543) {
   5928 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5929 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5930 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5931 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5932 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5933 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5934 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5935 
   5936 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5937 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5938 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5939 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5940 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5941 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5942 	} else {
   5943 		int qid = wmq->wmq_id;
   5944 
   5945 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5946 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5947 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5948 
   5949 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
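         			/*
         			 * SRRCTL expresses the buffer size in units of
         			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
         			 * must be a multiple of that unit.
         			 */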
   5950 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5951 				panic("%s: MCLBYTES %d unsupported for "
         				    "i82575 or higher\n", __func__, MCLBYTES);
   5952 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5953 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5954 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5955 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5956 			    | RXDCTL_WTHRESH(1));
   5957 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5958 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5959 		} else {
   5960 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5961 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5962 			/* ITR / 4 */
   5963 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
   5964 			/* MUST be same */
   5965 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5966 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5967 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5968 		}
   5969 	}
   5970 }
   5971 
   5972 static int
   5973 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5974 {
   5975 	struct wm_rxsoft *rxs;
   5976 	int error, i;
   5977 
   5978 	KASSERT(mutex_owned(rxq->rxq_lock));
   5979 
   5980 	for (i = 0; i < WM_NRXDESC; i++) {
   5981 		rxs = &rxq->rxq_soft[i];
   5982 		if (rxs->rxs_mbuf == NULL) {
   5983 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5984 				log(LOG_ERR, "%s: unable to allocate or map "
   5985 				    "rx buffer %d, error = %d\n",
   5986 				    device_xname(sc->sc_dev), i, error);
   5987 				/*
   5988 				 * XXX Should attempt to run with fewer receive
   5989 				 * XXX buffers instead of just failing.
   5990 				 */
   5991 				wm_rxdrain(rxq);
   5992 				return ENOMEM;
   5993 			}
   5994 		} else {
   5995 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5996 				wm_init_rxdesc(rxq, i);
   5997 			/*
    5998 			 * For 82575 and newer devices, the RX descriptors
    5999 			 * must be initialized after RCTL.EN is set in
    6000 			 * wm_set_filter().
   6001 			 */
   6002 		}
   6003 	}
   6004 	rxq->rxq_ptr = 0;
   6005 	rxq->rxq_discard = 0;
   6006 	WM_RXCHAIN_RESET(rxq);
   6007 
   6008 	return 0;
   6009 }
   6010 
   6011 static int
   6012 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
   6013     struct wm_rxqueue *rxq)
   6014 {
   6015 
   6016 	KASSERT(mutex_owned(rxq->rxq_lock));
   6017 
   6018 	/*
   6019 	 * Set up some register offsets that are different between
   6020 	 * the i82542 and the i82543 and later chips.
   6021 	 */
   6022 	if (sc->sc_type < WM_T_82543)
   6023 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   6024 	else
   6025 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
   6026 
   6027 	wm_init_rx_regs(sc, wmq, rxq);
   6028 	return wm_init_rx_buffer(sc, rxq);
   6029 }
   6030 
   6031 /*
    6032  * wm_init_txrx_queues:
   6033  *	Initialize {tx,rx}descs and {tx,rx} buffers
   6034  */
   6035 static int
   6036 wm_init_txrx_queues(struct wm_softc *sc)
   6037 {
   6038 	int i, error = 0;
   6039 
   6040 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   6041 		device_xname(sc->sc_dev), __func__));
   6042 
   6043 	for (i = 0; i < sc->sc_nqueues; i++) {
   6044 		struct wm_queue *wmq = &sc->sc_queue[i];
   6045 		struct wm_txqueue *txq = &wmq->wmq_txq;
   6046 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   6047 
   6048 		mutex_enter(txq->txq_lock);
   6049 		wm_init_tx_queue(sc, wmq, txq);
   6050 		mutex_exit(txq->txq_lock);
   6051 
   6052 		mutex_enter(rxq->rxq_lock);
   6053 		error = wm_init_rx_queue(sc, wmq, rxq);
   6054 		mutex_exit(rxq->rxq_lock);
   6055 		if (error)
   6056 			break;
   6057 	}
   6058 
   6059 	return error;
   6060 }
   6061 
   6062 /*
   6063  * wm_tx_offload:
   6064  *
   6065  *	Set up TCP/IP checksumming parameters for the
   6066  *	specified packet.
   6067  */
   6068 static int
   6069 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   6070     uint8_t *fieldsp)
   6071 {
   6072 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6073 	struct mbuf *m0 = txs->txs_mbuf;
   6074 	struct livengood_tcpip_ctxdesc *t;
   6075 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   6076 	uint32_t ipcse;
   6077 	struct ether_header *eh;
   6078 	int offset, iphl;
   6079 	uint8_t fields;
   6080 
   6081 	/*
   6082 	 * XXX It would be nice if the mbuf pkthdr had offset
   6083 	 * fields for the protocol headers.
   6084 	 */
   6085 
   6086 	eh = mtod(m0, struct ether_header *);
   6087 	switch (htons(eh->ether_type)) {
   6088 	case ETHERTYPE_IP:
   6089 	case ETHERTYPE_IPV6:
   6090 		offset = ETHER_HDR_LEN;
   6091 		break;
   6092 
   6093 	case ETHERTYPE_VLAN:
   6094 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6095 		break;
   6096 
   6097 	default:
   6098 		/*
   6099 		 * Don't support this protocol or encapsulation.
   6100 		 */
   6101 		*fieldsp = 0;
   6102 		*cmdp = 0;
   6103 		return 0;
   6104 	}
   6105 
   6106 	if ((m0->m_pkthdr.csum_flags &
   6107 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
   6108 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6109 	} else {
   6110 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6111 	}
   6112 	ipcse = offset + iphl - 1;
   6113 
   6114 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   6115 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   6116 	seg = 0;
   6117 	fields = 0;
   6118 
   6119 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6120 		int hlen = offset + iphl;
   6121 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6122 
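         		/*
         		 * For TSO the hardware rewrites the IP length and TCP
         		 * checksum per segment, so zero ip_len/ip6_plen and
         		 * pre-seed th_sum with the pseudo-header sum (without
         		 * the length) before handing the template header down.
         		 */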
   6123 		if (__predict_false(m0->m_len <
   6124 				    (hlen + sizeof(struct tcphdr)))) {
   6125 			/*
   6126 			 * TCP/IP headers are not in the first mbuf; we need
   6127 			 * to do this the slow and painful way.  Let's just
   6128 			 * hope this doesn't happen very often.
   6129 			 */
   6130 			struct tcphdr th;
   6131 
   6132 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6133 
   6134 			m_copydata(m0, hlen, sizeof(th), &th);
   6135 			if (v4) {
   6136 				struct ip ip;
   6137 
   6138 				m_copydata(m0, offset, sizeof(ip), &ip);
   6139 				ip.ip_len = 0;
   6140 				m_copyback(m0,
   6141 				    offset + offsetof(struct ip, ip_len),
   6142 				    sizeof(ip.ip_len), &ip.ip_len);
   6143 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6144 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6145 			} else {
   6146 				struct ip6_hdr ip6;
   6147 
   6148 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6149 				ip6.ip6_plen = 0;
   6150 				m_copyback(m0,
   6151 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6152 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6153 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6154 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6155 			}
   6156 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6157 			    sizeof(th.th_sum), &th.th_sum);
   6158 
   6159 			hlen += th.th_off << 2;
   6160 		} else {
   6161 			/*
   6162 			 * TCP/IP headers are in the first mbuf; we can do
   6163 			 * this the easy way.
   6164 			 */
   6165 			struct tcphdr *th;
   6166 
   6167 			if (v4) {
   6168 				struct ip *ip =
   6169 				    (void *)(mtod(m0, char *) + offset);
   6170 				th = (void *)(mtod(m0, char *) + hlen);
   6171 
   6172 				ip->ip_len = 0;
   6173 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6174 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6175 			} else {
   6176 				struct ip6_hdr *ip6 =
   6177 				    (void *)(mtod(m0, char *) + offset);
   6178 				th = (void *)(mtod(m0, char *) + hlen);
   6179 
   6180 				ip6->ip6_plen = 0;
   6181 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6182 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6183 			}
   6184 			hlen += th->th_off << 2;
   6185 		}
   6186 
   6187 		if (v4) {
   6188 			WM_Q_EVCNT_INCR(txq, txtso);
   6189 			cmdlen |= WTX_TCPIP_CMD_IP;
   6190 		} else {
   6191 			WM_Q_EVCNT_INCR(txq, txtso6);
   6192 			ipcse = 0;
   6193 		}
   6194 		cmd |= WTX_TCPIP_CMD_TSE;
   6195 		cmdlen |= WTX_TCPIP_CMD_TSE |
   6196 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   6197 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   6198 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   6199 	}
   6200 
   6201 	/*
   6202 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   6203 	 * offload feature, if we load the context descriptor, we
   6204 	 * MUST provide valid values for IPCSS and TUCSS fields.
   6205 	 */
   6206 
   6207 	ipcs = WTX_TCPIP_IPCSS(offset) |
   6208 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   6209 	    WTX_TCPIP_IPCSE(ipcse);
   6210 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
   6211 		WM_Q_EVCNT_INCR(txq, txipsum);
   6212 		fields |= WTX_IXSM;
   6213 	}
   6214 
   6215 	offset += iphl;
   6216 
   6217 	if (m0->m_pkthdr.csum_flags &
   6218 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
   6219 		WM_Q_EVCNT_INCR(txq, txtusum);
   6220 		fields |= WTX_TXSM;
   6221 		tucs = WTX_TCPIP_TUCSS(offset) |
   6222 		    WTX_TCPIP_TUCSO(offset +
   6223 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   6224 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6225 	} else if ((m0->m_pkthdr.csum_flags &
   6226 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
   6227 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6228 		fields |= WTX_TXSM;
   6229 		tucs = WTX_TCPIP_TUCSS(offset) |
   6230 		    WTX_TCPIP_TUCSO(offset +
   6231 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   6232 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6233 	} else {
   6234 		/* Just initialize it to a valid TCP context. */
   6235 		tucs = WTX_TCPIP_TUCSS(offset) |
   6236 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   6237 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   6238 	}
   6239 
   6240 	/* Fill in the context descriptor. */
   6241 	t = (struct livengood_tcpip_ctxdesc *)
   6242 	    &txq->txq_descs[txq->txq_next];
   6243 	t->tcpip_ipcs = htole32(ipcs);
   6244 	t->tcpip_tucs = htole32(tucs);
   6245 	t->tcpip_cmdlen = htole32(cmdlen);
   6246 	t->tcpip_seg = htole32(seg);
   6247 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6248 
   6249 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6250 	txs->txs_ndesc++;
   6251 
   6252 	*cmdp = cmd;
   6253 	*fieldsp = fields;
   6254 
   6255 	return 0;
   6256 }
   6257 
   6258 /*
   6259  * wm_start:		[ifnet interface function]
   6260  *
   6261  *	Start packet transmission on the interface.
   6262  */
   6263 static void
   6264 wm_start(struct ifnet *ifp)
   6265 {
   6266 	struct wm_softc *sc = ifp->if_softc;
   6267 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6268 
   6269 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6270 
   6271 	mutex_enter(txq->txq_lock);
   6272 	if (!txq->txq_stopping)
   6273 		wm_start_locked(ifp);
   6274 	mutex_exit(txq->txq_lock);
   6275 }
   6276 
   6277 static void
   6278 wm_start_locked(struct ifnet *ifp)
   6279 {
   6280 	struct wm_softc *sc = ifp->if_softc;
   6281 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6282 	struct mbuf *m0;
   6283 	struct m_tag *mtag;
   6284 	struct wm_txsoft *txs;
   6285 	bus_dmamap_t dmamap;
   6286 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6287 	bus_addr_t curaddr;
   6288 	bus_size_t seglen, curlen;
   6289 	uint32_t cksumcmd;
   6290 	uint8_t cksumfields;
   6291 
   6292 	KASSERT(mutex_owned(txq->txq_lock));
   6293 
   6294 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6295 		return;
   6296 
   6297 	/* Remember the previous number of free descriptors. */
   6298 	ofree = txq->txq_free;
   6299 
   6300 	/*
   6301 	 * Loop through the send queue, setting up transmit descriptors
   6302 	 * until we drain the queue, or use up all available transmit
   6303 	 * descriptors.
   6304 	 */
   6305 	for (;;) {
   6306 		m0 = NULL;
   6307 
   6308 		/* Get a work queue entry. */
   6309 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6310 			wm_txeof(sc, txq);
   6311 			if (txq->txq_sfree == 0) {
   6312 				DPRINTF(WM_DEBUG_TX,
   6313 				    ("%s: TX: no free job descriptors\n",
   6314 					device_xname(sc->sc_dev)));
   6315 				WM_Q_EVCNT_INCR(txq, txsstall);
   6316 				break;
   6317 			}
   6318 		}
   6319 
   6320 		/* Grab a packet off the queue. */
   6321 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6322 		if (m0 == NULL)
   6323 			break;
   6324 
   6325 		DPRINTF(WM_DEBUG_TX,
   6326 		    ("%s: TX: have packet to transmit: %p\n",
   6327 		    device_xname(sc->sc_dev), m0));
   6328 
   6329 		txs = &txq->txq_soft[txq->txq_snext];
   6330 		dmamap = txs->txs_dmamap;
   6331 
   6332 		use_tso = (m0->m_pkthdr.csum_flags &
   6333 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6334 
   6335 		/*
   6336 		 * So says the Linux driver:
   6337 		 * The controller does a simple calculation to make sure
   6338 		 * there is enough room in the FIFO before initiating the
   6339 		 * DMA for each buffer.  The calc is:
   6340 		 *	4 = ceil(buffer len / MSS)
   6341 		 * To make sure we don't overrun the FIFO, adjust the max
   6342 		 * buffer len if the MSS drops.
   6343 		 */
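         		/*
         		 * Illustrative example: with an MSS of 1448 bytes,
         		 * the clamp below limits dm_maxsegsz to 1448 << 2 =
         		 * 5792 bytes, keeping ceil(buffer len / MSS) <= 4
         		 * for every buffer handed to the chip.
         		 */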
   6344 		dmamap->dm_maxsegsz =
   6345 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6346 		    ? m0->m_pkthdr.segsz << 2
   6347 		    : WTX_MAX_LEN;
   6348 
   6349 		/*
   6350 		 * Load the DMA map.  If this fails, the packet either
   6351 		 * didn't fit in the allotted number of segments, or we
   6352 		 * were short on resources.  For the too-many-segments
   6353 		 * case, we simply report an error and drop the packet,
   6354 		 * since we can't sanely copy a jumbo packet to a single
   6355 		 * buffer.
   6356 		 */
   6357 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6358 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6359 		if (error) {
   6360 			if (error == EFBIG) {
   6361 				WM_Q_EVCNT_INCR(txq, txdrop);
   6362 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6363 				    "DMA segments, dropping...\n",
   6364 				    device_xname(sc->sc_dev));
   6365 				wm_dump_mbuf_chain(sc, m0);
   6366 				m_freem(m0);
   6367 				continue;
   6368 			}
    6369 			/* Short on resources, just stop for now. */
   6370 			DPRINTF(WM_DEBUG_TX,
   6371 			    ("%s: TX: dmamap load failed: %d\n",
   6372 			    device_xname(sc->sc_dev), error));
   6373 			break;
   6374 		}
   6375 
   6376 		segs_needed = dmamap->dm_nsegs;
   6377 		if (use_tso) {
   6378 			/* For sentinel descriptor; see below. */
   6379 			segs_needed++;
   6380 		}
   6381 
   6382 		/*
   6383 		 * Ensure we have enough descriptors free to describe
   6384 		 * the packet.  Note, we always reserve one descriptor
   6385 		 * at the end of the ring due to the semantics of the
   6386 		 * TDT register, plus one more in the event we need
   6387 		 * to load offload context.
   6388 		 */
   6389 		if (segs_needed > txq->txq_free - 2) {
   6390 			/*
   6391 			 * Not enough free descriptors to transmit this
   6392 			 * packet.  We haven't committed anything yet,
   6393 			 * so just unload the DMA map, put the packet
    6394 			 * back on the queue, and punt.  Notify the upper
   6395 			 * layer that there are no more slots left.
   6396 			 */
   6397 			DPRINTF(WM_DEBUG_TX,
   6398 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6399 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6400 			    segs_needed, txq->txq_free - 1));
   6401 			ifp->if_flags |= IFF_OACTIVE;
   6402 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6403 			WM_Q_EVCNT_INCR(txq, txdstall);
   6404 			break;
   6405 		}
   6406 
   6407 		/*
   6408 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6409 		 * once we know we can transmit the packet, since we
   6410 		 * do some internal FIFO space accounting here.
   6411 		 */
   6412 		if (sc->sc_type == WM_T_82547 &&
   6413 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6414 			DPRINTF(WM_DEBUG_TX,
   6415 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6416 			    device_xname(sc->sc_dev)));
   6417 			ifp->if_flags |= IFF_OACTIVE;
   6418 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6419 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
   6420 			break;
   6421 		}
   6422 
   6423 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6424 
   6425 		DPRINTF(WM_DEBUG_TX,
   6426 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6427 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6428 
   6429 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6430 
   6431 		/*
   6432 		 * Store a pointer to the packet so that we can free it
   6433 		 * later.
   6434 		 *
   6435 		 * Initially, we consider the number of descriptors the
    6436 		 * packet uses to be the number of DMA segments.  This may be
   6437 		 * incremented by 1 if we do checksum offload (a descriptor
   6438 		 * is used to set the checksum context).
   6439 		 */
   6440 		txs->txs_mbuf = m0;
   6441 		txs->txs_firstdesc = txq->txq_next;
   6442 		txs->txs_ndesc = segs_needed;
   6443 
   6444 		/* Set up offload parameters for this packet. */
   6445 		if (m0->m_pkthdr.csum_flags &
   6446 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6447 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6448 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6449 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6450 					  &cksumfields) != 0) {
   6451 				/* Error message already displayed. */
   6452 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6453 				continue;
   6454 			}
   6455 		} else {
   6456 			cksumcmd = 0;
   6457 			cksumfields = 0;
   6458 		}
   6459 
   6460 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
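         		/*
         		 * Every descriptor of this packet carries the
         		 * interrupt-delay (IDE) and insert-FCS (IFCS) bits;
         		 * EOP and RS are OR'ed into the last descriptor
         		 * only, below.
         		 */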
   6461 
   6462 		/* Sync the DMA map. */
   6463 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6464 		    BUS_DMASYNC_PREWRITE);
   6465 
   6466 		/* Initialize the transmit descriptor. */
   6467 		for (nexttx = txq->txq_next, seg = 0;
   6468 		     seg < dmamap->dm_nsegs; seg++) {
   6469 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6470 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6471 			     seglen != 0;
   6472 			     curaddr += curlen, seglen -= curlen,
   6473 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6474 				curlen = seglen;
   6475 
   6476 				/*
   6477 				 * So says the Linux driver:
   6478 				 * Work around for premature descriptor
   6479 				 * write-backs in TSO mode.  Append a
   6480 				 * 4-byte sentinel descriptor.
   6481 				 */
   6482 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
   6483 				    curlen > 8)
   6484 					curlen -= 4;
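         				/*
         				 * Shortening the last segment by 4
         				 * bytes leaves a 4-byte remainder,
         				 * which the next pass of this loop
         				 * emits as the sentinel descriptor.
         				 */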
   6485 
   6486 				wm_set_dma_addr(
   6487 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
   6488 				txq->txq_descs[nexttx].wtx_cmdlen
   6489 				    = htole32(cksumcmd | curlen);
   6490 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
   6491 				    = 0;
   6492 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
   6493 				    = cksumfields;
    6494 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6495 				lasttx = nexttx;
   6496 
   6497 				DPRINTF(WM_DEBUG_TX,
   6498 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6499 				     "len %#04zx\n",
   6500 				    device_xname(sc->sc_dev), nexttx,
   6501 				    (uint64_t)curaddr, curlen));
   6502 			}
   6503 		}
   6504 
   6505 		KASSERT(lasttx != -1);
   6506 
   6507 		/*
   6508 		 * Set up the command byte on the last descriptor of
   6509 		 * the packet.  If we're in the interrupt delay window,
   6510 		 * delay the interrupt.
   6511 		 */
   6512 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6513 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6514 
   6515 		/*
   6516 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6517 		 * up the descriptor to encapsulate the packet for us.
   6518 		 *
   6519 		 * This is only valid on the last descriptor of the packet.
   6520 		 */
   6521 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6522 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6523 			    htole32(WTX_CMD_VLE);
   6524 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6525 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6526 		}
   6527 
   6528 		txs->txs_lastdesc = lasttx;
   6529 
   6530 		DPRINTF(WM_DEBUG_TX,
   6531 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6532 		    device_xname(sc->sc_dev),
   6533 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6534 
   6535 		/* Sync the descriptors we're using. */
   6536 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6537 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   6538 
   6539 		/* Give the packet to the chip. */
   6540 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6541 
   6542 		DPRINTF(WM_DEBUG_TX,
   6543 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6544 
   6545 		DPRINTF(WM_DEBUG_TX,
   6546 		    ("%s: TX: finished transmitting packet, job %d\n",
   6547 		    device_xname(sc->sc_dev), txq->txq_snext));
   6548 
   6549 		/* Advance the tx pointer. */
   6550 		txq->txq_free -= txs->txs_ndesc;
   6551 		txq->txq_next = nexttx;
   6552 
   6553 		txq->txq_sfree--;
   6554 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6555 
   6556 		/* Pass the packet to any BPF listeners. */
   6557 		bpf_mtap(ifp, m0);
   6558 	}
   6559 
   6560 	if (m0 != NULL) {
   6561 		ifp->if_flags |= IFF_OACTIVE;
   6562 		WM_Q_EVCNT_INCR(txq, txdrop);
   6563 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   6564 			__func__));
   6565 		m_freem(m0);
   6566 	}
   6567 
   6568 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6569 		/* No more slots; notify upper layer. */
   6570 		ifp->if_flags |= IFF_OACTIVE;
   6571 	}
   6572 
   6573 	if (txq->txq_free != ofree) {
   6574 		/* Set a watchdog timer in case the chip flakes out. */
   6575 		ifp->if_timer = 5;
   6576 	}
   6577 }
   6578 
   6579 /*
   6580  * wm_nq_tx_offload:
   6581  *
   6582  *	Set up TCP/IP checksumming parameters for the
   6583  *	specified packet, for NEWQUEUE devices
   6584  */
   6585 static int
   6586 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
   6587     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6588 {
   6589 	struct mbuf *m0 = txs->txs_mbuf;
   6590 	struct m_tag *mtag;
   6591 	uint32_t vl_len, mssidx, cmdc;
   6592 	struct ether_header *eh;
   6593 	int offset, iphl;
   6594 
   6595 	/*
   6596 	 * XXX It would be nice if the mbuf pkthdr had offset
   6597 	 * fields for the protocol headers.
   6598 	 */
   6599 	*cmdlenp = 0;
   6600 	*fieldsp = 0;
   6601 
   6602 	eh = mtod(m0, struct ether_header *);
   6603 	switch (htons(eh->ether_type)) {
   6604 	case ETHERTYPE_IP:
   6605 	case ETHERTYPE_IPV6:
   6606 		offset = ETHER_HDR_LEN;
   6607 		break;
   6608 
   6609 	case ETHERTYPE_VLAN:
   6610 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6611 		break;
   6612 
   6613 	default:
   6614 		/* Don't support this protocol or encapsulation. */
   6615 		*do_csum = false;
   6616 		return 0;
   6617 	}
   6618 	*do_csum = true;
   6619 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6620 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
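         	/*
         	 * From here on we build two things in parallel: *cmdlenp and
         	 * *fieldsp for the advanced data descriptor, and vl_len, cmdc
         	 * and mssidx for the context descriptor written at the end of
         	 * this function.
         	 */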
   6621 
   6622 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6623 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6624 
   6625 	if ((m0->m_pkthdr.csum_flags &
   6626 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
   6627 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6628 	} else {
   6629 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6630 	}
   6631 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6632 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6633 
   6634 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6635 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6636 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6637 		*cmdlenp |= NQTX_CMD_VLE;
   6638 	}
   6639 
   6640 	mssidx = 0;
   6641 
   6642 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6643 		int hlen = offset + iphl;
   6644 		int tcp_hlen;
   6645 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6646 
   6647 		if (__predict_false(m0->m_len <
   6648 				    (hlen + sizeof(struct tcphdr)))) {
   6649 			/*
   6650 			 * TCP/IP headers are not in the first mbuf; we need
   6651 			 * to do this the slow and painful way.  Let's just
   6652 			 * hope this doesn't happen very often.
   6653 			 */
   6654 			struct tcphdr th;
   6655 
   6656 			WM_Q_EVCNT_INCR(txq, txtsopain);
   6657 
   6658 			m_copydata(m0, hlen, sizeof(th), &th);
   6659 			if (v4) {
   6660 				struct ip ip;
   6661 
   6662 				m_copydata(m0, offset, sizeof(ip), &ip);
   6663 				ip.ip_len = 0;
   6664 				m_copyback(m0,
   6665 				    offset + offsetof(struct ip, ip_len),
   6666 				    sizeof(ip.ip_len), &ip.ip_len);
   6667 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6668 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6669 			} else {
   6670 				struct ip6_hdr ip6;
   6671 
   6672 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6673 				ip6.ip6_plen = 0;
   6674 				m_copyback(m0,
   6675 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6676 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6677 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6678 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6679 			}
   6680 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6681 			    sizeof(th.th_sum), &th.th_sum);
   6682 
   6683 			tcp_hlen = th.th_off << 2;
   6684 		} else {
   6685 			/*
   6686 			 * TCP/IP headers are in the first mbuf; we can do
   6687 			 * this the easy way.
   6688 			 */
   6689 			struct tcphdr *th;
   6690 
   6691 			if (v4) {
   6692 				struct ip *ip =
   6693 				    (void *)(mtod(m0, char *) + offset);
   6694 				th = (void *)(mtod(m0, char *) + hlen);
   6695 
   6696 				ip->ip_len = 0;
   6697 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6698 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6699 			} else {
   6700 				struct ip6_hdr *ip6 =
   6701 				    (void *)(mtod(m0, char *) + offset);
   6702 				th = (void *)(mtod(m0, char *) + hlen);
   6703 
   6704 				ip6->ip6_plen = 0;
   6705 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6706 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6707 			}
   6708 			tcp_hlen = th->th_off << 2;
   6709 		}
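         		/*
         		 * Either way, ip_len/ip6_plen is now zero and th_sum
         		 * holds the pseudo-header checksum; for TSO the
         		 * hardware fills in the length and checksum fields of
         		 * each segment it emits.
         		 */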
   6710 		hlen += tcp_hlen;
   6711 		*cmdlenp |= NQTX_CMD_TSE;
   6712 
   6713 		if (v4) {
   6714 			WM_Q_EVCNT_INCR(txq, txtso);
   6715 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6716 		} else {
   6717 			WM_Q_EVCNT_INCR(txq, txtso6);
   6718 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6719 		}
   6720 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6721 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6722 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6723 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6724 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6725 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6726 	} else {
   6727 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6728 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6729 	}
   6730 
   6731 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6732 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6733 		cmdc |= NQTXC_CMD_IP4;
   6734 	}
   6735 
   6736 	if (m0->m_pkthdr.csum_flags &
   6737 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6738 		WM_Q_EVCNT_INCR(txq, txtusum);
   6739 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6740 			cmdc |= NQTXC_CMD_TCP;
   6741 		} else {
   6742 			cmdc |= NQTXC_CMD_UDP;
   6743 		}
   6744 		cmdc |= NQTXC_CMD_IP4;
   6745 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6746 	}
   6747 	if (m0->m_pkthdr.csum_flags &
   6748 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6749 		WM_Q_EVCNT_INCR(txq, txtusum6);
   6750 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6751 			cmdc |= NQTXC_CMD_TCP;
   6752 		} else {
   6753 			cmdc |= NQTXC_CMD_UDP;
   6754 		}
   6755 		cmdc |= NQTXC_CMD_IP6;
   6756 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6757 	}
   6758 
   6759 	/* Fill in the context descriptor. */
   6760 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6761 	    htole32(vl_len);
   6762 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6763 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6764 	    htole32(cmdc);
   6765 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6766 	    htole32(mssidx);
   6767 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6768 	DPRINTF(WM_DEBUG_TX,
   6769 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6770 	    txq->txq_next, 0, vl_len));
   6771 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6772 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6773 	txs->txs_ndesc++;
   6774 	return 0;
   6775 }
   6776 
   6777 /*
   6778  * wm_nq_start:		[ifnet interface function]
   6779  *
   6780  *	Start packet transmission on the interface for NEWQUEUE devices
   6781  */
   6782 static void
   6783 wm_nq_start(struct ifnet *ifp)
   6784 {
   6785 	struct wm_softc *sc = ifp->if_softc;
   6786 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6787 
   6788 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
   6789 
   6790 	mutex_enter(txq->txq_lock);
   6791 	if (!txq->txq_stopping)
   6792 		wm_nq_start_locked(ifp);
   6793 	mutex_exit(txq->txq_lock);
   6794 }
   6795 
   6796 static void
   6797 wm_nq_start_locked(struct ifnet *ifp)
   6798 {
   6799 	struct wm_softc *sc = ifp->if_softc;
   6800 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   6801 
   6802 	wm_nq_send_common_locked(ifp, txq, false);
   6803 }
   6804 
   6805 static inline int
   6806 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
   6807 {
   6808 	struct wm_softc *sc = ifp->if_softc;
   6809 	u_int cpuid = cpu_index(curcpu());
   6810 
   6811 	/*
    6812 	 * Currently, a simple distribution strategy.
    6813 	 * TODO:
    6814 	 * distribute by flowid (RSS hash value).
   6815 	 */
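         	/*
         	 * Illustrative example: with sc_nqueues == 4 and
         	 * sc_affinity_offset == 0, a packet sent from CPU 5 is
         	 * steered to TX queue 5 % 4 == 1.
         	 */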
   6816 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
   6817 }
   6818 
   6819 static int
   6820 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
   6821 {
   6822 	int qid;
   6823 	struct wm_softc *sc = ifp->if_softc;
   6824 	struct wm_txqueue *txq;
   6825 
   6826 	qid = wm_nq_select_txqueue(ifp, m);
   6827 	txq = &sc->sc_queue[qid].wmq_txq;
   6828 
   6829 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
   6830 		m_freem(m);
   6831 		WM_Q_EVCNT_INCR(txq, txdrop);
   6832 		return ENOBUFS;
   6833 	}
   6834 
   6835 	if (mutex_tryenter(txq->txq_lock)) {
   6836 		/* XXXX should be per TX queue */
   6837 		ifp->if_obytes += m->m_pkthdr.len;
   6838 		if (m->m_flags & M_MCAST)
   6839 			ifp->if_omcasts++;
   6840 
   6841 		if (!txq->txq_stopping)
   6842 			wm_nq_transmit_locked(ifp, txq);
   6843 		mutex_exit(txq->txq_lock);
   6844 	}
   6845 
   6846 	return 0;
   6847 }
   6848 
   6849 static void
   6850 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
   6851 {
   6852 
   6853 	wm_nq_send_common_locked(ifp, txq, true);
   6854 }
   6855 
   6856 static void
   6857 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
   6858     bool is_transmit)
   6859 {
   6860 	struct wm_softc *sc = ifp->if_softc;
   6861 	struct mbuf *m0;
   6862 	struct m_tag *mtag;
   6863 	struct wm_txsoft *txs;
   6864 	bus_dmamap_t dmamap;
   6865 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6866 	bool do_csum, sent;
   6867 
   6868 	KASSERT(mutex_owned(txq->txq_lock));
   6869 
   6870 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   6871 		return;
   6872 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
   6873 		return;
   6874 
   6875 	sent = false;
   6876 
   6877 	/*
   6878 	 * Loop through the send queue, setting up transmit descriptors
   6879 	 * until we drain the queue, or use up all available transmit
   6880 	 * descriptors.
   6881 	 */
   6882 	for (;;) {
   6883 		m0 = NULL;
   6884 
   6885 		/* Get a work queue entry. */
   6886 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6887 			wm_txeof(sc, txq);
   6888 			if (txq->txq_sfree == 0) {
   6889 				DPRINTF(WM_DEBUG_TX,
   6890 				    ("%s: TX: no free job descriptors\n",
   6891 					device_xname(sc->sc_dev)));
   6892 				WM_Q_EVCNT_INCR(txq, txsstall);
   6893 				break;
   6894 			}
   6895 		}
   6896 
   6897 		/* Grab a packet off the queue. */
   6898 		if (is_transmit)
   6899 			m0 = pcq_get(txq->txq_interq);
   6900 		else
   6901 			IFQ_DEQUEUE(&ifp->if_snd, m0);
   6902 		if (m0 == NULL)
   6903 			break;
   6904 
   6905 		DPRINTF(WM_DEBUG_TX,
   6906 		    ("%s: TX: have packet to transmit: %p\n",
   6907 		    device_xname(sc->sc_dev), m0));
   6908 
   6909 		txs = &txq->txq_soft[txq->txq_snext];
   6910 		dmamap = txs->txs_dmamap;
   6911 
   6912 		/*
   6913 		 * Load the DMA map.  If this fails, the packet either
   6914 		 * didn't fit in the allotted number of segments, or we
   6915 		 * were short on resources.  For the too-many-segments
   6916 		 * case, we simply report an error and drop the packet,
   6917 		 * since we can't sanely copy a jumbo packet to a single
   6918 		 * buffer.
   6919 		 */
   6920 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6921 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
   6922 		if (error) {
   6923 			if (error == EFBIG) {
   6924 				WM_Q_EVCNT_INCR(txq, txdrop);
   6925 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6926 				    "DMA segments, dropping...\n",
   6927 				    device_xname(sc->sc_dev));
   6928 				wm_dump_mbuf_chain(sc, m0);
   6929 				m_freem(m0);
   6930 				continue;
   6931 			}
   6932 			/* Short on resources, just stop for now. */
   6933 			DPRINTF(WM_DEBUG_TX,
   6934 			    ("%s: TX: dmamap load failed: %d\n",
   6935 			    device_xname(sc->sc_dev), error));
   6936 			break;
   6937 		}
   6938 
   6939 		segs_needed = dmamap->dm_nsegs;
   6940 
   6941 		/*
   6942 		 * Ensure we have enough descriptors free to describe
   6943 		 * the packet.  Note, we always reserve one descriptor
   6944 		 * at the end of the ring due to the semantics of the
   6945 		 * TDT register, plus one more in the event we need
   6946 		 * to load offload context.
   6947 		 */
   6948 		if (segs_needed > txq->txq_free - 2) {
   6949 			/*
   6950 			 * Not enough free descriptors to transmit this
   6951 			 * packet.  We haven't committed anything yet,
   6952 			 * so just unload the DMA map, put the packet
    6953 			 * back on the queue, and punt.  Notify the upper
   6954 			 * layer that there are no more slots left.
   6955 			 */
   6956 			DPRINTF(WM_DEBUG_TX,
   6957 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6958 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6959 			    segs_needed, txq->txq_free - 1));
   6960 			txq->txq_flags |= WM_TXQ_NO_SPACE;
   6961 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6962 			WM_Q_EVCNT_INCR(txq, txdstall);
   6963 			break;
   6964 		}
   6965 
   6966 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6967 
   6968 		DPRINTF(WM_DEBUG_TX,
   6969 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6970 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6971 
   6972 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
   6973 
   6974 		/*
   6975 		 * Store a pointer to the packet so that we can free it
   6976 		 * later.
   6977 		 *
   6978 		 * Initially, we consider the number of descriptors the
    6979 		 * packet uses to be the number of DMA segments.  This may be
   6980 		 * incremented by 1 if we do checksum offload (a descriptor
   6981 		 * is used to set the checksum context).
   6982 		 */
   6983 		txs->txs_mbuf = m0;
   6984 		txs->txs_firstdesc = txq->txq_next;
   6985 		txs->txs_ndesc = segs_needed;
   6986 
   6987 		/* Set up offload parameters for this packet. */
   6988 		uint32_t cmdlen, fields, dcmdlen;
   6989 		if (m0->m_pkthdr.csum_flags &
   6990 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
   6991 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   6992 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
   6993 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
   6994 			    &do_csum) != 0) {
   6995 				/* Error message already displayed. */
   6996 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6997 				continue;
   6998 			}
   6999 		} else {
   7000 			do_csum = false;
   7001 			cmdlen = 0;
   7002 			fields = 0;
   7003 		}
   7004 
   7005 		/* Sync the DMA map. */
   7006 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   7007 		    BUS_DMASYNC_PREWRITE);
   7008 
   7009 		/* Initialize the first transmit descriptor. */
   7010 		nexttx = txq->txq_next;
   7011 		if (!do_csum) {
   7012 			/* setup a legacy descriptor */
   7013 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
   7014 			    dmamap->dm_segs[0].ds_addr);
   7015 			txq->txq_descs[nexttx].wtx_cmdlen =
   7016 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   7017 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   7018 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   7019 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   7020 			    NULL) {
   7021 				txq->txq_descs[nexttx].wtx_cmdlen |=
   7022 				    htole32(WTX_CMD_VLE);
   7023 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   7024 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   7025 			} else {
    7026 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   7027 			}
   7028 			dcmdlen = 0;
   7029 		} else {
   7030 			/* setup an advanced data descriptor */
   7031 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7032 			    htole64(dmamap->dm_segs[0].ds_addr);
   7033 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   7034 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    7035 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   7036 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   7037 			    htole32(fields);
   7038 			DPRINTF(WM_DEBUG_TX,
   7039 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   7040 			    device_xname(sc->sc_dev), nexttx,
   7041 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   7042 			DPRINTF(WM_DEBUG_TX,
   7043 			    ("\t 0x%08x%08x\n", fields,
   7044 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   7045 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   7046 		}
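         		/*
         		 * dcmdlen is reused for all remaining segments so the
         		 * follow-on descriptors match the first descriptor's
         		 * format: 0 for legacy, DTYP_D | DEXT for the
         		 * advanced layout.
         		 */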
   7047 
   7048 		lasttx = nexttx;
   7049 		nexttx = WM_NEXTTX(txq, nexttx);
   7050 		/*
    7051 		 * Fill in the next descriptors.  The legacy and advanced
    7052 		 * formats are the same here.
   7053 		 */
   7054 		for (seg = 1; seg < dmamap->dm_nsegs;
   7055 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   7056 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   7057 			    htole64(dmamap->dm_segs[seg].ds_addr);
   7058 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   7059 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   7060 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   7061 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   7062 			lasttx = nexttx;
   7063 
   7064 			DPRINTF(WM_DEBUG_TX,
   7065 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   7066 			     "len %#04zx\n",
   7067 			    device_xname(sc->sc_dev), nexttx,
   7068 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   7069 			    dmamap->dm_segs[seg].ds_len));
   7070 		}
   7071 
   7072 		KASSERT(lasttx != -1);
   7073 
   7074 		/*
   7075 		 * Set up the command byte on the last descriptor of
   7076 		 * the packet.  If we're in the interrupt delay window,
   7077 		 * delay the interrupt.
   7078 		 */
   7079 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   7080 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
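         		/*
         		 * The KASSERT above documents why the legacy-format
         		 * write below is safe: the legacy and advanced EOP/RS
         		 * bit encodings are identical, so one OR serves both
         		 * descriptor layouts.
         		 */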
   7081 		txq->txq_descs[lasttx].wtx_cmdlen |=
   7082 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   7083 
   7084 		txs->txs_lastdesc = lasttx;
   7085 
   7086 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
   7087 		    device_xname(sc->sc_dev),
   7088 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   7089 
   7090 		/* Sync the descriptors we're using. */
   7091 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   7092 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
   7093 
   7094 		/* Give the packet to the chip. */
   7095 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   7096 		sent = true;
   7097 
   7098 		DPRINTF(WM_DEBUG_TX,
   7099 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   7100 
   7101 		DPRINTF(WM_DEBUG_TX,
   7102 		    ("%s: TX: finished transmitting packet, job %d\n",
   7103 		    device_xname(sc->sc_dev), txq->txq_snext));
   7104 
   7105 		/* Advance the tx pointer. */
   7106 		txq->txq_free -= txs->txs_ndesc;
   7107 		txq->txq_next = nexttx;
   7108 
   7109 		txq->txq_sfree--;
   7110 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   7111 
   7112 		/* Pass the packet to any BPF listeners. */
   7113 		bpf_mtap(ifp, m0);
   7114 	}
   7115 
   7116 	if (m0 != NULL) {
   7117 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7118 		WM_Q_EVCNT_INCR(txq, txdrop);
   7119 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
   7120 			__func__));
   7121 		m_freem(m0);
   7122 	}
   7123 
   7124 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   7125 		/* No more slots; notify upper layer. */
   7126 		txq->txq_flags |= WM_TXQ_NO_SPACE;
   7127 	}
   7128 
   7129 	if (sent) {
   7130 		/* Set a watchdog timer in case the chip flakes out. */
   7131 		ifp->if_timer = 5;
   7132 	}
   7133 }
   7134 
   7135 /* Interrupt */
   7136 
   7137 /*
   7138  * wm_txeof:
   7139  *
   7140  *	Helper; handle transmit interrupts.
   7141  */
   7142 static int
   7143 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
   7144 {
   7145 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7146 	struct wm_txsoft *txs;
   7147 	bool processed = false;
   7148 	int count = 0;
   7149 	int i;
   7150 	uint8_t status;
   7151 
   7152 	KASSERT(mutex_owned(txq->txq_lock));
   7153 
   7154 	if (txq->txq_stopping)
   7155 		return 0;
   7156 
   7157 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   7158 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
   7159 	else
   7160 		ifp->if_flags &= ~IFF_OACTIVE;
   7161 
   7162 	/*
   7163 	 * Go through the Tx list and free mbufs for those
   7164 	 * frames which have been transmitted.
   7165 	 */
   7166 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   7167 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   7168 		txs = &txq->txq_soft[i];
   7169 
   7170 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
   7171 			device_xname(sc->sc_dev), i));
   7172 
   7173 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   7174 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   7175 
   7176 		status =
   7177 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
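         		/*
         		 * WTX_ST_DD is set by the hardware when it has
         		 * written the descriptor back; if the last descriptor
         		 * of this job is not done yet, resync it for reading
         		 * and stop scanning.
         		 */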
   7178 		if ((status & WTX_ST_DD) == 0) {
   7179 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   7180 			    BUS_DMASYNC_PREREAD);
   7181 			break;
   7182 		}
   7183 
   7184 		processed = true;
   7185 		count++;
   7186 		DPRINTF(WM_DEBUG_TX,
   7187 		    ("%s: TX: job %d done: descs %d..%d\n",
   7188 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   7189 		    txs->txs_lastdesc));
   7190 
   7191 		/*
   7192 		 * XXX We should probably be using the statistics
   7193 		 * XXX registers, but I don't know if they exist
   7194 		 * XXX on chips before the i82544.
   7195 		 */
   7196 
   7197 #ifdef WM_EVENT_COUNTERS
   7198 		if (status & WTX_ST_TU)
   7199 			WM_Q_EVCNT_INCR(txq, tu);
   7200 #endif /* WM_EVENT_COUNTERS */
   7201 
   7202 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
   7203 			ifp->if_oerrors++;
   7204 			if (status & WTX_ST_LC)
   7205 				log(LOG_WARNING, "%s: late collision\n",
   7206 				    device_xname(sc->sc_dev));
   7207 			else if (status & WTX_ST_EC) {
   7208 				ifp->if_collisions += 16;
   7209 				log(LOG_WARNING, "%s: excessive collisions\n",
   7210 				    device_xname(sc->sc_dev));
   7211 			}
   7212 		} else
   7213 			ifp->if_opackets++;
   7214 
   7215 		txq->txq_free += txs->txs_ndesc;
   7216 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   7217 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   7218 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   7219 		m_freem(txs->txs_mbuf);
   7220 		txs->txs_mbuf = NULL;
   7221 	}
   7222 
   7223 	/* Update the dirty transmit buffer pointer. */
   7224 	txq->txq_sdirty = i;
   7225 	DPRINTF(WM_DEBUG_TX,
   7226 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   7227 
   7228 	if (count != 0)
   7229 		rnd_add_uint32(&sc->rnd_source, count);
   7230 
   7231 	/*
   7232 	 * If there are no more pending transmissions, cancel the watchdog
   7233 	 * timer.
   7234 	 */
   7235 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   7236 		ifp->if_timer = 0;
   7237 
   7238 	return processed;
   7239 }
   7240 
   7241 /*
   7242  * wm_rxeof:
   7243  *
   7244  *	Helper; handle receive interrupts.
   7245  */
   7246 static void
   7247 wm_rxeof(struct wm_rxqueue *rxq)
   7248 {
   7249 	struct wm_softc *sc = rxq->rxq_sc;
   7250 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7251 	struct wm_rxsoft *rxs;
   7252 	struct mbuf *m;
   7253 	int i, len;
   7254 	int count = 0;
   7255 	uint8_t status, errors;
   7256 	uint16_t vlantag;
   7257 
   7258 	KASSERT(mutex_owned(rxq->rxq_lock));
   7259 
   7260 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   7261 		rxs = &rxq->rxq_soft[i];
   7262 
   7263 		DPRINTF(WM_DEBUG_RX,
   7264 		    ("%s: RX: checking descriptor %d\n",
   7265 		    device_xname(sc->sc_dev), i));
   7266 
   7267 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   7268 
   7269 		status = rxq->rxq_descs[i].wrx_status;
   7270 		errors = rxq->rxq_descs[i].wrx_errors;
   7271 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   7272 		vlantag = rxq->rxq_descs[i].wrx_special;
   7273 
   7274 		if ((status & WRX_ST_DD) == 0) {
   7275 			/* We have processed all of the receive descriptors. */
   7276 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   7277 			break;
   7278 		}
   7279 
   7280 		count++;
   7281 		if (__predict_false(rxq->rxq_discard)) {
   7282 			DPRINTF(WM_DEBUG_RX,
   7283 			    ("%s: RX: discarding contents of descriptor %d\n",
   7284 			    device_xname(sc->sc_dev), i));
   7285 			wm_init_rxdesc(rxq, i);
   7286 			if (status & WRX_ST_EOP) {
   7287 				/* Reset our state. */
   7288 				DPRINTF(WM_DEBUG_RX,
   7289 				    ("%s: RX: resetting rxdiscard -> 0\n",
   7290 				    device_xname(sc->sc_dev)));
   7291 				rxq->rxq_discard = 0;
   7292 			}
   7293 			continue;
   7294 		}
   7295 
   7296 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7297 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   7298 
   7299 		m = rxs->rxs_mbuf;
   7300 
   7301 		/*
   7302 		 * Add a new receive buffer to the ring, unless of
   7303 		 * course the length is zero. Treat the latter as a
   7304 		 * failed mapping.
   7305 		 */
   7306 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   7307 			/*
   7308 			 * Failed, throw away what we've done so
   7309 			 * far, and discard the rest of the packet.
   7310 			 */
   7311 			ifp->if_ierrors++;
   7312 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   7313 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   7314 			wm_init_rxdesc(rxq, i);
   7315 			if ((status & WRX_ST_EOP) == 0)
   7316 				rxq->rxq_discard = 1;
   7317 			if (rxq->rxq_head != NULL)
   7318 				m_freem(rxq->rxq_head);
   7319 			WM_RXCHAIN_RESET(rxq);
   7320 			DPRINTF(WM_DEBUG_RX,
   7321 			    ("%s: RX: Rx buffer allocation failed, "
   7322 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   7323 			    rxq->rxq_discard ? " (discard)" : ""));
   7324 			continue;
   7325 		}
   7326 
   7327 		m->m_len = len;
   7328 		rxq->rxq_len += len;
   7329 		DPRINTF(WM_DEBUG_RX,
   7330 		    ("%s: RX: buffer at %p len %d\n",
   7331 		    device_xname(sc->sc_dev), m->m_data, len));
   7332 
   7333 		/* If this is not the end of the packet, keep looking. */
   7334 		if ((status & WRX_ST_EOP) == 0) {
   7335 			WM_RXCHAIN_LINK(rxq, m);
   7336 			DPRINTF(WM_DEBUG_RX,
   7337 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   7338 			    device_xname(sc->sc_dev), rxq->rxq_len));
   7339 			continue;
   7340 		}
   7341 
   7342 		/*
    7343 		 * Okay, we have the entire packet now.  The chip is
    7344 		 * configured to include the FCS except on I350 and I21[01]
    7345 		 * (not all chips can be configured to strip it), so we
    7346 		 * need to trim it.  We may also need to adjust the length
    7347 		 * of the previous mbuf in the chain if the current mbuf
    7348 		 * is too short.
    7349 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
    7350 		 * register is always set on I350, so we don't trim there.
   7351 		 */
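         		/*
         		 * Example: if the final mbuf holds only 2 bytes, both
         		 * are FCS bytes; the remaining 2 FCS bytes sit at the
         		 * end of the previous mbuf, so its length is trimmed
         		 * as well.
         		 */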
   7352 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7353 		    && (sc->sc_type != WM_T_I210)
   7354 		    && (sc->sc_type != WM_T_I211)) {
   7355 			if (m->m_len < ETHER_CRC_LEN) {
   7356 				rxq->rxq_tail->m_len
   7357 				    -= (ETHER_CRC_LEN - m->m_len);
   7358 				m->m_len = 0;
   7359 			} else
   7360 				m->m_len -= ETHER_CRC_LEN;
   7361 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7362 		} else
   7363 			len = rxq->rxq_len;
   7364 
   7365 		WM_RXCHAIN_LINK(rxq, m);
   7366 
   7367 		*rxq->rxq_tailp = NULL;
   7368 		m = rxq->rxq_head;
   7369 
   7370 		WM_RXCHAIN_RESET(rxq);
   7371 
   7372 		DPRINTF(WM_DEBUG_RX,
   7373 		    ("%s: RX: have entire packet, len -> %d\n",
   7374 		    device_xname(sc->sc_dev), len));
   7375 
   7376 		/* If an error occurred, update stats and drop the packet. */
   7377 		if (errors &
   7378 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7379 			if (errors & WRX_ER_SE)
   7380 				log(LOG_WARNING, "%s: symbol error\n",
   7381 				    device_xname(sc->sc_dev));
   7382 			else if (errors & WRX_ER_SEQ)
   7383 				log(LOG_WARNING, "%s: receive sequence error\n",
   7384 				    device_xname(sc->sc_dev));
   7385 			else if (errors & WRX_ER_CE)
   7386 				log(LOG_WARNING, "%s: CRC error\n",
   7387 				    device_xname(sc->sc_dev));
   7388 			m_freem(m);
   7389 			continue;
   7390 		}
   7391 
   7392 		/* No errors.  Receive the packet. */
   7393 		m_set_rcvif(m, ifp);
   7394 		m->m_pkthdr.len = len;
   7395 
   7396 		/*
   7397 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7398 		 * for us.  Associate the tag with the packet.
   7399 		 */
   7400 		/* XXXX should check for i350 and i354 */
   7401 		if ((status & WRX_ST_VP) != 0) {
   7402 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
   7403 		}
   7404 
   7405 		/* Set up checksum info for this packet. */
   7406 		if ((status & WRX_ST_IXSM) == 0) {
   7407 			if (status & WRX_ST_IPCS) {
   7408 				WM_Q_EVCNT_INCR(rxq, rxipsum);
   7409 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7410 				if (errors & WRX_ER_IPE)
   7411 					m->m_pkthdr.csum_flags |=
   7412 					    M_CSUM_IPv4_BAD;
   7413 			}
   7414 			if (status & WRX_ST_TCPCS) {
   7415 				/*
   7416 				 * Note: we don't know if this was TCP or UDP,
   7417 				 * so we just set both bits, and expect the
   7418 				 * upper layers to deal.
   7419 				 */
   7420 				WM_Q_EVCNT_INCR(rxq, rxtusum);
   7421 				m->m_pkthdr.csum_flags |=
   7422 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7423 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7424 				if (errors & WRX_ER_TCPE)
   7425 					m->m_pkthdr.csum_flags |=
   7426 					    M_CSUM_TCP_UDP_BAD;
   7427 			}
   7428 		}
   7429 
   7430 		ifp->if_ipackets++;
   7431 
   7432 		mutex_exit(rxq->rxq_lock);
   7433 
   7434 		/* Pass this up to any BPF listeners. */
   7435 		bpf_mtap(ifp, m);
   7436 
   7437 		/* Pass it on. */
   7438 		if_percpuq_enqueue(sc->sc_ipq, m);
   7439 
   7440 		mutex_enter(rxq->rxq_lock);
   7441 
   7442 		if (rxq->rxq_stopping)
   7443 			break;
   7444 	}
   7445 
   7446 	/* Update the receive pointer. */
   7447 	rxq->rxq_ptr = i;
   7448 	if (count != 0)
   7449 		rnd_add_uint32(&sc->rnd_source, count);
   7450 
   7451 	DPRINTF(WM_DEBUG_RX,
   7452 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7453 }
   7454 
   7455 /*
   7456  * wm_linkintr_gmii:
   7457  *
   7458  *	Helper; handle link interrupts for GMII.
   7459  */
   7460 static void
   7461 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7462 {
   7463 
   7464 	KASSERT(WM_CORE_LOCKED(sc));
   7465 
   7466 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7467 		__func__));
   7468 
   7469 	if (icr & ICR_LSC) {
   7470 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7471 
   7472 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7473 			wm_gig_downshift_workaround_ich8lan(sc);
   7474 
   7475 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7476 			device_xname(sc->sc_dev)));
   7477 		mii_pollstat(&sc->sc_mii);
   7478 		if (sc->sc_type == WM_T_82543) {
   7479 			int miistatus, active;
   7480 
   7481 			/*
   7482 			 * With 82543, we need to force speed and
   7483 			 * duplex on the MAC equal to what the PHY
   7484 			 * speed and duplex configuration is.
   7485 			 */
   7486 			miistatus = sc->sc_mii.mii_media_status;
   7487 
   7488 			if (miistatus & IFM_ACTIVE) {
   7489 				active = sc->sc_mii.mii_media_active;
   7490 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7491 				switch (IFM_SUBTYPE(active)) {
   7492 				case IFM_10_T:
   7493 					sc->sc_ctrl |= CTRL_SPEED_10;
   7494 					break;
   7495 				case IFM_100_TX:
   7496 					sc->sc_ctrl |= CTRL_SPEED_100;
   7497 					break;
   7498 				case IFM_1000_T:
   7499 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7500 					break;
   7501 				default:
   7502 					/*
   7503 					 * fiber?
    7504 					 * Should not enter here.
   7505 					 */
   7506 					printf("unknown media (%x)\n", active);
   7507 					break;
   7508 				}
   7509 				if (active & IFM_FDX)
   7510 					sc->sc_ctrl |= CTRL_FD;
   7511 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7512 			}
   7513 		} else if ((sc->sc_type == WM_T_ICH8)
   7514 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7515 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7516 		} else if (sc->sc_type == WM_T_PCH) {
   7517 			wm_k1_gig_workaround_hv(sc,
   7518 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7519 		}
   7520 
   7521 		if ((sc->sc_phytype == WMPHY_82578)
   7522 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7523 			== IFM_1000_T)) {
   7524 
   7525 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7526 				delay(200*1000); /* XXX too big */
   7527 
   7528 				/* Link stall fix for link up */
   7529 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7530 				    HV_MUX_DATA_CTRL,
   7531 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7532 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7533 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7534 				    HV_MUX_DATA_CTRL,
   7535 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7536 			}
   7537 		}
   7538 	} else if (icr & ICR_RXSEQ) {
   7539 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
   7540 			device_xname(sc->sc_dev)));
   7541 	}
   7542 }
   7543 
   7544 /*
   7545  * wm_linkintr_tbi:
   7546  *
   7547  *	Helper; handle link interrupts for TBI mode.
   7548  */
   7549 static void
   7550 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7551 {
   7552 	uint32_t status;
   7553 
   7554 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7555 		__func__));
   7556 
   7557 	status = CSR_READ(sc, WMREG_STATUS);
   7558 	if (icr & ICR_LSC) {
   7559 		if (status & STATUS_LU) {
   7560 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7561 			    device_xname(sc->sc_dev),
   7562 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7563 			/*
    7564 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    7565 			 * automatically, so we re-read CTRL into sc->sc_ctrl.
   7566 			 */
   7567 
   7568 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7569 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7570 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7571 			if (status & STATUS_FD)
   7572 				sc->sc_tctl |=
   7573 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7574 			else
   7575 				sc->sc_tctl |=
   7576 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7577 			if (sc->sc_ctrl & CTRL_TFCE)
   7578 				sc->sc_fcrtl |= FCRTL_XONE;
   7579 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7580 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7581 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7582 				      sc->sc_fcrtl);
   7583 			sc->sc_tbi_linkup = 1;
   7584 		} else {
   7585 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7586 			    device_xname(sc->sc_dev)));
   7587 			sc->sc_tbi_linkup = 0;
   7588 		}
   7589 		/* Update LED */
   7590 		wm_tbi_serdes_set_linkled(sc);
   7591 	} else if (icr & ICR_RXSEQ) {
   7592 		DPRINTF(WM_DEBUG_LINK,
   7593 		    ("%s: LINK: Receive sequence error\n",
   7594 		    device_xname(sc->sc_dev)));
   7595 	}
   7596 }
   7597 
   7598 /*
   7599  * wm_linkintr_serdes:
   7600  *
    7601  *	Helper; handle link interrupts for SERDES mode.
   7602  */
   7603 static void
   7604 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7605 {
   7606 	struct mii_data *mii = &sc->sc_mii;
   7607 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7608 	uint32_t pcs_adv, pcs_lpab, reg;
   7609 
   7610 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7611 		__func__));
   7612 
   7613 	if (icr & ICR_LSC) {
   7614 		/* Check PCS */
   7615 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7616 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7617 			mii->mii_media_status |= IFM_ACTIVE;
   7618 			sc->sc_tbi_linkup = 1;
   7619 		} else {
   7620 			mii->mii_media_status |= IFM_NONE;
   7621 			sc->sc_tbi_linkup = 0;
   7622 			wm_tbi_serdes_set_linkled(sc);
   7623 			return;
   7624 		}
   7625 		mii->mii_media_active |= IFM_1000_SX;
   7626 		if ((reg & PCS_LSTS_FDX) != 0)
   7627 			mii->mii_media_active |= IFM_FDX;
   7628 		else
   7629 			mii->mii_media_active |= IFM_HDX;
   7630 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7631 			/* Check flow */
   7632 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7633 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7634 				DPRINTF(WM_DEBUG_LINK,
   7635 				    ("XXX LINKOK but not ACOMP\n"));
   7636 				return;
   7637 			}
   7638 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7639 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7640 			DPRINTF(WM_DEBUG_LINK,
   7641 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
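         			/*
         			 * Resolve flow control from the advertised and
         			 * link-partner pause bits (IEEE 802.3 Annex
         			 * 28B): symmetric pause on both sides enables
         			 * both directions; the asymmetric combinations
         			 * below enable TX-only or RX-only pause.
         			 */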
   7642 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7643 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7644 				mii->mii_media_active |= IFM_FLOW
   7645 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7646 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7647 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7648 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7649 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7650 				mii->mii_media_active |= IFM_FLOW
   7651 				    | IFM_ETH_TXPAUSE;
   7652 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7653 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7654 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7655 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7656 				mii->mii_media_active |= IFM_FLOW
   7657 				    | IFM_ETH_RXPAUSE;
   7658 		}
   7659 		/* Update LED */
   7660 		wm_tbi_serdes_set_linkled(sc);
   7661 	} else {
   7662 		DPRINTF(WM_DEBUG_LINK,
   7663 		    ("%s: LINK: Receive sequence error\n",
   7664 		    device_xname(sc->sc_dev)));
   7665 	}
   7666 }
   7667 
   7668 /*
   7669  * wm_linkintr:
   7670  *
   7671  *	Helper; handle link interrupts.
   7672  */
   7673 static void
   7674 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7675 {
   7676 
   7677 	KASSERT(WM_CORE_LOCKED(sc));
   7678 
   7679 	if (sc->sc_flags & WM_F_HAS_MII)
   7680 		wm_linkintr_gmii(sc, icr);
   7681 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7682 	    && (sc->sc_type >= WM_T_82575))
   7683 		wm_linkintr_serdes(sc, icr);
   7684 	else
   7685 		wm_linkintr_tbi(sc, icr);
   7686 }
   7687 
   7688 /*
   7689  * wm_intr_legacy:
   7690  *
   7691  *	Interrupt service routine for INTx and MSI.
   7692  */
   7693 static int
   7694 wm_intr_legacy(void *arg)
   7695 {
   7696 	struct wm_softc *sc = arg;
   7697 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
   7698 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
   7699 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7700 	uint32_t icr, rndval = 0;
   7701 	int handled = 0;
   7702 
   7703 	DPRINTF(WM_DEBUG_TX,
   7704 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7705 	while (1 /* CONSTCOND */) {
   7706 		icr = CSR_READ(sc, WMREG_ICR);
   7707 		if ((icr & sc->sc_icr) == 0)
   7708 			break;
   7709 		if (rndval == 0)
   7710 			rndval = icr;
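         		/*
         		 * The first nonzero ICR snapshot is kept and fed to
         		 * rnd_add_uint32() below as an entropy sample.
         		 */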
   7711 
   7712 		mutex_enter(rxq->rxq_lock);
   7713 
   7714 		if (rxq->rxq_stopping) {
   7715 			mutex_exit(rxq->rxq_lock);
   7716 			break;
   7717 		}
   7718 
   7719 		handled = 1;
   7720 
   7721 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7722 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
   7723 			DPRINTF(WM_DEBUG_RX,
   7724 			    ("%s: RX: got Rx intr 0x%08x\n",
   7725 			    device_xname(sc->sc_dev),
   7726 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
   7727 			WM_Q_EVCNT_INCR(rxq, rxintr);
   7728 		}
   7729 #endif
   7730 		wm_rxeof(rxq);
   7731 
   7732 		mutex_exit(rxq->rxq_lock);
   7733 		mutex_enter(txq->txq_lock);
   7734 
   7735 		if (txq->txq_stopping) {
   7736 			mutex_exit(txq->txq_lock);
   7737 			break;
   7738 		}
   7739 
   7740 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7741 		if (icr & ICR_TXDW) {
   7742 			DPRINTF(WM_DEBUG_TX,
   7743 			    ("%s: TX: got TXDW interrupt\n",
   7744 			    device_xname(sc->sc_dev)));
   7745 			WM_Q_EVCNT_INCR(txq, txdw);
   7746 		}
   7747 #endif
   7748 		wm_txeof(sc, txq);
   7749 
   7750 		mutex_exit(txq->txq_lock);
   7751 		WM_CORE_LOCK(sc);
   7752 
   7753 		if (sc->sc_core_stopping) {
   7754 			WM_CORE_UNLOCK(sc);
   7755 			break;
   7756 		}
   7757 
   7758 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
   7759 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7760 			wm_linkintr(sc, icr);
   7761 		}
   7762 
   7763 		WM_CORE_UNLOCK(sc);
   7764 
   7765 		if (icr & ICR_RXO) {
   7766 #if defined(WM_DEBUG)
   7767 			log(LOG_WARNING, "%s: Receive overrun\n",
   7768 			    device_xname(sc->sc_dev));
   7769 #endif /* defined(WM_DEBUG) */
   7770 		}
   7771 	}
   7772 
   7773 	rnd_add_uint32(&sc->rnd_source, rndval);
   7774 
   7775 	if (handled) {
   7776 		/* Try to get more packets going. */
   7777 		ifp->if_start(ifp);
   7778 	}
   7779 
   7780 	return handled;
   7781 }
   7782 
   7783 static int
   7784 wm_txrxintr_msix(void *arg)
   7785 {
   7786 	struct wm_queue *wmq = arg;
   7787 	struct wm_txqueue *txq = &wmq->wmq_txq;
   7788 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
   7789 	struct wm_softc *sc = txq->txq_sc;
   7790 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7791 
   7792 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
   7793 
   7794 	DPRINTF(WM_DEBUG_TX,
   7795 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7796 
   7797 	if (sc->sc_type == WM_T_82574)
   7798 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7799 	else if (sc->sc_type == WM_T_82575)
   7800 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7801 	else
   7802 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
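         	/*
         	 * This queue's interrupt stays masked (IMC/EIMC above) while
         	 * it is serviced and is re-enabled via IMS/EIMS at the end of
         	 * this handler, so the hardware will not re-raise it
         	 * mid-service.
         	 */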
   7803 
   7804 	mutex_enter(txq->txq_lock);
   7805 
   7806 	if (txq->txq_stopping) {
   7807 		mutex_exit(txq->txq_lock);
   7808 		return 0;
   7809 	}
   7810 
   7811 	WM_Q_EVCNT_INCR(txq, txdw);
   7812 	wm_txeof(sc, txq);
   7813 
   7814 	/* Try to get more packets going. */
   7815 	if (pcq_peek(txq->txq_interq) != NULL)
   7816 		wm_nq_transmit_locked(ifp, txq);
   7817 	/*
    7818 	 * There is still some upper-layer processing that calls
    7819 	 * ifp->if_start(), e.g. ALTQ.
   7820 	 */
   7821 	if (wmq->wmq_id == 0) {
   7822 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
   7823 			wm_nq_start_locked(ifp);
   7824 	}
   7825 
   7826 	mutex_exit(txq->txq_lock);
   7827 
   7828 	DPRINTF(WM_DEBUG_RX,
   7829 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7830 	mutex_enter(rxq->rxq_lock);
   7831 
   7832 	if (rxq->rxq_stopping) {
   7833 		mutex_exit(rxq->rxq_lock);
   7834 		return 0;
   7835 	}
   7836 
   7837 	WM_Q_EVCNT_INCR(rxq, rxintr);
   7838 	wm_rxeof(rxq);
   7839 	mutex_exit(rxq->rxq_lock);
   7840 
   7841 	if (sc->sc_type == WM_T_82574)
   7842 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
   7843 	else if (sc->sc_type == WM_T_82575)
   7844 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
   7845 	else
   7846 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
   7847 
   7848 	return 1;
   7849 }
   7850 
   7851 /*
   7852  * wm_linkintr_msix:
   7853  *
   7854  *	Interrupt service routine for link status change for MSI-X.
   7855  */
   7856 static int
   7857 wm_linkintr_msix(void *arg)
   7858 {
   7859 	struct wm_softc *sc = arg;
   7860 	uint32_t reg;
   7861 
   7862 	DPRINTF(WM_DEBUG_LINK,
   7863 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7864 
   7865 	reg = CSR_READ(sc, WMREG_ICR);
   7866 	WM_CORE_LOCK(sc);
   7867 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
   7868 		goto out;
   7869 
   7870 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7871 	wm_linkintr(sc, ICR_LSC);
   7872 
   7873 out:
   7874 	WM_CORE_UNLOCK(sc);
   7875 
   7876 	if (sc->sc_type == WM_T_82574)
   7877 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
   7878 	else if (sc->sc_type == WM_T_82575)
   7879 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7880 	else
   7881 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7882 
   7883 	return 1;
   7884 }
   7885 
   7886 /*
   7887  * Media related.
   7888  * GMII, SGMII, TBI (and SERDES)
   7889  */
   7890 
   7891 /* Common */
   7892 
   7893 /*
   7894  * wm_tbi_serdes_set_linkled:
   7895  *
   7896  *	Update the link LED on TBI and SERDES devices.
   7897  */
   7898 static void
   7899 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7900 {
   7901 
   7902 	if (sc->sc_tbi_linkup)
   7903 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7904 	else
   7905 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7906 
   7907 	/* 82540 or newer devices are active low */
   7908 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
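         	/*
         	 * The XOR inverts the bit on active-low parts, so link-up
         	 * drives SWDPIN(0) low on 82540 and newer devices.
         	 */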
   7909 
   7910 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7911 }
   7912 
   7913 /* GMII related */
   7914 
   7915 /*
   7916  * wm_gmii_reset:
   7917  *
   7918  *	Reset the PHY.
   7919  */
   7920 static void
   7921 wm_gmii_reset(struct wm_softc *sc)
   7922 {
   7923 	uint32_t reg;
   7924 	int rv;
   7925 
   7926 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   7927 		device_xname(sc->sc_dev), __func__));
   7928 
   7929 	rv = sc->phy.acquire(sc);
   7930 	if (rv != 0) {
   7931 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7932 		    __func__);
   7933 		return;
   7934 	}
   7935 
   7936 	switch (sc->sc_type) {
   7937 	case WM_T_82542_2_0:
   7938 	case WM_T_82542_2_1:
   7939 		/* null */
   7940 		break;
   7941 	case WM_T_82543:
   7942 		/*
   7943 		 * With 82543, we need to force speed and duplex on the MAC
   7944 		 * equal to what the PHY speed and duplex configuration is.
   7945 		 * In addition, we need to perform a hardware reset on the PHY
   7946 		 * to take it out of reset.
   7947 		 */
   7948 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7949 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7950 
   7951 		/* The PHY reset pin is active-low. */
   7952 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7953 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7954 		    CTRL_EXT_SWDPIN(4));
   7955 		reg |= CTRL_EXT_SWDPIO(4);
   7956 
   7957 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7958 		CSR_WRITE_FLUSH(sc);
   7959 		delay(10*1000);
   7960 
   7961 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7962 		CSR_WRITE_FLUSH(sc);
   7963 		delay(150);
   7964 #if 0
   7965 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7966 #endif
   7967 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7968 		break;
   7969 	case WM_T_82544:	/* reset 10000us */
   7970 	case WM_T_82540:
   7971 	case WM_T_82545:
   7972 	case WM_T_82545_3:
   7973 	case WM_T_82546:
   7974 	case WM_T_82546_3:
   7975 	case WM_T_82541:
   7976 	case WM_T_82541_2:
   7977 	case WM_T_82547:
   7978 	case WM_T_82547_2:
   7979 	case WM_T_82571:	/* reset 100us */
   7980 	case WM_T_82572:
   7981 	case WM_T_82573:
   7982 	case WM_T_82574:
   7983 	case WM_T_82575:
   7984 	case WM_T_82576:
   7985 	case WM_T_82580:
   7986 	case WM_T_I350:
   7987 	case WM_T_I354:
   7988 	case WM_T_I210:
   7989 	case WM_T_I211:
   7990 	case WM_T_82583:
   7991 	case WM_T_80003:
   7992 		/* generic reset */
   7993 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7994 		CSR_WRITE_FLUSH(sc);
   7995 		delay(20000);
   7996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7997 		CSR_WRITE_FLUSH(sc);
   7998 		delay(20000);
   7999 
   8000 		if ((sc->sc_type == WM_T_82541)
   8001 		    || (sc->sc_type == WM_T_82541_2)
   8002 		    || (sc->sc_type == WM_T_82547)
   8003 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
			/* XXX add code to set the LED after PHY reset */
   8006 		}
   8007 		break;
   8008 	case WM_T_ICH8:
   8009 	case WM_T_ICH9:
   8010 	case WM_T_ICH10:
   8011 	case WM_T_PCH:
   8012 	case WM_T_PCH2:
   8013 	case WM_T_PCH_LPT:
   8014 	case WM_T_PCH_SPT:
   8015 		/* generic reset */
   8016 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   8017 		CSR_WRITE_FLUSH(sc);
   8018 		delay(100);
   8019 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8020 		CSR_WRITE_FLUSH(sc);
   8021 		delay(150);
   8022 		break;
   8023 	default:
   8024 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   8025 		    __func__);
   8026 		break;
   8027 	}
   8028 
   8029 	sc->phy.release(sc);
   8030 
   8031 	/* get_cfg_done */
   8032 	wm_get_cfg_done(sc);
   8033 
   8034 	/* extra setup */
   8035 	switch (sc->sc_type) {
   8036 	case WM_T_82542_2_0:
   8037 	case WM_T_82542_2_1:
   8038 	case WM_T_82543:
   8039 	case WM_T_82544:
   8040 	case WM_T_82540:
   8041 	case WM_T_82545:
   8042 	case WM_T_82545_3:
   8043 	case WM_T_82546:
   8044 	case WM_T_82546_3:
   8045 	case WM_T_82541_2:
   8046 	case WM_T_82547_2:
   8047 	case WM_T_82571:
   8048 	case WM_T_82572:
   8049 	case WM_T_82573:
   8050 	case WM_T_82575:
   8051 	case WM_T_82576:
   8052 	case WM_T_82580:
   8053 	case WM_T_I350:
   8054 	case WM_T_I354:
   8055 	case WM_T_I210:
   8056 	case WM_T_I211:
   8057 	case WM_T_80003:
   8058 		/* null */
   8059 		break;
   8060 	case WM_T_82574:
   8061 	case WM_T_82583:
   8062 		wm_lplu_d0_disable(sc);
   8063 		break;
   8064 	case WM_T_82541:
   8065 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
   8067 		break;
   8068 	case WM_T_ICH8:
   8069 	case WM_T_ICH9:
   8070 	case WM_T_ICH10:
   8071 	case WM_T_PCH:
   8072 	case WM_T_PCH2:
   8073 	case WM_T_PCH_LPT:
   8074 	case WM_T_PCH_SPT:
		/* Allow time for h/w to get to a quiescent state after reset */
   8076 		delay(10*1000);
   8077 
   8078 		if (sc->sc_type == WM_T_PCH)
   8079 			wm_hv_phy_workaround_ich8lan(sc);
   8080 
   8081 		if (sc->sc_type == WM_T_PCH2)
   8082 			wm_lv_phy_workaround_ich8lan(sc);
   8083 
   8084 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   8085 			/*
   8086 			 * dummy read to clear the phy wakeup bit after lcd
   8087 			 * reset
   8088 			 */
   8089 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   8090 		}
   8091 
   8092 		/*
		 * XXX Configure the LCD with the extended configuration region
   8094 		 * in NVM
   8095 		 */
   8096 
   8097 		/* Disable D0 LPLU. */
   8098 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   8099 			wm_lplu_d0_disable_pch(sc);
   8100 		else
   8101 			wm_lplu_d0_disable(sc);	/* ICH* */
   8102 		break;
   8103 	default:
   8104 		panic("%s: unknown type\n", __func__);
   8105 		break;
   8106 	}
   8107 }
   8108 
   8109 /*
   8110  * wm_get_phy_id_82575:
   8111  *
 * Return the PHY ID. Return -1 on failure.
   8113  */
   8114 static int
   8115 wm_get_phy_id_82575(struct wm_softc *sc)
   8116 {
   8117 	uint32_t reg;
   8118 	int phyid = -1;
   8119 
   8120 	/* XXX */
   8121 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   8122 		return -1;
   8123 
   8124 	if (wm_sgmii_uses_mdio(sc)) {
   8125 		switch (sc->sc_type) {
   8126 		case WM_T_82575:
   8127 		case WM_T_82576:
   8128 			reg = CSR_READ(sc, WMREG_MDIC);
   8129 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   8130 			break;
   8131 		case WM_T_82580:
   8132 		case WM_T_I350:
   8133 		case WM_T_I354:
   8134 		case WM_T_I210:
   8135 		case WM_T_I211:
   8136 			reg = CSR_READ(sc, WMREG_MDICNFG);
   8137 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   8138 			break;
   8139 		default:
   8140 			return -1;
   8141 		}
   8142 	}
   8143 
   8144 	return phyid;
   8145 }
   8146 
   8147 
   8148 /*
   8149  * wm_gmii_mediainit:
   8150  *
   8151  *	Initialize media for use on 1000BASE-T devices.
   8152  */
   8153 static void
   8154 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   8155 {
   8156 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   8157 	struct mii_data *mii = &sc->sc_mii;
   8158 	uint32_t reg;
   8159 
   8160 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   8161 		device_xname(sc->sc_dev), __func__));
   8162 
   8163 	/* We have GMII. */
   8164 	sc->sc_flags |= WM_F_HAS_MII;
   8165 
   8166 	if (sc->sc_type == WM_T_80003)
   8167 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8168 	else
   8169 		sc->sc_tipg = TIPG_1000T_DFLT;
   8170 
   8171 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   8172 	if ((sc->sc_type == WM_T_82580)
   8173 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   8174 	    || (sc->sc_type == WM_T_I211)) {
   8175 		reg = CSR_READ(sc, WMREG_PHPM);
   8176 		reg &= ~PHPM_GO_LINK_D;
   8177 		CSR_WRITE(sc, WMREG_PHPM, reg);
   8178 	}
   8179 
   8180 	/*
   8181 	 * Let the chip set speed/duplex on its own based on
   8182 	 * signals from the PHY.
   8183 	 * XXXbouyer - I'm not sure this is right for the 80003,
   8184 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   8185 	 */
   8186 	sc->sc_ctrl |= CTRL_SLU;
   8187 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8188 
   8189 	/* Initialize our media structures and probe the GMII. */
   8190 	mii->mii_ifp = ifp;
   8191 
   8192 	/*
   8193 	 * Determine the PHY access method.
   8194 	 *
   8195 	 *  For SGMII, use SGMII specific method.
   8196 	 *
   8197 	 *  For some devices, we can determine the PHY access method
   8198 	 * from sc_type.
   8199 	 *
	 *  For ICH and PCH variants, it's difficult to determine the PHY
	 * access method from sc_type alone, so use the PCI product ID for
	 * some devices.
	 *  For other ICH8 variants, try igp's method first; if the PHY
	 * can't be detected that way, fall back to bm's method.
   8205 	 */
   8206 	switch (prodid) {
   8207 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   8208 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   8209 		/* 82577 */
   8210 		sc->sc_phytype = WMPHY_82577;
   8211 		break;
   8212 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   8213 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   8214 		/* 82578 */
   8215 		sc->sc_phytype = WMPHY_82578;
   8216 		break;
   8217 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   8218 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   8219 		/* 82579 */
   8220 		sc->sc_phytype = WMPHY_82579;
   8221 		break;
   8222 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
   8223 	case PCI_PRODUCT_INTEL_82801I_BM:
   8224 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   8225 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   8226 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   8227 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   8228 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   8229 		/* ICH8, 9, 10 with 82567 */
   8230 		sc->sc_phytype = WMPHY_BM;
   8231 		mii->mii_readreg = wm_gmii_bm_readreg;
   8232 		mii->mii_writereg = wm_gmii_bm_writereg;
   8233 		break;
   8234 	default:
   8235 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   8236 		    && !wm_sgmii_uses_mdio(sc)){
   8237 			/* SGMII */
   8238 			mii->mii_readreg = wm_sgmii_readreg;
   8239 			mii->mii_writereg = wm_sgmii_writereg;
   8240 		} else if (sc->sc_type >= WM_T_ICH8) {
   8241 			/* non-82567 ICH8, 9 and 10 */
   8242 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8243 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8244 		} else if (sc->sc_type >= WM_T_80003) {
   8245 			/* 80003 */
   8246 			mii->mii_readreg = wm_gmii_i80003_readreg;
   8247 			mii->mii_writereg = wm_gmii_i80003_writereg;
   8248 		} else if (sc->sc_type >= WM_T_I210) {
   8249 			/* I210 and I211 */
   8250 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   8251 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   8252 		} else if (sc->sc_type >= WM_T_82580) {
   8253 			/* 82580, I350 and I354 */
   8254 			sc->sc_phytype = WMPHY_82580;
   8255 			mii->mii_readreg = wm_gmii_82580_readreg;
   8256 			mii->mii_writereg = wm_gmii_82580_writereg;
   8257 		} else if (sc->sc_type >= WM_T_82544) {
			/*
			 * 82544, 82540, 8254[56], 8254[17], 8257[1234]
			 * and 82583
			 */
   8259 			mii->mii_readreg = wm_gmii_i82544_readreg;
   8260 			mii->mii_writereg = wm_gmii_i82544_writereg;
   8261 		} else {
   8262 			mii->mii_readreg = wm_gmii_i82543_readreg;
   8263 			mii->mii_writereg = wm_gmii_i82543_writereg;
   8264 		}
   8265 		break;
   8266 	}
   8267 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
   8268 		/* All PCH* use _hv_ */
   8269 		mii->mii_readreg = wm_gmii_hv_readreg;
   8270 		mii->mii_writereg = wm_gmii_hv_writereg;
   8271 	}
   8272 	mii->mii_statchg = wm_gmii_statchg;
   8273 
   8274 	wm_gmii_reset(sc);
   8275 
   8276 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   8277 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   8278 	    wm_gmii_mediastatus);
   8279 
   8280 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   8281 	    || (sc->sc_type == WM_T_82580)
   8282 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8283 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8284 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8285 			/* Attach only one port */
   8286 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8287 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8288 		} else {
   8289 			int i, id;
   8290 			uint32_t ctrl_ext;
   8291 
   8292 			id = wm_get_phy_id_82575(sc);
   8293 			if (id != -1) {
   8294 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8295 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8296 			}
   8297 			if ((id == -1)
   8298 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
   8300 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8301 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8302 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8303 				CSR_WRITE_FLUSH(sc);
   8304 				delay(300*1000); /* XXX too long */
   8305 
				/* Try PHY addresses 1 through 7 */
   8307 				for (i = 1; i < 8; i++)
   8308 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8309 					    0xffffffff, i, MII_OFFSET_ANY,
   8310 					    MIIF_DOPAUSE);
   8311 
				/* Restore the previous SFP cage power state */
   8313 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8314 			}
   8315 		}
   8316 	} else {
   8317 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8318 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8319 	}
   8320 
   8321 	/*
   8322 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8323 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8324 	 */
   8325 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8326 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8327 		wm_set_mdio_slow_mode_hv(sc);
   8328 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8329 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8330 	}
   8331 
   8332 	/*
   8333 	 * (For ICH8 variants)
   8334 	 * If PHY detection failed, use BM's r/w function and retry.
   8335 	 */
   8336 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8337 		/* if failed, retry with *_bm_* */
   8338 		mii->mii_readreg = wm_gmii_bm_readreg;
   8339 		mii->mii_writereg = wm_gmii_bm_writereg;
   8340 
   8341 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8342 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8343 	}
   8344 
   8345 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
   8347 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
   8348 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
   8349 		sc->sc_phytype = WMPHY_NONE;
   8350 	} else {
   8351 		/*
   8352 		 * PHY Found!
   8353 		 * Check PHY type.
   8354 		 */
   8355 		uint32_t model;
   8356 		struct mii_softc *child;
   8357 
   8358 		child = LIST_FIRST(&mii->mii_phys);
   8359 		model = child->mii_mpd_model;
   8360 		if (model == MII_MODEL_yyINTEL_I82566)
   8361 			sc->sc_phytype = WMPHY_IGP_3;
   8362 
   8363 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8364 	}
   8365 }
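
/*
 * A summary of the probe order implemented in wm_gmii_mediainit()
 * above: a PCI product-ID match first, then the sc_type heuristics,
 * then (on PCH2 and PCH_LPT) a retry in slow MDIO mode, then a retry
 * with the BM accessors, and finally IFM_NONE if no PHY ever answered.
 */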
   8366 
   8367 /*
   8368  * wm_gmii_mediachange:	[ifmedia interface function]
   8369  *
   8370  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8371  */
   8372 static int
   8373 wm_gmii_mediachange(struct ifnet *ifp)
   8374 {
   8375 	struct wm_softc *sc = ifp->if_softc;
   8376 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8377 	int rc;
   8378 
   8379 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   8380 		device_xname(sc->sc_dev), __func__));
   8381 	if ((ifp->if_flags & IFF_UP) == 0)
   8382 		return 0;
   8383 
   8384 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8385 	sc->sc_ctrl |= CTRL_SLU;
   8386 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8387 	    || (sc->sc_type > WM_T_82543)) {
   8388 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8389 	} else {
   8390 		sc->sc_ctrl &= ~CTRL_ASDE;
   8391 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8392 		if (ife->ifm_media & IFM_FDX)
   8393 			sc->sc_ctrl |= CTRL_FD;
   8394 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8395 		case IFM_10_T:
   8396 			sc->sc_ctrl |= CTRL_SPEED_10;
   8397 			break;
   8398 		case IFM_100_TX:
   8399 			sc->sc_ctrl |= CTRL_SPEED_100;
   8400 			break;
   8401 		case IFM_1000_T:
   8402 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8403 			break;
   8404 		default:
   8405 			panic("wm_gmii_mediachange: bad media 0x%x",
   8406 			    ife->ifm_media);
   8407 		}
   8408 	}
   8409 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8410 	if (sc->sc_type <= WM_T_82543)
   8411 		wm_gmii_reset(sc);
   8412 
   8413 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8414 		return 0;
   8415 	return rc;
   8416 }
   8417 
   8418 /*
   8419  * wm_gmii_mediastatus:	[ifmedia interface function]
   8420  *
   8421  *	Get the current interface media status on a 1000BASE-T device.
   8422  */
   8423 static void
   8424 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8425 {
   8426 	struct wm_softc *sc = ifp->if_softc;
   8427 
   8428 	ether_mediastatus(ifp, ifmr);
   8429 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8430 	    | sc->sc_flowflags;
   8431 }
   8432 
   8433 #define	MDI_IO		CTRL_SWDPIN(2)
   8434 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8435 #define	MDI_CLK		CTRL_SWDPIN(3)
   8436 
   8437 static void
   8438 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8439 {
   8440 	uint32_t i, v;
   8441 
   8442 	v = CSR_READ(sc, WMREG_CTRL);
   8443 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8444 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8445 
   8446 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8447 		if (data & i)
   8448 			v |= MDI_IO;
   8449 		else
   8450 			v &= ~MDI_IO;
   8451 		CSR_WRITE(sc, WMREG_CTRL, v);
   8452 		CSR_WRITE_FLUSH(sc);
   8453 		delay(10);
   8454 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8455 		CSR_WRITE_FLUSH(sc);
   8456 		delay(10);
   8457 		CSR_WRITE(sc, WMREG_CTRL, v);
   8458 		CSR_WRITE_FLUSH(sc);
   8459 		delay(10);
   8460 	}
   8461 }
   8462 
   8463 static uint32_t
   8464 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8465 {
   8466 	uint32_t v, i, data = 0;
   8467 
   8468 	v = CSR_READ(sc, WMREG_CTRL);
   8469 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8470 	v |= CTRL_SWDPIO(3);
   8471 
   8472 	CSR_WRITE(sc, WMREG_CTRL, v);
   8473 	CSR_WRITE_FLUSH(sc);
   8474 	delay(10);
   8475 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8476 	CSR_WRITE_FLUSH(sc);
   8477 	delay(10);
   8478 	CSR_WRITE(sc, WMREG_CTRL, v);
   8479 	CSR_WRITE_FLUSH(sc);
   8480 	delay(10);
   8481 
   8482 	for (i = 0; i < 16; i++) {
   8483 		data <<= 1;
   8484 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8485 		CSR_WRITE_FLUSH(sc);
   8486 		delay(10);
   8487 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8488 			data |= 1;
   8489 		CSR_WRITE(sc, WMREG_CTRL, v);
   8490 		CSR_WRITE_FLUSH(sc);
   8491 		delay(10);
   8492 	}
   8493 
   8494 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8495 	CSR_WRITE_FLUSH(sc);
   8496 	delay(10);
   8497 	CSR_WRITE(sc, WMREG_CTRL, v);
   8498 	CSR_WRITE_FLUSH(sc);
   8499 	delay(10);
   8500 
   8501 	return data;
   8502 }
   8503 
   8504 #undef MDI_IO
   8505 #undef MDI_DIR
   8506 #undef MDI_CLK
   8507 
   8508 /*
   8509  * wm_gmii_i82543_readreg:	[mii interface function]
   8510  *
   8511  *	Read a PHY register on the GMII (i82543 version).
   8512  */
   8513 static int
   8514 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8515 {
   8516 	struct wm_softc *sc = device_private(self);
   8517 	int rv;
   8518 
   8519 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8520 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8521 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8522 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8523 
   8524 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8525 	    device_xname(sc->sc_dev), phy, reg, rv));
   8526 
   8527 	return rv;
   8528 }
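
/*
 * Illustrative sketch (not driver code) of the 14-bit management frame
 * built in wm_gmii_i82543_readreg() above, assuming the conventional
 * MDIO encoding MII_COMMAND_START == 1 and MII_COMMAND_READ == 2.
 * For phy = 1, reg = 2:
 *
 *	(1 << 12) | (2 << 10) | (1 << 5) | 2 == 0x1822
 *
 * Bits [13:12] carry the start pattern, [11:10] the opcode, [9:5] the
 * PHY address and [4:0] the register address; wm_i82543_mii_sendbits()
 * clocks them out MSB first after the 32-bit preamble of ones.
 */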
   8529 
   8530 /*
   8531  * wm_gmii_i82543_writereg:	[mii interface function]
   8532  *
   8533  *	Write a PHY register on the GMII (i82543 version).
   8534  */
   8535 static void
   8536 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8537 {
   8538 	struct wm_softc *sc = device_private(self);
   8539 
   8540 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8541 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8542 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8543 	    (MII_COMMAND_START << 30), 32);
   8544 }
   8545 
   8546 /*
   8547  * wm_gmii_mdic_readreg:	[mii interface function]
   8548  *
   8549  *	Read a PHY register on the GMII.
   8550  */
   8551 static int
   8552 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
   8553 {
   8554 	struct wm_softc *sc = device_private(self);
   8555 	uint32_t mdic = 0;
   8556 	int i, rv;
   8557 
   8558 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8559 	    MDIC_REGADD(reg));
   8560 
   8561 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8562 		mdic = CSR_READ(sc, WMREG_MDIC);
   8563 		if (mdic & MDIC_READY)
   8564 			break;
   8565 		delay(50);
   8566 	}
   8567 
   8568 	if ((mdic & MDIC_READY) == 0) {
   8569 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8570 		    device_xname(sc->sc_dev), phy, reg);
   8571 		rv = 0;
   8572 	} else if (mdic & MDIC_E) {
   8573 #if 0 /* This is normal if no PHY is present. */
   8574 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8575 		    device_xname(sc->sc_dev), phy, reg);
   8576 #endif
   8577 		rv = 0;
   8578 	} else {
   8579 		rv = MDIC_DATA(mdic);
   8580 		if (rv == 0xffff)
   8581 			rv = 0;
   8582 	}
   8583 
   8584 	return rv;
   8585 }
   8586 
   8587 /*
   8588  * wm_gmii_mdic_writereg:	[mii interface function]
   8589  *
   8590  *	Write a PHY register on the GMII.
   8591  */
   8592 static void
   8593 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
   8594 {
   8595 	struct wm_softc *sc = device_private(self);
   8596 	uint32_t mdic = 0;
   8597 	int i;
   8598 
   8599 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8600 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8601 
   8602 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8603 		mdic = CSR_READ(sc, WMREG_MDIC);
   8604 		if (mdic & MDIC_READY)
   8605 			break;
   8606 		delay(50);
   8607 	}
   8608 
   8609 	if ((mdic & MDIC_READY) == 0)
   8610 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8611 		    device_xname(sc->sc_dev), phy, reg);
   8612 	else if (mdic & MDIC_E)
   8613 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8614 		    device_xname(sc->sc_dev), phy, reg);
   8615 }
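
/*
 * A hedged sketch of the MDIC register layout assumed by the two
 * accessors above (see if_wmreg.h for the authoritative masks): the
 * 16-bit data lives in the low bits, the register and PHY addresses
 * and the opcode sit in the fields built by MDIC_REGADD(),
 * MDIC_PHYADD() and MDIC_OP_*, completion is signalled by MDIC_READY
 * and failure by MDIC_E.  Both routines poll MDIC_READY in 50us steps.
 */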
   8616 
   8617 /*
   8618  * wm_gmii_i82544_readreg:	[mii interface function]
   8619  *
   8620  *	Read a PHY register on the GMII.
   8621  */
   8622 static int
   8623 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8624 {
   8625 	struct wm_softc *sc = device_private(self);
   8626 	int rv;
   8627 
   8628 	if (sc->phy.acquire(sc)) {
   8629 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8630 		    __func__);
   8631 		return 0;
   8632 	}
   8633 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8634 	sc->phy.release(sc);
   8635 
   8636 	return rv;
   8637 }
   8638 
   8639 /*
   8640  * wm_gmii_i82544_writereg:	[mii interface function]
   8641  *
   8642  *	Write a PHY register on the GMII.
   8643  */
   8644 static void
   8645 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8646 {
   8647 	struct wm_softc *sc = device_private(self);
   8648 
	if (sc->phy.acquire(sc)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		/* Don't touch the PHY or release a semaphore we don't hold */
		return;
	}
   8653 	wm_gmii_mdic_writereg(self, phy, reg, val);
   8654 	sc->phy.release(sc);
   8655 }
   8656 
   8657 /*
   8658  * wm_gmii_i80003_readreg:	[mii interface function]
   8659  *
 *	Read a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8663  */
   8664 static int
   8665 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8666 {
   8667 	struct wm_softc *sc = device_private(self);
   8668 	int rv;
   8669 
   8670 	if (phy != 1) /* only one PHY on kumeran bus */
   8671 		return 0;
   8672 
   8673 	if (sc->phy.acquire(sc)) {
   8674 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8675 		    __func__);
   8676 		return 0;
   8677 	}
   8678 
   8679 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8680 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8681 		    reg >> GG82563_PAGE_SHIFT);
   8682 	} else {
   8683 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8684 		    reg >> GG82563_PAGE_SHIFT);
   8685 	}
	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   8687 	delay(200);
   8688 	rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8689 	delay(200);
   8690 	sc->phy.release(sc);
   8691 
   8692 	return rv;
   8693 }
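
/*
 * Worked example for the GG82563 paging above (a sketch, assuming the
 * customary GG82563_PAGE_SHIFT == 5 and GG82563_MAX_REG_ADDRESS ==
 * 0x1f): a combined register number of 0xc1 selects page
 * 0xc1 >> 5 == 6 through the page-select register and then accesses
 * offset 0xc1 & 0x1f == 1 within that page.
 */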
   8694 
   8695 /*
   8696  * wm_gmii_i80003_writereg:	[mii interface function]
   8697  *
 *	Write a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8701  */
   8702 static void
   8703 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8704 {
   8705 	struct wm_softc *sc = device_private(self);
   8706 
   8707 	if (phy != 1) /* only one PHY on kumeran bus */
   8708 		return;
   8709 
   8710 	if (sc->phy.acquire(sc)) {
   8711 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8712 		    __func__);
   8713 		return;
   8714 	}
   8715 
   8716 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8717 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8718 		    reg >> GG82563_PAGE_SHIFT);
   8719 	} else {
   8720 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8721 		    reg >> GG82563_PAGE_SHIFT);
   8722 	}
	/* Wait an extra 200us to work around an MDIC ready-bit bug */
   8724 	delay(200);
   8725 	wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8726 	delay(200);
   8727 
   8728 	sc->phy.release(sc);
   8729 }
   8730 
   8731 /*
   8732  * wm_gmii_bm_readreg:	[mii interface function]
   8733  *
 *	Read a PHY register on the BM PHYs (82567 and the like).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8737  */
   8738 static int
   8739 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8740 {
   8741 	struct wm_softc *sc = device_private(self);
   8742 	int rv;
   8743 
   8744 	if (sc->phy.acquire(sc)) {
   8745 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8746 		    __func__);
   8747 		return 0;
   8748 	}
   8749 
   8750 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8751 		if (phy == 1)
   8752 			wm_gmii_mdic_writereg(self, phy,
   8753 			    MII_IGPHY_PAGE_SELECT, reg);
   8754 		else
   8755 			wm_gmii_mdic_writereg(self, phy,
   8756 			    GG82563_PHY_PAGE_SELECT,
   8757 			    reg >> GG82563_PAGE_SHIFT);
   8758 	}
   8759 
   8760 	rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8761 	sc->phy.release(sc);
   8762 	return rv;
   8763 }
   8764 
   8765 /*
   8766  * wm_gmii_bm_writereg:	[mii interface function]
   8767  *
 *	Write a PHY register on the BM PHYs (82567 and the like).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8771  */
   8772 static void
   8773 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8774 {
   8775 	struct wm_softc *sc = device_private(self);
   8776 
   8777 	if (sc->phy.acquire(sc)) {
   8778 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8779 		    __func__);
   8780 		return;
   8781 	}
   8782 
   8783 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8784 		if (phy == 1)
   8785 			wm_gmii_mdic_writereg(self, phy,
   8786 			    MII_IGPHY_PAGE_SELECT, reg);
   8787 		else
   8788 			wm_gmii_mdic_writereg(self, phy,
   8789 			    GG82563_PHY_PAGE_SELECT,
   8790 			    reg >> GG82563_PAGE_SHIFT);
   8791 	}
   8792 
   8793 	wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8794 	sc->phy.release(sc);
   8795 }
   8796 
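/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read (rd != 0) or write a BM PHY wakeup register.  The wakeup
 *	registers live on page 800 and need a special sequence: enable
 *	the wakeup-register window from page 769, select page 800, write
 *	the register number through the address opcode, move the data
 *	through the data opcode, then restore the page-769 enable bits.
 */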
   8797 static void
   8798 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8799 {
   8800 	struct wm_softc *sc = device_private(self);
   8801 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8802 	uint16_t wuce;
   8803 
   8804 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   8805 		device_xname(sc->sc_dev), __func__));
   8806 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8807 	if (sc->sc_type == WM_T_PCH) {
		/* XXX the e1000 driver does nothing here... why? */
   8809 	}
   8810 
   8811 	/* Set page 769 */
   8812 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8813 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8814 
   8815 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
   8816 
   8817 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8818 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG,
   8819 	    wuce | BM_WUC_ENABLE_BIT);
   8820 
   8821 	/* Select page 800 */
   8822 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8823 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8824 
   8825 	/* Write page 800 */
   8826 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8827 
   8828 	if (rd)
   8829 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8830 	else
   8831 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8832 
   8833 	/* Set page 769 */
   8834 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8835 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8836 
   8837 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8838 }
   8839 
   8840 /*
   8841  * wm_gmii_hv_readreg:	[mii interface function]
   8842  *
 *	Read a PHY register on the HV (PCH and newer) PHYs.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8846  */
   8847 static int
   8848 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8849 {
   8850 	struct wm_softc *sc = device_private(self);
   8851 	int rv;
   8852 
   8853 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   8854 		device_xname(sc->sc_dev), __func__));
   8855 	if (sc->phy.acquire(sc)) {
   8856 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8857 		    __func__);
   8858 		return 0;
   8859 	}
   8860 
   8861 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
   8862 	sc->phy.release(sc);
   8863 	return rv;
   8864 }
   8865 
   8866 static int
   8867 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
   8868 {
   8869 	struct wm_softc *sc = device_private(self);
   8870 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8871 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8872 	uint16_t val;
   8873 	int rv;
   8874 
   8875 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8876 	if (sc->sc_phytype == WMPHY_82577) {
   8877 		/* XXX must write */
   8878 	}
   8879 
   8880 	/* Page 800 works differently than the rest so it has its own func */
   8881 	if (page == BM_WUC_PAGE) {
   8882 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8883 		return val;
   8884 	}
   8885 
   8886 	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (which doesn't exist yet).
   8889 	 */
   8890 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8891 		printf("gmii_hv_readreg!!!\n");
   8892 		return 0;
   8893 	}
   8894 
   8895 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8896 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8897 		    page << BME1000_PAGE_SHIFT);
   8898 	}
   8899 
   8900 	rv = wm_gmii_mdic_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8901 	return rv;
   8902 }
   8903 
   8904 /*
   8905  * wm_gmii_hv_writereg:	[mii interface function]
   8906  *
 *	Write a PHY register on the HV (PCH and newer) PHYs.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8910  */
   8911 static void
   8912 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8913 {
   8914 	struct wm_softc *sc = device_private(self);
   8915 
   8916 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   8917 		device_xname(sc->sc_dev), __func__));
   8918 
   8919 	if (sc->phy.acquire(sc)) {
   8920 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8921 		    __func__);
   8922 		return;
   8923 	}
   8924 
   8925 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
   8926 	sc->phy.release(sc);
   8927 }
   8928 
   8929 static void
   8930 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
   8931 {
   8932 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8933 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8934 
   8935 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8936 
   8937 	/* Page 800 works differently than the rest so it has its own func */
   8938 	if (page == BM_WUC_PAGE) {
   8939 		uint16_t tmp;
   8940 
   8941 		tmp = val;
   8942 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8943 		return;
   8944 	}
   8945 
   8946 	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (which doesn't exist yet).
   8949 	 */
   8950 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8951 		printf("gmii_hv_writereg!!!\n");
   8952 		return;
   8953 	}
   8954 
   8955 	/*
   8956 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8957 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8958 	 */
   8959 
   8960 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8961 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8962 		    page << BME1000_PAGE_SHIFT);
   8963 	}
   8964 
   8965 	wm_gmii_mdic_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8966 }
   8967 
   8968 /*
   8969  * wm_gmii_82580_readreg:	[mii interface function]
   8970  *
   8971  *	Read a PHY register on the 82580 and I350.
   8972  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8974  */
   8975 static int
   8976 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8977 {
   8978 	struct wm_softc *sc = device_private(self);
   8979 	int rv;
   8980 
   8981 	if (sc->phy.acquire(sc) != 0) {
   8982 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8983 		    __func__);
   8984 		return 0;
   8985 	}
   8986 
   8987 	rv = wm_gmii_mdic_readreg(self, phy, reg);
   8988 
   8989 	sc->phy.release(sc);
   8990 	return rv;
   8991 }
   8992 
   8993 /*
   8994  * wm_gmii_82580_writereg:	[mii interface function]
   8995  *
   8996  *	Write a PHY register on the 82580 and I350.
   8997  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   8999  */
   9000 static void
   9001 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   9002 {
   9003 	struct wm_softc *sc = device_private(self);
   9004 
   9005 	if (sc->phy.acquire(sc) != 0) {
   9006 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9007 		    __func__);
   9008 		return;
   9009 	}
   9010 
   9011 	wm_gmii_mdic_writereg(self, phy, reg, val);
   9012 
   9013 	sc->phy.release(sc);
   9014 }
   9015 
   9016 /*
   9017  * wm_gmii_gs40g_readreg:	[mii interface function]
   9018  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9022  */
   9023 static int
   9024 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   9025 {
   9026 	struct wm_softc *sc = device_private(self);
   9027 	int page, offset;
   9028 	int rv;
   9029 
   9030 	/* Acquire semaphore */
   9031 	if (sc->phy.acquire(sc)) {
   9032 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9033 		    __func__);
   9034 		return 0;
   9035 	}
   9036 
   9037 	/* Page select */
   9038 	page = reg >> GS40G_PAGE_SHIFT;
   9039 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9040 
   9041 	/* Read reg */
   9042 	offset = reg & GS40G_OFFSET_MASK;
   9043 	rv = wm_gmii_mdic_readreg(self, phy, offset);
   9044 
   9045 	sc->phy.release(sc);
   9046 	return rv;
   9047 }
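
/*
 * Worked example for the GS40G paging above (a sketch, assuming the
 * customary GS40G_PAGE_SHIFT == 16 and GS40G_OFFSET_MASK == 0xffff):
 * a combined register number of 0x80010 selects page
 * 0x80010 >> 16 == 8 and then accesses offset 0x80010 & 0xffff == 0x10
 * on that page.
 */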
   9048 
   9049 /*
   9050  * wm_gmii_gs40g_writereg:	[mii interface function]
   9051  *
   9052  *	Write a PHY register on the I210 and I211.
   9053  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9055  */
   9056 static void
   9057 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   9058 {
   9059 	struct wm_softc *sc = device_private(self);
   9060 	int page, offset;
   9061 
   9062 	/* Acquire semaphore */
   9063 	if (sc->phy.acquire(sc)) {
   9064 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9065 		    __func__);
   9066 		return;
   9067 	}
   9068 
   9069 	/* Page select */
   9070 	page = reg >> GS40G_PAGE_SHIFT;
   9071 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
   9072 
   9073 	/* Write reg */
   9074 	offset = reg & GS40G_OFFSET_MASK;
   9075 	wm_gmii_mdic_writereg(self, phy, offset, val);
   9076 
   9077 	/* Release semaphore */
   9078 	sc->phy.release(sc);
   9079 }
   9080 
   9081 /*
   9082  * wm_gmii_statchg:	[mii interface function]
   9083  *
   9084  *	Callback from MII layer when media changes.
   9085  */
   9086 static void
   9087 wm_gmii_statchg(struct ifnet *ifp)
   9088 {
   9089 	struct wm_softc *sc = ifp->if_softc;
   9090 	struct mii_data *mii = &sc->sc_mii;
   9091 
   9092 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   9093 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9094 	sc->sc_fcrtl &= ~FCRTL_XONE;
   9095 
   9096 	/*
   9097 	 * Get flow control negotiation result.
   9098 	 */
   9099 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   9100 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   9101 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   9102 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   9103 	}
   9104 
   9105 	if (sc->sc_flowflags & IFM_FLOW) {
   9106 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   9107 			sc->sc_ctrl |= CTRL_TFCE;
   9108 			sc->sc_fcrtl |= FCRTL_XONE;
   9109 		}
   9110 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   9111 			sc->sc_ctrl |= CTRL_RFCE;
   9112 	}
   9113 
   9114 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   9115 		DPRINTF(WM_DEBUG_LINK,
   9116 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   9117 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9118 	} else {
   9119 		DPRINTF(WM_DEBUG_LINK,
   9120 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   9121 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9122 	}
   9123 
   9124 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9125 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9126 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   9127 						 : WMREG_FCRTL, sc->sc_fcrtl);
   9128 	if (sc->sc_type == WM_T_80003) {
   9129 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   9130 		case IFM_1000_T:
   9131 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9132 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   9133 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   9134 			break;
   9135 		default:
   9136 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   9137 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   9138 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   9139 			break;
   9140 		}
   9141 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   9142 	}
   9143 }
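
/*
 * Illustrative mapping used in wm_gmii_statchg() above: a negotiated
 * IFM_ETH_TXPAUSE turns on CTRL_TFCE (we may transmit PAUSE frames)
 * and FCRTL_XONE (send XON when the receive FIFO drains), while
 * IFM_ETH_RXPAUSE turns on CTRL_RFCE (honour received PAUSE frames).
 */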
   9144 
   9145 /*
   9146  * wm_kmrn_readreg:
   9147  *
   9148  *	Read a kumeran register
   9149  */
   9150 static int
   9151 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   9152 {
   9153 	int rv;
   9154 
   9155 	if (sc->sc_type == WM_T_80003)
   9156 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9157 	else
   9158 		rv = sc->phy.acquire(sc);
   9159 	if (rv != 0) {
   9160 		aprint_error_dev(sc->sc_dev,
   9161 		    "%s: failed to get semaphore\n", __func__);
   9162 		return 0;
   9163 	}
   9164 
   9165 	rv = wm_kmrn_readreg_locked(sc, reg);
   9166 
   9167 	if (sc->sc_type == WM_T_80003)
   9168 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9169 	else
   9170 		sc->phy.release(sc);
   9171 
   9172 	return rv;
   9173 }
   9174 
   9175 static int
   9176 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
   9177 {
   9178 	int rv;
   9179 
   9180 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9181 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9182 	    KUMCTRLSTA_REN);
   9183 	CSR_WRITE_FLUSH(sc);
   9184 	delay(2);
   9185 
   9186 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   9187 
   9188 	return rv;
   9189 }
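
/*
 * A sketch of the KUMCTRLSTA handshake used above and in the write
 * path below: the register offset goes into the KUMCTRLSTA_OFFSET
 * field, KUMCTRLSTA_REN requests a read (a write instead supplies the
 * value in the low KUMCTRLSTA_MASK bits), and after a 2us pause the
 * result is read back from the same register.
 */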
   9190 
   9191 /*
   9192  * wm_kmrn_writereg:
   9193  *
   9194  *	Write a kumeran register
   9195  */
   9196 static void
   9197 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   9198 {
   9199 	int rv;
   9200 
   9201 	if (sc->sc_type == WM_T_80003)
   9202 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9203 	else
   9204 		rv = sc->phy.acquire(sc);
   9205 	if (rv != 0) {
   9206 		aprint_error_dev(sc->sc_dev,
   9207 		    "%s: failed to get semaphore\n", __func__);
   9208 		return;
   9209 	}
   9210 
   9211 	wm_kmrn_writereg_locked(sc, reg, val);
   9212 
   9213 	if (sc->sc_type == WM_T_80003)
   9214 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   9215 	else
   9216 		sc->phy.release(sc);
   9217 }
   9218 
   9219 static void
   9220 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
   9221 {
   9222 
   9223 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   9224 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   9225 	    (val & KUMCTRLSTA_MASK));
   9226 }
   9227 
   9228 /* SGMII related */
   9229 
   9230 /*
   9231  * wm_sgmii_uses_mdio
   9232  *
   9233  * Check whether the transaction is to the internal PHY or the external
   9234  * MDIO interface. Return true if it's MDIO.
   9235  */
   9236 static bool
   9237 wm_sgmii_uses_mdio(struct wm_softc *sc)
   9238 {
   9239 	uint32_t reg;
   9240 	bool ismdio = false;
   9241 
   9242 	switch (sc->sc_type) {
   9243 	case WM_T_82575:
   9244 	case WM_T_82576:
   9245 		reg = CSR_READ(sc, WMREG_MDIC);
   9246 		ismdio = ((reg & MDIC_DEST) != 0);
   9247 		break;
   9248 	case WM_T_82580:
   9249 	case WM_T_I350:
   9250 	case WM_T_I354:
   9251 	case WM_T_I210:
   9252 	case WM_T_I211:
   9253 		reg = CSR_READ(sc, WMREG_MDICNFG);
   9254 		ismdio = ((reg & MDICNFG_DEST) != 0);
   9255 		break;
   9256 	default:
   9257 		break;
   9258 	}
   9259 
   9260 	return ismdio;
   9261 }
   9262 
   9263 /*
   9264  * wm_sgmii_readreg:	[mii interface function]
   9265  *
   9266  *	Read a PHY register on the SGMII
   9267  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9269  */
   9270 static int
   9271 wm_sgmii_readreg(device_t self, int phy, int reg)
   9272 {
   9273 	struct wm_softc *sc = device_private(self);
   9274 	uint32_t i2ccmd;
   9275 	int i, rv;
   9276 
   9277 	if (sc->phy.acquire(sc)) {
   9278 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9279 		    __func__);
   9280 		return 0;
   9281 	}
   9282 
   9283 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9284 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9285 	    | I2CCMD_OPCODE_READ;
   9286 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9287 
   9288 	/* Poll the ready bit */
   9289 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9290 		delay(50);
   9291 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9292 		if (i2ccmd & I2CCMD_READY)
   9293 			break;
   9294 	}
   9295 	if ((i2ccmd & I2CCMD_READY) == 0)
   9296 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   9297 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9298 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9299 
   9300 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   9301 
   9302 	sc->phy.release(sc);
   9303 	return rv;
   9304 }
   9305 
   9306 /*
   9307  * wm_sgmii_writereg:	[mii interface function]
   9308  *
   9309  *	Write a PHY register on the SGMII.
   9310  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
   9312  */
   9313 static void
   9314 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   9315 {
   9316 	struct wm_softc *sc = device_private(self);
   9317 	uint32_t i2ccmd;
   9318 	int i;
   9319 	int val_swapped;
   9320 
   9321 	if (sc->phy.acquire(sc) != 0) {
   9322 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   9323 		    __func__);
   9324 		return;
   9325 	}
   9326 	/* Swap the data bytes for the I2C interface */
   9327 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   9328 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   9329 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   9330 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   9331 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9332 
   9333 	/* Poll the ready bit */
   9334 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9335 		delay(50);
   9336 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9337 		if (i2ccmd & I2CCMD_READY)
   9338 			break;
   9339 	}
   9340 	if ((i2ccmd & I2CCMD_READY) == 0)
   9341 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9342 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9343 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9344 
   9345 	sc->phy.release(sc);
   9346 }
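
/*
 * Illustrative byte swap from the two I2C routines above (a sketch,
 * not driver code): a PHY's 16-bit value of 0x1234 travels over the
 * I2C interface as 0x3412, so both the read and the write path swap
 * the two bytes with ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00).
 */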
   9347 
   9348 /* TBI related */
   9349 
   9350 /*
   9351  * wm_tbi_mediainit:
   9352  *
   9353  *	Initialize media for use on 1000BASE-X devices.
   9354  */
   9355 static void
   9356 wm_tbi_mediainit(struct wm_softc *sc)
   9357 {
   9358 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9359 	const char *sep = "";
   9360 
   9361 	if (sc->sc_type < WM_T_82543)
   9362 		sc->sc_tipg = TIPG_WM_DFLT;
   9363 	else
   9364 		sc->sc_tipg = TIPG_LG_DFLT;
   9365 
   9366 	sc->sc_tbi_serdes_anegticks = 5;
   9367 
   9368 	/* Initialize our media structures */
   9369 	sc->sc_mii.mii_ifp = ifp;
   9370 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9371 
   9372 	if ((sc->sc_type >= WM_T_82575)
   9373 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9374 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9375 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9376 	else
   9377 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9378 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9379 
   9380 	/*
   9381 	 * SWD Pins:
   9382 	 *
   9383 	 *	0 = Link LED (output)
   9384 	 *	1 = Loss Of Signal (input)
   9385 	 */
   9386 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9387 
   9388 	/* XXX Perhaps this is only for TBI */
   9389 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9390 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9391 
   9392 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9393 		sc->sc_ctrl &= ~CTRL_LRST;
   9394 
   9395 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9396 
   9397 #define	ADD(ss, mm, dd)							\
   9398 do {									\
   9399 	aprint_normal("%s%s", sep, ss);					\
   9400 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
   9401 	sep = ", ";							\
   9402 } while (/*CONSTCOND*/0)
   9403 
   9404 	aprint_normal_dev(sc->sc_dev, "");
   9405 
   9406 	/* Only 82545 is LX */
   9407 	if (sc->sc_type == WM_T_82545) {
   9408 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9409 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
   9410 	} else {
   9411 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9412 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
   9413 	}
   9414 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
   9415 	aprint_normal("\n");
   9416 
   9417 #undef ADD
   9418 
   9419 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9420 }
   9421 
   9422 /*
   9423  * wm_tbi_mediachange:	[ifmedia interface function]
   9424  *
   9425  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9426  */
   9427 static int
   9428 wm_tbi_mediachange(struct ifnet *ifp)
   9429 {
   9430 	struct wm_softc *sc = ifp->if_softc;
   9431 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9432 	uint32_t status;
   9433 	int i;
   9434 
   9435 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9436 		/* XXX need some work for >= 82571 and < 82575 */
   9437 		if (sc->sc_type < WM_T_82575)
   9438 			return 0;
   9439 	}
   9440 
   9441 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9442 	    || (sc->sc_type >= WM_T_82575))
   9443 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9444 
   9445 	sc->sc_ctrl &= ~CTRL_LRST;
   9446 	sc->sc_txcw = TXCW_ANE;
   9447 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9448 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9449 	else if (ife->ifm_media & IFM_FDX)
   9450 		sc->sc_txcw |= TXCW_FD;
   9451 	else
   9452 		sc->sc_txcw |= TXCW_HD;
   9453 
   9454 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9455 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9456 
   9457 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9458 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9459 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9460 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9461 	CSR_WRITE_FLUSH(sc);
   9462 	delay(1000);
   9463 
   9464 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9465 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9466 
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
	 * set if the optics detect a signal; it reads as 0 if they don't.
	 * Older chips lack the signal-detect pin, so assume a signal is
	 * present.
	 */
   9471 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9472 		/* Have signal; wait for the link to come up. */
   9473 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9474 			delay(10000);
   9475 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9476 				break;
   9477 		}
   9478 
   9479 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9480 			    device_xname(sc->sc_dev),i));
   9481 
   9482 		status = CSR_READ(sc, WMREG_STATUS);
   9483 		DPRINTF(WM_DEBUG_LINK,
   9484 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9485 			device_xname(sc->sc_dev),status, STATUS_LU));
   9486 		if (status & STATUS_LU) {
   9487 			/* Link is up. */
   9488 			DPRINTF(WM_DEBUG_LINK,
   9489 			    ("%s: LINK: set media -> link up %s\n",
   9490 			    device_xname(sc->sc_dev),
   9491 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9492 
   9493 			/*
   9494 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9495 			 * so we should update sc->sc_ctrl
   9496 			 */
   9497 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9498 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9499 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9500 			if (status & STATUS_FD)
   9501 				sc->sc_tctl |=
   9502 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9503 			else
   9504 				sc->sc_tctl |=
   9505 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9506 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9507 				sc->sc_fcrtl |= FCRTL_XONE;
   9508 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9509 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9510 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9511 				      sc->sc_fcrtl);
   9512 			sc->sc_tbi_linkup = 1;
   9513 		} else {
   9514 			if (i == WM_LINKUP_TIMEOUT)
   9515 				wm_check_for_link(sc);
   9516 			/* Link is down. */
   9517 			DPRINTF(WM_DEBUG_LINK,
   9518 			    ("%s: LINK: set media -> link down\n",
   9519 			    device_xname(sc->sc_dev)));
   9520 			sc->sc_tbi_linkup = 0;
   9521 		}
   9522 	} else {
   9523 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9524 		    device_xname(sc->sc_dev)));
   9525 		sc->sc_tbi_linkup = 0;
   9526 	}
   9527 
   9528 	wm_tbi_serdes_set_linkled(sc);
   9529 
   9530 	return 0;
   9531 }
   9532 
   9533 /*
   9534  * wm_tbi_mediastatus:	[ifmedia interface function]
   9535  *
   9536  *	Get the current interface media status on a 1000BASE-X device.
   9537  */
   9538 static void
   9539 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9540 {
   9541 	struct wm_softc *sc = ifp->if_softc;
   9542 	uint32_t ctrl, status;
   9543 
   9544 	ifmr->ifm_status = IFM_AVALID;
   9545 	ifmr->ifm_active = IFM_ETHER;
   9546 
   9547 	status = CSR_READ(sc, WMREG_STATUS);
   9548 	if ((status & STATUS_LU) == 0) {
   9549 		ifmr->ifm_active |= IFM_NONE;
   9550 		return;
   9551 	}
   9552 
   9553 	ifmr->ifm_status |= IFM_ACTIVE;
   9554 	/* Only 82545 is LX */
   9555 	if (sc->sc_type == WM_T_82545)
   9556 		ifmr->ifm_active |= IFM_1000_LX;
   9557 	else
   9558 		ifmr->ifm_active |= IFM_1000_SX;
   9559 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9560 		ifmr->ifm_active |= IFM_FDX;
   9561 	else
   9562 		ifmr->ifm_active |= IFM_HDX;
   9563 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9564 	if (ctrl & CTRL_RFCE)
   9565 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9566 	if (ctrl & CTRL_TFCE)
   9567 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9568 }
   9569 
   9570 /* XXX TBI only */
   9571 static int
   9572 wm_check_for_link(struct wm_softc *sc)
   9573 {
   9574 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9575 	uint32_t rxcw;
   9576 	uint32_t ctrl;
   9577 	uint32_t status;
   9578 	uint32_t sig;
   9579 
   9580 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9581 		/* XXX need some work for >= 82571 */
   9582 		if (sc->sc_type >= WM_T_82571) {
   9583 			sc->sc_tbi_linkup = 1;
   9584 			return 0;
   9585 		}
   9586 	}
   9587 
   9588 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9589 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9590 	status = CSR_READ(sc, WMREG_STATUS);
   9591 
   9592 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9593 
   9594 	DPRINTF(WM_DEBUG_LINK,
   9595 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9596 		device_xname(sc->sc_dev), __func__,
   9597 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9598 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
   9599 
   9600 	/*
   9601 	 * SWDPIN   LU RXCW
   9602 	 *      0    0    0
   9603 	 *      0    0    1	(should not happen)
   9604 	 *      0    1    0	(should not happen)
   9605 	 *      0    1    1	(should not happen)
   9606 	 *      1    0    0	Disable autonego and force linkup
   9607 	 *      1    0    1	got /C/ but not linkup yet
   9608 	 *      1    1    0	(linkup)
   9609 	 *      1    1    1	If IFM_AUTO, back to autonego
   9610 	 *
   9611 	 */
   9612 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9613 	    && ((status & STATUS_LU) == 0)
   9614 	    && ((rxcw & RXCW_C) == 0)) {
   9615 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9616 			__func__));
   9617 		sc->sc_tbi_linkup = 0;
   9618 		/* Disable auto-negotiation in the TXCW register */
   9619 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9620 
   9621 		/*
   9622 		 * Force link-up and also force full-duplex.
   9623 		 *
		 * NOTE: CTRL's TFCE and RFCE bits were updated automatically,
		 * so we should update sc->sc_ctrl
   9626 		 */
   9627 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9628 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9629 	} else if (((status & STATUS_LU) != 0)
   9630 	    && ((rxcw & RXCW_C) != 0)
   9631 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9632 		sc->sc_tbi_linkup = 1;
   9633 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9634 			__func__));
   9635 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9636 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9637 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9638 	    && ((rxcw & RXCW_C) != 0)) {
   9639 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9640 	} else {
   9641 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9642 			status));
   9643 	}
   9644 
   9645 	return 0;
   9646 }
   9647 
   9648 /*
   9649  * wm_tbi_tick:
   9650  *
   9651  *	Check the link on TBI devices.
   9652  *	This function acts as mii_tick().
   9653  */
   9654 static void
   9655 wm_tbi_tick(struct wm_softc *sc)
   9656 {
   9657 	struct mii_data *mii = &sc->sc_mii;
   9658 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9659 	uint32_t status;
   9660 
   9661 	KASSERT(WM_CORE_LOCKED(sc));
   9662 
   9663 	status = CSR_READ(sc, WMREG_STATUS);
   9664 
   9665 	/* XXX is this needed? */
   9666 	(void)CSR_READ(sc, WMREG_RXCW);
   9667 	(void)CSR_READ(sc, WMREG_CTRL);
   9668 
   9669 	/* set link status */
   9670 	if ((status & STATUS_LU) == 0) {
   9671 		DPRINTF(WM_DEBUG_LINK,
   9672 		    ("%s: LINK: checklink -> down\n",
   9673 			device_xname(sc->sc_dev)));
   9674 		sc->sc_tbi_linkup = 0;
   9675 	} else if (sc->sc_tbi_linkup == 0) {
   9676 		DPRINTF(WM_DEBUG_LINK,
   9677 		    ("%s: LINK: checklink -> up %s\n",
   9678 			device_xname(sc->sc_dev),
   9679 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9680 		sc->sc_tbi_linkup = 1;
   9681 		sc->sc_tbi_serdes_ticks = 0;
   9682 	}
   9683 
   9684 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9685 		goto setled;
   9686 
   9687 	if ((status & STATUS_LU) == 0) {
   9688 		sc->sc_tbi_linkup = 0;
   9689 		/* If the timer expired, retry autonegotiation */
   9690 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9691 		    && (++sc->sc_tbi_serdes_ticks
   9692 			>= sc->sc_tbi_serdes_anegticks)) {
   9693 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9694 			sc->sc_tbi_serdes_ticks = 0;
   9695 			/*
   9696 			 * Reset the link, and let autonegotiation do
   9697 			 * its thing
   9698 			 */
   9699 			sc->sc_ctrl |= CTRL_LRST;
   9700 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9701 			CSR_WRITE_FLUSH(sc);
   9702 			delay(1000);
   9703 			sc->sc_ctrl &= ~CTRL_LRST;
   9704 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9705 			CSR_WRITE_FLUSH(sc);
   9706 			delay(1000);
   9707 			CSR_WRITE(sc, WMREG_TXCW,
   9708 			    sc->sc_txcw & ~TXCW_ANE);
   9709 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9710 		}
   9711 	}
   9712 
   9713 setled:
   9714 	wm_tbi_serdes_set_linkled(sc);
   9715 }
   9716 
   9717 /* SERDES related */
   9718 static void
   9719 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9720 {
   9721 	uint32_t reg;
   9722 
   9723 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9724 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9725 		return;
   9726 
   9727 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9728 	reg |= PCS_CFG_PCS_EN;
   9729 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9730 
   9731 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9732 	reg &= ~CTRL_EXT_SWDPIN(3);
   9733 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9734 	CSR_WRITE_FLUSH(sc);
   9735 }
   9736 
   9737 static int
   9738 wm_serdes_mediachange(struct ifnet *ifp)
   9739 {
   9740 	struct wm_softc *sc = ifp->if_softc;
   9741 	bool pcs_autoneg = true; /* XXX */
   9742 	uint32_t ctrl_ext, pcs_lctl, reg;
   9743 
   9744 	/* XXX Currently, this function is not called on 8257[12] */
   9745 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9746 	    || (sc->sc_type >= WM_T_82575))
   9747 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9748 
   9749 	wm_serdes_power_up_link_82575(sc);
   9750 
   9751 	sc->sc_ctrl |= CTRL_SLU;
   9752 
   9753 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9754 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9755 
   9756 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9757 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
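	/* Pick PCS autonegotiation vs. forced link from the strapped link mode. */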
   9758 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9759 	case CTRL_EXT_LINK_MODE_SGMII:
   9760 		pcs_autoneg = true;
   9761 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9762 		break;
   9763 	case CTRL_EXT_LINK_MODE_1000KX:
   9764 		pcs_autoneg = false;
   9765 		/* FALLTHROUGH */
   9766 	default:
   9767 		if ((sc->sc_type == WM_T_82575)
   9768 		    || (sc->sc_type == WM_T_82576)) {
   9769 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9770 				pcs_autoneg = false;
   9771 		}
   9772 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9773 		    | CTRL_FRCFDX;
   9774 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9775 	}
   9776 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9777 
   9778 	if (pcs_autoneg) {
   9779 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9780 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9781 
   9782 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9783 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9784 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9785 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9786 	} else
   9787 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9788 
   9789 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9790 
   9791 
   9792 	return 0;
   9793 }
   9794 
   9795 static void
   9796 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9797 {
   9798 	struct wm_softc *sc = ifp->if_softc;
   9799 	struct mii_data *mii = &sc->sc_mii;
   9800 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9801 	uint32_t pcs_adv, pcs_lpab, reg;
   9802 
   9803 	ifmr->ifm_status = IFM_AVALID;
   9804 	ifmr->ifm_active = IFM_ETHER;
   9805 
   9806 	/* Check PCS */
   9807 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9808 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9809 		ifmr->ifm_active |= IFM_NONE;
   9810 		sc->sc_tbi_linkup = 0;
   9811 		goto setled;
   9812 	}
   9813 
   9814 	sc->sc_tbi_linkup = 1;
   9815 	ifmr->ifm_status |= IFM_ACTIVE;
   9816 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9817 	if ((reg & PCS_LSTS_FDX) != 0)
   9818 		ifmr->ifm_active |= IFM_FDX;
   9819 	else
   9820 		ifmr->ifm_active |= IFM_HDX;
   9821 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9822 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9823 		/* Check flow */
   9824 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9825 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9826 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
   9827 			goto setled;
   9828 		}
   9829 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9830 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   9831 		DPRINTF(WM_DEBUG_LINK,
   9832 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
   9833 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9834 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9835 			mii->mii_media_active |= IFM_FLOW
   9836 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9837 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9838 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9839 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9840 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9841 			mii->mii_media_active |= IFM_FLOW
   9842 			    | IFM_ETH_TXPAUSE;
   9843 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9844 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9845 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9846 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9847 			mii->mii_media_active |= IFM_FLOW
   9848 			    | IFM_ETH_RXPAUSE;
   9849 		} else {
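			/*
			 * No usable pause-bit combination was advertised
			 * by both sides; leave flow control disabled.
			 */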
   9850 		}
   9851 	}
   9852 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9853 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9854 setled:
   9855 	wm_tbi_serdes_set_linkled(sc);
   9856 }
   9857 
   9858 /*
   9859  * wm_serdes_tick:
   9860  *
   9861  *	Check the link on serdes devices.
   9862  */
   9863 static void
   9864 wm_serdes_tick(struct wm_softc *sc)
   9865 {
   9866 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9867 	struct mii_data *mii = &sc->sc_mii;
   9868 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9869 	uint32_t reg;
   9870 
   9871 	KASSERT(WM_CORE_LOCKED(sc));
   9872 
   9873 	mii->mii_media_status = IFM_AVALID;
   9874 	mii->mii_media_active = IFM_ETHER;
   9875 
   9876 	/* Check PCS */
   9877 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9878 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9879 		mii->mii_media_status |= IFM_ACTIVE;
   9880 		sc->sc_tbi_linkup = 1;
   9881 		sc->sc_tbi_serdes_ticks = 0;
   9882 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9883 		if ((reg & PCS_LSTS_FDX) != 0)
   9884 			mii->mii_media_active |= IFM_FDX;
   9885 		else
   9886 			mii->mii_media_active |= IFM_HDX;
   9887 	} else {
    9888 		mii->mii_media_active |= IFM_NONE;
    9889 		sc->sc_tbi_linkup = 0;
    9890 		/* If the timer expired, retry autonegotiation */
   9891 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9892 		    && (++sc->sc_tbi_serdes_ticks
   9893 			>= sc->sc_tbi_serdes_anegticks)) {
   9894 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9895 			sc->sc_tbi_serdes_ticks = 0;
   9896 			/* XXX */
   9897 			wm_serdes_mediachange(ifp);
   9898 		}
   9899 	}
   9900 
   9901 	wm_tbi_serdes_set_linkled(sc);
   9902 }
   9903 
   9904 /* SFP related */
   9905 
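/*
 * wm_sfp_read_data_byte:
 *
 *	Read one byte from the SFP module's EEPROM via the I2CCMD register.
 */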
   9906 static int
   9907 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9908 {
   9909 	uint32_t i2ccmd;
   9910 	int i;
   9911 
   9912 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9913 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9914 
   9915 	/* Poll the ready bit */
   9916 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9917 		delay(50);
   9918 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9919 		if (i2ccmd & I2CCMD_READY)
   9920 			break;
   9921 	}
   9922 	if ((i2ccmd & I2CCMD_READY) == 0)
   9923 		return -1;
   9924 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9925 		return -1;
   9926 
   9927 	*data = i2ccmd & 0x00ff;
   9928 
   9929 	return 0;
   9930 }
   9931 
   9932 static uint32_t
   9933 wm_sfp_get_media_type(struct wm_softc *sc)
   9934 {
   9935 	uint32_t ctrl_ext;
   9936 	uint8_t val = 0;
   9937 	int timeout = 3;
   9938 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9939 	int rv = -1;
   9940 
   9941 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9942 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9943 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9944 	CSR_WRITE_FLUSH(sc);
   9945 
   9946 	/* Read SFP module data */
   9947 	while (timeout) {
   9948 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9949 		if (rv == 0)
   9950 			break;
   9951 		delay(100*1000); /* XXX too big */
   9952 		timeout--;
   9953 	}
   9954 	if (rv != 0)
   9955 		goto out;
   9956 	switch (val) {
   9957 	case SFF_SFP_ID_SFF:
   9958 		aprint_normal_dev(sc->sc_dev,
   9959 		    "Module/Connector soldered to board\n");
   9960 		break;
   9961 	case SFF_SFP_ID_SFP:
   9962 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9963 		break;
   9964 	case SFF_SFP_ID_UNKNOWN:
   9965 		goto out;
   9966 	default:
   9967 		break;
   9968 	}
   9969 
   9970 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9971 	if (rv != 0) {
   9972 		goto out;
   9973 	}
   9974 
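	/*
	 * Map the SFF ethernet compliance flags to a media type:
	 * 1000BASE-SX/LX modules run as SERDES, while 1000BASE-T and
	 * 100BASE-FX modules are driven through SGMII.
	 */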
   9975 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9976 		mediatype = WM_MEDIATYPE_SERDES;
    9977 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
   9978 		sc->sc_flags |= WM_F_SGMII;
   9979 		mediatype = WM_MEDIATYPE_COPPER;
    9980 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
   9981 		sc->sc_flags |= WM_F_SGMII;
   9982 		mediatype = WM_MEDIATYPE_SERDES;
   9983 	}
   9984 
   9985 out:
   9986 	/* Restore I2C interface setting */
   9987 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9988 
   9989 	return mediatype;
   9990 }
   9991 /*
   9992  * NVM related.
   9993  * Microwire, SPI (w/wo EERD) and Flash.
   9994  */
   9995 
   9996 /* Both spi and uwire */
   9997 
   9998 /*
   9999  * wm_eeprom_sendbits:
   10000  *
   10001  *	Send a series of bits to the EEPROM.
   10002  */
   10003 static void
   10004 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   10005 {
   10006 	uint32_t reg;
   10007 	int x;
   10008 
   10009 	reg = CSR_READ(sc, WMREG_EECD);
   10010 
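	/* Clock each bit out, MSB first: present DI, pulse SK high, then low. */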
   10011 	for (x = nbits; x > 0; x--) {
   10012 		if (bits & (1U << (x - 1)))
   10013 			reg |= EECD_DI;
   10014 		else
   10015 			reg &= ~EECD_DI;
   10016 		CSR_WRITE(sc, WMREG_EECD, reg);
   10017 		CSR_WRITE_FLUSH(sc);
   10018 		delay(2);
   10019 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10020 		CSR_WRITE_FLUSH(sc);
   10021 		delay(2);
   10022 		CSR_WRITE(sc, WMREG_EECD, reg);
   10023 		CSR_WRITE_FLUSH(sc);
   10024 		delay(2);
   10025 	}
   10026 }
   10027 
   10028 /*
   10029  * wm_eeprom_recvbits:
   10030  *
   10031  *	Receive a series of bits from the EEPROM.
   10032  */
   10033 static void
   10034 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   10035 {
   10036 	uint32_t reg, val;
   10037 	int x;
   10038 
   10039 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   10040 
   10041 	val = 0;
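	/* Clock each bit in, MSB first: raise SK, sample DO, then lower SK. */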
   10042 	for (x = nbits; x > 0; x--) {
   10043 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   10044 		CSR_WRITE_FLUSH(sc);
   10045 		delay(2);
   10046 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   10047 			val |= (1U << (x - 1));
   10048 		CSR_WRITE(sc, WMREG_EECD, reg);
   10049 		CSR_WRITE_FLUSH(sc);
   10050 		delay(2);
   10051 	}
   10052 	*valp = val;
   10053 }
   10054 
   10055 /* Microwire */
   10056 
   10057 /*
   10058  * wm_nvm_read_uwire:
   10059  *
   10060  *	Read a word from the EEPROM using the MicroWire protocol.
   10061  */
   10062 static int
   10063 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10064 {
   10065 	uint32_t reg, val;
   10066 	int i;
   10067 
   10068 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10069 		device_xname(sc->sc_dev), __func__));
   10070 
   10071 	for (i = 0; i < wordcnt; i++) {
   10072 		/* Clear SK and DI. */
   10073 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   10074 		CSR_WRITE(sc, WMREG_EECD, reg);
   10075 
   10076 		/*
   10077 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   10078 		 * and Xen.
   10079 		 *
    10080 		 * We use this workaround only for the 82540 because qemu's
    10081 		 * e1000 acts as an 82540.
   10082 		 */
   10083 		if (sc->sc_type == WM_T_82540) {
   10084 			reg |= EECD_SK;
   10085 			CSR_WRITE(sc, WMREG_EECD, reg);
   10086 			reg &= ~EECD_SK;
   10087 			CSR_WRITE(sc, WMREG_EECD, reg);
   10088 			CSR_WRITE_FLUSH(sc);
   10089 			delay(2);
   10090 		}
   10091 		/* XXX: end of workaround */
   10092 
   10093 		/* Set CHIP SELECT. */
   10094 		reg |= EECD_CS;
   10095 		CSR_WRITE(sc, WMREG_EECD, reg);
   10096 		CSR_WRITE_FLUSH(sc);
   10097 		delay(2);
   10098 
   10099 		/* Shift in the READ command. */
   10100 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   10101 
   10102 		/* Shift in address. */
   10103 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   10104 
   10105 		/* Shift out the data. */
   10106 		wm_eeprom_recvbits(sc, &val, 16);
   10107 		data[i] = val & 0xffff;
   10108 
   10109 		/* Clear CHIP SELECT. */
   10110 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   10111 		CSR_WRITE(sc, WMREG_EECD, reg);
   10112 		CSR_WRITE_FLUSH(sc);
   10113 		delay(2);
   10114 	}
   10115 
   10116 	return 0;
   10117 }
   10118 
   10119 /* SPI */
   10120 
   10121 /*
   10122  * Set SPI and FLASH related information from the EECD register.
   10123  * For 82541 and 82547, the word size is taken from EEPROM.
   10124  */
   10125 static int
   10126 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   10127 {
   10128 	int size;
   10129 	uint32_t reg;
   10130 	uint16_t data;
   10131 
   10132 	reg = CSR_READ(sc, WMREG_EECD);
   10133 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   10134 
   10135 	/* Read the size of NVM from EECD by default */
   10136 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10137 	switch (sc->sc_type) {
   10138 	case WM_T_82541:
   10139 	case WM_T_82541_2:
   10140 	case WM_T_82547:
   10141 	case WM_T_82547_2:
   10142 		/* Set dummy value to access EEPROM */
   10143 		sc->sc_nvm_wordsize = 64;
   10144 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   10145 		reg = data;
   10146 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   10147 		if (size == 0)
   10148 			size = 6; /* 64 word size */
   10149 		else
   10150 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   10151 		break;
   10152 	case WM_T_80003:
   10153 	case WM_T_82571:
   10154 	case WM_T_82572:
   10155 	case WM_T_82573: /* SPI case */
   10156 	case WM_T_82574: /* SPI case */
   10157 	case WM_T_82583: /* SPI case */
   10158 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10159 		if (size > 14)
   10160 			size = 14;
   10161 		break;
   10162 	case WM_T_82575:
   10163 	case WM_T_82576:
   10164 	case WM_T_82580:
   10165 	case WM_T_I350:
   10166 	case WM_T_I354:
   10167 	case WM_T_I210:
   10168 	case WM_T_I211:
   10169 		size += NVM_WORD_SIZE_BASE_SHIFT;
   10170 		if (size > 15)
   10171 			size = 15;
   10172 		break;
   10173 	default:
   10174 		aprint_error_dev(sc->sc_dev,
   10175 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   10176 		return -1;
   10177 		break;
   10178 	}
   10179 
   10180 	sc->sc_nvm_wordsize = 1 << size;
   10181 
   10182 	return 0;
   10183 }
   10184 
   10185 /*
   10186  * wm_nvm_ready_spi:
   10187  *
   10188  *	Wait for a SPI EEPROM to be ready for commands.
   10189  */
   10190 static int
   10191 wm_nvm_ready_spi(struct wm_softc *sc)
   10192 {
   10193 	uint32_t val;
   10194 	int usec;
   10195 
   10196 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10197 		device_xname(sc->sc_dev), __func__));
   10198 
   10199 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   10200 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   10201 		wm_eeprom_recvbits(sc, &val, 8);
   10202 		if ((val & SPI_SR_RDY) == 0)
   10203 			break;
   10204 	}
   10205 	if (usec >= SPI_MAX_RETRIES) {
    10206 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   10207 		return 1;
   10208 	}
   10209 	return 0;
   10210 }
   10211 
   10212 /*
   10213  * wm_nvm_read_spi:
   10214  *
    10215  *	Read a word from the EEPROM using the SPI protocol.
   10216  */
   10217 static int
   10218 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10219 {
   10220 	uint32_t reg, val;
   10221 	int i;
   10222 	uint8_t opc;
   10223 
   10224 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10225 		device_xname(sc->sc_dev), __func__));
   10226 
   10227 	/* Clear SK and CS. */
   10228 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   10229 	CSR_WRITE(sc, WMREG_EECD, reg);
   10230 	CSR_WRITE_FLUSH(sc);
   10231 	delay(2);
   10232 
   10233 	if (wm_nvm_ready_spi(sc))
   10234 		return 1;
   10235 
   10236 	/* Toggle CS to flush commands. */
   10237 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   10238 	CSR_WRITE_FLUSH(sc);
   10239 	delay(2);
   10240 	CSR_WRITE(sc, WMREG_EECD, reg);
   10241 	CSR_WRITE_FLUSH(sc);
   10242 	delay(2);
   10243 
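	/*
	 * Small SPI EEPROMs with 8-bit addressing carry the ninth address
	 * bit (A8) in the opcode, so set it for words at 128 and above.
	 */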
   10244 	opc = SPI_OPC_READ;
   10245 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   10246 		opc |= SPI_OPC_A8;
   10247 
   10248 	wm_eeprom_sendbits(sc, opc, 8);
   10249 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   10250 
   10251 	for (i = 0; i < wordcnt; i++) {
   10252 		wm_eeprom_recvbits(sc, &val, 16);
   10253 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   10254 	}
   10255 
   10256 	/* Raise CS and clear SK. */
   10257 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   10258 	CSR_WRITE(sc, WMREG_EECD, reg);
   10259 	CSR_WRITE_FLUSH(sc);
   10260 	delay(2);
   10261 
   10262 	return 0;
   10263 }
   10264 
   10265 /* Using with EERD */
   10266 
   10267 static int
   10268 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   10269 {
   10270 	uint32_t attempts = 100000;
   10271 	uint32_t i, reg = 0;
   10272 	int32_t done = -1;
   10273 
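	/* 'rw' is the register offset to poll: WMREG_EERD, or EEWR for writes. */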
   10274 	for (i = 0; i < attempts; i++) {
   10275 		reg = CSR_READ(sc, rw);
   10276 
   10277 		if (reg & EERD_DONE) {
   10278 			done = 0;
   10279 			break;
   10280 		}
   10281 		delay(5);
   10282 	}
   10283 
   10284 	return done;
   10285 }
   10286 
   10287 static int
   10288 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   10289     uint16_t *data)
   10290 {
   10291 	int i, eerd = 0;
   10292 	int error = 0;
   10293 
   10294 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10295 		device_xname(sc->sc_dev), __func__));
   10296 
   10297 	for (i = 0; i < wordcnt; i++) {
   10298 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   10299 
   10300 		CSR_WRITE(sc, WMREG_EERD, eerd);
   10301 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   10302 		if (error != 0)
   10303 			break;
   10304 
   10305 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   10306 	}
   10307 
   10308 	return error;
   10309 }
   10310 
   10311 /* Flash */
   10312 
   10313 static int
   10314 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   10315 {
   10316 	uint32_t eecd;
   10317 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   10318 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   10319 	uint8_t sig_byte = 0;
   10320 
   10321 	switch (sc->sc_type) {
   10322 	case WM_T_PCH_SPT:
   10323 		/*
   10324 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
   10325 		 * sector valid bits from the NVM.
   10326 		 */
   10327 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
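		/* NVMVS 0 and 1 mean no valid bank; 2 and 3 select bank 0 or 1 */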
   10328 		if ((*bank == 0) || (*bank == 1)) {
   10329 			aprint_error_dev(sc->sc_dev,
   10330 			    "%s: no valid NVM bank present (%u)\n", __func__,
   10331 				*bank);
   10332 			return -1;
   10333 		} else {
   10334 			*bank = *bank - 2;
   10335 			return 0;
   10336 		}
   10337 	case WM_T_ICH8:
   10338 	case WM_T_ICH9:
   10339 		eecd = CSR_READ(sc, WMREG_EECD);
   10340 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   10341 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   10342 			return 0;
   10343 		}
   10344 		/* FALLTHROUGH */
   10345 	default:
   10346 		/* Default to 0 */
   10347 		*bank = 0;
   10348 
   10349 		/* Check bank 0 */
   10350 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   10351 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10352 			*bank = 0;
   10353 			return 0;
   10354 		}
   10355 
   10356 		/* Check bank 1 */
   10357 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   10358 		    &sig_byte);
   10359 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   10360 			*bank = 1;
   10361 			return 0;
   10362 		}
   10363 	}
   10364 
   10365 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   10366 		device_xname(sc->sc_dev)));
   10367 	return -1;
   10368 }
   10369 
   10370 /******************************************************************************
   10371  * This function does initial flash setup so that a new read/write/erase cycle
   10372  * can be started.
   10373  *
   10374  * sc - The pointer to the hw structure
   10375  ****************************************************************************/
   10376 static int32_t
   10377 wm_ich8_cycle_init(struct wm_softc *sc)
   10378 {
   10379 	uint16_t hsfsts;
   10380 	int32_t error = 1;
   10381 	int32_t i     = 0;
   10382 
   10383 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10384 
    10385 	/* Check the Flash Descriptor Valid bit in HW status */
   10386 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10387 		return error;
   10388 	}
   10389 
   10390 	/* Clear FCERR in Hw status by writing 1 */
   10391 	/* Clear DAEL in Hw status by writing a 1 */
   10392 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10393 
   10394 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10395 
    10396 	/*
    10397 	 * To start a new cycle we would want either a hardware
    10398 	 * SPI-cycle-in-progress bit to check against, or an FDONE bit
    10399 	 * that the hardware sets to 1 after reset, which could then be
    10400 	 * used as an indication of whether a cycle is in progress or
    10401 	 * has completed.  We should also have a software semaphore
    10402 	 * mechanism to guard FDONE or the in-progress bit so that
    10403 	 * accesses by two threads are serialized and two threads
    10404 	 * don't start a cycle at the same time.
    10405 	 */
   10406 
   10407 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10408 		/*
   10409 		 * There is no cycle running at present, so we can start a
   10410 		 * cycle
   10411 		 */
   10412 
   10413 		/* Begin by setting Flash Cycle Done. */
   10414 		hsfsts |= HSFSTS_DONE;
   10415 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10416 		error = 0;
   10417 	} else {
   10418 		/*
    10419 		 * Otherwise, poll for some time so the current cycle has a
   10420 		 * chance to end before giving up.
   10421 		 */
   10422 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10423 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10424 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10425 				error = 0;
   10426 				break;
   10427 			}
   10428 			delay(1);
   10429 		}
   10430 		if (error == 0) {
   10431 			/*
    10432 			 * The previous cycle ended within our wait, so now
    10433 			 * set the Flash Cycle Done bit.
   10434 			 */
   10435 			hsfsts |= HSFSTS_DONE;
   10436 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10437 		}
   10438 	}
   10439 	return error;
   10440 }
   10441 
   10442 /******************************************************************************
   10443  * This function starts a flash cycle and waits for its completion
   10444  *
   10445  * sc - The pointer to the hw structure
   10446  ****************************************************************************/
   10447 static int32_t
   10448 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10449 {
   10450 	uint16_t hsflctl;
   10451 	uint16_t hsfsts;
   10452 	int32_t error = 1;
   10453 	uint32_t i = 0;
   10454 
   10455 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10456 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10457 	hsflctl |= HSFCTL_GO;
   10458 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10459 
   10460 	/* Wait till FDONE bit is set to 1 */
   10461 	do {
   10462 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10463 		if (hsfsts & HSFSTS_DONE)
   10464 			break;
   10465 		delay(1);
   10466 		i++;
   10467 	} while (i < timeout);
    10468 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10469 		error = 0;
   10470 
   10471 	return error;
   10472 }
   10473 
   10474 /******************************************************************************
   10475  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
   10476  *
   10477  * sc - The pointer to the hw structure
   10478  * index - The index of the byte or word to read.
   10479  * size - Size of data to read, 1=byte 2=word, 4=dword
   10480  * data - Pointer to the word to store the value read.
   10481  *****************************************************************************/
   10482 static int32_t
   10483 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10484     uint32_t size, uint32_t *data)
   10485 {
   10486 	uint16_t hsfsts;
   10487 	uint16_t hsflctl;
   10488 	uint32_t flash_linear_address;
   10489 	uint32_t flash_data = 0;
   10490 	int32_t error = 1;
   10491 	int32_t count = 0;
   10492 
    10493 	if (size < 1 || size > 4 || data == NULL ||
   10494 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10495 		return error;
   10496 
   10497 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10498 	    sc->sc_ich8_flash_base;
   10499 
   10500 	do {
   10501 		delay(1);
   10502 		/* Steps */
   10503 		error = wm_ich8_cycle_init(sc);
   10504 		if (error)
   10505 			break;
   10506 
   10507 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
    10508 		/* The BCOUNT field holds size - 1: 0 = 1 byte, ..., 3 = 4 bytes */
   10509 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10510 		    & HSFCTL_BCOUNT_MASK;
   10511 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10512 		if (sc->sc_type == WM_T_PCH_SPT) {
   10513 			/*
    10514 			 * In SPT, this register is in LAN memory space, not
    10515 			 * flash.  Therefore, only 32-bit access is supported.
   10516 			 */
   10517 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
   10518 			    (uint32_t)hsflctl);
   10519 		} else
   10520 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10521 
   10522 		/*
   10523 		 * Write the last 24 bits of index into Flash Linear address
   10524 		 * field in Flash Address
   10525 		 */
   10526 		/* TODO: TBD maybe check the index against the size of flash */
   10527 
   10528 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10529 
   10530 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10531 
    10532 		/*
    10533 		 * Check if FCERR is set to 1.  If it is, clear it and try
    10534 		 * the whole sequence a few more times; otherwise read in
    10535 		 * (shift in) the Flash Data0, least significant byte
    10536 		 * first.
    10537 		 */
   10538 		if (error == 0) {
   10539 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10540 			if (size == 1)
   10541 				*data = (uint8_t)(flash_data & 0x000000FF);
   10542 			else if (size == 2)
   10543 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10544 			else if (size == 4)
   10545 				*data = (uint32_t)flash_data;
   10546 			break;
   10547 		} else {
   10548 			/*
   10549 			 * If we've gotten here, then things are probably
   10550 			 * completely hosed, but if the error condition is
   10551 			 * detected, it won't hurt to give it another try...
   10552 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10553 			 */
   10554 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10555 			if (hsfsts & HSFSTS_ERR) {
   10556 				/* Repeat for some time before giving up. */
   10557 				continue;
   10558 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10559 				break;
   10560 		}
   10561 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10562 
   10563 	return error;
   10564 }
   10565 
   10566 /******************************************************************************
   10567  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10568  *
   10569  * sc - pointer to wm_hw structure
   10570  * index - The index of the byte to read.
   10571  * data - Pointer to a byte to store the value read.
   10572  *****************************************************************************/
   10573 static int32_t
   10574 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10575 {
   10576 	int32_t status;
   10577 	uint32_t word = 0;
   10578 
   10579 	status = wm_read_ich8_data(sc, index, 1, &word);
   10580 	if (status == 0)
   10581 		*data = (uint8_t)word;
   10582 	else
   10583 		*data = 0;
   10584 
   10585 	return status;
   10586 }
   10587 
   10588 /******************************************************************************
   10589  * Reads a word from the NVM using the ICH8 flash access registers.
   10590  *
   10591  * sc - pointer to wm_hw structure
   10592  * index - The starting byte index of the word to read.
   10593  * data - Pointer to a word to store the value read.
   10594  *****************************************************************************/
   10595 static int32_t
   10596 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10597 {
   10598 	int32_t status;
   10599 	uint32_t word = 0;
   10600 
   10601 	status = wm_read_ich8_data(sc, index, 2, &word);
   10602 	if (status == 0)
   10603 		*data = (uint16_t)word;
   10604 	else
   10605 		*data = 0;
   10606 
   10607 	return status;
   10608 }
   10609 
   10610 /******************************************************************************
   10611  * Reads a dword from the NVM using the ICH8 flash access registers.
   10612  *
   10613  * sc - pointer to wm_hw structure
    10614  * index - The starting byte index of the dword to read.
    10615  * data - Pointer to a dword to store the value read.
   10616  *****************************************************************************/
   10617 static int32_t
   10618 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
   10619 {
   10620 	int32_t status;
   10621 
   10622 	status = wm_read_ich8_data(sc, index, 4, data);
   10623 	return status;
   10624 }
   10625 
   10626 /******************************************************************************
   10627  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10628  * register.
   10629  *
   10630  * sc - Struct containing variables accessed by shared code
   10631  * offset - offset of word in the EEPROM to read
   10632  * data - word read from the EEPROM
   10633  * words - number of words to read
   10634  *****************************************************************************/
   10635 static int
   10636 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10637 {
   10638 	int32_t  error = 0;
   10639 	uint32_t flash_bank = 0;
   10640 	uint32_t act_offset = 0;
   10641 	uint32_t bank_offset = 0;
   10642 	uint16_t word = 0;
   10643 	uint16_t i = 0;
   10644 
   10645 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10646 		device_xname(sc->sc_dev), __func__));
   10647 
   10648 	/*
   10649 	 * We need to know which is the valid flash bank.  In the event
   10650 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10651 	 * managing flash_bank.  So it cannot be trusted and needs
   10652 	 * to be updated with each read.
   10653 	 */
   10654 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10655 	if (error) {
   10656 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10657 			device_xname(sc->sc_dev)));
   10658 		flash_bank = 0;
   10659 	}
   10660 
   10661 	/*
   10662 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10663 	 * size
   10664 	 */
   10665 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10666 
   10667 	error = wm_get_swfwhw_semaphore(sc);
   10668 	if (error) {
   10669 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10670 		    __func__);
   10671 		return error;
   10672 	}
   10673 
   10674 	for (i = 0; i < words; i++) {
   10675 		/* The NVM part needs a byte offset, hence * 2 */
   10676 		act_offset = bank_offset + ((offset + i) * 2);
   10677 		error = wm_read_ich8_word(sc, act_offset, &word);
   10678 		if (error) {
   10679 			aprint_error_dev(sc->sc_dev,
   10680 			    "%s: failed to read NVM\n", __func__);
   10681 			break;
   10682 		}
   10683 		data[i] = word;
   10684 	}
   10685 
   10686 	wm_put_swfwhw_semaphore(sc);
   10687 	return error;
   10688 }
   10689 
   10690 /******************************************************************************
   10691  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
   10692  * register.
   10693  *
   10694  * sc - Struct containing variables accessed by shared code
   10695  * offset - offset of word in the EEPROM to read
   10696  * data - word read from the EEPROM
   10697  * words - number of words to read
   10698  *****************************************************************************/
   10699 static int
   10700 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10701 {
   10702 	int32_t  error = 0;
   10703 	uint32_t flash_bank = 0;
   10704 	uint32_t act_offset = 0;
   10705 	uint32_t bank_offset = 0;
   10706 	uint32_t dword = 0;
   10707 	uint16_t i = 0;
   10708 
   10709 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10710 		device_xname(sc->sc_dev), __func__));
   10711 
   10712 	/*
   10713 	 * We need to know which is the valid flash bank.  In the event
   10714 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10715 	 * managing flash_bank.  So it cannot be trusted and needs
   10716 	 * to be updated with each read.
   10717 	 */
   10718 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10719 	if (error) {
   10720 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10721 			device_xname(sc->sc_dev)));
   10722 		flash_bank = 0;
   10723 	}
   10724 
   10725 	/*
   10726 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
   10727 	 * size
   10728 	 */
   10729 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10730 
   10731 	error = wm_get_swfwhw_semaphore(sc);
   10732 	if (error) {
   10733 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10734 		    __func__);
   10735 		return error;
   10736 	}
   10737 
   10738 	for (i = 0; i < words; i++) {
   10739 		/* The NVM part needs a byte offset, hence * 2 */
   10740 		act_offset = bank_offset + ((offset + i) * 2);
   10741 		/* but we must read dword aligned, so mask ... */
   10742 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
   10743 		if (error) {
   10744 			aprint_error_dev(sc->sc_dev,
   10745 			    "%s: failed to read NVM\n", __func__);
   10746 			break;
   10747 		}
   10748 		/* ... and pick out low or high word */
   10749 		if ((act_offset & 0x2) == 0)
   10750 			data[i] = (uint16_t)(dword & 0xFFFF);
   10751 		else
   10752 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
   10753 	}
   10754 
   10755 	wm_put_swfwhw_semaphore(sc);
   10756 	return error;
   10757 }
   10758 
   10759 /* iNVM */
   10760 
   10761 static int
   10762 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10763 {
    10764 	int32_t  rv = -1;	/* assume "not found" until the word is located */
   10765 	uint32_t invm_dword;
   10766 	uint16_t i;
   10767 	uint8_t record_type, word_address;
   10768 
   10769 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10770 		device_xname(sc->sc_dev), __func__));
   10771 
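	/*
	 * Walk the iNVM records: skip over the payload of CSR-autoload and
	 * RSA-key records, and stop at the first uninitialized entry.
	 */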
   10772 	for (i = 0; i < INVM_SIZE; i++) {
   10773 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10774 		/* Get record type */
   10775 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10776 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10777 			break;
   10778 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10779 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10780 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10781 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10782 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10783 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10784 			if (word_address == address) {
   10785 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10786 				rv = 0;
   10787 				break;
   10788 			}
   10789 		}
   10790 	}
   10791 
   10792 	return rv;
   10793 }
   10794 
   10795 static int
   10796 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10797 {
   10798 	int rv = 0;
   10799 	int i;
   10800 
   10801 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10802 		device_xname(sc->sc_dev), __func__));
   10803 
   10804 	for (i = 0; i < words; i++) {
   10805 		switch (offset + i) {
   10806 		case NVM_OFF_MACADDR:
   10807 		case NVM_OFF_MACADDR1:
   10808 		case NVM_OFF_MACADDR2:
   10809 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10810 			if (rv != 0) {
   10811 				data[i] = 0xffff;
   10812 				rv = -1;
   10813 			}
   10814 			break;
   10815 		case NVM_OFF_CFG2:
   10816 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10817 			if (rv != 0) {
   10818 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10819 				rv = 0;
   10820 			}
   10821 			break;
   10822 		case NVM_OFF_CFG4:
   10823 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10824 			if (rv != 0) {
   10825 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10826 				rv = 0;
   10827 			}
   10828 			break;
   10829 		case NVM_OFF_LED_1_CFG:
   10830 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10831 			if (rv != 0) {
   10832 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10833 				rv = 0;
   10834 			}
   10835 			break;
   10836 		case NVM_OFF_LED_0_2_CFG:
   10837 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10838 			if (rv != 0) {
   10839 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10840 				rv = 0;
   10841 			}
   10842 			break;
   10843 		case NVM_OFF_ID_LED_SETTINGS:
   10844 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10845 			if (rv != 0) {
   10846 				*data = ID_LED_RESERVED_FFFF;
   10847 				rv = 0;
   10848 			}
   10849 			break;
   10850 		default:
   10851 			DPRINTF(WM_DEBUG_NVM,
   10852 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10853 			*data = NVM_RESERVED_WORD;
   10854 			break;
   10855 		}
   10856 	}
   10857 
   10858 	return rv;
   10859 }
   10860 
   10861 /* Lock, detecting NVM type, validate checksum, version and read */
   10862 
   10863 /*
   10864  * wm_nvm_acquire:
   10865  *
   10866  *	Perform the EEPROM handshake required on some chips.
   10867  */
   10868 static int
   10869 wm_nvm_acquire(struct wm_softc *sc)
   10870 {
   10871 	uint32_t reg;
   10872 	int x;
   10873 	int ret = 0;
   10874 
   10875 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10876 		device_xname(sc->sc_dev), __func__));
   10877 
   10878 	if (sc->sc_type >= WM_T_ICH8) {
   10879 		ret = wm_get_nvm_ich8lan(sc);
   10880 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10881 		ret = wm_get_swfwhw_semaphore(sc);
   10882 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10883 		/* This will also do wm_get_swsm_semaphore() if needed */
   10884 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10885 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10886 		ret = wm_get_swsm_semaphore(sc);
   10887 	}
   10888 
   10889 	if (ret) {
   10890 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10891 			__func__);
   10892 		return 1;
   10893 	}
   10894 
   10895 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10896 		reg = CSR_READ(sc, WMREG_EECD);
   10897 
   10898 		/* Request EEPROM access. */
   10899 		reg |= EECD_EE_REQ;
   10900 		CSR_WRITE(sc, WMREG_EECD, reg);
   10901 
   10902 		/* ..and wait for it to be granted. */
   10903 		for (x = 0; x < 1000; x++) {
   10904 			reg = CSR_READ(sc, WMREG_EECD);
   10905 			if (reg & EECD_EE_GNT)
   10906 				break;
   10907 			delay(5);
   10908 		}
   10909 		if ((reg & EECD_EE_GNT) == 0) {
   10910 			aprint_error_dev(sc->sc_dev,
   10911 			    "could not acquire EEPROM GNT\n");
   10912 			reg &= ~EECD_EE_REQ;
   10913 			CSR_WRITE(sc, WMREG_EECD, reg);
   10914 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10915 				wm_put_swfwhw_semaphore(sc);
   10916 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10917 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10918 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10919 				wm_put_swsm_semaphore(sc);
   10920 			return 1;
   10921 		}
   10922 	}
   10923 
   10924 	return 0;
   10925 }
   10926 
   10927 /*
   10928  * wm_nvm_release:
   10929  *
   10930  *	Release the EEPROM mutex.
   10931  */
   10932 static void
   10933 wm_nvm_release(struct wm_softc *sc)
   10934 {
   10935 	uint32_t reg;
   10936 
   10937 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   10938 		device_xname(sc->sc_dev), __func__));
   10939 
   10940 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10941 		reg = CSR_READ(sc, WMREG_EECD);
   10942 		reg &= ~EECD_EE_REQ;
   10943 		CSR_WRITE(sc, WMREG_EECD, reg);
   10944 	}
   10945 
   10946 	if (sc->sc_type >= WM_T_ICH8) {
   10947 		wm_put_nvm_ich8lan(sc);
   10948 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10949 		wm_put_swfwhw_semaphore(sc);
   10950 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10951 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10952 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10953 		wm_put_swsm_semaphore(sc);
   10954 }
   10955 
   10956 static int
   10957 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10958 {
   10959 	uint32_t eecd = 0;
   10960 
   10961 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10962 	    || sc->sc_type == WM_T_82583) {
   10963 		eecd = CSR_READ(sc, WMREG_EECD);
   10964 
   10965 		/* Isolate bits 15 & 16 */
   10966 		eecd = ((eecd >> 15) & 0x03);
   10967 
   10968 		/* If both bits are set, device is Flash type */
   10969 		if (eecd == 0x03)
   10970 			return 0;
   10971 	}
   10972 	return 1;
   10973 }
   10974 
   10975 static int
   10976 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10977 {
   10978 	uint32_t eec;
   10979 
   10980 	eec = CSR_READ(sc, WMREG_EEC);
   10981 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10982 		return 1;
   10983 
   10984 	return 0;
   10985 }
   10986 
   10987 /*
    10988  * wm_nvm_validate_checksum:
    10989  *
    10990  *	The checksum is the sum of the first 64 16-bit words; it must equal NVM_CHECKSUM.
   10991  */
   10992 static int
   10993 wm_nvm_validate_checksum(struct wm_softc *sc)
   10994 {
   10995 	uint16_t checksum;
   10996 	uint16_t eeprom_data;
   10997 #ifdef WM_DEBUG
   10998 	uint16_t csum_wordaddr, valid_checksum;
   10999 #endif
   11000 	int i;
   11001 
   11002 	checksum = 0;
   11003 
   11004 	/* Don't check for I211 */
   11005 	if (sc->sc_type == WM_T_I211)
   11006 		return 0;
   11007 
   11008 #ifdef WM_DEBUG
   11009 	if (sc->sc_type == WM_T_PCH_LPT) {
   11010 		csum_wordaddr = NVM_OFF_COMPAT;
   11011 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   11012 	} else {
   11013 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   11014 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   11015 	}
   11016 
   11017 	/* Dump EEPROM image for debug */
   11018 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11019 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11020 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   11021 		/* XXX PCH_SPT? */
   11022 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   11023 		if ((eeprom_data & valid_checksum) == 0) {
   11024 			DPRINTF(WM_DEBUG_NVM,
   11025 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   11026 				device_xname(sc->sc_dev), eeprom_data,
   11027 				    valid_checksum));
   11028 		}
   11029 	}
   11030 
   11031 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   11032 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   11033 		for (i = 0; i < NVM_SIZE; i++) {
   11034 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11035 				printf("XXXX ");
   11036 			else
   11037 				printf("%04hx ", eeprom_data);
   11038 			if (i % 8 == 7)
   11039 				printf("\n");
   11040 		}
   11041 	}
   11042 
   11043 #endif /* WM_DEBUG */
   11044 
   11045 	for (i = 0; i < NVM_SIZE; i++) {
   11046 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   11047 			return 1;
   11048 		checksum += eeprom_data;
   11049 	}
   11050 
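	/*
	 * Note that a checksum mismatch is only reported; the driver
	 * deliberately does not treat it as an error here.
	 */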
   11051 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   11052 #ifdef WM_DEBUG
   11053 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   11054 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   11055 #endif
   11056 	}
   11057 
   11058 	return 0;
   11059 }
   11060 
   11061 static void
   11062 wm_nvm_version_invm(struct wm_softc *sc)
   11063 {
   11064 	uint32_t dword;
   11065 
   11066 	/*
    11067 	 * Linux's code to decode the version is very strange, so we don't
    11068 	 * follow that algorithm; we just use word 61 as the documentation says.
   11069 	 * Perhaps it's not perfect though...
   11070 	 *
   11071 	 * Example:
   11072 	 *
   11073 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   11074 	 */
   11075 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   11076 	dword = __SHIFTOUT(dword, INVM_VER_1);
   11077 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   11078 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   11079 }
   11080 
   11081 static void
   11082 wm_nvm_version(struct wm_softc *sc)
   11083 {
   11084 	uint16_t major, minor, build, patch;
   11085 	uint16_t uid0, uid1;
   11086 	uint16_t nvm_data;
   11087 	uint16_t off;
   11088 	bool check_version = false;
   11089 	bool check_optionrom = false;
   11090 	bool have_build = false;
   11091 
   11092 	/*
   11093 	 * Version format:
   11094 	 *
   11095 	 * XYYZ
   11096 	 * X0YZ
   11097 	 * X0YY
   11098 	 *
   11099 	 * Example:
   11100 	 *
   11101 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   11102 	 *	82571	0x50a6	5.10.6?
   11103 	 *	82572	0x506a	5.6.10?
   11104 	 *	82572EI	0x5069	5.6.9?
   11105 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   11106 	 *		0x2013	2.1.3?
    11107 	 *	82583	0x10a0	1.10.0? (the document says it's the default value)
   11108 	 */
   11109 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   11110 	switch (sc->sc_type) {
   11111 	case WM_T_82571:
   11112 	case WM_T_82572:
   11113 	case WM_T_82574:
   11114 	case WM_T_82583:
   11115 		check_version = true;
   11116 		check_optionrom = true;
   11117 		have_build = true;
   11118 		break;
   11119 	case WM_T_82575:
   11120 	case WM_T_82576:
   11121 	case WM_T_82580:
   11122 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   11123 			check_version = true;
   11124 		break;
   11125 	case WM_T_I211:
   11126 		wm_nvm_version_invm(sc);
   11127 		goto printver;
   11128 	case WM_T_I210:
   11129 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   11130 			wm_nvm_version_invm(sc);
   11131 			goto printver;
   11132 		}
   11133 		/* FALLTHROUGH */
   11134 	case WM_T_I350:
   11135 	case WM_T_I354:
   11136 		check_version = true;
   11137 		check_optionrom = true;
   11138 		break;
   11139 	default:
   11140 		return;
   11141 	}
   11142 	if (check_version) {
   11143 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   11144 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   11145 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   11146 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   11147 			build = nvm_data & NVM_BUILD_MASK;
   11148 			have_build = true;
   11149 		} else
   11150 			minor = nvm_data & 0x00ff;
   11151 
    11152 		/* The minor number is BCD-encoded; convert it to decimal */
   11153 		minor = (minor / 16) * 10 + (minor % 16);
   11154 		sc->sc_nvm_ver_major = major;
   11155 		sc->sc_nvm_ver_minor = minor;
   11156 
   11157 printver:
   11158 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   11159 		    sc->sc_nvm_ver_minor);
   11160 		if (have_build) {
   11161 			sc->sc_nvm_ver_build = build;
   11162 			aprint_verbose(".%d", build);
   11163 		}
   11164 	}
   11165 	if (check_optionrom) {
   11166 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   11167 		/* Option ROM Version */
   11168 		if ((off != 0x0000) && (off != 0xffff)) {
   11169 			off += NVM_COMBO_VER_OFF;
   11170 			wm_nvm_read(sc, off + 1, 1, &uid1);
   11171 			wm_nvm_read(sc, off, 1, &uid0);
   11172 			if ((uid0 != 0) && (uid0 != 0xffff)
   11173 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   11174 				/* 16bits */
   11175 				major = uid0 >> 8;
   11176 				build = (uid0 << 8) | (uid1 >> 8);
   11177 				patch = uid1 & 0x00ff;
   11178 				aprint_verbose(", option ROM Version %d.%d.%d",
   11179 				    major, build, patch);
   11180 			}
   11181 		}
   11182 	}
   11183 
   11184 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   11185 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   11186 }
   11187 
   11188 /*
   11189  * wm_nvm_read:
   11190  *
   11191  *	Read data from the serial EEPROM.
   11192  */
   11193 static int
   11194 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   11195 {
   11196 	int rv;
   11197 
   11198 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11199 		device_xname(sc->sc_dev), __func__));
   11200 
   11201 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   11202 		return 1;
   11203 
   11204 	if (wm_nvm_acquire(sc))
   11205 		return 1;
   11206 
   11207 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11208 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11209 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   11210 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   11211 	else if (sc->sc_type == WM_T_PCH_SPT)
   11212 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
   11213 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   11214 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   11215 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   11216 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   11217 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   11218 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   11219 	else
   11220 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   11221 
   11222 	wm_nvm_release(sc);
   11223 	return rv;
   11224 }
   11225 
   11226 /*
   11227  * Hardware semaphores.
    11228  * Very complex...
   11229  */
   11230 
   11231 static int
   11232 wm_get_null(struct wm_softc *sc)
   11233 {
   11234 
   11235 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11236 		device_xname(sc->sc_dev), __func__));
   11237 	return 0;
   11238 }
   11239 
   11240 static void
   11241 wm_put_null(struct wm_softc *sc)
   11242 {
   11243 
   11244 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11245 		device_xname(sc->sc_dev), __func__));
   11246 	return;
   11247 }
   11248 
   11249 /*
   11250  * Get hardware semaphore.
   11251  * Same as e1000_get_hw_semaphore_generic()
   11252  */
   11253 static int
   11254 wm_get_swsm_semaphore(struct wm_softc *sc)
   11255 {
   11256 	int32_t timeout;
   11257 	uint32_t swsm;
   11258 
   11259 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11260 		device_xname(sc->sc_dev), __func__));
   11261 	KASSERT(sc->sc_nvm_wordsize > 0);
   11262 
   11263 	/* Get the SW semaphore. */
   11264 	timeout = sc->sc_nvm_wordsize + 1;
   11265 	while (timeout) {
   11266 		swsm = CSR_READ(sc, WMREG_SWSM);
   11267 
   11268 		if ((swsm & SWSM_SMBI) == 0)
   11269 			break;
   11270 
   11271 		delay(50);
   11272 		timeout--;
   11273 	}
   11274 
   11275 	if (timeout == 0) {
   11276 		aprint_error_dev(sc->sc_dev,
   11277 		    "could not acquire SWSM SMBI\n");
   11278 		return 1;
   11279 	}
   11280 
   11281 	/* Get the FW semaphore. */
   11282 	timeout = sc->sc_nvm_wordsize + 1;
   11283 	while (timeout) {
   11284 		swsm = CSR_READ(sc, WMREG_SWSM);
   11285 		swsm |= SWSM_SWESMBI;
   11286 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   11287 		/* If we managed to set the bit we got the semaphore. */
   11288 		swsm = CSR_READ(sc, WMREG_SWSM);
   11289 		if (swsm & SWSM_SWESMBI)
   11290 			break;
   11291 
   11292 		delay(50);
   11293 		timeout--;
   11294 	}
   11295 
   11296 	if (timeout == 0) {
   11297 		aprint_error_dev(sc->sc_dev,
   11298 		    "could not acquire SWSM SWESMBI\n");
   11299 		/* Release semaphores */
   11300 		wm_put_swsm_semaphore(sc);
   11301 		return 1;
   11302 	}
   11303 	return 0;
   11304 }
   11305 
   11306 /*
   11307  * Put hardware semaphore.
   11308  * Same as e1000_put_hw_semaphore_generic()
   11309  */
   11310 static void
   11311 wm_put_swsm_semaphore(struct wm_softc *sc)
   11312 {
   11313 	uint32_t swsm;
   11314 
   11315 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11316 		device_xname(sc->sc_dev), __func__));
   11317 
   11318 	swsm = CSR_READ(sc, WMREG_SWSM);
   11319 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   11320 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   11321 }
   11322 
   11323 /*
   11324  * Get SW/FW semaphore.
   11325  * Same as e1000_acquire_swfw_sync_82575().
   11326  */
   11327 static int
   11328 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11329 {
   11330 	uint32_t swfw_sync;
   11331 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   11332 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    11333 	int timeout;
   11334 
   11335 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11336 		device_xname(sc->sc_dev), __func__));
   11337 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11338 
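	/*
	 * Protocol: take the SWSM host semaphore, then claim our bit in
	 * SW_FW_SYNC if neither software nor firmware currently holds it.
	 */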
   11339 	for (timeout = 0; timeout < 200; timeout++) {
   11340 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11341 			if (wm_get_swsm_semaphore(sc)) {
   11342 				aprint_error_dev(sc->sc_dev,
   11343 				    "%s: failed to get semaphore\n",
   11344 				    __func__);
   11345 				return 1;
   11346 			}
   11347 		}
   11348 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11349 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   11350 			swfw_sync |= swmask;
   11351 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11352 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   11353 				wm_put_swsm_semaphore(sc);
   11354 			return 0;
   11355 		}
   11356 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   11357 			wm_put_swsm_semaphore(sc);
   11358 		delay(5000);
   11359 	}
   11360 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   11361 	    device_xname(sc->sc_dev), mask, swfw_sync);
   11362 	return 1;
   11363 }
   11364 
   11365 static void
   11366 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   11367 {
   11368 	uint32_t swfw_sync;
   11369 
   11370 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11371 		device_xname(sc->sc_dev), __func__));
   11372 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
   11373 
   11374 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   11375 		while (wm_get_swsm_semaphore(sc) != 0)
   11376 			continue;
   11377 	}
   11378 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   11379 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   11380 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   11381 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   11382 		wm_put_swsm_semaphore(sc);
   11383 }
   11384 
   11385 static int
   11386 wm_get_phy_82575(struct wm_softc *sc)
   11387 {
   11388 
   11389 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11390 		device_xname(sc->sc_dev), __func__));
   11391 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11392 }
   11393 
   11394 static void
   11395 wm_put_phy_82575(struct wm_softc *sc)
   11396 {
   11397 
   11398 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11399 		device_xname(sc->sc_dev), __func__));
   11400 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   11401 }
   11402 
   11403 static int
   11404 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   11405 {
   11406 	uint32_t ext_ctrl;
    11407 	int timeout;
   11408 
   11409 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11410 		device_xname(sc->sc_dev), __func__));
   11411 
   11412 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11413 	for (timeout = 0; timeout < 200; timeout++) {
   11414 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11415 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11416 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11417 
   11418 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11419 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11420 			return 0;
   11421 		delay(5000);
   11422 	}
   11423 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   11424 	    device_xname(sc->sc_dev), ext_ctrl);
   11425 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11426 	return 1;
   11427 }
   11428 
   11429 static void
   11430 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   11431 {
   11432 	uint32_t ext_ctrl;
   11433 
   11434 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11435 		device_xname(sc->sc_dev), __func__));
   11436 
   11437 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11438 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11439 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11440 
   11441 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
   11442 }
   11443 
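         /*
          * Get the software flag used to serialize PHY and NVM accesses on
          * ICH8 and newer: wait for any current owner of the EXTCNFCTR
          * MDIO ownership bit to drop it, then claim it and verify that
          * the claim took effect.
          */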
   11444 static int
   11445 wm_get_swflag_ich8lan(struct wm_softc *sc)
   11446 {
   11447 	uint32_t ext_ctrl;
   11448 	int timeout;
   11449 
   11450 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11451 		device_xname(sc->sc_dev), __func__));
   11452 	mutex_enter(sc->sc_ich_phymtx);
   11453 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
   11454 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11455 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
   11456 			break;
   11457 		delay(1000);
   11458 	}
   11459 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
   11460 		printf("%s: SW has already locked the resource\n",
   11461 		    device_xname(sc->sc_dev));
   11462 		goto out;
   11463 	}
   11464 
   11465 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11466 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11467 	for (timeout = 0; timeout < 1000; timeout++) {
   11468 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11469 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   11470 			break;
   11471 		delay(1000);
   11472 	}
   11473 	if (timeout >= 1000) {
   11474 		printf("%s: failed to acquire semaphore\n",
   11475 		    device_xname(sc->sc_dev));
   11476 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11477 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11478 		goto out;
   11479 	}
   11480 	return 0;
   11481 
   11482 out:
   11483 	mutex_exit(sc->sc_ich_phymtx);
   11484 	return 1;
   11485 }
   11486 
   11487 static void
   11488 wm_put_swflag_ich8lan(struct wm_softc *sc)
   11489 {
   11490 	uint32_t ext_ctrl;
   11491 
   11492 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11493 		device_xname(sc->sc_dev), __func__));
   11494 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   11495 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
   11496 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11497 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   11498 	} else {
   11499 		printf("%s: Semaphore unexpectedly released\n",
   11500 		    device_xname(sc->sc_dev));
   11501 	}
   11502 
   11503 	mutex_exit(sc->sc_ich_phymtx);
   11504 }
   11505 
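         /*
          * On ICH8 and newer the NVM is serialized by a local mutex only;
          * wm_put_nvm_ich8lan() below releases it.
          */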
   11506 static int
   11507 wm_get_nvm_ich8lan(struct wm_softc *sc)
   11508 {
   11509 
   11510 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11511 		device_xname(sc->sc_dev), __func__));
   11512 	mutex_enter(sc->sc_ich_nvmmtx);
   11513 
   11514 	return 0;
   11515 }
   11516 
   11517 static void
   11518 wm_put_nvm_ich8lan(struct wm_softc *sc)
   11519 {
   11520 
   11521 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
   11522 		device_xname(sc->sc_dev), __func__));
   11523 	mutex_exit(sc->sc_ich_nvmmtx);
   11524 }
   11525 
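         /*
          * Get the 82573 hardware semaphore: set the MDIO software
          * ownership bit in EXTCNFCTR and retry until the hardware
          * accepts the claim or the timeout expires.
          */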
   11526 static int
   11527 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   11528 {
   11529 	int i = 0;
   11530 	uint32_t reg;
   11531 
   11532 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11533 		device_xname(sc->sc_dev), __func__));
   11534 
   11535 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11536 	do {
   11537 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   11538 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   11539 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11540 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   11541 			break;
   11542 		delay(2*1000);
   11543 		i++;
   11544 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   11545 
   11546 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   11547 		wm_put_hw_semaphore_82573(sc);
   11548 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   11549 		    device_xname(sc->sc_dev));
   11550 		return -1;
   11551 	}
   11552 
   11553 	return 0;
   11554 }
   11555 
   11556 static void
   11557 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   11558 {
   11559 	uint32_t reg;
   11560 
   11561 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11562 		device_xname(sc->sc_dev), __func__));
   11563 
   11564 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11565 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   11566 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11567 }
   11568 
   11569 /*
   11570  * Management mode and power management related subroutines.
   11571  * BMC, AMT, suspend/resume and EEE.
   11572  */
   11573 
   11574 #ifdef WM_WOL
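         /*
          * Check whether the firmware is running in a manageability mode;
          * the method of checking depends on the chip family.
          */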
   11575 static int
   11576 wm_check_mng_mode(struct wm_softc *sc)
   11577 {
   11578 	int rv;
   11579 
   11580 	switch (sc->sc_type) {
   11581 	case WM_T_ICH8:
   11582 	case WM_T_ICH9:
   11583 	case WM_T_ICH10:
   11584 	case WM_T_PCH:
   11585 	case WM_T_PCH2:
   11586 	case WM_T_PCH_LPT:
   11587 	case WM_T_PCH_SPT:
   11588 		rv = wm_check_mng_mode_ich8lan(sc);
   11589 		break;
   11590 	case WM_T_82574:
   11591 	case WM_T_82583:
   11592 		rv = wm_check_mng_mode_82574(sc);
   11593 		break;
   11594 	case WM_T_82571:
   11595 	case WM_T_82572:
   11596 	case WM_T_82573:
   11597 	case WM_T_80003:
   11598 		rv = wm_check_mng_mode_generic(sc);
   11599 		break;
   11600 	default:
   11601 		/* Nothing to do */
   11602 		rv = 0;
   11603 		break;
   11604 	}
   11605 
   11606 	return rv;
   11607 }
   11608 
   11609 static int
   11610 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   11611 {
   11612 	uint32_t fwsm;
   11613 
   11614 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11615 
   11616 	if (((fwsm & FWSM_FW_VALID) != 0)
   11617 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11618 		return 1;
   11619 
   11620 	return 0;
   11621 }
   11622 
   11623 static int
   11624 wm_check_mng_mode_82574(struct wm_softc *sc)
   11625 {
   11626 	uint16_t data;
   11627 
   11628 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11629 
   11630 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   11631 		return 1;
   11632 
   11633 	return 0;
   11634 }
   11635 
   11636 static int
   11637 wm_check_mng_mode_generic(struct wm_softc *sc)
   11638 {
   11639 	uint32_t fwsm;
   11640 
   11641 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11642 
   11643 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
   11644 		return 1;
   11645 
   11646 	return 0;
   11647 }
   11648 #endif /* WM_WOL */
   11649 
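         /*
          * Decide whether management packets should pass through to the
          * host: ASF firmware must be present and TCO receive enabled,
          * and the chip-specific checks below must indicate pass-through
          * (rather than ASF-only) operation.
          */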
   11650 static int
   11651 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11652 {
   11653 	uint32_t manc, fwsm, factps;
   11654 
   11655 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11656 		return 0;
   11657 
   11658 	manc = CSR_READ(sc, WMREG_MANC);
   11659 
   11660 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11661 		device_xname(sc->sc_dev), manc));
   11662 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11663 		return 0;
   11664 
   11665 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11666 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11667 		factps = CSR_READ(sc, WMREG_FACTPS);
   11668 		if (((factps & FACTPS_MNGCG) == 0)
   11669 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
   11670 			return 1;
   11671 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11672 		uint16_t data;
   11673 
   11674 		factps = CSR_READ(sc, WMREG_FACTPS);
   11675 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11676 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11677 			device_xname(sc->sc_dev), factps, data));
   11678 		if (((factps & FACTPS_MNGCG) == 0)
   11679 		    && ((data & NVM_CFG2_MNGM_MASK)
   11680 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11681 			return 1;
   11682 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11683 	    && ((manc & MANC_ASF_EN) == 0))
   11684 		return 1;
   11685 
   11686 	return 0;
   11687 }
   11688 
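         /*
          * Check whether firmware currently blocks PHY resets.  On ICH/PCH
          * this polls FWSM_RSPCIPHY for up to ~300ms; on 8257x/80003 it
          * checks MANC_BLK_PHY_RST_ON_IDE.
          */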
   11689 static bool
   11690 wm_phy_resetisblocked(struct wm_softc *sc)
   11691 {
   11692 	bool blocked = false;
   11693 	uint32_t reg;
   11694 	int i = 0;
   11695 
   11696 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11697 		device_xname(sc->sc_dev), __func__));
   11698 
   11699 	switch (sc->sc_type) {
   11700 	case WM_T_ICH8:
   11701 	case WM_T_ICH9:
   11702 	case WM_T_ICH10:
   11703 	case WM_T_PCH:
   11704 	case WM_T_PCH2:
   11705 	case WM_T_PCH_LPT:
   11706 	case WM_T_PCH_SPT:
   11707 		do {
   11708 			reg = CSR_READ(sc, WMREG_FWSM);
   11709 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11710 				blocked = true;
   11711 				delay(10*1000);
   11712 				continue;
   11713 			}
   11714 			blocked = false;
   11715 		} while (blocked && (i++ < 30));
   11716 		return blocked;
   11718 	case WM_T_82571:
   11719 	case WM_T_82572:
   11720 	case WM_T_82573:
   11721 	case WM_T_82574:
   11722 	case WM_T_82583:
   11723 	case WM_T_80003:
   11724 		reg = CSR_READ(sc, WMREG_MANC);
   11725 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11726 			return true;
   11727 		else
   11728 			return false;
   11730 	default:
   11731 		/* no problem */
   11732 		break;
   11733 	}
   11734 
   11735 	return false;
   11736 }
   11737 
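         /*
          * Tell the firmware that the driver has taken control of the
          * device by setting the DRV_LOAD bit (in SWSM on the 82573, in
          * CTRL_EXT elsewhere); wm_release_hw_control() clears it again.
          */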
   11738 static void
   11739 wm_get_hw_control(struct wm_softc *sc)
   11740 {
   11741 	uint32_t reg;
   11742 
   11743 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11744 		device_xname(sc->sc_dev), __func__));
   11745 
   11746 	switch (sc->sc_type) {
   11747 	case WM_T_82573:
   11748 		reg = CSR_READ(sc, WMREG_SWSM);
   11749 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11750 		break;
   11751 	case WM_T_82571:
   11752 	case WM_T_82572:
   11753 	case WM_T_82574:
   11754 	case WM_T_82583:
   11755 	case WM_T_80003:
   11756 	case WM_T_ICH8:
   11757 	case WM_T_ICH9:
   11758 	case WM_T_ICH10:
   11759 	case WM_T_PCH:
   11760 	case WM_T_PCH2:
   11761 	case WM_T_PCH_LPT:
   11762 	case WM_T_PCH_SPT:
   11763 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11764 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11765 		break;
   11766 	default:
   11767 		break;
   11768 	}
   11769 }
   11770 
   11771 static void
   11772 wm_release_hw_control(struct wm_softc *sc)
   11773 {
   11774 	uint32_t reg;
   11775 
   11776 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
   11777 		device_xname(sc->sc_dev), __func__));
   11778 
   11779 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11780 		return;
   11781 
   11782 	if (sc->sc_type == WM_T_82573) {
   11783 		reg = CSR_READ(sc, WMREG_SWSM);
   11784 		reg &= ~SWSM_DRV_LOAD;
   11785 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11786 	} else {
   11787 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11788 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11789 	}
   11790 }
   11791 
   11792 static void
   11793 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
   11794 {
   11795 	uint32_t reg;
   11796 
   11797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11798 		device_xname(sc->sc_dev), __func__));
   11799 
   11800 	if (sc->sc_type < WM_T_PCH2)
   11801 		return;
   11802 
   11803 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11804 
   11805 	if (gate)
   11806 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11807 	else
   11808 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11809 
   11810 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11811 }
   11812 
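         /*
          * Move the PHY from SMBus to PCI (MDIO) access mode, as the
          * function name suggests: if no valid firmware is present and
          * PHY resets aren't blocked, toggle LANPHYPC to power-cycle the
          * PHY, forcing SMBus mode around the toggle on PCH_LPT and newer.
          */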
   11813 static void
   11814 wm_smbustopci(struct wm_softc *sc)
   11815 {
   11816 	uint32_t fwsm, reg;
   11817 
   11818 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11819 		device_xname(sc->sc_dev), __func__));
   11820 
   11821 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
   11822 	wm_gate_hw_phy_config_ich8lan(sc, true);
   11823 
   11824 	/* Acquire PHY semaphore */
   11825 	sc->phy.acquire(sc);
   11826 
   11827 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11828 	if (((fwsm & FWSM_FW_VALID) == 0)
   11829 	    && (wm_phy_resetisblocked(sc) == false)) {
   11830 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11831 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11832 			reg |= CTRL_EXT_FORCE_SMBUS;
   11833 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11834 			CSR_WRITE_FLUSH(sc);
   11835 			delay(50*1000);
   11836 		}
   11837 
   11838 		/* Toggle LANPHYPC */
   11839 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11840 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11841 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11842 		CSR_WRITE_FLUSH(sc);
   11843 		delay(1000);
   11844 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11845 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11846 		CSR_WRITE_FLUSH(sc);
   11847 		delay(50*1000);
   11848 
   11849 		if (sc->sc_type >= WM_T_PCH_LPT) {
   11850 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11851 			reg &= ~CTRL_EXT_FORCE_SMBUS;
   11852 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11853 		}
   11854 	}
   11855 
   11856 	/* Release semaphore */
   11857 	sc->phy.release(sc);
   11858 
   11859 	/*
   11860 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
   11861 	 */
   11862 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
   11863 		wm_gate_hw_phy_config_ich8lan(sc, false);
   11864 }
   11865 
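         /*
          * Set up manageability filtering: stop hardware interception of
          * ARP frames and, on 82571 and newer, route management packets
          * on ports 623 and 624 to the host.
          */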
   11866 static void
   11867 wm_init_manageability(struct wm_softc *sc)
   11868 {
   11869 
   11870 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   11871 		device_xname(sc->sc_dev), __func__));
   11872 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11873 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11874 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11875 
   11876 		/* Disable hardware interception of ARP */
   11877 		manc &= ~MANC_ARP_EN;
   11878 
   11879 		/* Enable receiving management packets to the host */
   11880 		if (sc->sc_type >= WM_T_82571) {
   11881 			manc |= MANC_EN_MNG2HOST;
   11882 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11883 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11884 		}
   11885 
   11886 		CSR_WRITE(sc, WMREG_MANC, manc);
   11887 	}
   11888 }
   11889 
   11890 static void
   11891 wm_release_manageability(struct wm_softc *sc)
   11892 {
   11893 
   11894 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11895 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11896 
   11897 		manc |= MANC_ARP_EN;
   11898 		if (sc->sc_type >= WM_T_82571)
   11899 			manc &= ~MANC_EN_MNG2HOST;
   11900 
   11901 		CSR_WRITE(sc, WMREG_MANC, manc);
   11902 	}
   11903 }
   11904 
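         /*
          * Determine the chip's management and wakeup capabilities and
          * record them in sc_flags (HAS_AMT, ARC_SUBSYS_VALID,
          * ASF_FIRMWARE_PRES, HAS_MANAGE).
          */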
   11905 static void
   11906 wm_get_wakeup(struct wm_softc *sc)
   11907 {
   11908 
   11909 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11910 	switch (sc->sc_type) {
   11911 	case WM_T_82573:
   11912 	case WM_T_82583:
   11913 		sc->sc_flags |= WM_F_HAS_AMT;
   11914 		/* FALLTHROUGH */
   11915 	case WM_T_80003:
   11916 	case WM_T_82541:
   11917 	case WM_T_82547:
   11918 	case WM_T_82571:
   11919 	case WM_T_82572:
   11920 	case WM_T_82574:
   11921 	case WM_T_82575:
   11922 	case WM_T_82576:
   11923 	case WM_T_82580:
   11924 	case WM_T_I350:
   11925 	case WM_T_I354:
   11926 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
   11927 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11928 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11929 		break;
   11930 	case WM_T_ICH8:
   11931 	case WM_T_ICH9:
   11932 	case WM_T_ICH10:
   11933 	case WM_T_PCH:
   11934 	case WM_T_PCH2:
   11935 	case WM_T_PCH_LPT:
   11936 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
   11937 		sc->sc_flags |= WM_F_HAS_AMT;
   11938 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11939 		break;
   11940 	default:
   11941 		break;
   11942 	}
   11943 
   11944 	/* 1: HAS_MANAGE */
   11945 	if (wm_enable_mng_pass_thru(sc) != 0)
   11946 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11947 
   11948 #ifdef WM_DEBUG
   11949 	printf("\n");
   11950 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11951 		printf("HAS_AMT,");
   11952 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11953 		printf("ARC_SUBSYS_VALID,");
   11954 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11955 		printf("ASF_FIRMWARE_PRES,");
   11956 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11957 		printf("HAS_MANAGE,");
   11958 	printf("\n");
   11959 #endif
   11960 	/*
   11961 	 * Note that the WOL flag is set only after the EEPROM reset code
   11962 	 * has run.
   11963 	 */
   11964 }
   11965 
   11966 #ifdef WM_WOL
   11967 /* WOL in the newer chipset interfaces (pchlan) */
   11968 static void
   11969 wm_enable_phy_wakeup(struct wm_softc *sc)
   11970 {
   11971 #if 0
   11972 	uint16_t preg;
   11973 
   11974 	/* Copy MAC RARs to PHY RARs */
   11975 
   11976 	/* Copy MAC MTA to PHY MTA */
   11977 
   11978 	/* Configure PHY Rx Control register */
   11979 
   11980 	/* Enable PHY wakeup in MAC register */
   11981 
   11982 	/* Configure and enable PHY wakeup in PHY registers */
   11983 
   11984 	/* Activate PHY wakeup */
   11985 
   11986 	/* XXX */
   11987 #endif
   11988 }
   11989 
   11990 /* Power down workaround on D3 */
   11991 static void
   11992 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11993 {
   11994 	uint32_t reg;
   11995 	int i;
   11996 
   11997 	for (i = 0; i < 2; i++) {
   11998 		/* Disable link */
   11999 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12000 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12001 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12002 
   12003 		/*
   12004 		 * Call gig speed drop workaround on Gig disable before
   12005 		 * accessing any PHY registers
   12006 		 */
   12007 		if (sc->sc_type == WM_T_ICH8)
   12008 			wm_gig_downshift_workaround_ich8lan(sc);
   12009 
   12010 		/* Write VR power-down enable */
   12011 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12012 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12013 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   12014 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   12015 
   12016 		/* Read it back and test */
   12017 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   12018 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   12019 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   12020 			break;
   12021 
   12022 		/* Issue PHY reset and repeat at most one more time */
   12023 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   12024 	}
   12025 }
   12026 
   12027 static void
   12028 wm_enable_wakeup(struct wm_softc *sc)
   12029 {
   12030 	uint32_t reg, pmreg;
   12031 	pcireg_t pmode;
   12032 
   12033 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12034 		device_xname(sc->sc_dev), __func__));
   12035 
   12036 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12037 		&pmreg, NULL) == 0)
   12038 		return;
   12039 
   12040 	/* Advertise the wakeup capability */
   12041 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   12042 	    | CTRL_SWDPIN(3));
   12043 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   12044 
   12045 	/* ICH workaround */
   12046 	switch (sc->sc_type) {
   12047 	case WM_T_ICH8:
   12048 	case WM_T_ICH9:
   12049 	case WM_T_ICH10:
   12050 	case WM_T_PCH:
   12051 	case WM_T_PCH2:
   12052 	case WM_T_PCH_LPT:
   12053 	case WM_T_PCH_SPT:
   12054 		/* Disable gig during WOL */
   12055 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12056 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   12057 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12058 		if (sc->sc_type == WM_T_PCH)
   12059 			wm_gmii_reset(sc);
   12060 
   12061 		/* Power down workaround */
   12062 		if (sc->sc_phytype == WMPHY_82577) {
   12063 			struct mii_softc *child;
   12064 
   12065 			/* Assume that the PHY is copper */
   12066 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12067 			if (child->mii_mpd_rev <= 2)
   12068 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   12069 				    (768 << 5) | 25, 0x0444); /* magic num */
   12070 		}
   12071 		break;
   12072 	default:
   12073 		break;
   12074 	}
   12075 
   12076 	/* Keep the laser running on fiber adapters */
   12077 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   12078 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   12079 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12080 		reg |= CTRL_EXT_SWDPIN(3);
   12081 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12082 	}
   12083 
   12084 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   12085 #if 0	/* for the multicast packet */
   12086 	reg |= WUFC_MC;
   12087 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   12088 #endif
   12089 
   12090 	if (sc->sc_type == WM_T_PCH) {
   12091 		wm_enable_phy_wakeup(sc);
   12092 	} else {
   12093 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   12094 		CSR_WRITE(sc, WMREG_WUFC, reg);
   12095 	}
   12096 
   12097 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   12098 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   12099 		|| (sc->sc_type == WM_T_PCH2))
   12100 		    && (sc->sc_phytype == WMPHY_IGP_3))
   12101 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   12102 
   12103 	/* Request PME */
   12104 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   12105 #if 0
   12106 	/* Disable WOL */
   12107 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   12108 #else
   12109 	/* For WOL */
   12110 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   12111 #endif
   12112 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   12113 }
   12114 #endif /* WM_WOL */
   12115 
   12116 /* LPLU */
   12117 
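         /*
          * Disable D0 Low Power Link Up: clear the GbE-disable and D0A
          * LPLU bits in PHY_CTRL.
          */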
   12118 static void
   12119 wm_lplu_d0_disable(struct wm_softc *sc)
   12120 {
   12121 	uint32_t reg;
   12122 
   12123 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12124 		device_xname(sc->sc_dev), __func__));
   12125 
   12126 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12127 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   12128 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12129 }
   12130 
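         /*
          * Same for PCH: clear the A1KDIS and LPLU bits in the HV PHY's
          * OEM bits register and set ANEGNOW to kick autonegotiation.
          */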
   12131 static void
   12132 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   12133 {
   12134 	uint32_t reg;
   12135 
   12136 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12137 		device_xname(sc->sc_dev), __func__));
   12138 
   12139 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   12140 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   12141 	reg |= HV_OEM_BITS_ANEGNOW;
   12142 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   12143 }
   12144 
   12145 /* EEE */
   12146 
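         /*
          * Enable or disable Energy Efficient Ethernet on the I350
          * according to WM_F_EEE: advertise EEE for 100M/1G and enable
          * LPI in EEER when on, clear those bits when off.
          */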
   12147 static void
   12148 wm_set_eee_i350(struct wm_softc *sc)
   12149 {
   12150 	uint32_t ipcnfg, eeer;
   12151 
   12152 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   12153 	eeer = CSR_READ(sc, WMREG_EEER);
   12154 
   12155 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   12156 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12157 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12158 		    | EEER_LPI_FC);
   12159 	} else {
   12160 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   12161 		ipcnfg &= ~IPCNFG_10BASE_TE;
   12162 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   12163 		    | EEER_LPI_FC);
   12164 	}
   12165 
   12166 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   12167 	CSR_WRITE(sc, WMREG_EEER, eeer);
   12168 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   12169 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   12170 }
   12171 
   12172 /*
   12173  * Workarounds (mainly PHY related).
   12174  * Basically, PHY's workarounds are in the PHY drivers.
   12175  */
   12176 
   12177 /* Work-around for 82566 Kumeran PCS lock loss */
   12178 static void
   12179 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   12180 {
   12181 #if 0
   12182 	int miistatus, active, i;
   12183 	int reg;
   12184 
   12185 	miistatus = sc->sc_mii.mii_media_status;
   12186 
   12187 	/* If the link is not up, do nothing */
   12188 	if ((miistatus & IFM_ACTIVE) == 0)
   12189 		return;
   12190 
   12191 	active = sc->sc_mii.mii_media_active;
   12192 
   12193 	/* Nothing to do if the link is other than 1Gbps */
   12194 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   12195 		return;
   12196 
   12197 	for (i = 0; i < 10; i++) {
   12198 		/* read twice */
   12199 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12200 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   12201 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   12202 			goto out;	/* GOOD! */
   12203 
   12204 		/* Reset the PHY */
   12205 		wm_gmii_reset(sc);
   12206 		delay(5*1000);
   12207 	}
   12208 
   12209 	/* Disable GigE link negotiation */
   12210 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   12211 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   12212 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   12213 
   12214 	/*
   12215 	 * Call gig speed drop workaround on Gig disable before accessing
   12216 	 * any PHY registers.
   12217 	 */
   12218 	wm_gig_downshift_workaround_ich8lan(sc);
   12219 
   12220 out:
   12221 	return;
   12222 #endif
   12223 }
   12224 
   12225 /* Workaround for the "WOL from S5 stops working" issue on igp3 PHYs */
   12226 static void
   12227 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   12228 {
   12229 	uint16_t kmrn_reg;
   12230 
   12231 	/* Only for igp3 */
   12232 	if (sc->sc_phytype == WMPHY_IGP_3) {
   12233 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   12234 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   12235 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12236 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   12237 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   12238 	}
   12239 }
   12240 
   12241 /*
   12242  * Workaround for pch's PHYs
   12243  * XXX should be moved to new PHY driver?
   12244  */
   12245 static void
   12246 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   12247 {
   12248 
   12249 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12250 		device_xname(sc->sc_dev), __func__));
   12251 	KASSERT(sc->sc_type == WM_T_PCH);
   12252 
   12253 	if (sc->sc_phytype == WMPHY_82577)
   12254 		wm_set_mdio_slow_mode_hv(sc);
   12255 
   12256 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   12257 
   12258 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   12259 
   12260 	/* 82578 */
   12261 	if (sc->sc_phytype == WMPHY_82578) {
   12262 		struct mii_softc *child;
   12263 
   12264 		/*
   12265 		 * Return registers to default by doing a soft reset then
   12266 		 * writing 0x3140 to the control register
   12267 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
   12268 		 */
   12269 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
   12270 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
   12271 			printf("XXX 82578 rev < 2\n");
   12272 			PHY_RESET(child);
   12273 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
   12274 			    0x3140);
   12275 		}
   12276 	}
   12277 
   12278 	/* Select page 0 */
   12279 	sc->phy.acquire(sc);
   12280 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   12281 	sc->phy.release(sc);
   12282 
   12283 	/*
   12284 	 * Configure the K1 Si workaround during phy reset assuming there is
   12285 	 * link so that it disables K1 if link is in 1Gbps.
   12286 	 */
   12287 	wm_k1_gig_workaround_hv(sc, 1);
   12288 }
   12289 
   12290 static void
   12291 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   12292 {
   12293 
   12294 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12295 		device_xname(sc->sc_dev), __func__));
   12296 	KASSERT(sc->sc_type == WM_T_PCH2);
   12297 
   12298 	wm_set_mdio_slow_mode_hv(sc);
   12299 }
   12300 
   12301 static int
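         /*
          * K1 (a power-saving state of the Kumeran interconnect) must not
          * stay enabled while the link is up at 1Gbps, so disable it in
          * that case and apply the matching link stall fix.
          */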
   12302 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
   12303 {
   12304 	int k1_enable = sc->sc_nvm_k1_enabled;
   12305 
   12306 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
   12307 		device_xname(sc->sc_dev), __func__));
   12308 
   12309 	if (sc->phy.acquire(sc) != 0)
   12310 		return -1;
   12311 
   12312 	if (link) {
   12313 		k1_enable = 0;
   12314 
   12315 		/* Link stall fix for link up */
   12316 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
   12317 	} else {
   12318 		/* Link stall fix for link down */
   12319 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
   12320 	}
   12321 
   12322 	wm_configure_k1_ich8lan(sc, k1_enable);
   12323 	sc->phy.release(sc);
   12324 
   12325 	return 0;
   12326 }
   12327 
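         /* Put the PHY's MDIO interface into slow mode. */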
   12328 static void
   12329 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
   12330 {
   12331 	uint32_t reg;
   12332 
   12333 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
   12334 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
   12335 	    reg | HV_KMRN_MDIO_SLOW);
   12336 }
   12337 
   12338 static void
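         /*
          * Enable or disable K1 via the Kumeran K1_CONFIG register, then
          * briefly force the speed with CTRL_EXT_SPD_BYPS set, presumably
          * so that the new K1 setting latches.
          */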
   12339 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
   12340 {
   12341 	uint32_t ctrl, ctrl_ext, tmp;
   12342 	uint16_t kmrn_reg;
   12343 
   12344 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
   12345 
   12346 	if (k1_enable)
   12347 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
   12348 	else
   12349 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
   12350 
   12351 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
   12352 
   12353 	delay(20);
   12354 
   12355 	ctrl = CSR_READ(sc, WMREG_CTRL);
   12356 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   12357 
   12358 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
   12359 	tmp |= CTRL_FRCSPD;
   12360 
   12361 	CSR_WRITE(sc, WMREG_CTRL, tmp);
   12362 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
   12363 	CSR_WRITE_FLUSH(sc);
   12364 	delay(20);
   12365 
   12366 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
   12367 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   12368 	CSR_WRITE_FLUSH(sc);
   12369 	delay(20);
   12370 }
   12371 
   12372 /* special case - for 82575 - need to do manual init ... */
   12373 static void
   12374 wm_reset_init_script_82575(struct wm_softc *sc)
   12375 {
   12376 	/*
   12377 	 * Remark: this is untested code - we have no board without EEPROM;
   12378 	 * same setup as mentioned in the FreeBSD driver for the i82575.
   12379 	 */
   12380 
   12381 	/* SerDes configuration via SERDESCTRL */
   12382 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
   12383 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
   12384 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
   12385 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
   12386 
   12387 	/* CCM configuration via CCMCTL register */
   12388 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
   12389 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
   12390 
   12391 	/* PCIe lanes configuration */
   12392 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
   12393 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
   12394 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
   12395 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
   12396 
   12397 	/* PCIe PLL Configuration */
   12398 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
   12399 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
   12400 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
   12401 }
   12402 
   12403 static void
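         /*
          * Restore the MDICNFG external/combined MDIO settings from the
          * NVM after a reset (82580 in SGMII mode only).
          */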
   12404 wm_reset_mdicnfg_82580(struct wm_softc *sc)
   12405 {
   12406 	uint32_t reg;
   12407 	uint16_t nvmword;
   12408 	int rv;
   12409 
   12410 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   12411 		return;
   12412 
   12413 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
   12414 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
   12415 	if (rv != 0) {
   12416 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
   12417 		    __func__);
   12418 		return;
   12419 	}
   12420 
   12421 	reg = CSR_READ(sc, WMREG_MDICNFG);
   12422 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
   12423 		reg |= MDICNFG_DEST;
   12424 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
   12425 		reg |= MDICNFG_COM_MDIO;
   12426 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12427 }
   12428 
   12429 /*
   12430  * I210 Errata 25 and I211 Errata 10
   12431  * Slow System Clock.
   12432  */
   12433 static void
   12434 wm_pll_workaround_i210(struct wm_softc *sc)
   12435 {
   12436 	uint32_t mdicnfg, wuc;
   12437 	uint32_t reg;
   12438 	pcireg_t pcireg;
   12439 	uint32_t pmreg;
   12440 	uint16_t nvmword, tmp_nvmword;
   12441 	int phyval;
   12442 	bool wa_done = false;
   12443 	int i;
   12444 
   12445 	/* Save WUC and MDICNFG registers */
   12446 	wuc = CSR_READ(sc, WMREG_WUC);
   12447 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
   12448 
   12449 	reg = mdicnfg & ~MDICNFG_DEST;
   12450 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
   12451 
   12452 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
   12453 		nvmword = INVM_DEFAULT_AL;
   12454 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
   12455 
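         	/* XXX MDICNFG is not restored if the capability lookup fails */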
   12456 	/* Get Power Management cap offset */
   12457 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   12458 		&pmreg, NULL) == 0)
   12459 		return;
   12460 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
   12461 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
   12462 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
   12463 
   12464 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
   12465 			break; /* OK */
   12466 		}
   12467 
   12468 		wa_done = true;
   12469 		/* Directly reset the internal PHY */
   12470 		reg = CSR_READ(sc, WMREG_CTRL);
   12471 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
   12472 
   12473 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   12474 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
   12475 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   12476 
   12477 		CSR_WRITE(sc, WMREG_WUC, 0);
   12478 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
   12479 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12480 
   12481 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   12482 		    pmreg + PCI_PMCSR);
   12483 		pcireg |= PCI_PMCSR_STATE_D3;
   12484 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12485 		    pmreg + PCI_PMCSR, pcireg);
   12486 		delay(1000);
   12487 		pcireg &= ~PCI_PMCSR_STATE_D3;
   12488 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   12489 		    pmreg + PCI_PMCSR, pcireg);
   12490 
   12491 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
   12492 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
   12493 
   12494 		/* Restore WUC register */
   12495 		CSR_WRITE(sc, WMREG_WUC, wuc);
   12496 	}
   12497 
   12498 	/* Restore MDICNFG setting */
   12499 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
   12500 	if (wa_done)
   12501 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
   12502 }
   12503