      1 /*	$NetBSD: if_wm.c,v 1.375 2015/10/29 07:24:01 msaitoh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- LPLU other than PCH*
     77  *	- TX Multi queue
     78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.375 2015/10/29 07:24:01 msaitoh Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 
    107 #include <sys/rndsource.h>
    108 
    109 #include <net/if.h>
    110 #include <net/if_dl.h>
    111 #include <net/if_media.h>
    112 #include <net/if_ether.h>
    113 
    114 #include <net/bpf.h>
    115 
    116 #include <netinet/in.h>			/* XXX for struct ip */
    117 #include <netinet/in_systm.h>		/* XXX for struct ip */
    118 #include <netinet/ip.h>			/* XXX for struct ip */
    119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    121 
    122 #include <sys/bus.h>
    123 #include <sys/intr.h>
    124 #include <machine/endian.h>
    125 
    126 #include <dev/mii/mii.h>
    127 #include <dev/mii/miivar.h>
    128 #include <dev/mii/miidevs.h>
    129 #include <dev/mii/mii_bitbang.h>
    130 #include <dev/mii/ikphyreg.h>
    131 #include <dev/mii/igphyreg.h>
    132 #include <dev/mii/igphyvar.h>
    133 #include <dev/mii/inbmphyreg.h>
    134 
    135 #include <dev/pci/pcireg.h>
    136 #include <dev/pci/pcivar.h>
    137 #include <dev/pci/pcidevs.h>
    138 
    139 #include <dev/pci/if_wmreg.h>
    140 #include <dev/pci/if_wmvar.h>
    141 
    142 #ifdef WM_DEBUG
    143 #define	WM_DEBUG_LINK		0x01
    144 #define	WM_DEBUG_TX		0x02
    145 #define	WM_DEBUG_RX		0x04
    146 #define	WM_DEBUG_GMII		0x08
    147 #define	WM_DEBUG_MANAGE		0x10
    148 #define	WM_DEBUG_NVM		0x20
    149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
    151 
    152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
    153 #else
    154 #define	DPRINTF(x, y)	/* nothing */
    155 #endif /* WM_DEBUG */
    156 
    157 #ifdef NET_MPSAFE
    158 #define WM_MPSAFE	1
    159 #endif
    160 
    161 /*
    162  * Maximum number of interrupts this driver supports.
    163  */
    164 #define WM_MAX_NTXINTR		16
    165 #define WM_MAX_NRXINTR		16
    166 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
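
        /*
         * Editor's note (not in the original source): this works out to
         * 16 TX + 16 RX + 1 = 33 vectors; the extra vector is the MSI-X
         * link interrupt (see sc_link_intr_idx and wm_linkintr_msix()).
         */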
    167 
    168 /*
    169  * Transmit descriptor list size.  Due to errata, we can only have
    170  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    171  * on >= 82544.  We tell the upper layers that they can queue a lot
    172  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    173  * of them at a time.
    174  *
    175  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    176  * chains containing many small mbufs have been observed in zero-copy
    177  * situations with jumbo frames.
    178  */
    179 #define	WM_NTXSEGS		256
    180 #define	WM_IFQUEUELEN		256
    181 #define	WM_TXQUEUELEN_MAX	64
    182 #define	WM_TXQUEUELEN_MAX_82547	16
    183 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    184 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    185 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    186 #define	WM_NTXDESC_82542	256
    187 #define	WM_NTXDESC_82544	4096
    188 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    189 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    190 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
    191 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    192 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
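
        /*
         * Editor's note (illustrative, not from the original): WM_NTXDESC()
         * and WM_TXQUEUELEN() must be powers of two (see struct wm_txqueue),
         * so the "& mask" in WM_NEXTTX()/WM_NEXTTXS() is a cheap modulo;
         * e.g. WM_NEXTTX(txq, WM_NTXDESC(txq) - 1) wraps back to index 0.
         */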
    193 
    194 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    195 
    196 /*
    197  * Receive descriptor list size.  We have one Rx buffer for normal
    198  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    199  * packet.  We allocate 256 receive descriptors, each with a 2k
    200  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    201  */
    202 #define	WM_NRXDESC		256
    203 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    204 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    205 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
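
        /*
         * Worked example (editor's note; assumes a ~9000-byte jumbo frame):
         * one such frame spans roundup(9000 / MCLBYTES) = 5 clusters, and
         * 256 descriptors / 5 clusters leaves room for about 51 in-flight
         * jumbo packets, hence "room for 50" in the comment above.
         */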
    206 
    207 typedef union txdescs {
    208 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    209 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    210 } txdescs_t;
    211 
    212 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
    213 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    214 
    215 /*
    216  * Software state for transmit jobs.
    217  */
    218 struct wm_txsoft {
    219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    221 	int txs_firstdesc;		/* first descriptor in packet */
    222 	int txs_lastdesc;		/* last descriptor in packet */
    223 	int txs_ndesc;			/* # of descriptors used */
    224 };
    225 
    226 /*
    227  * Software state for receive buffers.  Each descriptor gets a
    228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    229  * more than one buffer, we chain them together.
    230  */
    231 struct wm_rxsoft {
    232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    234 };
    235 
    236 #define WM_LINKUP_TIMEOUT	50
    237 
    238 static uint16_t swfwphysem[] = {
    239 	SWFW_PHY0_SM,
    240 	SWFW_PHY1_SM,
    241 	SWFW_PHY2_SM,
    242 	SWFW_PHY3_SM
    243 };
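
        /*
         * Editor's note: one semaphore identifier per LAN function;
         * presumably indexed by sc_funcid (0 to 3, see struct wm_softc).
         */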
    244 
    245 static const uint32_t wm_82580_rxpbs_table[] = {
    246 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    247 };
    248 
    249 struct wm_softc;
    250 
    251 struct wm_txqueue {
    252 	kmutex_t *txq_lock;		/* lock for tx operations */
    253 
    254 	struct wm_softc *txq_sc;
    255 
    256 	int txq_id;			/* index of this transmit queue */
    257 	int txq_intr_idx;		/* index into the MSI-X table */
    258 
    259 	/* Software state for the transmit descriptors. */
    260 	int txq_num;			/* must be a power of two */
    261 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    262 
    263 	/* TX control data structures. */
    264 	int txq_ndesc;			/* must be a power of two */
    265 	txdescs_t *txq_descs_u;
    266 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    267 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
    268 	int txq_desc_rseg;		/* real number of control segments */
    269 	size_t txq_desc_size;		/* control data size */
    270 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    271 #define	txq_descs	txq_descs_u->sctxu_txdescs
    272 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    273 
    274 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    275 
    276 	int txq_free;			/* number of free Tx descriptors */
    277 	int txq_next;			/* next ready Tx descriptor */
    278 
    279 	int txq_sfree;			/* number of free Tx jobs */
    280 	int txq_snext;			/* next free Tx job */
    281 	int txq_sdirty;			/* dirty Tx jobs */
    282 
    283 	/* These 4 variables are used only on the 82547. */
    284 	int txq_fifo_size;		/* Tx FIFO size */
    285 	int txq_fifo_head;		/* current head of FIFO */
    286 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    287 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    288 
    289 	/* XXX which event counter is required? */
    290 };
    291 
    292 struct wm_rxqueue {
    293 	kmutex_t *rxq_lock;		/* lock for rx operations */
    294 
    295 	struct wm_softc *rxq_sc;
    296 
    297 	int rxq_id;			/* index of this receive queue */
    298 	int rxq_intr_idx;		/* index into the MSI-X table */
    299 
    300 	/* Software state for the receive descriptors. */
    301 	wiseman_rxdesc_t *rxq_descs;
    302 
    303 	/* RX control data structures. */
    304 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    305 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    306 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
    307 	int rxq_desc_rseg;		/* real number of control segments */
    308 	size_t rxq_desc_size;		/* control data size */
    309 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    310 
    311 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    312 
    313 	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
    314 	int rxq_discard;
    315 	int rxq_len;
    316 	struct mbuf *rxq_head;
    317 	struct mbuf *rxq_tail;
    318 	struct mbuf **rxq_tailp;
    319 
    320 	/* XXX which event counter is required? */
    321 };
    322 
    323 /*
    324  * Software state per device.
    325  */
    326 struct wm_softc {
    327 	device_t sc_dev;		/* generic device information */
    328 	bus_space_tag_t sc_st;		/* bus space tag */
    329 	bus_space_handle_t sc_sh;	/* bus space handle */
    330 	bus_size_t sc_ss;		/* bus space size */
    331 	bus_space_tag_t sc_iot;		/* I/O space tag */
    332 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    333 	bus_size_t sc_ios;		/* I/O space size */
    334 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    335 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    336 	bus_size_t sc_flashs;		/* flash registers space size */
    337 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    338 
    339 	struct ethercom sc_ethercom;	/* ethernet common data */
    340 	struct mii_data sc_mii;		/* MII/media information */
    341 
    342 	pci_chipset_tag_t sc_pc;
    343 	pcitag_t sc_pcitag;
    344 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    345 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    346 
    347 	uint16_t sc_pcidevid;		/* PCI device ID */
    348 	wm_chip_type sc_type;		/* MAC type */
    349 	int sc_rev;			/* MAC revision */
    350 	wm_phy_type sc_phytype;		/* PHY type */
    351 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    352 #define	WM_MEDIATYPE_UNKNOWN		0x00
    353 #define	WM_MEDIATYPE_FIBER		0x01
    354 #define	WM_MEDIATYPE_COPPER		0x02
    355 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    356 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    357 	int sc_flags;			/* flags; see below */
    358 	int sc_if_flags;		/* last if_flags */
    359 	int sc_flowflags;		/* 802.3x flow control flags */
    360 	int sc_align_tweak;
    361 
    362 	void *sc_ihs[WM_MAX_NINTR];	/*
    363 					 * interrupt cookie.
    364 					 * legacy and msi use sc_ihs[0].
    365 					 */
    366 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    367 	int sc_nintrs;			/* number of interrupts */
    368 
    369 	int sc_link_intr_idx;		/* index into the MSI-X table */
    370 
    371 	callout_t sc_tick_ch;		/* tick callout */
    372 	bool sc_stopping;
    373 
    374 	int sc_nvm_ver_major;
    375 	int sc_nvm_ver_minor;
    376 	int sc_nvm_ver_build;
    377 	int sc_nvm_addrbits;		/* NVM address bits */
    378 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    379 	int sc_ich8_flash_base;
    380 	int sc_ich8_flash_bank_size;
    381 	int sc_nvm_k1_enabled;
    382 
    383 	int sc_ntxqueues;
    384 	struct wm_txqueue *sc_txq;
    385 
    386 	int sc_nrxqueues;
    387 	struct wm_rxqueue *sc_rxq;
    388 
    389 #ifdef WM_EVENT_COUNTERS
    390 	/* Event counters. */
    391 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    392 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    393 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    394 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    395 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    396 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    397 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    398 
    399 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    400 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    401 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    402 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    403 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    404 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    405 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    406 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    407 
    408 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    409 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    410 
    411 	struct evcnt sc_ev_tu;		/* Tx underrun */
    412 
    413 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    414 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    415 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    416 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    417 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    418 #endif /* WM_EVENT_COUNTERS */
    419 
    420 	/* This variable is used only on the 82547. */
    421 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    422 
    423 	uint32_t sc_ctrl;		/* prototype CTRL register */
    424 #if 0
    425 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    426 #endif
    427 	uint32_t sc_icr;		/* prototype interrupt bits */
    428 	uint32_t sc_itr;		/* prototype intr throttling reg */
    429 	uint32_t sc_tctl;		/* prototype TCTL register */
    430 	uint32_t sc_rctl;		/* prototype RCTL register */
    431 	uint32_t sc_txcw;		/* prototype TXCW register */
    432 	uint32_t sc_tipg;		/* prototype TIPG register */
    433 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    434 	uint32_t sc_pba;		/* prototype PBA register */
    435 
    436 	int sc_tbi_linkup;		/* TBI link status */
    437 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    438 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    439 
    440 	int sc_mchash_type;		/* multicast filter offset */
    441 
    442 	krndsource_t rnd_source;	/* random source */
    443 
    444 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    445 };
    446 
    447 #define WM_TX_LOCK(_txq)	do { if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock); } while (0)
    448 #define WM_TX_UNLOCK(_txq)	do { if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock); } while (0)
    449 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
    450 #define WM_RX_LOCK(_rxq)	do { if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock); } while (0)
    451 #define WM_RX_UNLOCK(_rxq)	do { if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock); } while (0)
    452 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
    453 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
    454 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
    455 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
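
        /*
         * Editor's note: each of the lock macros above tolerates a NULL
         * mutex pointer (presumably the non-WM_MPSAFE configuration), so
         * callers can use identical locking calls whether or not per-queue
         * and core mutexes were actually created.
         */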
    456 
    457 #ifdef WM_MPSAFE
    458 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    459 #else
    460 #define CALLOUT_FLAGS	0
    461 #endif
    462 
    463 #define	WM_RXCHAIN_RESET(rxq)						\
    464 do {									\
    465 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    466 	*(rxq)->rxq_tailp = NULL;					\
    467 	(rxq)->rxq_len = 0;						\
    468 } while (/*CONSTCOND*/0)
    469 
    470 #define	WM_RXCHAIN_LINK(rxq, m)						\
    471 do {									\
    472 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    473 	(rxq)->rxq_tailp = &(m)->m_next;				\
    474 } while (/*CONSTCOND*/0)
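
        /*
         * Editor's sketch of the rxq_tailp idiom above (not from the
         * original): keeping a pointer to the last m_next slot makes
         * appending a fragment O(1), with no walk of the chain:
         *
         *	WM_RXCHAIN_RESET(rxq);	  rxq_head = NULL, tailp = &rxq_head
         *	WM_RXCHAIN_LINK(rxq, m1); rxq_head = m1,   tailp = &m1->m_next
         *	WM_RXCHAIN_LINK(rxq, m2); m1->m_next = m2, tailp = &m2->m_next
         */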
    475 
    476 #ifdef WM_EVENT_COUNTERS
    477 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    478 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    479 #else
    480 #define	WM_EVCNT_INCR(ev)	/* nothing */
    481 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    482 #endif
    483 
    484 #define	CSR_READ(sc, reg)						\
    485 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    486 #define	CSR_WRITE(sc, reg, val)						\
    487 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    488 #define	CSR_WRITE_FLUSH(sc)						\
    489 	(void) CSR_READ((sc), WMREG_STATUS)
    490 
    491 #define ICH8_FLASH_READ32(sc, reg) \
    492 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    493 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    494 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    495 
    496 #define ICH8_FLASH_READ16(sc, reg) \
    497 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    498 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    499 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    500 
    501 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
    502 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    503 
    504 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    505 #define	WM_CDTXADDR_HI(txq, x)						\
    506 	(sizeof(bus_addr_t) == 8 ?					\
    507 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    508 
    509 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    510 #define	WM_CDRXADDR_HI(rxq, x)						\
    511 	(sizeof(bus_addr_t) == 8 ?					\
    512 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
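
        /*
         * Editor's note: the _LO/_HI pairs above split a descriptor ring's
         * DMA address into the 32-bit halves expected by the chip's paired
         * base-address registers (e.g. the TDBAL/TDBAH style pairs); with
         * a 32-bit bus_addr_t the high half is always 0.
         */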
    513 
    514 /*
    515  * Register read/write functions
    516  * other than CSR_{READ|WRITE}().
    517  */
    518 #if 0
    519 static inline uint32_t wm_io_read(struct wm_softc *, int);
    520 #endif
    521 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    522 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    523 	uint32_t, uint32_t);
    524 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    525 
    526 /*
    527  * Descriptor sync/init functions.
    528  */
    529 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    530 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    531 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    532 
    533 /*
    534  * Device driver interface functions and commonly used functions.
    535  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    536  */
    537 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    538 static int	wm_match(device_t, cfdata_t, void *);
    539 static void	wm_attach(device_t, device_t, void *);
    540 static int	wm_detach(device_t, int);
    541 static bool	wm_suspend(device_t, const pmf_qual_t *);
    542 static bool	wm_resume(device_t, const pmf_qual_t *);
    543 static void	wm_watchdog(struct ifnet *);
    544 static void	wm_tick(void *);
    545 static int	wm_ifflags_cb(struct ethercom *);
    546 static int	wm_ioctl(struct ifnet *, u_long, void *);
    547 /* MAC address related */
    548 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    549 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    550 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    551 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    552 static void	wm_set_filter(struct wm_softc *);
    553 /* Reset and init related */
    554 static void	wm_set_vlan(struct wm_softc *);
    555 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    556 static void	wm_get_auto_rd_done(struct wm_softc *);
    557 static void	wm_lan_init_done(struct wm_softc *);
    558 static void	wm_get_cfg_done(struct wm_softc *);
    559 static void	wm_initialize_hardware_bits(struct wm_softc *);
    560 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    561 static void	wm_reset(struct wm_softc *);
    562 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    563 static void	wm_rxdrain(struct wm_rxqueue *);
    564 static void	wm_rss_getkey(uint8_t *);
    565 static void	wm_init_rss(struct wm_softc *);
    566 static void	wm_adjust_qnum(struct wm_softc *, int);
    567 static int	wm_setup_legacy(struct wm_softc *);
    568 static int	wm_setup_msix(struct wm_softc *);
    569 static int	wm_init(struct ifnet *);
    570 static int	wm_init_locked(struct ifnet *);
    571 static void	wm_stop(struct ifnet *, int);
    572 static void	wm_stop_locked(struct ifnet *, int);
    573 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    574 static void	wm_82547_txfifo_stall(void *);
    575 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    576 /* DMA related */
    577 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    578 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    579 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    580 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
    581 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    582 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    583 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
    584 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    585 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    586 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    587 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    588 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    589 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    590 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
    591 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
    592 static int	wm_alloc_txrx_queues(struct wm_softc *);
    593 static void	wm_free_txrx_queues(struct wm_softc *);
    594 static int	wm_init_txrx_queues(struct wm_softc *);
    595 /* Start */
    596 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    597     uint32_t *, uint8_t *);
    598 static void	wm_start(struct ifnet *);
    599 static void	wm_start_locked(struct ifnet *);
    600 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    601     uint32_t *, uint32_t *, bool *);
    602 static void	wm_nq_start(struct ifnet *);
    603 static void	wm_nq_start_locked(struct ifnet *);
    604 /* Interrupt */
    605 static int	wm_txeof(struct wm_softc *);
    606 static void	wm_rxeof(struct wm_rxqueue *);
    607 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    608 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    609 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    610 static void	wm_linkintr(struct wm_softc *, uint32_t);
    611 static int	wm_intr_legacy(void *);
    612 static int	wm_txintr_msix(void *);
    613 static int	wm_rxintr_msix(void *);
    614 static int	wm_linkintr_msix(void *);
    615 
    616 /*
    617  * Media related.
    618  * GMII, SGMII, TBI, SERDES and SFP.
    619  */
    620 /* Common */
    621 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    622 /* GMII related */
    623 static void	wm_gmii_reset(struct wm_softc *);
    624 static int	wm_get_phy_id_82575(struct wm_softc *);
    625 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    626 static int	wm_gmii_mediachange(struct ifnet *);
    627 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    628 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    629 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    630 static int	wm_gmii_i82543_readreg(device_t, int, int);
    631 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    632 static int	wm_gmii_i82544_readreg(device_t, int, int);
    633 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    634 static int	wm_gmii_i80003_readreg(device_t, int, int);
    635 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    636 static int	wm_gmii_bm_readreg(device_t, int, int);
    637 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    638 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    639 static int	wm_gmii_hv_readreg(device_t, int, int);
    640 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    641 static int	wm_gmii_82580_readreg(device_t, int, int);
    642 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    643 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    644 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    645 static void	wm_gmii_statchg(struct ifnet *);
    646 static int	wm_kmrn_readreg(struct wm_softc *, int);
    647 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    648 /* SGMII */
    649 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    650 static int	wm_sgmii_readreg(device_t, int, int);
    651 static void	wm_sgmii_writereg(device_t, int, int, int);
    652 /* TBI related */
    653 static void	wm_tbi_mediainit(struct wm_softc *);
    654 static int	wm_tbi_mediachange(struct ifnet *);
    655 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    656 static int	wm_check_for_link(struct wm_softc *);
    657 static void	wm_tbi_tick(struct wm_softc *);
    658 /* SERDES related */
    659 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    660 static int	wm_serdes_mediachange(struct ifnet *);
    661 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    662 static void	wm_serdes_tick(struct wm_softc *);
    663 /* SFP related */
    664 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    665 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    666 
    667 /*
    668  * NVM related.
    669  * Microwire, SPI (w/wo EERD) and Flash.
    670  */
    671 /* Misc functions */
    672 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    673 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    674 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    675 /* Microwire */
    676 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    677 /* SPI */
    678 static int	wm_nvm_ready_spi(struct wm_softc *);
    679 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
    680 /* Reading via EERD */
    681 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    682 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    683 /* Flash */
    684 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    685     unsigned int *);
    686 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    687 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    688 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    689 	uint16_t *);
    690 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    691 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    692 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    693 /* iNVM */
    694 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    695 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
    696 /* Lock, detect NVM type, validate checksum and read */
    697 static int	wm_nvm_acquire(struct wm_softc *);
    698 static void	wm_nvm_release(struct wm_softc *);
    699 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    700 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    701 static int	wm_nvm_validate_checksum(struct wm_softc *);
    702 static void	wm_nvm_version_invm(struct wm_softc *);
    703 static void	wm_nvm_version(struct wm_softc *);
    704 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    705 
    706 /*
    707  * Hardware semaphores.
    708  * Very complex...
    709  */
    710 static int	wm_get_swsm_semaphore(struct wm_softc *);
    711 static void	wm_put_swsm_semaphore(struct wm_softc *);
    712 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    713 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    714 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    715 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    716 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    717 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    718 
    719 /*
    720  * Management mode and power management related subroutines.
    721  * BMC, AMT, suspend/resume and EEE.
    722  */
    723 static int	wm_check_mng_mode(struct wm_softc *);
    724 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    725 static int	wm_check_mng_mode_82574(struct wm_softc *);
    726 static int	wm_check_mng_mode_generic(struct wm_softc *);
    727 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    728 static int	wm_check_reset_block(struct wm_softc *);
    729 static void	wm_get_hw_control(struct wm_softc *);
    730 static void	wm_release_hw_control(struct wm_softc *);
    731 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    732 static void	wm_smbustopci(struct wm_softc *);
    733 static void	wm_init_manageability(struct wm_softc *);
    734 static void	wm_release_manageability(struct wm_softc *);
    735 static void	wm_get_wakeup(struct wm_softc *);
    736 #ifdef WM_WOL
    737 static void	wm_enable_phy_wakeup(struct wm_softc *);
    738 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    739 static void	wm_enable_wakeup(struct wm_softc *);
    740 #endif
    741 /* EEE */
    742 static void	wm_set_eee_i350(struct wm_softc *);
    743 
    744 /*
    745  * Workarounds (mainly PHY related).
    746  * Most of the PHY workarounds live in the PHY drivers themselves.
    747  */
    748 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    749 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    750 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    751 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    752 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    753 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    754 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    755 static void	wm_reset_init_script_82575(struct wm_softc *);
    756 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    757 static void	wm_pll_workaround_i210(struct wm_softc *);
    758 
    759 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    760     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    761 
    762 /*
    763  * Devices supported by this driver.
    764  */
    765 static const struct wm_product {
    766 	pci_vendor_id_t		wmp_vendor;
    767 	pci_product_id_t	wmp_product;
    768 	const char		*wmp_name;
    769 	wm_chip_type		wmp_type;
    770 	uint32_t		wmp_flags;
    771 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    772 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    773 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    774 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    775 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    776 } wm_products[] = {
    777 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    778 	  "Intel i82542 1000BASE-X Ethernet",
    779 	  WM_T_82542_2_1,	WMP_F_FIBER },
    780 
    781 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    782 	  "Intel i82543GC 1000BASE-X Ethernet",
    783 	  WM_T_82543,		WMP_F_FIBER },
    784 
    785 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    786 	  "Intel i82543GC 1000BASE-T Ethernet",
    787 	  WM_T_82543,		WMP_F_COPPER },
    788 
    789 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    790 	  "Intel i82544EI 1000BASE-T Ethernet",
    791 	  WM_T_82544,		WMP_F_COPPER },
    792 
    793 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    794 	  "Intel i82544EI 1000BASE-X Ethernet",
    795 	  WM_T_82544,		WMP_F_FIBER },
    796 
    797 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    798 	  "Intel i82544GC 1000BASE-T Ethernet",
    799 	  WM_T_82544,		WMP_F_COPPER },
    800 
    801 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    802 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    803 	  WM_T_82544,		WMP_F_COPPER },
    804 
    805 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    806 	  "Intel i82540EM 1000BASE-T Ethernet",
    807 	  WM_T_82540,		WMP_F_COPPER },
    808 
    809 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    810 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    811 	  WM_T_82540,		WMP_F_COPPER },
    812 
    813 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    814 	  "Intel i82540EP 1000BASE-T Ethernet",
    815 	  WM_T_82540,		WMP_F_COPPER },
    816 
    817 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    818 	  "Intel i82540EP 1000BASE-T Ethernet",
    819 	  WM_T_82540,		WMP_F_COPPER },
    820 
    821 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    822 	  "Intel i82540EP 1000BASE-T Ethernet",
    823 	  WM_T_82540,		WMP_F_COPPER },
    824 
    825 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    826 	  "Intel i82545EM 1000BASE-T Ethernet",
    827 	  WM_T_82545,		WMP_F_COPPER },
    828 
    829 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    830 	  "Intel i82545GM 1000BASE-T Ethernet",
    831 	  WM_T_82545_3,		WMP_F_COPPER },
    832 
    833 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    834 	  "Intel i82545GM 1000BASE-X Ethernet",
    835 	  WM_T_82545_3,		WMP_F_FIBER },
    836 
    837 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    838 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    839 	  WM_T_82545_3,		WMP_F_SERDES },
    840 
    841 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    842 	  "Intel i82546EB 1000BASE-T Ethernet",
    843 	  WM_T_82546,		WMP_F_COPPER },
    844 
    845 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    846 	  "Intel i82546EB 1000BASE-T Ethernet",
    847 	  WM_T_82546,		WMP_F_COPPER },
    848 
    849 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    850 	  "Intel i82545EM 1000BASE-X Ethernet",
    851 	  WM_T_82545,		WMP_F_FIBER },
    852 
    853 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    854 	  "Intel i82546EB 1000BASE-X Ethernet",
    855 	  WM_T_82546,		WMP_F_FIBER },
    856 
    857 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    858 	  "Intel i82546GB 1000BASE-T Ethernet",
    859 	  WM_T_82546_3,		WMP_F_COPPER },
    860 
    861 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    862 	  "Intel i82546GB 1000BASE-X Ethernet",
    863 	  WM_T_82546_3,		WMP_F_FIBER },
    864 
    865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    866 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    867 	  WM_T_82546_3,		WMP_F_SERDES },
    868 
    869 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    870 	  "i82546GB quad-port Gigabit Ethernet",
    871 	  WM_T_82546_3,		WMP_F_COPPER },
    872 
    873 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    874 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    875 	  WM_T_82546_3,		WMP_F_COPPER },
    876 
    877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    878 	  "Intel PRO/1000MT (82546GB)",
    879 	  WM_T_82546_3,		WMP_F_COPPER },
    880 
    881 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    882 	  "Intel i82541EI 1000BASE-T Ethernet",
    883 	  WM_T_82541,		WMP_F_COPPER },
    884 
    885 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    886 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    887 	  WM_T_82541,		WMP_F_COPPER },
    888 
    889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    890 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    891 	  WM_T_82541,		WMP_F_COPPER },
    892 
    893 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    894 	  "Intel i82541ER 1000BASE-T Ethernet",
    895 	  WM_T_82541_2,		WMP_F_COPPER },
    896 
    897 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    898 	  "Intel i82541GI 1000BASE-T Ethernet",
    899 	  WM_T_82541_2,		WMP_F_COPPER },
    900 
    901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    902 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    903 	  WM_T_82541_2,		WMP_F_COPPER },
    904 
    905 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    906 	  "Intel i82541PI 1000BASE-T Ethernet",
    907 	  WM_T_82541_2,		WMP_F_COPPER },
    908 
    909 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    910 	  "Intel i82547EI 1000BASE-T Ethernet",
    911 	  WM_T_82547,		WMP_F_COPPER },
    912 
    913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    914 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    915 	  WM_T_82547,		WMP_F_COPPER },
    916 
    917 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    918 	  "Intel i82547GI 1000BASE-T Ethernet",
    919 	  WM_T_82547_2,		WMP_F_COPPER },
    920 
    921 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    922 	  "Intel PRO/1000 PT (82571EB)",
    923 	  WM_T_82571,		WMP_F_COPPER },
    924 
    925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    926 	  "Intel PRO/1000 PF (82571EB)",
    927 	  WM_T_82571,		WMP_F_FIBER },
    928 
    929 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    930 	  "Intel PRO/1000 PB (82571EB)",
    931 	  WM_T_82571,		WMP_F_SERDES },
    932 
    933 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    934 	  "Intel PRO/1000 QT (82571EB)",
    935 	  WM_T_82571,		WMP_F_COPPER },
    936 
    937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    938 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    939 	  WM_T_82571,		WMP_F_COPPER, },
    940 
    941 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    942 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    943 	  WM_T_82571,		WMP_F_COPPER, },
    944 
    945 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    946 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    947 	  WM_T_82571,		WMP_F_SERDES, },
    948 
    949 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    950 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    951 	  WM_T_82571,		WMP_F_SERDES, },
    952 
    953 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    954 	  "Intel 82571EB Quad 1000baseX Ethernet",
    955 	  WM_T_82571,		WMP_F_FIBER, },
    956 
    957 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    958 	  "Intel i82572EI 1000baseT Ethernet",
    959 	  WM_T_82572,		WMP_F_COPPER },
    960 
    961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    962 	  "Intel i82572EI 1000baseX Ethernet",
    963 	  WM_T_82572,		WMP_F_FIBER },
    964 
    965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    966 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    967 	  WM_T_82572,		WMP_F_SERDES },
    968 
    969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    970 	  "Intel i82572EI 1000baseT Ethernet",
    971 	  WM_T_82572,		WMP_F_COPPER },
    972 
    973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    974 	  "Intel i82573E",
    975 	  WM_T_82573,		WMP_F_COPPER },
    976 
    977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    978 	  "Intel i82573E IAMT",
    979 	  WM_T_82573,		WMP_F_COPPER },
    980 
    981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    982 	  "Intel i82573L Gigabit Ethernet",
    983 	  WM_T_82573,		WMP_F_COPPER },
    984 
    985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    986 	  "Intel i82574L",
    987 	  WM_T_82574,		WMP_F_COPPER },
    988 
    989 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
    990 	  "Intel i82574L",
    991 	  WM_T_82574,		WMP_F_COPPER },
    992 
    993 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
    994 	  "Intel i82583V",
    995 	  WM_T_82583,		WMP_F_COPPER },
    996 
    997 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
    998 	  "i80003 dual 1000baseT Ethernet",
    999 	  WM_T_80003,		WMP_F_COPPER },
   1000 
   1001 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1002 	  "i80003 dual 1000baseX Ethernet",
   1003 	  WM_T_80003,		WMP_F_COPPER },
   1004 
   1005 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1006 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1007 	  WM_T_80003,		WMP_F_SERDES },
   1008 
   1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1010 	  "Intel i80003 1000baseT Ethernet",
   1011 	  WM_T_80003,		WMP_F_COPPER },
   1012 
   1013 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1014 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1015 	  WM_T_80003,		WMP_F_SERDES },
   1016 
   1017 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1018 	  "Intel i82801H (M_AMT) LAN Controller",
   1019 	  WM_T_ICH8,		WMP_F_COPPER },
   1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1021 	  "Intel i82801H (AMT) LAN Controller",
   1022 	  WM_T_ICH8,		WMP_F_COPPER },
   1023 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1024 	  "Intel i82801H LAN Controller",
   1025 	  WM_T_ICH8,		WMP_F_COPPER },
   1026 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1027 	  "Intel i82801H (IFE) LAN Controller",
   1028 	  WM_T_ICH8,		WMP_F_COPPER },
   1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1030 	  "Intel i82801H (M) LAN Controller",
   1031 	  WM_T_ICH8,		WMP_F_COPPER },
   1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1033 	  "Intel i82801H IFE (GT) LAN Controller",
   1034 	  WM_T_ICH8,		WMP_F_COPPER },
   1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1036 	  "Intel i82801H IFE (G) LAN Controller",
   1037 	  WM_T_ICH8,		WMP_F_COPPER },
   1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1039 	  "82801I (AMT) LAN Controller",
   1040 	  WM_T_ICH9,		WMP_F_COPPER },
   1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1042 	  "82801I LAN Controller",
   1043 	  WM_T_ICH9,		WMP_F_COPPER },
   1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1045 	  "82801I (G) LAN Controller",
   1046 	  WM_T_ICH9,		WMP_F_COPPER },
   1047 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1048 	  "82801I (GT) LAN Controller",
   1049 	  WM_T_ICH9,		WMP_F_COPPER },
   1050 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1051 	  "82801I (C) LAN Controller",
   1052 	  WM_T_ICH9,		WMP_F_COPPER },
   1053 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1054 	  "82801I mobile LAN Controller",
   1055 	  WM_T_ICH9,		WMP_F_COPPER },
   1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1057 	  "82801I mobile (V) LAN Controller",
   1058 	  WM_T_ICH9,		WMP_F_COPPER },
   1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1060 	  "82801I mobile (AMT) LAN Controller",
   1061 	  WM_T_ICH9,		WMP_F_COPPER },
   1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1063 	  "82567LM-4 LAN Controller",
   1064 	  WM_T_ICH9,		WMP_F_COPPER },
   1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1066 	  "82567V-3 LAN Controller",
   1067 	  WM_T_ICH9,		WMP_F_COPPER },
   1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1069 	  "82567LM-2 LAN Controller",
   1070 	  WM_T_ICH10,		WMP_F_COPPER },
   1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1072 	  "82567LF-2 LAN Controller",
   1073 	  WM_T_ICH10,		WMP_F_COPPER },
   1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1075 	  "82567LM-3 LAN Controller",
   1076 	  WM_T_ICH10,		WMP_F_COPPER },
   1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1078 	  "82567LF-3 LAN Controller",
   1079 	  WM_T_ICH10,		WMP_F_COPPER },
   1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1081 	  "82567V-2 LAN Controller",
   1082 	  WM_T_ICH10,		WMP_F_COPPER },
   1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1084 	  "82567V-3? LAN Controller",
   1085 	  WM_T_ICH10,		WMP_F_COPPER },
   1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1087 	  "HANKSVILLE LAN Controller",
   1088 	  WM_T_ICH10,		WMP_F_COPPER },
   1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1090 	  "PCH LAN (82577LM) Controller",
   1091 	  WM_T_PCH,		WMP_F_COPPER },
   1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1093 	  "PCH LAN (82577LC) Controller",
   1094 	  WM_T_PCH,		WMP_F_COPPER },
   1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1096 	  "PCH LAN (82578DM) Controller",
   1097 	  WM_T_PCH,		WMP_F_COPPER },
   1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1099 	  "PCH LAN (82578DC) Controller",
   1100 	  WM_T_PCH,		WMP_F_COPPER },
   1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1102 	  "PCH2 LAN (82579LM) Controller",
   1103 	  WM_T_PCH2,		WMP_F_COPPER },
   1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1105 	  "PCH2 LAN (82579V) Controller",
   1106 	  WM_T_PCH2,		WMP_F_COPPER },
   1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1108 	  "82575EB dual-1000baseT Ethernet",
   1109 	  WM_T_82575,		WMP_F_COPPER },
   1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1111 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1112 	  WM_T_82575,		WMP_F_SERDES },
   1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1114 	  "82575GB quad-1000baseT Ethernet",
   1115 	  WM_T_82575,		WMP_F_COPPER },
   1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1117 	  "82575GB quad-1000baseT Ethernet (PM)",
   1118 	  WM_T_82575,		WMP_F_COPPER },
   1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1120 	  "82576 1000BaseT Ethernet",
   1121 	  WM_T_82576,		WMP_F_COPPER },
   1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1123 	  "82576 1000BaseX Ethernet",
   1124 	  WM_T_82576,		WMP_F_FIBER },
   1125 
   1126 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1127 	  "82576 gigabit Ethernet (SERDES)",
   1128 	  WM_T_82576,		WMP_F_SERDES },
   1129 
   1130 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1131 	  "82576 quad-1000BaseT Ethernet",
   1132 	  WM_T_82576,		WMP_F_COPPER },
   1133 
   1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1135 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1136 	  WM_T_82576,		WMP_F_COPPER },
   1137 
   1138 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1139 	  "82576 gigabit Ethernet",
   1140 	  WM_T_82576,		WMP_F_COPPER },
   1141 
   1142 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1143 	  "82576 gigabit Ethernet (SERDES)",
   1144 	  WM_T_82576,		WMP_F_SERDES },
   1145 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1146 	  "82576 quad-gigabit Ethernet (SERDES)",
   1147 	  WM_T_82576,		WMP_F_SERDES },
   1148 
   1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1150 	  "82580 1000BaseT Ethernet",
   1151 	  WM_T_82580,		WMP_F_COPPER },
   1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1153 	  "82580 1000BaseX Ethernet",
   1154 	  WM_T_82580,		WMP_F_FIBER },
   1155 
   1156 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1157 	  "82580 1000BaseT Ethernet (SERDES)",
   1158 	  WM_T_82580,		WMP_F_SERDES },
   1159 
   1160 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1161 	  "82580 gigabit Ethernet (SGMII)",
   1162 	  WM_T_82580,		WMP_F_COPPER },
   1163 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1164 	  "82580 dual-1000BaseT Ethernet",
   1165 	  WM_T_82580,		WMP_F_COPPER },
   1166 
   1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1168 	  "82580 quad-1000BaseX Ethernet",
   1169 	  WM_T_82580,		WMP_F_FIBER },
   1170 
   1171 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1172 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1173 	  WM_T_82580,		WMP_F_COPPER },
   1174 
   1175 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1176 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1177 	  WM_T_82580,		WMP_F_SERDES },
   1178 
   1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1180 	  "DH89XXCC 1000BASE-KX Ethernet",
   1181 	  WM_T_82580,		WMP_F_SERDES },
   1182 
   1183 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1184 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1185 	  WM_T_82580,		WMP_F_SERDES },
   1186 
   1187 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1188 	  "I350 Gigabit Network Connection",
   1189 	  WM_T_I350,		WMP_F_COPPER },
   1190 
   1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1192 	  "I350 Gigabit Fiber Network Connection",
   1193 	  WM_T_I350,		WMP_F_FIBER },
   1194 
   1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1196 	  "I350 Gigabit Backplane Connection",
   1197 	  WM_T_I350,		WMP_F_SERDES },
   1198 
   1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1200 	  "I350 Quad Port Gigabit Ethernet",
   1201 	  WM_T_I350,		WMP_F_SERDES },
   1202 
   1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1204 	  "I350 Gigabit Connection",
   1205 	  WM_T_I350,		WMP_F_COPPER },
   1206 
   1207 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1208 	  "I354 Gigabit Ethernet (KX)",
   1209 	  WM_T_I354,		WMP_F_SERDES },
   1210 
   1211 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1212 	  "I354 Gigabit Ethernet (SGMII)",
   1213 	  WM_T_I354,		WMP_F_COPPER },
   1214 
   1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1216 	  "I354 Gigabit Ethernet (2.5G)",
   1217 	  WM_T_I354,		WMP_F_COPPER },
   1218 
   1219 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1220 	  "I210-T1 Ethernet Server Adapter",
   1221 	  WM_T_I210,		WMP_F_COPPER },
   1222 
   1223 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1224 	  "I210 Ethernet (Copper OEM)",
   1225 	  WM_T_I210,		WMP_F_COPPER },
   1226 
   1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1228 	  "I210 Ethernet (Copper IT)",
   1229 	  WM_T_I210,		WMP_F_COPPER },
   1230 
   1231 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1232 	  "I210 Ethernet (FLASH less)",
   1233 	  WM_T_I210,		WMP_F_COPPER },
   1234 
   1235 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1236 	  "I210 Gigabit Ethernet (Fiber)",
   1237 	  WM_T_I210,		WMP_F_FIBER },
   1238 
   1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1240 	  "I210 Gigabit Ethernet (SERDES)",
   1241 	  WM_T_I210,		WMP_F_SERDES },
   1242 
   1243 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1244 	  "I210 Gigabit Ethernet (FLASH less)",
   1245 	  WM_T_I210,		WMP_F_SERDES },
   1246 
   1247 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1248 	  "I210 Gigabit Ethernet (SGMII)",
   1249 	  WM_T_I210,		WMP_F_COPPER },
   1250 
   1251 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1252 	  "I211 Ethernet (COPPER)",
   1253 	  WM_T_I211,		WMP_F_COPPER },
   1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1255 	  "I217 V Ethernet Connection",
   1256 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1258 	  "I217 LM Ethernet Connection",
   1259 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1260 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1261 	  "I218 V Ethernet Connection",
   1262 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1263 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1264 	  "I218 V Ethernet Connection",
   1265 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1267 	  "I218 V Ethernet Connection",
   1268 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1269 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1270 	  "I218 LM Ethernet Connection",
   1271 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1273 	  "I218 LM Ethernet Connection",
   1274 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1276 	  "I218 LM Ethernet Connection",
   1277 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1278 	{ 0,			0,
   1279 	  NULL,
   1280 	  0,			0 },
   1281 };
   1282 
   1283 #ifdef WM_EVENT_COUNTERS
   1284 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1285 #endif /* WM_EVENT_COUNTERS */
   1286 
   1287 
   1288 /*
   1289  * Register read/write functions,
   1290  * other than CSR_{READ|WRITE}().
   1291  */
   1292 
   1293 #if 0 /* Not currently used */
   1294 static inline uint32_t
   1295 wm_io_read(struct wm_softc *sc, int reg)
   1296 {
   1297 
   1298 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1299 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1300 }
   1301 #endif
   1302 
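        /*
         * Indirect I/O-space access: the target register offset is first
         * written to the IOADDR window at offset 0 of the I/O BAR, and the
         * data is then transferred through the IODATA window at offset 4.
         */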
   1303 static inline void
   1304 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1305 {
   1306 
   1307 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1308 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1309 }
   1310 
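        /*
         * Write an 8-bit value into one of the 82575 SerDes control
         * registers through its address/data interface, then poll (in 5us
         * steps, up to SCTL_CTL_POLL_TIMEOUT tries) for the READY bit.
         */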
   1311 static inline void
   1312 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1313     uint32_t data)
   1314 {
   1315 	uint32_t regval;
   1316 	int i;
   1317 
   1318 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1319 
   1320 	CSR_WRITE(sc, reg, regval);
   1321 
   1322 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1323 		delay(5);
   1324 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1325 			break;
   1326 	}
   1327 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1328 		aprint_error("%s: WARNING:"
   1329 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1330 		    device_xname(sc->sc_dev), reg);
   1331 	}
   1332 }
   1333 
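        /*
         * Store a DMA address into a descriptor as two little-endian
         * 32-bit halves; the high half is zero when bus addresses are
         * only 32 bits wide.
         */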
   1334 static inline void
   1335 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1336 {
   1337 	wa->wa_low = htole32(v & 0xffffffffU);
   1338 	if (sizeof(bus_addr_t) == 8)
   1339 		wa->wa_high = htole32((uint64_t) v >> 32);
   1340 	else
   1341 		wa->wa_high = 0;
   1342 }
   1343 
   1344 /*
   1345  * Descriptor sync/init functions.
   1346  */
   1347 static inline void
   1348 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1349 {
   1350 	struct wm_softc *sc = txq->txq_sc;
   1351 
   1352 	/* If it will wrap around, sync to the end of the ring. */
   1353 	if ((start + num) > WM_NTXDESC(txq)) {
   1354 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1355 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1356 		    (WM_NTXDESC(txq) - start), ops);
   1357 		num -= (WM_NTXDESC(txq) - start);
   1358 		start = 0;
   1359 	}
   1360 
   1361 	/* Now sync whatever is left. */
   1362 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1363 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1364 }
   1365 
   1366 static inline void
   1367 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1368 {
   1369 	struct wm_softc *sc = rxq->rxq_sc;
   1370 
   1371 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1372 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1373 }
   1374 
   1375 static inline void
   1376 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1377 {
   1378 	struct wm_softc *sc = rxq->rxq_sc;
   1379 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1380 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1381 	struct mbuf *m = rxs->rxs_mbuf;
   1382 
   1383 	/*
   1384 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1385 	 * so that the payload after the Ethernet header is aligned
   1386 	 * to a 4-byte boundary.
   1387 	 *
   1388 	 * XXX BRAINDAMAGE ALERT!
   1389 	 * The stupid chip uses the same size for every buffer, which
   1390 	 * is set in the Receive Control register.  We are using the 2K
   1391 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1392 	 * reason, we can't "scoot" packets longer than the standard
   1393 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1394 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1395 	 * the upper layer copy the headers.
   1396 	 */
   1397 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1398 
   1399 	wm_set_dma_addr(&rxd->wrx_addr,
   1400 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1401 	rxd->wrx_len = 0;
   1402 	rxd->wrx_cksum = 0;
   1403 	rxd->wrx_status = 0;
   1404 	rxd->wrx_errors = 0;
   1405 	rxd->wrx_special = 0;
   1406 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1407 
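        	/* Pass the descriptor to the chip by advancing the ring tail (RDT). */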
   1408 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1409 }
   1410 
   1411 /*
   1412  * Device driver interface functions and commonly used functions.
   1413  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1414  */
   1415 
   1416 /* Lookup supported device table */
   1417 static const struct wm_product *
   1418 wm_lookup(const struct pci_attach_args *pa)
   1419 {
   1420 	const struct wm_product *wmp;
   1421 
   1422 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1423 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1424 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1425 			return wmp;
   1426 	}
   1427 	return NULL;
   1428 }
   1429 
   1430 /* The match function (ca_match) */
   1431 static int
   1432 wm_match(device_t parent, cfdata_t cf, void *aux)
   1433 {
   1434 	struct pci_attach_args *pa = aux;
   1435 
   1436 	if (wm_lookup(pa) != NULL)
   1437 		return 1;
   1438 
   1439 	return 0;
   1440 }
   1441 
   1442 /* The attach function (ca_attach) */
   1443 static void
   1444 wm_attach(device_t parent, device_t self, void *aux)
   1445 {
   1446 	struct wm_softc *sc = device_private(self);
   1447 	struct pci_attach_args *pa = aux;
   1448 	prop_dictionary_t dict;
   1449 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1450 	pci_chipset_tag_t pc = pa->pa_pc;
   1451 	int counts[PCI_INTR_TYPE_SIZE];
   1452 	pci_intr_type_t max_type;
   1453 	const char *eetype, *xname;
   1454 	bus_space_tag_t memt;
   1455 	bus_space_handle_t memh;
   1456 	bus_size_t memsize;
   1457 	int memh_valid;
   1458 	int i, error;
   1459 	const struct wm_product *wmp;
   1460 	prop_data_t ea;
   1461 	prop_number_t pn;
   1462 	uint8_t enaddr[ETHER_ADDR_LEN];
   1463 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1464 	pcireg_t preg, memtype;
   1465 	uint16_t eeprom_data, apme_mask;
   1466 	bool force_clear_smbi;
   1467 	uint32_t link_mode;
   1468 	uint32_t reg;
   1469 
   1470 	sc->sc_dev = self;
   1471 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1472 	sc->sc_stopping = false;
   1473 
   1474 	wmp = wm_lookup(pa);
   1475 #ifdef DIAGNOSTIC
   1476 	if (wmp == NULL) {
   1477 		printf("\n");
   1478 		panic("wm_attach: impossible");
   1479 	}
   1480 #endif
   1481 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1482 
   1483 	sc->sc_pc = pa->pa_pc;
   1484 	sc->sc_pcitag = pa->pa_tag;
   1485 
   1486 	if (pci_dma64_available(pa))
   1487 		sc->sc_dmat = pa->pa_dmat64;
   1488 	else
   1489 		sc->sc_dmat = pa->pa_dmat;
   1490 
   1491 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1492 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1493 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1494 
   1495 	sc->sc_type = wmp->wmp_type;
   1496 	if (sc->sc_type < WM_T_82543) {
   1497 		if (sc->sc_rev < 2) {
   1498 			aprint_error_dev(sc->sc_dev,
   1499 			    "i82542 must be at least rev. 2\n");
   1500 			return;
   1501 		}
   1502 		if (sc->sc_rev < 3)
   1503 			sc->sc_type = WM_T_82542_2_0;
   1504 	}
   1505 
   1506 	/*
   1507 	 * Disable MSI for Errata:
   1508 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1509 	 *
   1510 	 *  82544: Errata 25
   1511 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1512 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1513 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1514 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1515 	 *
   1516 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1517 	 *
   1518 	 *  82571 & 82572: Errata 63
   1519 	 */
   1520 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1521 	    || (sc->sc_type == WM_T_82572))
   1522 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1523 
   1524 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1525 	    || (sc->sc_type == WM_T_82580)
   1526 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1527 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1528 		sc->sc_flags |= WM_F_NEWQUEUE;
   1529 
   1530 	/* Set device properties (mactype) */
   1531 	dict = device_properties(sc->sc_dev);
   1532 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1533 
   1534 	/*
   1535 	 * Map the device.  All devices support memory-mapped access,
   1536 	 * and it is really required for normal operation.
   1537 	 */
   1538 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1539 	switch (memtype) {
   1540 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1541 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1542 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1543 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1544 		break;
   1545 	default:
   1546 		memh_valid = 0;
   1547 		break;
   1548 	}
   1549 
   1550 	if (memh_valid) {
   1551 		sc->sc_st = memt;
   1552 		sc->sc_sh = memh;
   1553 		sc->sc_ss = memsize;
   1554 	} else {
   1555 		aprint_error_dev(sc->sc_dev,
   1556 		    "unable to map device registers\n");
   1557 		return;
   1558 	}
   1559 
   1560 	/*
   1561 	 * In addition, i82544 and later support I/O mapped indirect
   1562 	 * register access.  It is not desirable (nor supported in
   1563 	 * this driver) to use it for normal operation, though it is
   1564 	 * required to work around bugs in some chip versions.
   1565 	 */
   1566 	if (sc->sc_type >= WM_T_82544) {
   1567 		/* First we have to find the I/O BAR. */
   1568 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1569 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1570 			if (memtype == PCI_MAPREG_TYPE_IO)
   1571 				break;
   1572 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1573 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1574 				i += 4;	/* skip high bits, too */
   1575 		}
   1576 		if (i < PCI_MAPREG_END) {
   1577 			/*
   1578 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
   1579 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
   1580 			 * that's fine, because those newer chips don't have
   1581 			 * the bug this mapping works around.
   1582 			 *
   1583 			 * The i8254x apparently doesn't respond when the
   1584 			 * I/O BAR is 0, which looks somewhat like it hasn't
   1585 			 * been configured.
   1586 			 */
   1587 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1588 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1589 				aprint_error_dev(sc->sc_dev,
   1590 				    "WARNING: I/O BAR at zero.\n");
   1591 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1592 					0, &sc->sc_iot, &sc->sc_ioh,
   1593 					NULL, &sc->sc_ios) == 0) {
   1594 				sc->sc_flags |= WM_F_IOH_VALID;
   1595 			} else {
   1596 				aprint_error_dev(sc->sc_dev,
   1597 				    "WARNING: unable to map I/O space\n");
   1598 			}
   1599 		}
   1600 
   1601 	}
   1602 
   1603 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1604 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1605 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1606 	if (sc->sc_type < WM_T_82542_2_1)
   1607 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1608 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1609 
   1610 	/* power up chip */
   1611 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1612 	    NULL)) && error != EOPNOTSUPP) {
   1613 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1614 		return;
   1615 	}
   1616 
   1617 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1618 
   1619 	/* Allocation settings */
   1620 	max_type = PCI_INTR_TYPE_MSIX;
   1621 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1622 	counts[PCI_INTR_TYPE_MSI] = 1;
   1623 	counts[PCI_INTR_TYPE_INTX] = 1;
   1624 
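        	/*
        	 * Try MSI-X first.  If vector setup fails, release the
        	 * allocation and retry with MSI, and then with INTx.
        	 */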
   1625 alloc_retry:
   1626 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1627 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1628 		return;
   1629 	}
   1630 
   1631 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1632 		error = wm_setup_msix(sc);
   1633 		if (error) {
   1634 			pci_intr_release(pc, sc->sc_intrs,
   1635 			    counts[PCI_INTR_TYPE_MSIX]);
   1636 
   1637 			/* Setup for MSI: Disable MSI-X */
   1638 			max_type = PCI_INTR_TYPE_MSI;
   1639 			counts[PCI_INTR_TYPE_MSI] = 1;
   1640 			counts[PCI_INTR_TYPE_INTX] = 1;
   1641 			goto alloc_retry;
   1642 		}
   1643 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1644 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1645 		error = wm_setup_legacy(sc);
   1646 		if (error) {
   1647 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1648 			    counts[PCI_INTR_TYPE_MSI]);
   1649 
   1650 			/* The next try is for INTx: Disable MSI */
   1651 			max_type = PCI_INTR_TYPE_INTX;
   1652 			counts[PCI_INTR_TYPE_INTX] = 1;
   1653 			goto alloc_retry;
   1654 		}
   1655 	} else {
   1656 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1657 		error = wm_setup_legacy(sc);
   1658 		if (error) {
   1659 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1660 			    counts[PCI_INTR_TYPE_INTX]);
   1661 			return;
   1662 		}
   1663 	}
   1664 
   1665 	/*
   1666 	 * Check the function ID (unit number of the chip).
   1667 	 */
   1668 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1669 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1670 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1671 	    || (sc->sc_type == WM_T_82580)
   1672 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1673 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1674 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1675 	else
   1676 		sc->sc_funcid = 0;
   1677 
   1678 	/*
   1679 	 * Determine a few things about the bus we're connected to.
   1680 	 */
   1681 	if (sc->sc_type < WM_T_82543) {
   1682 		/* We don't really know the bus characteristics here. */
   1683 		sc->sc_bus_speed = 33;
   1684 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1685 		/*
   1686 		 * CSA (Communication Streaming Architecture) is about as fast
   1687 		 * as a 32-bit, 66MHz PCI bus.
   1688 		 */
   1689 		sc->sc_flags |= WM_F_CSA;
   1690 		sc->sc_bus_speed = 66;
   1691 		aprint_verbose_dev(sc->sc_dev,
   1692 		    "Communication Streaming Architecture\n");
   1693 		if (sc->sc_type == WM_T_82547) {
   1694 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1695 			callout_setfunc(&sc->sc_txfifo_ch,
   1696 					wm_82547_txfifo_stall, sc);
   1697 			aprint_verbose_dev(sc->sc_dev,
   1698 			    "using 82547 Tx FIFO stall work-around\n");
   1699 		}
   1700 	} else if (sc->sc_type >= WM_T_82571) {
   1701 		sc->sc_flags |= WM_F_PCIE;
   1702 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1703 		    && (sc->sc_type != WM_T_ICH10)
   1704 		    && (sc->sc_type != WM_T_PCH)
   1705 		    && (sc->sc_type != WM_T_PCH2)
   1706 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1707 			/* ICH* and PCH* have no PCIe capability registers */
   1708 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1709 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1710 				NULL) == 0)
   1711 				aprint_error_dev(sc->sc_dev,
   1712 				    "unable to find PCIe capability\n");
   1713 		}
   1714 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1715 	} else {
   1716 		reg = CSR_READ(sc, WMREG_STATUS);
   1717 		if (reg & STATUS_BUS64)
   1718 			sc->sc_flags |= WM_F_BUS64;
   1719 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1720 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1721 
   1722 			sc->sc_flags |= WM_F_PCIX;
   1723 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1724 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1725 				aprint_error_dev(sc->sc_dev,
   1726 				    "unable to find PCIX capability\n");
   1727 			else if (sc->sc_type != WM_T_82545_3 &&
   1728 				 sc->sc_type != WM_T_82546_3) {
   1729 				/*
   1730 				 * Work around a problem caused by the BIOS
   1731 				 * setting the max memory read byte count
   1732 				 * incorrectly.
   1733 				 */
   1734 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1735 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1736 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1737 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1738 
   1739 				bytecnt =
   1740 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1741 				    PCIX_CMD_BYTECNT_SHIFT;
   1742 				maxb =
   1743 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1744 				    PCIX_STATUS_MAXB_SHIFT;
   1745 				if (bytecnt > maxb) {
   1746 					aprint_verbose_dev(sc->sc_dev,
   1747 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1748 					    512 << bytecnt, 512 << maxb);
   1749 					pcix_cmd = (pcix_cmd &
   1750 					    ~PCIX_CMD_BYTECNT_MASK) |
   1751 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1752 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1753 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1754 					    pcix_cmd);
   1755 				}
   1756 			}
   1757 		}
   1758 		/*
   1759 		 * The quad port adapter is special; it has a PCIX-PCIX
   1760 		 * bridge on the board, and can run the secondary bus at
   1761 		 * a higher speed.
   1762 		 */
   1763 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1764 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1765 								      : 66;
   1766 		} else if (sc->sc_flags & WM_F_PCIX) {
   1767 			switch (reg & STATUS_PCIXSPD_MASK) {
   1768 			case STATUS_PCIXSPD_50_66:
   1769 				sc->sc_bus_speed = 66;
   1770 				break;
   1771 			case STATUS_PCIXSPD_66_100:
   1772 				sc->sc_bus_speed = 100;
   1773 				break;
   1774 			case STATUS_PCIXSPD_100_133:
   1775 				sc->sc_bus_speed = 133;
   1776 				break;
   1777 			default:
   1778 				aprint_error_dev(sc->sc_dev,
   1779 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1780 				    reg & STATUS_PCIXSPD_MASK);
   1781 				sc->sc_bus_speed = 66;
   1782 				break;
   1783 			}
   1784 		} else
   1785 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1786 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1787 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1788 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1789 	}
   1790 
   1791 	/* clear interesting stat counters */
   1792 	CSR_READ(sc, WMREG_COLC);
   1793 	CSR_READ(sc, WMREG_RXERRC);
   1794 
   1795 	/* get PHY control from SMBus to PCIe */
   1796 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1797 	    || (sc->sc_type == WM_T_PCH_LPT))
   1798 		wm_smbustopci(sc);
   1799 
   1800 	/* Reset the chip to a known state. */
   1801 	wm_reset(sc);
   1802 
   1803 	/* Get some information about the EEPROM. */
   1804 	switch (sc->sc_type) {
   1805 	case WM_T_82542_2_0:
   1806 	case WM_T_82542_2_1:
   1807 	case WM_T_82543:
   1808 	case WM_T_82544:
   1809 		/* Microwire */
   1810 		sc->sc_nvm_wordsize = 64;
   1811 		sc->sc_nvm_addrbits = 6;
   1812 		break;
   1813 	case WM_T_82540:
   1814 	case WM_T_82545:
   1815 	case WM_T_82545_3:
   1816 	case WM_T_82546:
   1817 	case WM_T_82546_3:
   1818 		/* Microwire */
   1819 		reg = CSR_READ(sc, WMREG_EECD);
   1820 		if (reg & EECD_EE_SIZE) {
   1821 			sc->sc_nvm_wordsize = 256;
   1822 			sc->sc_nvm_addrbits = 8;
   1823 		} else {
   1824 			sc->sc_nvm_wordsize = 64;
   1825 			sc->sc_nvm_addrbits = 6;
   1826 		}
   1827 		sc->sc_flags |= WM_F_LOCK_EECD;
   1828 		break;
   1829 	case WM_T_82541:
   1830 	case WM_T_82541_2:
   1831 	case WM_T_82547:
   1832 	case WM_T_82547_2:
   1833 		sc->sc_flags |= WM_F_LOCK_EECD;
   1834 		reg = CSR_READ(sc, WMREG_EECD);
   1835 		if (reg & EECD_EE_TYPE) {
   1836 			/* SPI */
   1837 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1838 			wm_nvm_set_addrbits_size_eecd(sc);
   1839 		} else {
   1840 			/* Microwire */
   1841 			if ((reg & EECD_EE_ABITS) != 0) {
   1842 				sc->sc_nvm_wordsize = 256;
   1843 				sc->sc_nvm_addrbits = 8;
   1844 			} else {
   1845 				sc->sc_nvm_wordsize = 64;
   1846 				sc->sc_nvm_addrbits = 6;
   1847 			}
   1848 		}
   1849 		break;
   1850 	case WM_T_82571:
   1851 	case WM_T_82572:
   1852 		/* SPI */
   1853 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1854 		wm_nvm_set_addrbits_size_eecd(sc);
   1855 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1856 		break;
   1857 	case WM_T_82573:
   1858 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1859 		/* FALLTHROUGH */
   1860 	case WM_T_82574:
   1861 	case WM_T_82583:
   1862 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1863 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1864 			sc->sc_nvm_wordsize = 2048;
   1865 		} else {
   1866 			/* SPI */
   1867 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1868 			wm_nvm_set_addrbits_size_eecd(sc);
   1869 		}
   1870 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1871 		break;
   1872 	case WM_T_82575:
   1873 	case WM_T_82576:
   1874 	case WM_T_82580:
   1875 	case WM_T_I350:
   1876 	case WM_T_I354:
   1877 	case WM_T_80003:
   1878 		/* SPI */
   1879 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1880 		wm_nvm_set_addrbits_size_eecd(sc);
   1881 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1882 		    | WM_F_LOCK_SWSM;
   1883 		break;
   1884 	case WM_T_ICH8:
   1885 	case WM_T_ICH9:
   1886 	case WM_T_ICH10:
   1887 	case WM_T_PCH:
   1888 	case WM_T_PCH2:
   1889 	case WM_T_PCH_LPT:
   1890 		/* FLASH */
   1891 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1892 		sc->sc_nvm_wordsize = 2048;
   1893 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1894 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1895 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1896 			aprint_error_dev(sc->sc_dev,
   1897 			    "can't map FLASH registers\n");
   1898 			goto out;
   1899 		}
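        		/*
        		 * GFPREG gives the gigabit NVM region as base and limit
        		 * sector numbers.  The region holds two NVM banks, so
        		 * the per-bank size is half the region, in 16-bit words.
        		 */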
   1900 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1901 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1902 						ICH_FLASH_SECTOR_SIZE;
   1903 		sc->sc_ich8_flash_bank_size =
   1904 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1905 		sc->sc_ich8_flash_bank_size -=
   1906 		    (reg & ICH_GFPREG_BASE_MASK);
   1907 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1908 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1909 		break;
   1910 	case WM_T_I210:
   1911 	case WM_T_I211:
   1912 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1913 			wm_nvm_set_addrbits_size_eecd(sc);
   1914 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1915 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1916 		} else {
   1917 			sc->sc_nvm_wordsize = INVM_SIZE;
   1918 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1919 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1920 		}
   1921 		break;
   1922 	default:
   1923 		break;
   1924 	}
   1925 
   1926 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1927 	switch (sc->sc_type) {
   1928 	case WM_T_82571:
   1929 	case WM_T_82572:
   1930 		reg = CSR_READ(sc, WMREG_SWSM2);
   1931 		if ((reg & SWSM2_LOCK) == 0) {
   1932 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1933 			force_clear_smbi = true;
   1934 		} else
   1935 			force_clear_smbi = false;
   1936 		break;
   1937 	case WM_T_82573:
   1938 	case WM_T_82574:
   1939 	case WM_T_82583:
   1940 		force_clear_smbi = true;
   1941 		break;
   1942 	default:
   1943 		force_clear_smbi = false;
   1944 		break;
   1945 	}
   1946 	if (force_clear_smbi) {
   1947 		reg = CSR_READ(sc, WMREG_SWSM);
   1948 		if ((reg & SWSM_SMBI) != 0)
   1949 			aprint_error_dev(sc->sc_dev,
   1950 			    "Please update the Bootagent\n");
   1951 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1952 	}
   1953 
   1954 	/*
   1955 	 * Defer printing the EEPROM type until after verifying the checksum.
   1956 	 * This allows the EEPROM type to be printed correctly in the case
   1957 	 * that no EEPROM is attached.
   1958 	 */
   1959 	/*
   1960 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1961 	 * this for later, so we can fail future reads from the EEPROM.
   1962 	 */
   1963 	if (wm_nvm_validate_checksum(sc)) {
   1964 		/*
   1965 		 * Read twice again because some PCI-e parts fail the
   1966 		 * first check due to the link being in sleep state.
   1967 		 */
   1968 		if (wm_nvm_validate_checksum(sc))
   1969 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1970 	}
   1971 
   1972 	/* Set device properties (macflags) */
   1973 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1974 
   1975 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1976 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   1977 	else {
   1978 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1979 		    sc->sc_nvm_wordsize);
   1980 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   1981 			aprint_verbose("iNVM");
   1982 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   1983 			aprint_verbose("FLASH(HW)");
   1984 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   1985 			aprint_verbose("FLASH");
   1986 		else {
   1987 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1988 				eetype = "SPI";
   1989 			else
   1990 				eetype = "MicroWire";
   1991 			aprint_verbose("(%d address bits) %s EEPROM",
   1992 			    sc->sc_nvm_addrbits, eetype);
   1993 		}
   1994 	}
   1995 	wm_nvm_version(sc);
   1996 	aprint_verbose("\n");
   1997 
   1998 	/* Check for I21[01] PLL workaround */
   1999 	if (sc->sc_type == WM_T_I210)
   2000 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2001 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2002 		/* NVM image release 3.25 has a workaround */
   2003 		if ((sc->sc_nvm_ver_major < 3)
   2004 		    || ((sc->sc_nvm_ver_major == 3)
   2005 			&& (sc->sc_nvm_ver_minor < 25))) {
   2006 			aprint_verbose_dev(sc->sc_dev,
   2007 			    "ROM image version %d.%d is older than 3.25\n",
   2008 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2009 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2010 		}
   2011 	}
   2012 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2013 		wm_pll_workaround_i210(sc);
   2014 
   2015 	switch (sc->sc_type) {
   2016 	case WM_T_82571:
   2017 	case WM_T_82572:
   2018 	case WM_T_82573:
   2019 	case WM_T_82574:
   2020 	case WM_T_82583:
   2021 	case WM_T_80003:
   2022 	case WM_T_ICH8:
   2023 	case WM_T_ICH9:
   2024 	case WM_T_ICH10:
   2025 	case WM_T_PCH:
   2026 	case WM_T_PCH2:
   2027 	case WM_T_PCH_LPT:
   2028 		if (wm_check_mng_mode(sc) != 0)
   2029 			wm_get_hw_control(sc);
   2030 		break;
   2031 	default:
   2032 		break;
   2033 	}
   2034 	wm_get_wakeup(sc);
   2035 	/*
   2036 	 * Read the Ethernet address from the EEPROM, unless it was
   2037 	 * already provided via device properties.
   2038 	 */
   2039 	ea = prop_dictionary_get(dict, "mac-address");
   2040 	if (ea != NULL) {
   2041 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2042 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2043 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2044 	} else {
   2045 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2046 			aprint_error_dev(sc->sc_dev,
   2047 			    "unable to read Ethernet address\n");
   2048 			goto out;
   2049 		}
   2050 	}
   2051 
   2052 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2053 	    ether_sprintf(enaddr));
   2054 
   2055 	/*
   2056 	 * Read the config info from the EEPROM, and set up various
   2057 	 * bits in the control registers based on their contents.
   2058 	 */
   2059 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2060 	if (pn != NULL) {
   2061 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2062 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2063 	} else {
   2064 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2065 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2066 			goto out;
   2067 		}
   2068 	}
   2069 
   2070 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2071 	if (pn != NULL) {
   2072 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2073 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2074 	} else {
   2075 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2076 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2077 			goto out;
   2078 		}
   2079 	}
   2080 
   2081 	/* check for WM_F_WOL */
   2082 	switch (sc->sc_type) {
   2083 	case WM_T_82542_2_0:
   2084 	case WM_T_82542_2_1:
   2085 	case WM_T_82543:
   2086 		/* dummy? */
   2087 		eeprom_data = 0;
   2088 		apme_mask = NVM_CFG3_APME;
   2089 		break;
   2090 	case WM_T_82544:
   2091 		apme_mask = NVM_CFG2_82544_APM_EN;
   2092 		eeprom_data = cfg2;
   2093 		break;
   2094 	case WM_T_82546:
   2095 	case WM_T_82546_3:
   2096 	case WM_T_82571:
   2097 	case WM_T_82572:
   2098 	case WM_T_82573:
   2099 	case WM_T_82574:
   2100 	case WM_T_82583:
   2101 	case WM_T_80003:
   2102 	default:
   2103 		apme_mask = NVM_CFG3_APME;
   2104 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2105 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2106 		break;
   2107 	case WM_T_82575:
   2108 	case WM_T_82576:
   2109 	case WM_T_82580:
   2110 	case WM_T_I350:
   2111 	case WM_T_I354: /* XXX ok? */
   2112 	case WM_T_ICH8:
   2113 	case WM_T_ICH9:
   2114 	case WM_T_ICH10:
   2115 	case WM_T_PCH:
   2116 	case WM_T_PCH2:
   2117 	case WM_T_PCH_LPT:
   2118 		/* XXX The funcid should be checked on some devices */
   2119 		apme_mask = WUC_APME;
   2120 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2121 		break;
   2122 	}
   2123 
   2124 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
   2125 	if ((eeprom_data & apme_mask) != 0)
   2126 		sc->sc_flags |= WM_F_WOL;
   2127 #ifdef WM_DEBUG
   2128 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2129 		printf("WOL\n");
   2130 #endif
   2131 
   2132 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2133 		/* Check NVM for autonegotiation */
   2134 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2135 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2136 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2137 		}
   2138 	}
   2139 
   2140 	/*
   2141 	 * XXX need special handling for some multiple port cards
   2142 	 * to disable a particular port.
   2143 	 */
   2144 
   2145 	if (sc->sc_type >= WM_T_82544) {
   2146 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2147 		if (pn != NULL) {
   2148 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2149 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2150 		} else {
   2151 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2152 				aprint_error_dev(sc->sc_dev,
   2153 				    "unable to read SWDPIN\n");
   2154 				goto out;
   2155 			}
   2156 		}
   2157 	}
   2158 
   2159 	if (cfg1 & NVM_CFG1_ILOS)
   2160 		sc->sc_ctrl |= CTRL_ILOS;
   2161 
   2162 	/*
   2163 	 * XXX
   2164 	 * This code isn't correct because pins 2 and 3 are located
   2165 	 * at different positions on newer chips. Check all datasheets.
   2166 	 *
   2167 	 * Until this is resolved, only apply it to chips <= 82580.
   2168 	 */
   2169 	if (sc->sc_type <= WM_T_82580) {
   2170 		if (sc->sc_type >= WM_T_82544) {
   2171 			sc->sc_ctrl |=
   2172 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2173 			    CTRL_SWDPIO_SHIFT;
   2174 			sc->sc_ctrl |=
   2175 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2176 			    CTRL_SWDPINS_SHIFT;
   2177 		} else {
   2178 			sc->sc_ctrl |=
   2179 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2180 			    CTRL_SWDPIO_SHIFT;
   2181 		}
   2182 	}
   2183 
   2184 	/* XXX For other than 82580? */
   2185 	if (sc->sc_type == WM_T_82580) {
   2186 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2187 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2188 		if (nvmword & __BIT(13)) {
   2189 			printf("SET ILOS\n");
   2190 			sc->sc_ctrl |= CTRL_ILOS;
   2191 		}
   2192 	}
   2193 
   2194 #if 0
   2195 	if (sc->sc_type >= WM_T_82544) {
   2196 		if (cfg1 & NVM_CFG1_IPS0)
   2197 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2198 		if (cfg1 & NVM_CFG1_IPS1)
   2199 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2200 		sc->sc_ctrl_ext |=
   2201 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2202 		    CTRL_EXT_SWDPIO_SHIFT;
   2203 		sc->sc_ctrl_ext |=
   2204 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2205 		    CTRL_EXT_SWDPINS_SHIFT;
   2206 	} else {
   2207 		sc->sc_ctrl_ext |=
   2208 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2209 		    CTRL_EXT_SWDPIO_SHIFT;
   2210 	}
   2211 #endif
   2212 
   2213 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2214 #if 0
   2215 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2216 #endif
   2217 
   2218 	if (sc->sc_type == WM_T_PCH) {
   2219 		uint16_t val;
   2220 
   2221 		/* Save the NVM K1 bit setting */
   2222 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2223 
   2224 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2225 			sc->sc_nvm_k1_enabled = 1;
   2226 		else
   2227 			sc->sc_nvm_k1_enabled = 0;
   2228 	}
   2229 
   2230 	/*
   2231 	 * Determine whether we're in TBI, GMII or SGMII mode, and
   2232 	 * initialize the media structures accordingly.
   2233 	 */
   2234 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2235 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2236 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2237 	    || sc->sc_type == WM_T_82573
   2238 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2239 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2240 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2241 	} else if (sc->sc_type < WM_T_82543 ||
   2242 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2243 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2244 			aprint_error_dev(sc->sc_dev,
   2245 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2246 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2247 		}
   2248 		wm_tbi_mediainit(sc);
   2249 	} else {
   2250 		switch (sc->sc_type) {
   2251 		case WM_T_82575:
   2252 		case WM_T_82576:
   2253 		case WM_T_82580:
   2254 		case WM_T_I350:
   2255 		case WM_T_I354:
   2256 		case WM_T_I210:
   2257 		case WM_T_I211:
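        			/*
        			 * On these chips the media type is taken from the
        			 * LINK_MODE field of CTRL_EXT: internal PHY (GMII),
        			 * SGMII, 1000BASE-KX, or SERDES/SFP.
        			 */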
   2258 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2259 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2260 			switch (link_mode) {
   2261 			case CTRL_EXT_LINK_MODE_1000KX:
   2262 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2263 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2264 				break;
   2265 			case CTRL_EXT_LINK_MODE_SGMII:
   2266 				if (wm_sgmii_uses_mdio(sc)) {
   2267 					aprint_verbose_dev(sc->sc_dev,
   2268 					    "SGMII(MDIO)\n");
   2269 					sc->sc_flags |= WM_F_SGMII;
   2270 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2271 					break;
   2272 				}
   2273 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2274 				/*FALLTHROUGH*/
   2275 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2276 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2277 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2278 					if (link_mode
   2279 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2280 						sc->sc_mediatype
   2281 						    = WM_MEDIATYPE_COPPER;
   2282 						sc->sc_flags |= WM_F_SGMII;
   2283 					} else {
   2284 						sc->sc_mediatype
   2285 						    = WM_MEDIATYPE_SERDES;
   2286 						aprint_verbose_dev(sc->sc_dev,
   2287 						    "SERDES\n");
   2288 					}
   2289 					break;
   2290 				}
   2291 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2292 					aprint_verbose_dev(sc->sc_dev,
   2293 					    "SERDES\n");
   2294 
   2295 				/* Change current link mode setting */
   2296 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2297 				switch (sc->sc_mediatype) {
   2298 				case WM_MEDIATYPE_COPPER:
   2299 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2300 					break;
   2301 				case WM_MEDIATYPE_SERDES:
   2302 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2303 					break;
   2304 				default:
   2305 					break;
   2306 				}
   2307 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2308 				break;
   2309 			case CTRL_EXT_LINK_MODE_GMII:
   2310 			default:
   2311 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2312 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2313 				break;
   2314 			}
   2315 
   2316 			/* Enable the I2C interface only in SGMII mode */
   2317 			if ((sc->sc_flags & WM_F_SGMII) != 0)
   2318 				reg |= CTRL_EXT_I2C_ENA;
   2319 			else
   2320 				reg &= ~CTRL_EXT_I2C_ENA;
   2321 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2322 
   2323 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2324 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2325 			else
   2326 				wm_tbi_mediainit(sc);
   2327 			break;
   2328 		default:
   2329 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2330 				aprint_error_dev(sc->sc_dev,
   2331 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2332 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2333 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2334 		}
   2335 	}
   2336 
   2337 	ifp = &sc->sc_ethercom.ec_if;
   2338 	xname = device_xname(sc->sc_dev);
   2339 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2340 	ifp->if_softc = sc;
   2341 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2342 	ifp->if_ioctl = wm_ioctl;
   2343 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2344 		ifp->if_start = wm_nq_start;
   2345 	else
   2346 		ifp->if_start = wm_start;
   2347 	ifp->if_watchdog = wm_watchdog;
   2348 	ifp->if_init = wm_init;
   2349 	ifp->if_stop = wm_stop;
   2350 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2351 	IFQ_SET_READY(&ifp->if_snd);
   2352 
   2353 	/* Check for jumbo frame */
   2354 	switch (sc->sc_type) {
   2355 	case WM_T_82573:
   2356 		/* XXX limited to 9234 if ASPM is disabled */
   2357 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2358 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2359 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2360 		break;
   2361 	case WM_T_82571:
   2362 	case WM_T_82572:
   2363 	case WM_T_82574:
   2364 	case WM_T_82575:
   2365 	case WM_T_82576:
   2366 	case WM_T_82580:
   2367 	case WM_T_I350:
   2368 	case WM_T_I354: /* XXXX ok? */
   2369 	case WM_T_I210:
   2370 	case WM_T_I211:
   2371 	case WM_T_80003:
   2372 	case WM_T_ICH9:
   2373 	case WM_T_ICH10:
   2374 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2375 	case WM_T_PCH_LPT:
   2376 		/* XXX limited to 9234 */
   2377 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2378 		break;
   2379 	case WM_T_PCH:
   2380 		/* XXX limited to 4096 */
   2381 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2382 		break;
   2383 	case WM_T_82542_2_0:
   2384 	case WM_T_82542_2_1:
   2385 	case WM_T_82583:
   2386 	case WM_T_ICH8:
   2387 		/* No support for jumbo frame */
   2388 		break;
   2389 	default:
   2390 		/* ETHER_MAX_LEN_JUMBO */
   2391 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2392 		break;
   2393 	}
   2394 
   2395 	/* If we're a i82543 or greater, we can support VLANs. */
   2396 	if (sc->sc_type >= WM_T_82543)
   2397 		sc->sc_ethercom.ec_capabilities |=
   2398 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2399 
   2400 	/*
   2401 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
   2402 	 * on i82543 and later.
   2403 	 */
   2404 	if (sc->sc_type >= WM_T_82543) {
   2405 		ifp->if_capabilities |=
   2406 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2407 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2408 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2409 		    IFCAP_CSUM_TCPv6_Tx |
   2410 		    IFCAP_CSUM_UDPv6_Tx;
   2411 	}
   2412 
   2413 	/*
   2414 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2415 	 *
   2416 	 *	82541GI (8086:1076) ... no
   2417 	 *	82572EI (8086:10b9) ... yes
   2418 	 */
   2419 	if (sc->sc_type >= WM_T_82571) {
   2420 		ifp->if_capabilities |=
   2421 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2422 	}
   2423 
   2424 	/*
   2425 	 * If we're a i82544 or greater (except i82547), we can do
   2426 	 * TCP segmentation offload.
   2427 	 */
   2428 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2429 		ifp->if_capabilities |= IFCAP_TSOv4;
   2430 	}
   2431 
   2432 	if (sc->sc_type >= WM_T_82571) {
   2433 		ifp->if_capabilities |= IFCAP_TSOv6;
   2434 	}
   2435 
   2436 #ifdef WM_MPSAFE
   2437 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2438 #else
   2439 	sc->sc_core_lock = NULL;
   2440 #endif
   2441 
   2442 	/* Attach the interface. */
   2443 	if_attach(ifp);
   2444 	ether_ifattach(ifp, enaddr);
   2445 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2446 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2447 			  RND_FLAG_DEFAULT);
   2448 
   2449 #ifdef WM_EVENT_COUNTERS
   2450 	/* Attach event counters. */
   2451 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2452 	    NULL, xname, "txsstall");
   2453 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2454 	    NULL, xname, "txdstall");
   2455 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2456 	    NULL, xname, "txfifo_stall");
   2457 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2458 	    NULL, xname, "txdw");
   2459 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2460 	    NULL, xname, "txqe");
   2461 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2462 	    NULL, xname, "rxintr");
   2463 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2464 	    NULL, xname, "linkintr");
   2465 
   2466 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2467 	    NULL, xname, "rxipsum");
   2468 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2469 	    NULL, xname, "rxtusum");
   2470 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2471 	    NULL, xname, "txipsum");
   2472 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2473 	    NULL, xname, "txtusum");
   2474 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2475 	    NULL, xname, "txtusum6");
   2476 
   2477 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2478 	    NULL, xname, "txtso");
   2479 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2480 	    NULL, xname, "txtso6");
   2481 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2482 	    NULL, xname, "txtsopain");
   2483 
   2484 	for (i = 0; i < WM_NTXSEGS; i++) {
   2485 		snprintf(wm_txseg_evcnt_names[i],
   2486 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2487 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2488 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2489 	}
   2490 
   2491 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2492 	    NULL, xname, "txdrop");
   2493 
   2494 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2495 	    NULL, xname, "tu");
   2496 
   2497 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2498 	    NULL, xname, "tx_xoff");
   2499 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2500 	    NULL, xname, "tx_xon");
   2501 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2502 	    NULL, xname, "rx_xoff");
   2503 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2504 	    NULL, xname, "rx_xon");
   2505 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2506 	    NULL, xname, "rx_macctl");
   2507 #endif /* WM_EVENT_COUNTERS */
   2508 
   2509 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2510 		pmf_class_network_register(self, ifp);
   2511 	else
   2512 		aprint_error_dev(self, "couldn't establish power handler\n");
   2513 
   2514 	sc->sc_flags |= WM_F_ATTACHED;
   2515  out:
   2516 	return;
   2517 }
   2518 
   2519 /* The detach function (ca_detach) */
   2520 static int
   2521 wm_detach(device_t self, int flags __unused)
   2522 {
   2523 	struct wm_softc *sc = device_private(self);
   2524 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2525 	int i;
   2526 #ifndef WM_MPSAFE
   2527 	int s;
   2528 #endif
   2529 
   2530 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2531 		return 0;
   2532 
   2533 #ifndef WM_MPSAFE
   2534 	s = splnet();
   2535 #endif
   2536 	/* Stop the interface. Callouts are stopped in it. */
   2537 	wm_stop(ifp, 1);
   2538 
   2539 #ifndef WM_MPSAFE
   2540 	splx(s);
   2541 #endif
   2542 
   2543 	pmf_device_deregister(self);
   2544 
   2545 	/* Tell the firmware about the release */
   2546 	WM_CORE_LOCK(sc);
   2547 	wm_release_manageability(sc);
   2548 	wm_release_hw_control(sc);
   2549 	WM_CORE_UNLOCK(sc);
   2550 
   2551 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2552 
   2553 	/* Delete all remaining media. */
   2554 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2555 
   2556 	ether_ifdetach(ifp);
   2557 	if_detach(ifp);
   2558 
   2559 
   2560 	/* Unload RX dmamaps and free mbufs */
   2561 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2562 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2563 		WM_RX_LOCK(rxq);
   2564 		wm_rxdrain(rxq);
   2565 		WM_RX_UNLOCK(rxq);
   2566 	}
   2567 	/* Must unlock here */
   2568 
   2569 	wm_free_txrx_queues(sc);
   2570 
   2571 	/* Disestablish the interrupt handler */
   2572 	for (i = 0; i < sc->sc_nintrs; i++) {
   2573 		if (sc->sc_ihs[i] != NULL) {
   2574 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2575 			sc->sc_ihs[i] = NULL;
   2576 		}
   2577 	}
   2578 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2579 
   2580 	/* Unmap the registers */
   2581 	if (sc->sc_ss) {
   2582 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2583 		sc->sc_ss = 0;
   2584 	}
   2585 	if (sc->sc_ios) {
   2586 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2587 		sc->sc_ios = 0;
   2588 	}
   2589 	if (sc->sc_flashs) {
   2590 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2591 		sc->sc_flashs = 0;
   2592 	}
   2593 
   2594 	if (sc->sc_core_lock)
   2595 		mutex_obj_free(sc->sc_core_lock);
   2596 
   2597 	return 0;
   2598 }
   2599 
   2600 static bool
   2601 wm_suspend(device_t self, const pmf_qual_t *qual)
   2602 {
   2603 	struct wm_softc *sc = device_private(self);
   2604 
   2605 	wm_release_manageability(sc);
   2606 	wm_release_hw_control(sc);
   2607 #ifdef WM_WOL
   2608 	wm_enable_wakeup(sc);
   2609 #endif
   2610 
   2611 	return true;
   2612 }
   2613 
   2614 static bool
   2615 wm_resume(device_t self, const pmf_qual_t *qual)
   2616 {
   2617 	struct wm_softc *sc = device_private(self);
   2618 
   2619 	wm_init_manageability(sc);
   2620 
   2621 	return true;
   2622 }
   2623 
   2624 /*
   2625  * wm_watchdog:		[ifnet interface function]
   2626  *
   2627  *	Watchdog timer handler.
   2628  */
   2629 static void
   2630 wm_watchdog(struct ifnet *ifp)
   2631 {
   2632 	struct wm_softc *sc = ifp->if_softc;
   2633 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2634 
   2635 	/*
   2636 	 * Since we're using delayed interrupts, sweep up
   2637 	 * before we report an error.
   2638 	 */
   2639 	WM_TX_LOCK(txq);
   2640 	wm_txeof(sc);
   2641 	WM_TX_UNLOCK(txq);
   2642 
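        	/* Any descriptors still in use now indicate a genuine Tx stall. */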
   2643 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2644 #ifdef WM_DEBUG
   2645 		int i, j;
   2646 		struct wm_txsoft *txs;
   2647 #endif
   2648 		log(LOG_ERR,
   2649 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2650 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2651 		    txq->txq_next);
   2652 		ifp->if_oerrors++;
   2653 #ifdef WM_DEBUG
   2654 		for (i = txq->txq_sdirty; i != txq->txq_snext;
   2655 		    i = WM_NEXTTXS(txq, i)) {
   2656 			txs = &txq->txq_soft[i];
   2657 			printf("txs %d tx %d -> %d\n",
   2658 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
   2659 			for (j = txs->txs_firstdesc; ;
   2660 			    j = WM_NEXTTX(txq, j)) {
   2661 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2662 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2663 				printf("\t %#08x%08x\n",
   2664 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2665 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2666 				if (j == txs->txs_lastdesc)
   2667 					break;
   2668 			}
   2669 		}
   2670 #endif
   2671 		/* Reset the interface. */
   2672 		(void) wm_init(ifp);
   2673 	}
   2674 
   2675 	/* Try to get more packets going. */
   2676 	ifp->if_start(ifp);
   2677 }
   2678 
   2679 /*
   2680  * wm_tick:
   2681  *
   2682  *	One second timer, used to check link status, sweep up
   2683  *	completed transmit jobs, etc.
   2684  */
   2685 static void
   2686 wm_tick(void *arg)
   2687 {
   2688 	struct wm_softc *sc = arg;
   2689 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2690 #ifndef WM_MPSAFE
   2691 	int s;
   2692 
   2693 	s = splnet();
   2694 #endif
   2695 
   2696 	WM_CORE_LOCK(sc);
   2697 
   2698 	if (sc->sc_stopping)
   2699 		goto out;
   2700 
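        	/* The statistics registers below are clear-on-read. */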
   2701 	if (sc->sc_type >= WM_T_82542_2_1) {
   2702 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2703 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2704 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2705 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2706 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2707 	}
   2708 
   2709 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   2710 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2711 	    + CSR_READ(sc, WMREG_CRCERRS)
   2712 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2713 	    + CSR_READ(sc, WMREG_SYMERRC)
   2714 	    + CSR_READ(sc, WMREG_RXERRC)
   2715 	    + CSR_READ(sc, WMREG_SEC)
   2716 	    + CSR_READ(sc, WMREG_CEXTERR)
   2717 	    + CSR_READ(sc, WMREG_RLEC);
   2718 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2719 
   2720 	if (sc->sc_flags & WM_F_HAS_MII)
   2721 		mii_tick(&sc->sc_mii);
   2722 	else if ((sc->sc_type >= WM_T_82575)
   2723 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2724 		wm_serdes_tick(sc);
   2725 	else
   2726 		wm_tbi_tick(sc);
   2727 
   2728 out:
   2729 	WM_CORE_UNLOCK(sc);
   2730 #ifndef WM_MPSAFE
   2731 	splx(s);
   2732 #endif
   2733 
   2734 	if (!sc->sc_stopping)
   2735 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2736 }
   2737 
   2738 static int
   2739 wm_ifflags_cb(struct ethercom *ec)
   2740 {
   2741 	struct ifnet *ifp = &ec->ec_if;
   2742 	struct wm_softc *sc = ifp->if_softc;
   2743 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2744 	int rc = 0;
   2745 
   2746 	WM_CORE_LOCK(sc);
   2747 
   2748 	if (change != 0)
   2749 		sc->sc_if_flags = ifp->if_flags;
   2750 
   2751 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2752 		rc = ENETRESET;
   2753 		goto out;
   2754 	}
   2755 
   2756 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2757 		wm_set_filter(sc);
   2758 
   2759 	wm_set_vlan(sc);
   2760 
   2761 out:
   2762 	WM_CORE_UNLOCK(sc);
   2763 
   2764 	return rc;
   2765 }
   2766 
   2767 /*
   2768  * wm_ioctl:		[ifnet interface function]
   2769  *
   2770  *	Handle control requests from the operator.
   2771  */
   2772 static int
   2773 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2774 {
   2775 	struct wm_softc *sc = ifp->if_softc;
   2776 	struct ifreq *ifr = (struct ifreq *) data;
   2777 	struct ifaddr *ifa = (struct ifaddr *)data;
   2778 	struct sockaddr_dl *sdl;
   2779 	int s, error;
   2780 
   2781 #ifndef WM_MPSAFE
   2782 	s = splnet();
   2783 #endif
   2784 	switch (cmd) {
   2785 	case SIOCSIFMEDIA:
   2786 	case SIOCGIFMEDIA:
   2787 		WM_CORE_LOCK(sc);
   2788 		/* Flow control requires full-duplex mode. */
   2789 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2790 		    (ifr->ifr_media & IFM_FDX) == 0)
   2791 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2792 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2793 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2794 				/* We can do both TXPAUSE and RXPAUSE. */
   2795 				ifr->ifr_media |=
   2796 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2797 			}
   2798 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2799 		}
   2800 		WM_CORE_UNLOCK(sc);
   2801 #ifdef WM_MPSAFE
   2802 		s = splnet();
   2803 #endif
   2804 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2805 #ifdef WM_MPSAFE
   2806 		splx(s);
   2807 #endif
   2808 		break;
   2809 	case SIOCINITIFADDR:
   2810 		WM_CORE_LOCK(sc);
   2811 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2812 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2813 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2814 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2815 			/* unicast address is first multicast entry */
   2816 			wm_set_filter(sc);
   2817 			error = 0;
   2818 			WM_CORE_UNLOCK(sc);
   2819 			break;
   2820 		}
   2821 		WM_CORE_UNLOCK(sc);
   2822 		/*FALLTHROUGH*/
   2823 	default:
   2824 #ifdef WM_MPSAFE
   2825 		s = splnet();
   2826 #endif
   2827 		/* It may call wm_start, so unlock here */
   2828 		error = ether_ioctl(ifp, cmd, data);
   2829 #ifdef WM_MPSAFE
   2830 		splx(s);
   2831 #endif
   2832 		if (error != ENETRESET)
   2833 			break;
   2834 
   2835 		error = 0;
   2836 
   2837 		if (cmd == SIOCSIFCAP) {
   2838 			error = (*ifp->if_init)(ifp);
   2839 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2840 			;
   2841 		else if (ifp->if_flags & IFF_RUNNING) {
   2842 			/*
   2843 			 * Multicast list has changed; set the hardware filter
   2844 			 * accordingly.
   2845 			 */
   2846 			WM_CORE_LOCK(sc);
   2847 			wm_set_filter(sc);
   2848 			WM_CORE_UNLOCK(sc);
   2849 		}
   2850 		break;
   2851 	}
   2852 
   2853 #ifndef WM_MPSAFE
   2854 	splx(s);
   2855 #endif
   2856 	return error;
   2857 }
   2858 
   2859 /* MAC address related */
   2860 
   2861 /*
   2862  * Get the offset of the MAC address and return it.
   2863  * If an error occurs, offset 0 is used.
   2864  */
   2865 static uint16_t
   2866 wm_check_alt_mac_addr(struct wm_softc *sc)
   2867 {
   2868 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2869 	uint16_t offset = NVM_OFF_MACADDR;
   2870 
   2871 	/* Try to read alternative MAC address pointer */
   2872 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2873 		return 0;
   2874 
   2875 	/* Check whether the pointer is valid. */
   2876 	if ((offset == 0x0000) || (offset == 0xffff))
   2877 		return 0;
   2878 
   2879 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2880 	/*
   2881 	 * Check whether the alternative MAC address is valid.  Some
   2882 	 * cards have a non-0xffff pointer but don't actually use an
   2883 	 * alternative MAC address.
   2884 	 *
   2885 	 * A valid unicast address has the multicast (I/G) bit clear.
   2886 	 */
   2887 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2888 		if (((myea[0] & 0xff) & 0x01) == 0)
   2889 			return offset; /* Found */
   2890 
   2891 	/* Not found */
   2892 	return 0;
   2893 }
   2894 
   2895 static int
   2896 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2897 {
   2898 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2899 	uint16_t offset = NVM_OFF_MACADDR;
   2900 	int do_invert = 0;
   2901 
   2902 	switch (sc->sc_type) {
   2903 	case WM_T_82580:
   2904 	case WM_T_I350:
   2905 	case WM_T_I354:
   2906 		/* EEPROM Top Level Partitioning */
   2907 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2908 		break;
   2909 	case WM_T_82571:
   2910 	case WM_T_82575:
   2911 	case WM_T_82576:
   2912 	case WM_T_80003:
   2913 	case WM_T_I210:
   2914 	case WM_T_I211:
   2915 		offset = wm_check_alt_mac_addr(sc);
   2916 		if (offset == 0)
   2917 			if ((sc->sc_funcid & 0x01) == 1)
   2918 				do_invert = 1;
   2919 		break;
   2920 	default:
   2921 		if ((sc->sc_funcid & 0x01) == 1)
   2922 			do_invert = 1;
   2923 		break;
   2924 	}
   2925 
   2926 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2927 		myea) != 0)
   2928 		goto bad;
   2929 
   2930 	enaddr[0] = myea[0] & 0xff;
   2931 	enaddr[1] = myea[0] >> 8;
   2932 	enaddr[2] = myea[1] & 0xff;
   2933 	enaddr[3] = myea[1] >> 8;
   2934 	enaddr[4] = myea[2] & 0xff;
   2935 	enaddr[5] = myea[2] >> 8;
   2936 
   2937 	/*
   2938 	 * Toggle the LSB of the MAC address on the second port
   2939 	 * of some dual port cards.
   2940 	 */
   2941 	if (do_invert != 0)
   2942 		enaddr[5] ^= 1;
   2943 
   2944 	return 0;
   2945 
   2946  bad:
   2947 	return -1;
   2948 }
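         /*
          * Illustrative example (added commentary, not driver logic): the
          * NVM words hold the address in little-endian byte order, so words
          * { 0x1100, 0x3322, 0x5544 } unpack to 00:11:22:33:44:55; with
          * do_invert set, the last octet would become 0x54.
          */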
   2949 
   2950 /*
   2951  * wm_set_ral:
   2952  *
    2953  *	Set an entry in the receive address list.
   2954  */
   2955 static void
   2956 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2957 {
   2958 	uint32_t ral_lo, ral_hi;
   2959 
   2960 	if (enaddr != NULL) {
   2961 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2962 		    (enaddr[3] << 24);
   2963 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2964 		ral_hi |= RAL_AV;
   2965 	} else {
   2966 		ral_lo = 0;
   2967 		ral_hi = 0;
   2968 	}
   2969 
   2970 	if (sc->sc_type >= WM_T_82544) {
   2971 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2972 		    ral_lo);
   2973 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2974 		    ral_hi);
   2975 	} else {
   2976 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2977 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2978 	}
   2979 }
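         /*
          * Illustrative example (added commentary, not driver logic): for
          * the address 00:11:22:33:44:55, ral_lo = 0x33221100 and
          * ral_hi = 0x5544 | RAL_AV, i.e. the address is packed
          * little-endian with the Address Valid bit in the high register.
          */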
   2980 
   2981 /*
   2982  * wm_mchash:
   2983  *
    2984  *	Compute the hash of the multicast address for the 4096-bit
    2985  *	(1024-bit on ICH/PCH variants) multicast filter.
   2986  */
   2987 static uint32_t
   2988 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2989 {
   2990 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2991 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2992 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   2993 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   2994 	uint32_t hash;
   2995 
   2996 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   2997 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   2998 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   2999 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3000 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3001 		return (hash & 0x3ff);
   3002 	}
   3003 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3004 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3005 
   3006 	return (hash & 0xfff);
   3007 }
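         /*
          * Illustrative example (added commentary, not driver logic): with
          * sc_mchash_type == 0 on a non-ICH/PCH chip, 01:00:5e:00:00:01
          * hashes to (0x00 >> 4) | (0x01 << 4) = 0x010, so wm_set_filter()
          * sets bit (0x010 & 0x1f) = 16 of MTA register (0x010 >> 5) = 0.
          */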
   3008 
   3009 /*
   3010  * wm_set_filter:
   3011  *
   3012  *	Set up the receive filter.
   3013  */
   3014 static void
   3015 wm_set_filter(struct wm_softc *sc)
   3016 {
   3017 	struct ethercom *ec = &sc->sc_ethercom;
   3018 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3019 	struct ether_multi *enm;
   3020 	struct ether_multistep step;
   3021 	bus_addr_t mta_reg;
   3022 	uint32_t hash, reg, bit;
   3023 	int i, size;
   3024 
   3025 	if (sc->sc_type >= WM_T_82544)
   3026 		mta_reg = WMREG_CORDOVA_MTA;
   3027 	else
   3028 		mta_reg = WMREG_MTA;
   3029 
   3030 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3031 
   3032 	if (ifp->if_flags & IFF_BROADCAST)
   3033 		sc->sc_rctl |= RCTL_BAM;
   3034 	if (ifp->if_flags & IFF_PROMISC) {
   3035 		sc->sc_rctl |= RCTL_UPE;
   3036 		goto allmulti;
   3037 	}
   3038 
   3039 	/*
   3040 	 * Set the station address in the first RAL slot, and
   3041 	 * clear the remaining slots.
   3042 	 */
   3043 	if (sc->sc_type == WM_T_ICH8)
    3044 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3045 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3046 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3047 	    || (sc->sc_type == WM_T_PCH_LPT))
   3048 		size = WM_RAL_TABSIZE_ICH8;
   3049 	else if (sc->sc_type == WM_T_82575)
   3050 		size = WM_RAL_TABSIZE_82575;
   3051 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3052 		size = WM_RAL_TABSIZE_82576;
   3053 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3054 		size = WM_RAL_TABSIZE_I350;
   3055 	else
   3056 		size = WM_RAL_TABSIZE;
   3057 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3058 	for (i = 1; i < size; i++)
   3059 		wm_set_ral(sc, NULL, i);
   3060 
   3061 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3062 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3063 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3064 		size = WM_ICH8_MC_TABSIZE;
   3065 	else
   3066 		size = WM_MC_TABSIZE;
   3067 	/* Clear out the multicast table. */
   3068 	for (i = 0; i < size; i++)
   3069 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3070 
   3071 	ETHER_FIRST_MULTI(step, ec, enm);
   3072 	while (enm != NULL) {
   3073 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3074 			/*
   3075 			 * We must listen to a range of multicast addresses.
   3076 			 * For now, just accept all multicasts, rather than
   3077 			 * trying to set only those filter bits needed to match
   3078 			 * the range.  (At this time, the only use of address
   3079 			 * ranges is for IP multicast routing, for which the
   3080 			 * range is big enough to require all bits set.)
   3081 			 */
   3082 			goto allmulti;
   3083 		}
   3084 
   3085 		hash = wm_mchash(sc, enm->enm_addrlo);
   3086 
   3087 		reg = (hash >> 5);
   3088 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3089 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3090 		    || (sc->sc_type == WM_T_PCH2)
   3091 		    || (sc->sc_type == WM_T_PCH_LPT))
   3092 			reg &= 0x1f;
   3093 		else
   3094 			reg &= 0x7f;
   3095 		bit = hash & 0x1f;
   3096 
   3097 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3098 		hash |= 1U << bit;
   3099 
   3100 		/* XXX Hardware bug?? */
   3101 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
   3102 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3103 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3104 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3105 		} else
   3106 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3107 
   3108 		ETHER_NEXT_MULTI(step, enm);
   3109 	}
   3110 
   3111 	ifp->if_flags &= ~IFF_ALLMULTI;
   3112 	goto setit;
   3113 
   3114  allmulti:
   3115 	ifp->if_flags |= IFF_ALLMULTI;
   3116 	sc->sc_rctl |= RCTL_MPE;
   3117 
   3118  setit:
   3119 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3120 }
   3121 
   3122 /* Reset and init related */
   3123 
   3124 static void
   3125 wm_set_vlan(struct wm_softc *sc)
   3126 {
   3127 	/* Deal with VLAN enables. */
   3128 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3129 		sc->sc_ctrl |= CTRL_VME;
   3130 	else
   3131 		sc->sc_ctrl &= ~CTRL_VME;
   3132 
    3133 	/* Write the control register. */
   3134 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3135 }
   3136 
   3137 static void
   3138 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3139 {
   3140 	uint32_t gcr;
   3141 	pcireg_t ctrl2;
   3142 
   3143 	gcr = CSR_READ(sc, WMREG_GCR);
   3144 
   3145 	/* Only take action if timeout value is defaulted to 0 */
   3146 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3147 		goto out;
   3148 
   3149 	if ((gcr & GCR_CAP_VER2) == 0) {
   3150 		gcr |= GCR_CMPL_TMOUT_10MS;
   3151 		goto out;
   3152 	}
   3153 
   3154 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3155 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3156 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3157 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3158 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3159 
   3160 out:
   3161 	/* Disable completion timeout resend */
   3162 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3163 
   3164 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3165 }
   3166 
   3167 void
   3168 wm_get_auto_rd_done(struct wm_softc *sc)
   3169 {
   3170 	int i;
   3171 
   3172 	/* wait for eeprom to reload */
   3173 	switch (sc->sc_type) {
   3174 	case WM_T_82571:
   3175 	case WM_T_82572:
   3176 	case WM_T_82573:
   3177 	case WM_T_82574:
   3178 	case WM_T_82583:
   3179 	case WM_T_82575:
   3180 	case WM_T_82576:
   3181 	case WM_T_82580:
   3182 	case WM_T_I350:
   3183 	case WM_T_I354:
   3184 	case WM_T_I210:
   3185 	case WM_T_I211:
   3186 	case WM_T_80003:
   3187 	case WM_T_ICH8:
   3188 	case WM_T_ICH9:
   3189 		for (i = 0; i < 10; i++) {
   3190 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3191 				break;
   3192 			delay(1000);
   3193 		}
   3194 		if (i == 10) {
   3195 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3196 			    "complete\n", device_xname(sc->sc_dev));
   3197 		}
   3198 		break;
   3199 	default:
   3200 		break;
   3201 	}
   3202 }
   3203 
   3204 void
   3205 wm_lan_init_done(struct wm_softc *sc)
   3206 {
   3207 	uint32_t reg = 0;
   3208 	int i;
   3209 
   3210 	/* wait for eeprom to reload */
   3211 	switch (sc->sc_type) {
   3212 	case WM_T_ICH10:
   3213 	case WM_T_PCH:
   3214 	case WM_T_PCH2:
   3215 	case WM_T_PCH_LPT:
   3216 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3217 			reg = CSR_READ(sc, WMREG_STATUS);
   3218 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3219 				break;
   3220 			delay(100);
   3221 		}
   3222 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3223 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3224 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3225 		}
   3226 		break;
   3227 	default:
   3228 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3229 		    __func__);
   3230 		break;
   3231 	}
   3232 
   3233 	reg &= ~STATUS_LAN_INIT_DONE;
   3234 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3235 }
   3236 
   3237 void
   3238 wm_get_cfg_done(struct wm_softc *sc)
   3239 {
   3240 	int mask;
   3241 	uint32_t reg;
   3242 	int i;
   3243 
   3244 	/* wait for eeprom to reload */
   3245 	switch (sc->sc_type) {
   3246 	case WM_T_82542_2_0:
   3247 	case WM_T_82542_2_1:
   3248 		/* null */
   3249 		break;
   3250 	case WM_T_82543:
   3251 	case WM_T_82544:
   3252 	case WM_T_82540:
   3253 	case WM_T_82545:
   3254 	case WM_T_82545_3:
   3255 	case WM_T_82546:
   3256 	case WM_T_82546_3:
   3257 	case WM_T_82541:
   3258 	case WM_T_82541_2:
   3259 	case WM_T_82547:
   3260 	case WM_T_82547_2:
   3261 	case WM_T_82573:
   3262 	case WM_T_82574:
   3263 	case WM_T_82583:
   3264 		/* generic */
   3265 		delay(10*1000);
   3266 		break;
   3267 	case WM_T_80003:
   3268 	case WM_T_82571:
   3269 	case WM_T_82572:
   3270 	case WM_T_82575:
   3271 	case WM_T_82576:
   3272 	case WM_T_82580:
   3273 	case WM_T_I350:
   3274 	case WM_T_I354:
   3275 	case WM_T_I210:
   3276 	case WM_T_I211:
   3277 		if (sc->sc_type == WM_T_82571) {
   3278 			/* Only 82571 shares port 0 */
   3279 			mask = EEMNGCTL_CFGDONE_0;
   3280 		} else
   3281 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3282 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3283 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3284 				break;
   3285 			delay(1000);
   3286 		}
   3287 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3288 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3289 				device_xname(sc->sc_dev), __func__));
   3290 		}
   3291 		break;
   3292 	case WM_T_ICH8:
   3293 	case WM_T_ICH9:
   3294 	case WM_T_ICH10:
   3295 	case WM_T_PCH:
   3296 	case WM_T_PCH2:
   3297 	case WM_T_PCH_LPT:
   3298 		delay(10*1000);
   3299 		if (sc->sc_type >= WM_T_ICH10)
   3300 			wm_lan_init_done(sc);
   3301 		else
   3302 			wm_get_auto_rd_done(sc);
   3303 
   3304 		reg = CSR_READ(sc, WMREG_STATUS);
   3305 		if ((reg & STATUS_PHYRA) != 0)
   3306 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3307 		break;
   3308 	default:
   3309 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3310 		    __func__);
   3311 		break;
   3312 	}
   3313 }
   3314 
   3315 /* Init hardware bits */
   3316 void
   3317 wm_initialize_hardware_bits(struct wm_softc *sc)
   3318 {
   3319 	uint32_t tarc0, tarc1, reg;
   3320 
    3321 	/* For the 82571 variants, 80003 and ICHs */
   3322 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3323 	    || (sc->sc_type >= WM_T_80003)) {
   3324 
   3325 		/* Transmit Descriptor Control 0 */
   3326 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3327 		reg |= TXDCTL_COUNT_DESC;
   3328 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3329 
   3330 		/* Transmit Descriptor Control 1 */
   3331 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3332 		reg |= TXDCTL_COUNT_DESC;
   3333 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3334 
   3335 		/* TARC0 */
   3336 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3337 		switch (sc->sc_type) {
   3338 		case WM_T_82571:
   3339 		case WM_T_82572:
   3340 		case WM_T_82573:
   3341 		case WM_T_82574:
   3342 		case WM_T_82583:
   3343 		case WM_T_80003:
   3344 			/* Clear bits 30..27 */
   3345 			tarc0 &= ~__BITS(30, 27);
   3346 			break;
   3347 		default:
   3348 			break;
   3349 		}
   3350 
   3351 		switch (sc->sc_type) {
   3352 		case WM_T_82571:
   3353 		case WM_T_82572:
   3354 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3355 
   3356 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3357 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3358 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3359 			/* 8257[12] Errata No.7 */
    3360 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3361 
   3362 			/* TARC1 bit 28 */
   3363 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3364 				tarc1 &= ~__BIT(28);
   3365 			else
   3366 				tarc1 |= __BIT(28);
   3367 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3368 
   3369 			/*
   3370 			 * 8257[12] Errata No.13
    3371 			 * Disable Dynamic Clock Gating.
   3372 			 */
   3373 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3374 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3375 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3376 			break;
   3377 		case WM_T_82573:
   3378 		case WM_T_82574:
   3379 		case WM_T_82583:
   3380 			if ((sc->sc_type == WM_T_82574)
   3381 			    || (sc->sc_type == WM_T_82583))
   3382 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3383 
   3384 			/* Extended Device Control */
   3385 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3386 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3387 			reg |= __BIT(22);	/* Set bit 22 */
   3388 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3389 
   3390 			/* Device Control */
   3391 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3392 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3393 
   3394 			/* PCIe Control Register */
   3395 			/*
   3396 			 * 82573 Errata (unknown).
   3397 			 *
   3398 			 * 82574 Errata 25 and 82583 Errata 12
   3399 			 * "Dropped Rx Packets":
    3400 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3401 			 */
   3402 			reg = CSR_READ(sc, WMREG_GCR);
   3403 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3404 			CSR_WRITE(sc, WMREG_GCR, reg);
   3405 
   3406 			if ((sc->sc_type == WM_T_82574)
   3407 			    || (sc->sc_type == WM_T_82583)) {
   3408 				/*
   3409 				 * Document says this bit must be set for
   3410 				 * proper operation.
   3411 				 */
   3412 				reg = CSR_READ(sc, WMREG_GCR);
   3413 				reg |= __BIT(22);
   3414 				CSR_WRITE(sc, WMREG_GCR, reg);
   3415 
   3416 				/*
    3417 				 * Apply a workaround for the hardware errata
    3418 				 * documented in the errata docs.  It fixes an
    3419 				 * issue where some error-prone or unreliable
    3420 				 * PCIe completions occur, particularly with
    3421 				 * ASPM enabled.  Without the fix, the issue
    3422 				 * can cause Tx timeouts.
   3423 				 */
   3424 				reg = CSR_READ(sc, WMREG_GCR2);
   3425 				reg |= __BIT(0);
   3426 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3427 			}
   3428 			break;
   3429 		case WM_T_80003:
   3430 			/* TARC0 */
   3431 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3432 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3433 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3434 
   3435 			/* TARC1 bit 28 */
   3436 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3437 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3438 				tarc1 &= ~__BIT(28);
   3439 			else
   3440 				tarc1 |= __BIT(28);
   3441 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3442 			break;
   3443 		case WM_T_ICH8:
   3444 		case WM_T_ICH9:
   3445 		case WM_T_ICH10:
   3446 		case WM_T_PCH:
   3447 		case WM_T_PCH2:
   3448 		case WM_T_PCH_LPT:
   3449 			/* TARC 0 */
   3450 			if (sc->sc_type == WM_T_ICH8) {
   3451 				/* Set TARC0 bits 29 and 28 */
   3452 				tarc0 |= __BITS(29, 28);
   3453 			}
   3454 			/* Set TARC0 bits 23,24,26,27 */
   3455 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3456 
   3457 			/* CTRL_EXT */
   3458 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3459 			reg |= __BIT(22);	/* Set bit 22 */
   3460 			/*
   3461 			 * Enable PHY low-power state when MAC is at D3
   3462 			 * w/o WoL
   3463 			 */
   3464 			if (sc->sc_type >= WM_T_PCH)
   3465 				reg |= CTRL_EXT_PHYPDEN;
   3466 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3467 
   3468 			/* TARC1 */
   3469 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3470 			/* bit 28 */
   3471 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3472 				tarc1 &= ~__BIT(28);
   3473 			else
   3474 				tarc1 |= __BIT(28);
   3475 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3476 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3477 
   3478 			/* Device Status */
   3479 			if (sc->sc_type == WM_T_ICH8) {
   3480 				reg = CSR_READ(sc, WMREG_STATUS);
   3481 				reg &= ~__BIT(31);
   3482 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3483 
   3484 			}
   3485 
   3486 			/*
   3487 			 * Work-around descriptor data corruption issue during
   3488 			 * NFS v2 UDP traffic, just disable the NFS filtering
   3489 			 * capability.
   3490 			 */
   3491 			reg = CSR_READ(sc, WMREG_RFCTL);
   3492 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3493 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3494 			break;
   3495 		default:
   3496 			break;
   3497 		}
   3498 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3499 
   3500 		/*
   3501 		 * 8257[12] Errata No.52 and some others.
   3502 		 * Avoid RSS Hash Value bug.
   3503 		 */
   3504 		switch (sc->sc_type) {
   3505 		case WM_T_82571:
   3506 		case WM_T_82572:
   3507 		case WM_T_82573:
   3508 		case WM_T_80003:
   3509 		case WM_T_ICH8:
   3510 			reg = CSR_READ(sc, WMREG_RFCTL);
   3511 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
   3512 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3513 			break;
   3514 		default:
   3515 			break;
   3516 		}
   3517 	}
   3518 }
   3519 
   3520 static uint32_t
   3521 wm_rxpbs_adjust_82580(uint32_t val)
   3522 {
   3523 	uint32_t rv = 0;
   3524 
   3525 	if (val < __arraycount(wm_82580_rxpbs_table))
   3526 		rv = wm_82580_rxpbs_table[val];
   3527 
   3528 	return rv;
   3529 }
   3530 
   3531 /*
   3532  * wm_reset:
   3533  *
   3534  *	Reset the i82542 chip.
   3535  */
   3536 static void
   3537 wm_reset(struct wm_softc *sc)
   3538 {
   3539 	int phy_reset = 0;
   3540 	int i, error = 0;
   3541 	uint32_t reg, mask;
   3542 
   3543 	/*
   3544 	 * Allocate on-chip memory according to the MTU size.
   3545 	 * The Packet Buffer Allocation register must be written
   3546 	 * before the chip is reset.
   3547 	 */
   3548 	switch (sc->sc_type) {
   3549 	case WM_T_82547:
   3550 	case WM_T_82547_2:
   3551 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3552 		    PBA_22K : PBA_30K;
   3553 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3554 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3555 			txq->txq_fifo_head = 0;
   3556 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3557 			txq->txq_fifo_size =
   3558 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3559 			txq->txq_fifo_stall = 0;
   3560 		}
   3561 		break;
   3562 	case WM_T_82571:
   3563 	case WM_T_82572:
    3564 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3565 	case WM_T_80003:
   3566 		sc->sc_pba = PBA_32K;
   3567 		break;
   3568 	case WM_T_82573:
   3569 		sc->sc_pba = PBA_12K;
   3570 		break;
   3571 	case WM_T_82574:
   3572 	case WM_T_82583:
   3573 		sc->sc_pba = PBA_20K;
   3574 		break;
   3575 	case WM_T_82576:
   3576 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3577 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3578 		break;
   3579 	case WM_T_82580:
   3580 	case WM_T_I350:
   3581 	case WM_T_I354:
   3582 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3583 		break;
   3584 	case WM_T_I210:
   3585 	case WM_T_I211:
   3586 		sc->sc_pba = PBA_34K;
   3587 		break;
   3588 	case WM_T_ICH8:
   3589 		/* Workaround for a bit corruption issue in FIFO memory */
   3590 		sc->sc_pba = PBA_8K;
   3591 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3592 		break;
   3593 	case WM_T_ICH9:
   3594 	case WM_T_ICH10:
   3595 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3596 		    PBA_14K : PBA_10K;
   3597 		break;
   3598 	case WM_T_PCH:
   3599 	case WM_T_PCH2:
   3600 	case WM_T_PCH_LPT:
   3601 		sc->sc_pba = PBA_26K;
   3602 		break;
   3603 	default:
   3604 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3605 		    PBA_40K : PBA_48K;
   3606 		break;
   3607 	}
   3608 	/*
    3609 	 * Only old or non-multiqueue devices have the PBA register.
   3610 	 * XXX Need special handling for 82575.
   3611 	 */
   3612 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3613 	    || (sc->sc_type == WM_T_82575))
   3614 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3615 
   3616 	/* Prevent the PCI-E bus from sticking */
   3617 	if (sc->sc_flags & WM_F_PCIE) {
   3618 		int timeout = 800;
   3619 
   3620 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3621 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3622 
   3623 		while (timeout--) {
   3624 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3625 			    == 0)
   3626 				break;
   3627 			delay(100);
   3628 		}
   3629 	}
   3630 
   3631 	/* Set the completion timeout for interface */
   3632 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3633 	    || (sc->sc_type == WM_T_82580)
   3634 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3635 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3636 		wm_set_pcie_completion_timeout(sc);
   3637 
   3638 	/* Clear interrupt */
   3639 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3640 	if (sc->sc_nintrs > 1) {
   3641 		if (sc->sc_type != WM_T_82574) {
   3642 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3643 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3644 		} else {
   3645 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3646 		}
   3647 	}
   3648 
   3649 	/* Stop the transmit and receive processes. */
   3650 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3651 	sc->sc_rctl &= ~RCTL_EN;
   3652 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3653 	CSR_WRITE_FLUSH(sc);
   3654 
   3655 	/* XXX set_tbi_sbp_82543() */
   3656 
   3657 	delay(10*1000);
   3658 
   3659 	/* Must acquire the MDIO ownership before MAC reset */
   3660 	switch (sc->sc_type) {
   3661 	case WM_T_82573:
   3662 	case WM_T_82574:
   3663 	case WM_T_82583:
   3664 		error = wm_get_hw_semaphore_82573(sc);
   3665 		break;
   3666 	default:
   3667 		break;
   3668 	}
   3669 
   3670 	/*
   3671 	 * 82541 Errata 29? & 82547 Errata 28?
   3672 	 * See also the description about PHY_RST bit in CTRL register
   3673 	 * in 8254x_GBe_SDM.pdf.
   3674 	 */
   3675 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3676 		CSR_WRITE(sc, WMREG_CTRL,
   3677 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3678 		CSR_WRITE_FLUSH(sc);
   3679 		delay(5000);
   3680 	}
   3681 
   3682 	switch (sc->sc_type) {
   3683 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3684 	case WM_T_82541:
   3685 	case WM_T_82541_2:
   3686 	case WM_T_82547:
   3687 	case WM_T_82547_2:
   3688 		/*
   3689 		 * On some chipsets, a reset through a memory-mapped write
   3690 		 * cycle can cause the chip to reset before completing the
    3691 		 * write cycle.  This causes a major headache that can be
   3692 		 * avoided by issuing the reset via indirect register writes
   3693 		 * through I/O space.
   3694 		 *
   3695 		 * So, if we successfully mapped the I/O BAR at attach time,
   3696 		 * use that.  Otherwise, try our luck with a memory-mapped
   3697 		 * reset.
   3698 		 */
   3699 		if (sc->sc_flags & WM_F_IOH_VALID)
   3700 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3701 		else
   3702 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3703 		break;
   3704 	case WM_T_82545_3:
   3705 	case WM_T_82546_3:
   3706 		/* Use the shadow control register on these chips. */
   3707 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3708 		break;
   3709 	case WM_T_80003:
   3710 		mask = swfwphysem[sc->sc_funcid];
   3711 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3712 		wm_get_swfw_semaphore(sc, mask);
   3713 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3714 		wm_put_swfw_semaphore(sc, mask);
   3715 		break;
   3716 	case WM_T_ICH8:
   3717 	case WM_T_ICH9:
   3718 	case WM_T_ICH10:
   3719 	case WM_T_PCH:
   3720 	case WM_T_PCH2:
   3721 	case WM_T_PCH_LPT:
   3722 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3723 		if (wm_check_reset_block(sc) == 0) {
   3724 			/*
   3725 			 * Gate automatic PHY configuration by hardware on
   3726 			 * non-managed 82579
   3727 			 */
   3728 			if ((sc->sc_type == WM_T_PCH2)
   3729 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3730 				!= 0))
   3731 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3732 
   3734 			reg |= CTRL_PHY_RESET;
   3735 			phy_reset = 1;
   3736 		}
   3737 		wm_get_swfwhw_semaphore(sc);
   3738 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3739 		/* Don't insert a completion barrier when resetting */
   3740 		delay(20*1000);
   3741 		wm_put_swfwhw_semaphore(sc);
   3742 		break;
   3743 	case WM_T_82580:
   3744 	case WM_T_I350:
   3745 	case WM_T_I354:
   3746 	case WM_T_I210:
   3747 	case WM_T_I211:
   3748 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3749 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3750 			CSR_WRITE_FLUSH(sc);
   3751 		delay(5000);
   3752 		break;
   3753 	case WM_T_82542_2_0:
   3754 	case WM_T_82542_2_1:
   3755 	case WM_T_82543:
   3756 	case WM_T_82540:
   3757 	case WM_T_82545:
   3758 	case WM_T_82546:
   3759 	case WM_T_82571:
   3760 	case WM_T_82572:
   3761 	case WM_T_82573:
   3762 	case WM_T_82574:
   3763 	case WM_T_82575:
   3764 	case WM_T_82576:
   3765 	case WM_T_82583:
   3766 	default:
   3767 		/* Everything else can safely use the documented method. */
   3768 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3769 		break;
   3770 	}
   3771 
   3772 	/* Must release the MDIO ownership after MAC reset */
   3773 	switch (sc->sc_type) {
   3774 	case WM_T_82573:
   3775 	case WM_T_82574:
   3776 	case WM_T_82583:
   3777 		if (error == 0)
   3778 			wm_put_hw_semaphore_82573(sc);
   3779 		break;
   3780 	default:
   3781 		break;
   3782 	}
   3783 
   3784 	if (phy_reset != 0)
   3785 		wm_get_cfg_done(sc);
   3786 
   3787 	/* reload EEPROM */
   3788 	switch (sc->sc_type) {
   3789 	case WM_T_82542_2_0:
   3790 	case WM_T_82542_2_1:
   3791 	case WM_T_82543:
   3792 	case WM_T_82544:
   3793 		delay(10);
   3794 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3795 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3796 		CSR_WRITE_FLUSH(sc);
   3797 		delay(2000);
   3798 		break;
   3799 	case WM_T_82540:
   3800 	case WM_T_82545:
   3801 	case WM_T_82545_3:
   3802 	case WM_T_82546:
   3803 	case WM_T_82546_3:
   3804 		delay(5*1000);
   3805 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3806 		break;
   3807 	case WM_T_82541:
   3808 	case WM_T_82541_2:
   3809 	case WM_T_82547:
   3810 	case WM_T_82547_2:
   3811 		delay(20000);
   3812 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3813 		break;
   3814 	case WM_T_82571:
   3815 	case WM_T_82572:
   3816 	case WM_T_82573:
   3817 	case WM_T_82574:
   3818 	case WM_T_82583:
   3819 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3820 			delay(10);
   3821 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3822 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3823 			CSR_WRITE_FLUSH(sc);
   3824 		}
   3825 		/* check EECD_EE_AUTORD */
   3826 		wm_get_auto_rd_done(sc);
   3827 		/*
    3828 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
   3829 		 * is set.
   3830 		 */
   3831 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3832 		    || (sc->sc_type == WM_T_82583))
   3833 			delay(25*1000);
   3834 		break;
   3835 	case WM_T_82575:
   3836 	case WM_T_82576:
   3837 	case WM_T_82580:
   3838 	case WM_T_I350:
   3839 	case WM_T_I354:
   3840 	case WM_T_I210:
   3841 	case WM_T_I211:
   3842 	case WM_T_80003:
   3843 		/* check EECD_EE_AUTORD */
   3844 		wm_get_auto_rd_done(sc);
   3845 		break;
   3846 	case WM_T_ICH8:
   3847 	case WM_T_ICH9:
   3848 	case WM_T_ICH10:
   3849 	case WM_T_PCH:
   3850 	case WM_T_PCH2:
   3851 	case WM_T_PCH_LPT:
   3852 		break;
   3853 	default:
   3854 		panic("%s: unknown type\n", __func__);
   3855 	}
   3856 
   3857 	/* Check whether EEPROM is present or not */
   3858 	switch (sc->sc_type) {
   3859 	case WM_T_82575:
   3860 	case WM_T_82576:
   3861 	case WM_T_82580:
   3862 	case WM_T_I350:
   3863 	case WM_T_I354:
   3864 	case WM_T_ICH8:
   3865 	case WM_T_ICH9:
   3866 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3867 			/* Not found */
   3868 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3869 			if (sc->sc_type == WM_T_82575)
   3870 				wm_reset_init_script_82575(sc);
   3871 		}
   3872 		break;
   3873 	default:
   3874 		break;
   3875 	}
   3876 
   3877 	if ((sc->sc_type == WM_T_82580)
   3878 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3879 		/* clear global device reset status bit */
   3880 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3881 	}
   3882 
   3883 	/* Clear any pending interrupt events. */
   3884 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3885 	reg = CSR_READ(sc, WMREG_ICR);
   3886 	if (sc->sc_nintrs > 1) {
   3887 		if (sc->sc_type != WM_T_82574) {
   3888 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3889 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3890 		} else
   3891 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3892 	}
   3893 
   3894 	/* reload sc_ctrl */
   3895 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3896 
   3897 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3898 		wm_set_eee_i350(sc);
   3899 
   3900 	/* dummy read from WUC */
   3901 	if (sc->sc_type == WM_T_PCH)
   3902 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3903 	/*
   3904 	 * For PCH, this write will make sure that any noise will be detected
   3905 	 * as a CRC error and be dropped rather than show up as a bad packet
    3906 	 * to the DMA engine.
   3907 	 */
   3908 	if (sc->sc_type == WM_T_PCH)
   3909 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3910 
   3911 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   3912 		CSR_WRITE(sc, WMREG_WUC, 0);
   3913 
   3914 	wm_reset_mdicnfg_82580(sc);
   3915 
   3916 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3917 		wm_pll_workaround_i210(sc);
   3918 }
   3919 
   3920 /*
   3921  * wm_add_rxbuf:
   3922  *
    3923  *	Add a receive buffer to the indicated descriptor.
   3924  */
   3925 static int
   3926 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   3927 {
   3928 	struct wm_softc *sc = rxq->rxq_sc;
   3929 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   3930 	struct mbuf *m;
   3931 	int error;
   3932 
   3933 	KASSERT(WM_RX_LOCKED(rxq));
   3934 
   3935 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3936 	if (m == NULL)
   3937 		return ENOBUFS;
   3938 
   3939 	MCLGET(m, M_DONTWAIT);
   3940 	if ((m->m_flags & M_EXT) == 0) {
   3941 		m_freem(m);
   3942 		return ENOBUFS;
   3943 	}
   3944 
   3945 	if (rxs->rxs_mbuf != NULL)
   3946 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3947 
   3948 	rxs->rxs_mbuf = m;
   3949 
   3950 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3951 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3952 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3953 	if (error) {
   3954 		/* XXX XXX XXX */
   3955 		aprint_error_dev(sc->sc_dev,
   3956 		    "unable to load rx DMA map %d, error = %d\n",
   3957 		    idx, error);
   3958 		panic("wm_add_rxbuf");
   3959 	}
   3960 
   3961 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3962 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3963 
   3964 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3965 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3966 			wm_init_rxdesc(rxq, idx);
   3967 	} else
   3968 		wm_init_rxdesc(rxq, idx);
   3969 
   3970 	return 0;
   3971 }
   3972 
   3973 /*
   3974  * wm_rxdrain:
   3975  *
   3976  *	Drain the receive queue.
   3977  */
   3978 static void
   3979 wm_rxdrain(struct wm_rxqueue *rxq)
   3980 {
   3981 	struct wm_softc *sc = rxq->rxq_sc;
   3982 	struct wm_rxsoft *rxs;
   3983 	int i;
   3984 
   3985 	KASSERT(WM_RX_LOCKED(rxq));
   3986 
   3987 	for (i = 0; i < WM_NRXDESC; i++) {
   3988 		rxs = &rxq->rxq_soft[i];
   3989 		if (rxs->rxs_mbuf != NULL) {
   3990 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3991 			m_freem(rxs->rxs_mbuf);
   3992 			rxs->rxs_mbuf = NULL;
   3993 		}
   3994 	}
   3995 }
   3996 
   3997 
   3998 /*
   3999  * XXX copy from FreeBSD's sys/net/rss_config.c
   4000  */
   4001 /*
   4002  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4003  * effectiveness may be limited by algorithm choice and available entropy
   4004  * during the boot.
   4005  *
   4006  * XXXRW: And that we don't randomize it yet!
   4007  *
   4008  * This is the default Microsoft RSS specification key which is also
   4009  * the Chelsio T5 firmware default key.
   4010  */
   4011 #define RSS_KEYSIZE 40
   4012 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4013 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4014 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4015 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4016 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4017 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4018 };
   4019 
   4020 /*
    4021  * The caller must pass an array of size sizeof(wm_rss_key).
    4022  *
    4023  * XXX
    4024  * As if_ixgbe may also use this function, it should not be an
    4025  * if_wm-specific function.
   4026  */
   4027 static void
   4028 wm_rss_getkey(uint8_t *key)
   4029 {
   4030 
   4031 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4032 }
   4033 
   4034 /*
    4035  * Set up the registers for RSS.
    4036  *
    4037  * XXX VMDq is not yet supported.
   4038  */
   4039 static void
   4040 wm_init_rss(struct wm_softc *sc)
   4041 {
   4042 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4043 	int i;
   4044 
   4045 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4046 
   4047 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4048 		int qid, reta_ent;
   4049 
   4050 		qid  = i % sc->sc_nrxqueues;
    4051 		switch (sc->sc_type) {
   4052 		case WM_T_82574:
   4053 			reta_ent = __SHIFTIN(qid,
   4054 			    RETA_ENT_QINDEX_MASK_82574);
   4055 			break;
   4056 		case WM_T_82575:
   4057 			reta_ent = __SHIFTIN(qid,
   4058 			    RETA_ENT_QINDEX1_MASK_82575);
   4059 			break;
   4060 		default:
   4061 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4062 			break;
   4063 		}
   4064 
   4065 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4066 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4067 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4068 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4069 	}
   4070 
   4071 	wm_rss_getkey((uint8_t *)rss_key);
   4072 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4073 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4074 
   4075 	if (sc->sc_type == WM_T_82574)
   4076 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4077 	else
   4078 		mrqc = MRQC_ENABLE_RSS_MQ;
   4079 
    4080 	/* XXX
    4081 	 * The same as FreeBSD's igb.
    4082 	 * Why isn't MRQC_RSS_FIELD_IPV6_EX used?
   4083 	 */
   4084 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4085 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4086 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4087 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4088 
   4089 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4090 }
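         /*
          * Illustrative example (added commentary, not driver logic):
          * because the redirection table is filled with (i % sc_nrxqueues),
          * a 4-queue setup maps RETA entries 0,1,2,3,4,5,... to queues
          * 0,1,2,3,0,1,..., spreading RSS hash results round-robin across
          * the RX queues.
          */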
   4091 
   4092 /*
    4093  * Adjust the numbers of TX and RX queues which the system actually uses.
    4094  *
    4095  * The numbers are affected by the parameters below:
    4096  *     - The number of hardware queues
   4097  *     - The number of MSI-X vectors (= "nvectors" argument)
   4098  *     - ncpu
   4099  */
   4100 static void
   4101 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4102 {
   4103 	int hw_ntxqueues, hw_nrxqueues;
   4104 
   4105 	if (nvectors < 3) {
   4106 		sc->sc_ntxqueues = 1;
   4107 		sc->sc_nrxqueues = 1;
   4108 		return;
   4109 	}
   4110 
    4111 	switch (sc->sc_type) {
   4112 	case WM_T_82572:
   4113 		hw_ntxqueues = 2;
   4114 		hw_nrxqueues = 2;
   4115 		break;
   4116 	case WM_T_82574:
   4117 		hw_ntxqueues = 2;
   4118 		hw_nrxqueues = 2;
   4119 		break;
   4120 	case WM_T_82575:
   4121 		hw_ntxqueues = 4;
   4122 		hw_nrxqueues = 4;
   4123 		break;
   4124 	case WM_T_82576:
   4125 		hw_ntxqueues = 16;
   4126 		hw_nrxqueues = 16;
   4127 		break;
   4128 	case WM_T_82580:
   4129 	case WM_T_I350:
   4130 	case WM_T_I354:
   4131 		hw_ntxqueues = 8;
   4132 		hw_nrxqueues = 8;
   4133 		break;
   4134 	case WM_T_I210:
   4135 		hw_ntxqueues = 4;
   4136 		hw_nrxqueues = 4;
   4137 		break;
   4138 	case WM_T_I211:
   4139 		hw_ntxqueues = 2;
   4140 		hw_nrxqueues = 2;
   4141 		break;
   4142 		/*
    4143 		 * As the Ethernet controllers below do not support MSI-X,
    4144 		 * this driver does not use multiqueue on them:
   4145 		 *     - WM_T_80003
   4146 		 *     - WM_T_ICH8
   4147 		 *     - WM_T_ICH9
   4148 		 *     - WM_T_ICH10
   4149 		 *     - WM_T_PCH
   4150 		 *     - WM_T_PCH2
   4151 		 *     - WM_T_PCH_LPT
   4152 		 */
   4153 	default:
   4154 		hw_ntxqueues = 1;
   4155 		hw_nrxqueues = 1;
   4156 		break;
   4157 	}
   4158 
   4159 	/*
    4160 	 * Since using more queues than MSI-X vectors cannot improve
    4161 	 * scaling, we limit the number of queues actually used.
    4162 	 *
    4163 	 * XXX
    4164 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
    4165 	 * However, the number of MSI-X vectors on recent controllers (such as
    4166 	 * the I354) assumes that drivers bundle a TX queue interrupt and an
    4167 	 * RX queue interrupt into one interrupt, e.g. FreeBSD's igb handles
    4168 	 * interrupts in such a way.
   4169 	 */
   4170 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4171 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4172 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4173 	} else {
   4174 		sc->sc_ntxqueues = hw_ntxqueues;
   4175 		sc->sc_nrxqueues = hw_nrxqueues;
   4176 	}
   4177 
   4178 	/*
    4179 	 * Since using more queues than CPUs cannot improve scaling, we limit
    4180 	 * the number of queues actually used.
   4181 	 */
   4182 	if (ncpu < sc->sc_ntxqueues)
   4183 		sc->sc_ntxqueues = ncpu;
   4184 	if (ncpu < sc->sc_nrxqueues)
   4185 		sc->sc_nrxqueues = ncpu;
   4186 
   4187 	/* XXX Currently, this driver supports RX multiqueue only. */
   4188 	sc->sc_ntxqueues = 1;
   4189 }
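         /*
          * Illustrative example (added commentary, not driver logic): on an
          * 82576 (16 hardware TX/RX queues) with nvectors == 5 and
          * ncpu >= 2, 5 < 16 + 16 + 1, so both queue counts become
          * (5 - 1) / 2 = 2; sc_ntxqueues is then forced back to 1 as noted
          * above.
          */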
   4190 
   4191 /*
    4192  * Both single-interrupt MSI and INTx can use this function.
   4193  */
   4194 static int
   4195 wm_setup_legacy(struct wm_softc *sc)
   4196 {
   4197 	pci_chipset_tag_t pc = sc->sc_pc;
   4198 	const char *intrstr = NULL;
   4199 	char intrbuf[PCI_INTRSTR_LEN];
   4200 	int error;
   4201 
   4202 	error = wm_alloc_txrx_queues(sc);
   4203 	if (error) {
   4204 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4205 		    error);
   4206 		return ENOMEM;
   4207 	}
   4208 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4209 	    sizeof(intrbuf));
   4210 #ifdef WM_MPSAFE
   4211 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4212 #endif
   4213 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4214 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4215 	if (sc->sc_ihs[0] == NULL) {
   4216 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
   4217 		    (pci_intr_type(sc->sc_intrs[0])
   4218 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4219 		return ENOMEM;
   4220 	}
   4221 
   4222 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4223 	sc->sc_nintrs = 1;
   4224 	return 0;
   4225 }
   4226 
   4227 static int
   4228 wm_setup_msix(struct wm_softc *sc)
   4229 {
   4230 	void *vih;
   4231 	kcpuset_t *affinity;
   4232 	int qidx, error, intr_idx, tx_established, rx_established;
   4233 	pci_chipset_tag_t pc = sc->sc_pc;
   4234 	const char *intrstr = NULL;
   4235 	char intrbuf[PCI_INTRSTR_LEN];
   4236 	char intr_xname[INTRDEVNAMEBUF];
   4237 
   4238 	error = wm_alloc_txrx_queues(sc);
   4239 	if (error) {
   4240 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4241 		    error);
   4242 		return ENOMEM;
   4243 	}
   4244 
   4245 	kcpuset_create(&affinity, false);
   4246 	intr_idx = 0;
   4247 
   4248 	/*
   4249 	 * TX
   4250 	 */
   4251 	tx_established = 0;
   4252 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4253 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4254 
   4255 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4256 		    sizeof(intrbuf));
   4257 #ifdef WM_MPSAFE
   4258 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4259 		    PCI_INTR_MPSAFE, true);
   4260 #endif
   4261 		memset(intr_xname, 0, sizeof(intr_xname));
   4262 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4263 		    device_xname(sc->sc_dev), qidx);
   4264 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4265 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4266 		if (vih == NULL) {
   4267 			aprint_error_dev(sc->sc_dev,
   4268 			    "unable to establish MSI-X(for TX)%s%s\n",
   4269 			    intrstr ? " at " : "",
   4270 			    intrstr ? intrstr : "");
   4271 
   4272 			goto fail_0;
   4273 		}
   4274 		kcpuset_zero(affinity);
   4275 		/* Round-robin affinity */
   4276 		kcpuset_set(affinity, intr_idx % ncpu);
   4277 		error = interrupt_distribute(vih, affinity, NULL);
   4278 		if (error == 0) {
   4279 			aprint_normal_dev(sc->sc_dev,
   4280 			    "for TX interrupting at %s affinity to %u\n",
   4281 			    intrstr, intr_idx % ncpu);
   4282 		} else {
   4283 			aprint_normal_dev(sc->sc_dev,
   4284 			    "for TX interrupting at %s\n", intrstr);
   4285 		}
   4286 		sc->sc_ihs[intr_idx] = vih;
   4287 		txq->txq_id = qidx;
   4288 		txq->txq_intr_idx = intr_idx;
   4289 
   4290 		tx_established++;
   4291 		intr_idx++;
   4292 	}
   4293 
   4294 	/*
   4295 	 * RX
   4296 	 */
   4297 	rx_established = 0;
   4298 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4299 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4300 
   4301 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4302 		    sizeof(intrbuf));
   4303 #ifdef WM_MPSAFE
   4304 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4305 		    PCI_INTR_MPSAFE, true);
   4306 #endif
   4307 		memset(intr_xname, 0, sizeof(intr_xname));
   4308 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4309 		    device_xname(sc->sc_dev), qidx);
   4310 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4311 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4312 		if (vih == NULL) {
   4313 			aprint_error_dev(sc->sc_dev,
   4314 			    "unable to establish MSI-X(for RX)%s%s\n",
   4315 			    intrstr ? " at " : "",
   4316 			    intrstr ? intrstr : "");
   4317 
   4318 			goto fail_1;
   4319 		}
   4320 		kcpuset_zero(affinity);
   4321 		/* Round-robin affinity */
   4322 		kcpuset_set(affinity, intr_idx % ncpu);
   4323 		error = interrupt_distribute(vih, affinity, NULL);
   4324 		if (error == 0) {
   4325 			aprint_normal_dev(sc->sc_dev,
   4326 			    "for RX interrupting at %s affinity to %u\n",
   4327 			    intrstr, intr_idx % ncpu);
   4328 		} else {
   4329 			aprint_normal_dev(sc->sc_dev,
   4330 			    "for RX interrupting at %s\n", intrstr);
   4331 		}
   4332 		sc->sc_ihs[intr_idx] = vih;
   4333 		rxq->rxq_id = qidx;
   4334 		rxq->rxq_intr_idx = intr_idx;
   4335 
   4336 		rx_established++;
   4337 		intr_idx++;
   4338 	}
   4339 
   4340 	/*
   4341 	 * LINK
   4342 	 */
   4343 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4344 	    sizeof(intrbuf));
   4345 #ifdef WM_MPSAFE
   4346 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4347 	    PCI_INTR_MPSAFE, true);
   4348 #endif
   4349 	memset(intr_xname, 0, sizeof(intr_xname));
   4350 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4351 	    device_xname(sc->sc_dev));
   4352 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4353 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4354 	if (vih == NULL) {
   4355 		aprint_error_dev(sc->sc_dev,
   4356 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4357 		    intrstr ? " at " : "",
   4358 		    intrstr ? intrstr : "");
   4359 
   4360 		goto fail_1;
   4361 	}
   4362 	/* keep default affinity to LINK interrupt */
   4363 	aprint_normal_dev(sc->sc_dev,
   4364 	    "for LINK interrupting at %s\n", intrstr);
   4365 	sc->sc_ihs[intr_idx] = vih;
   4366 	sc->sc_link_intr_idx = intr_idx;
   4367 
   4368 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4369 	kcpuset_destroy(affinity);
   4370 	return 0;
   4371 
   4372  fail_1:
   4373 	for (qidx = 0; qidx < rx_established; qidx++) {
   4374 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4375 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
   4376 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4377 	}
   4378  fail_0:
   4379 	for (qidx = 0; qidx < tx_established; qidx++) {
   4380 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4381 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
   4382 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4383 	}
   4384 
   4385 	kcpuset_destroy(affinity);
   4386 	return ENOMEM;
   4387 }
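         /*
          * Illustrative example (added commentary, not driver logic): with
          * sc_ntxqueues == 2 and sc_nrxqueues == 2, the MSI-X vectors are
          * assigned in order: 0 and 1 for TX, 2 and 3 for RX, and 4 for
          * LINK, so sc_nintrs = 2 + 2 + 1 = 5.
          */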
   4388 
   4389 /*
   4390  * wm_init:		[ifnet interface function]
   4391  *
   4392  *	Initialize the interface.
   4393  */
   4394 static int
   4395 wm_init(struct ifnet *ifp)
   4396 {
   4397 	struct wm_softc *sc = ifp->if_softc;
   4398 	int ret;
   4399 
   4400 	WM_CORE_LOCK(sc);
   4401 	ret = wm_init_locked(ifp);
   4402 	WM_CORE_UNLOCK(sc);
   4403 
   4404 	return ret;
   4405 }
   4406 
   4407 static int
   4408 wm_init_locked(struct ifnet *ifp)
   4409 {
   4410 	struct wm_softc *sc = ifp->if_softc;
   4411 	int i, j, trynum, error = 0;
   4412 	uint32_t reg;
   4413 
   4414 	KASSERT(WM_CORE_LOCKED(sc));
   4415 	/*
    4416 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4417 	 * There is a small but measurable benefit to avoiding the adjustment
   4418 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4419 	 * on such platforms.  One possibility is that the DMA itself is
   4420 	 * slightly more efficient if the front of the entire packet (instead
   4421 	 * of the front of the headers) is aligned.
   4422 	 *
   4423 	 * Note we must always set align_tweak to 0 if we are using
   4424 	 * jumbo frames.
   4425 	 */
   4426 #ifdef __NO_STRICT_ALIGNMENT
   4427 	sc->sc_align_tweak = 0;
   4428 #else
   4429 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4430 		sc->sc_align_tweak = 0;
   4431 	else
   4432 		sc->sc_align_tweak = 2;
   4433 #endif /* __NO_STRICT_ALIGNMENT */
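         	/*
         	 * Illustrative example (added commentary, not driver logic):
         	 * with the default MTU of 1500, 1500 + ETHER_HDR_LEN (14) +
         	 * ETHER_CRC_LEN (4) = 1518 fits in MCLBYTES - 2 (MCLBYTES is
         	 * typically 2048), so align_tweak becomes 2; shifting the
         	 * buffer by two bytes puts the IP header on a 4-byte boundary
         	 * after the 14-byte Ethernet header.
         	 */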
   4434 
   4435 	/* Cancel any pending I/O. */
   4436 	wm_stop_locked(ifp, 0);
   4437 
   4438 	/* update statistics before reset */
   4439 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4440 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4441 
   4442 	/* Reset the chip to a known state. */
   4443 	wm_reset(sc);
   4444 
   4445 	switch (sc->sc_type) {
   4446 	case WM_T_82571:
   4447 	case WM_T_82572:
   4448 	case WM_T_82573:
   4449 	case WM_T_82574:
   4450 	case WM_T_82583:
   4451 	case WM_T_80003:
   4452 	case WM_T_ICH8:
   4453 	case WM_T_ICH9:
   4454 	case WM_T_ICH10:
   4455 	case WM_T_PCH:
   4456 	case WM_T_PCH2:
   4457 	case WM_T_PCH_LPT:
   4458 		if (wm_check_mng_mode(sc) != 0)
   4459 			wm_get_hw_control(sc);
   4460 		break;
   4461 	default:
   4462 		break;
   4463 	}
   4464 
   4465 	/* Init hardware bits */
   4466 	wm_initialize_hardware_bits(sc);
   4467 
   4468 	/* Reset the PHY. */
   4469 	if (sc->sc_flags & WM_F_HAS_MII)
   4470 		wm_gmii_reset(sc);
   4471 
   4472 	/* Calculate (E)ITR value */
   4473 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4474 		sc->sc_itr = 450;	/* For EITR */
   4475 	} else if (sc->sc_type >= WM_T_82543) {
   4476 		/*
    4477 		 * Set up the interrupt throttling register (units of 256ns).
    4478 		 * Note that a footnote in Intel's documentation says this
    4479 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
    4480 		 * or 10Mbit mode.  Empirically, the same appears to hold
    4481 		 * for the 1024ns units of the other
   4482 		 * interrupt-related timer registers -- so, really, we ought
   4483 		 * to divide this value by 4 when the link speed is low.
   4484 		 *
   4485 		 * XXX implement this division at link speed change!
   4486 		 */
   4487 
   4488 		/*
   4489 		 * For N interrupts/sec, set this value to:
   4490 		 * 1000000000 / (N * 256).  Note that we set the
   4491 		 * absolute and packet timer values to this value
   4492 		 * divided by 4 to get "simple timer" behavior.
   4493 		 */
   4494 
   4495 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4496 	}
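         	/*
         	 * Illustrative arithmetic (added commentary, not driver logic):
         	 * sc_itr = 1500 in 256ns units gives 1000000000 / (1500 * 256),
         	 * roughly 2604 interrupts/sec, matching the comment above.
         	 */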
   4497 
   4498 	error = wm_init_txrx_queues(sc);
   4499 	if (error)
   4500 		goto out;
   4501 
   4502 	/*
   4503 	 * Clear out the VLAN table -- we don't use it (yet).
   4504 	 */
   4505 	CSR_WRITE(sc, WMREG_VET, 0);
   4506 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4507 		trynum = 10; /* Due to hw errata */
   4508 	else
   4509 		trynum = 1;
   4510 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4511 		for (j = 0; j < trynum; j++)
   4512 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4513 
   4514 	/*
   4515 	 * Set up flow-control parameters.
   4516 	 *
   4517 	 * XXX Values could probably stand some tuning.
   4518 	 */
   4519 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4520 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4521 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4522 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4523 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4524 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4525 	}
   4526 
   4527 	sc->sc_fcrtl = FCRTL_DFLT;
   4528 	if (sc->sc_type < WM_T_82543) {
   4529 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4530 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4531 	} else {
   4532 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4533 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4534 	}
   4535 
   4536 	if (sc->sc_type == WM_T_80003)
   4537 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4538 	else
   4539 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4540 
    4541 	/* Write the control register. */
   4542 	wm_set_vlan(sc);
   4543 
   4544 	if (sc->sc_flags & WM_F_HAS_MII) {
   4545 		int val;
   4546 
   4547 		switch (sc->sc_type) {
   4548 		case WM_T_80003:
   4549 		case WM_T_ICH8:
   4550 		case WM_T_ICH9:
   4551 		case WM_T_ICH10:
   4552 		case WM_T_PCH:
   4553 		case WM_T_PCH2:
   4554 		case WM_T_PCH_LPT:
   4555 			/*
    4556 			 * Set the MAC to wait the maximum time between each
    4557 			 * iteration and to increase the max iterations when
    4558 			 * polling the PHY; this fixes erroneous timeouts at
   4559 			 * 10Mbps.
   4560 			 */
   4561 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4562 			    0xFFFF);
   4563 			val = wm_kmrn_readreg(sc,
   4564 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4565 			val |= 0x3F;
   4566 			wm_kmrn_writereg(sc,
   4567 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4568 			break;
   4569 		default:
   4570 			break;
   4571 		}
   4572 
   4573 		if (sc->sc_type == WM_T_80003) {
   4574 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4575 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4576 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4577 
    4578 			/* Bypass the RX and TX FIFOs */
   4579 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4580 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4581 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4582 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4583 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4584 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4585 		}
   4586 	}
   4587 #if 0
   4588 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4589 #endif
   4590 
   4591 	/* Set up checksum offload parameters. */
   4592 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4593 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4594 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4595 		reg |= RXCSUM_IPOFL;
   4596 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4597 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4598 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4599 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4600 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4601 
   4602 	/* Set up MSI-X */
   4603 	if (sc->sc_nintrs > 1) {
   4604 		uint32_t ivar;
   4605 
   4606 		if (sc->sc_type == WM_T_82575) {
   4607 			/* Interrupt control */
   4608 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4609 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4610 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4611 
   4612 			/* TX */
   4613 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4614 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4615 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4616 				    EITR_TX_QUEUE(txq->txq_id));
   4617 			}
   4618 			/* RX */
   4619 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4620 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4621 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4622 				    EITR_RX_QUEUE(rxq->rxq_id));
   4623 			}
   4624 			/* Link status */
   4625 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4626 			    EITR_OTHER);
   4627 		} else if (sc->sc_type == WM_T_82574) {
   4628 			/* Interrupt control */
   4629 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4630 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4631 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4632 
   4633 			ivar = 0;
   4634 			/* TX */
   4635 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4636 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4637 				ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
   4638 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4639 			}
   4640 			/* RX */
   4641 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4642 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4643 				ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
   4644 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4645 			}
   4646 			/* Link status */
   4647 			ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
   4648 			    IVAR_OTHER_MASK);
   4649 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4650 		} else {
   4651 			/* Interrupt control */
   4652 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
   4653 			    | GPIE_MULTI_MSIX | GPIE_EIAME
   4654 			    | GPIE_PBA);
   4655 
   4656 			switch (sc->sc_type) {
   4657 			case WM_T_82580:
   4658 			case WM_T_I350:
   4659 			case WM_T_I354:
   4660 			case WM_T_I210:
   4661 			case WM_T_I211:
   4662 				/* TX */
   4663 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4664 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4665 					int qid = txq->txq_id;
   4666 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4667 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4668 					ivar |= __SHIFTIN(
   4669 						(txq->txq_intr_idx | IVAR_VALID),
   4670 						IVAR_TX_MASK_Q(qid));
   4671 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4672 				}
   4673 
   4674 				/* RX */
   4675 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4676 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4677 					int qid = rxq->rxq_id;
   4678 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4679 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4680 					ivar |= __SHIFTIN(
   4681 						(rxq->rxq_intr_idx | IVAR_VALID),
   4682 						IVAR_RX_MASK_Q(qid));
   4683 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4684 				}
   4685 				break;
   4686 			case WM_T_82576:
   4687 				/* TX */
   4688 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4689 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4690 					int qid = txq->txq_id;
   4691 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4692 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4693 					ivar |= __SHIFTIN(
   4694 						(txq->txq_intr_idx | IVAR_VALID),
   4695 						IVAR_TX_MASK_Q_82576(qid));
   4696 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4697 				}
   4698 
   4699 				/* RX */
   4700 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4701 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4702 					int qid = rxq->rxq_id;
   4703 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4704 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4705 					ivar |= __SHIFTIN(
   4706 						(rxq->rxq_intr_idx | IVAR_VALID),
   4707 						IVAR_RX_MASK_Q_82576(qid));
   4708 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4709 				}
   4710 				break;
   4711 			default:
   4712 				break;
   4713 			}
   4714 
   4715 			/* Link status */
   4716 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4717 			    IVAR_MISC_OTHER);
   4718 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4719 		}
   4720 
   4721 		if (sc->sc_nrxqueues > 1) {
   4722 			wm_init_rss(sc);
   4723 
    4724 			/*
    4725 			 * NOTE: Receive Full-Packet Checksum Offload is
    4726 			 * mutually exclusive with Multiqueue (RXCSUM_PCSD
    4727 			 * hands the checksum field over to the RSS hash).
    4728 			 * TCP/IP checksum offload is separate and still works.
    4729 			 */
   4730 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4731 			reg |= RXCSUM_PCSD;
   4732 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4733 		}
   4734 	}
   4735 
   4736 	/* Set up the interrupt registers. */
   4737 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4738 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4739 	    ICR_RXO | ICR_RXT0;
   4740 	if (sc->sc_nintrs > 1) {
   4741 		uint32_t mask;
   4742 		switch (sc->sc_type) {
   4743 		case WM_T_82574:
   4744 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4745 			    WMREG_EIAC_82574_MSIX_MASK);
   4746 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4747 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4748 			break;
   4749 		default:
   4750 			if (sc->sc_type == WM_T_82575) {
   4751 				mask = 0;
   4752 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4753 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4754 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4755 				}
   4756 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4757 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4758 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4759 				}
   4760 				mask |= EITR_OTHER;
   4761 			} else {
   4762 				mask = 0;
   4763 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4764 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4765 					mask |= 1 << txq->txq_intr_idx;
   4766 				}
   4767 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4768 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4769 					mask |= 1 << rxq->rxq_intr_idx;
   4770 				}
   4771 				mask |= 1 << sc->sc_link_intr_idx;
   4772 			}
   4773 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4774 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4775 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4776 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4777 			break;
   4778 		}
   4779 	} else
   4780 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
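	/*
	 * A note on the MSI-X path above (our reading of the datasheets):
	 * EIAC marks extended causes as auto-cleared when their vector
	 * fires, EIAM auto-masks them, and EIMS enables them.  IMS is left
	 * with only LSC on the non-82574 chips so that link changes still
	 * reach the link vector.
	 */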
   4781 
   4782 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4783 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4784 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4785 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4786 		reg |= KABGTXD_BGSQLBIAS;
   4787 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4788 	}
   4789 
   4790 	/* Set up the inter-packet gap. */
   4791 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4792 
   4793 	if (sc->sc_type >= WM_T_82543) {
    4794 		/*
    4795 		 * XXX The 82574 has both ITR and EITR.  Set EITR when we
    4796 		 * use the multiqueue function with MSI-X.
    4797 		 */
   4798 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4799 			int qidx;
   4800 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4801 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4802 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4803 				    sc->sc_itr);
   4804 			}
   4805 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4806 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4807 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4808 				    sc->sc_itr);
   4809 			}
    4810 			/*
    4811 			 * Link interrupts occur much less frequently
    4812 			 * than TX and RX interrupts, so we don't tune
    4813 			 * the EITR(WM_MSIX_LINKINTR_IDX) value the way
    4814 			 * FreeBSD's if_igb does.
    4815 			 */
   4816 		} else
   4817 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4818 	}
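	/*
	 * Worked example, assuming the usual 256 ns ITR/EITR granularity
	 * on these chips: sc_itr = 450 gives 450 * 256 ns = ~115 us
	 * between interrupts, i.e. roughly 8700 interrupts/s per vector:
	 *
	 *	rate = 10^9 / (itr * 256)
	 */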
   4819 
   4820 	/* Set the VLAN ethernetype. */
   4821 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4822 
   4823 	/*
   4824 	 * Set up the transmit control register; we start out with
    4825 	 * a collision distance suitable for FDX, but update it when
   4826 	 * we resolve the media type.
   4827 	 */
   4828 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4829 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4830 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4831 	if (sc->sc_type >= WM_T_82571)
   4832 		sc->sc_tctl |= TCTL_MULR;
   4833 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4834 
   4835 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4836 		/* Write TDT after TCTL.EN is set.  See the datasheet. */
   4837 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4838 	}
   4839 
   4840 	if (sc->sc_type == WM_T_80003) {
   4841 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4842 		reg &= ~TCTL_EXT_GCEX_MASK;
   4843 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4844 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4845 	}
   4846 
   4847 	/* Set the media. */
   4848 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4849 		goto out;
   4850 
   4851 	/* Configure for OS presence */
   4852 	wm_init_manageability(sc);
   4853 
   4854 	/*
   4855 	 * Set up the receive control register; we actually program
   4856 	 * the register when we set the receive filter.  Use multicast
   4857 	 * address offset type 0.
   4858 	 *
   4859 	 * Only the i82544 has the ability to strip the incoming
   4860 	 * CRC, so we don't enable that feature.
   4861 	 */
   4862 	sc->sc_mchash_type = 0;
   4863 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4864 	    | RCTL_MO(sc->sc_mchash_type);
   4865 
    4866 	/*
    4867 	 * The I350 has a bug where it always strips the CRC whether
    4868 	 * asked to or not.  So ask for stripped CRC here and cope in wm_rxeof().
    4869 	 */
   4870 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4871 	    || (sc->sc_type == WM_T_I210))
   4872 		sc->sc_rctl |= RCTL_SECRC;
   4873 
   4874 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4875 	    && (ifp->if_mtu > ETHERMTU)) {
   4876 		sc->sc_rctl |= RCTL_LPE;
   4877 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4878 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4879 	}
   4880 
   4881 	if (MCLBYTES == 2048) {
   4882 		sc->sc_rctl |= RCTL_2k;
   4883 	} else {
   4884 		if (sc->sc_type >= WM_T_82543) {
   4885 			switch (MCLBYTES) {
   4886 			case 4096:
   4887 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4888 				break;
   4889 			case 8192:
   4890 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4891 				break;
   4892 			case 16384:
   4893 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4894 				break;
   4895 			default:
   4896 				panic("wm_init: MCLBYTES %d unsupported",
   4897 				    MCLBYTES);
   4898 				break;
   4899 			}
   4900 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4901 	}
   4902 
   4903 	/* Set the receive filter. */
   4904 	wm_set_filter(sc);
   4905 
   4906 	/* Enable ECC */
   4907 	switch (sc->sc_type) {
   4908 	case WM_T_82571:
   4909 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4910 		reg |= PBA_ECC_CORR_EN;
   4911 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4912 		break;
   4913 	case WM_T_PCH_LPT:
   4914 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4915 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4916 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4917 
   4918 		reg = CSR_READ(sc, WMREG_CTRL);
   4919 		reg |= CTRL_MEHE;
   4920 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4921 		break;
   4922 	default:
   4923 		break;
   4924 	}
   4925 
    4926 	/* On 82575 and later, set RDT only if RX is enabled */
   4927 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4928 		int qidx;
   4929 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4930 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4931 			for (i = 0; i < WM_NRXDESC; i++) {
   4932 				WM_RX_LOCK(rxq);
   4933 				wm_init_rxdesc(rxq, i);
   4934 				WM_RX_UNLOCK(rxq);
   4935 
   4936 			}
   4937 		}
   4938 	}
   4939 
   4940 	sc->sc_stopping = false;
   4941 
   4942 	/* Start the one second link check clock. */
   4943 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4944 
   4945 	/* ...all done! */
   4946 	ifp->if_flags |= IFF_RUNNING;
   4947 	ifp->if_flags &= ~IFF_OACTIVE;
   4948 
   4949  out:
   4950 	sc->sc_if_flags = ifp->if_flags;
   4951 	if (error)
   4952 		log(LOG_ERR, "%s: interface not running\n",
   4953 		    device_xname(sc->sc_dev));
   4954 	return error;
   4955 }
   4956 
   4957 /*
   4958  * wm_stop:		[ifnet interface function]
   4959  *
   4960  *	Stop transmission on the interface.
   4961  */
   4962 static void
   4963 wm_stop(struct ifnet *ifp, int disable)
   4964 {
   4965 	struct wm_softc *sc = ifp->if_softc;
   4966 
   4967 	WM_CORE_LOCK(sc);
   4968 	wm_stop_locked(ifp, disable);
   4969 	WM_CORE_UNLOCK(sc);
   4970 }
   4971 
   4972 static void
   4973 wm_stop_locked(struct ifnet *ifp, int disable)
   4974 {
   4975 	struct wm_softc *sc = ifp->if_softc;
   4976 	struct wm_txsoft *txs;
   4977 	int i, qidx;
   4978 
   4979 	KASSERT(WM_CORE_LOCKED(sc));
   4980 
   4981 	sc->sc_stopping = true;
   4982 
   4983 	/* Stop the one second clock. */
   4984 	callout_stop(&sc->sc_tick_ch);
   4985 
   4986 	/* Stop the 82547 Tx FIFO stall check timer. */
   4987 	if (sc->sc_type == WM_T_82547)
   4988 		callout_stop(&sc->sc_txfifo_ch);
   4989 
   4990 	if (sc->sc_flags & WM_F_HAS_MII) {
   4991 		/* Down the MII. */
   4992 		mii_down(&sc->sc_mii);
   4993 	} else {
   4994 #if 0
   4995 		/* Should we clear PHY's status properly? */
   4996 		wm_reset(sc);
   4997 #endif
   4998 	}
   4999 
   5000 	/* Stop the transmit and receive processes. */
   5001 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5002 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5003 	sc->sc_rctl &= ~RCTL_EN;
   5004 
   5005 	/*
   5006 	 * Clear the interrupt mask to ensure the device cannot assert its
   5007 	 * interrupt line.
   5008 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5009 	 * service any currently pending or shared interrupt.
   5010 	 */
   5011 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5012 	sc->sc_icr = 0;
   5013 	if (sc->sc_nintrs > 1) {
   5014 		if (sc->sc_type != WM_T_82574) {
   5015 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5016 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5017 		} else
   5018 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5019 	}
   5020 
   5021 	/* Release any queued transmit buffers. */
   5022 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5023 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5024 		WM_TX_LOCK(txq);
   5025 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5026 			txs = &txq->txq_soft[i];
   5027 			if (txs->txs_mbuf != NULL) {
   5028 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5029 				m_freem(txs->txs_mbuf);
   5030 				txs->txs_mbuf = NULL;
   5031 			}
   5032 		}
   5033 		WM_TX_UNLOCK(txq);
   5034 	}
   5035 
   5036 	/* Mark the interface as down and cancel the watchdog timer. */
   5037 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5038 	ifp->if_timer = 0;
   5039 
   5040 	if (disable) {
   5041 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5042 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5043 			WM_RX_LOCK(rxq);
   5044 			wm_rxdrain(rxq);
   5045 			WM_RX_UNLOCK(rxq);
   5046 		}
   5047 	}
   5048 
   5049 #if 0 /* notyet */
   5050 	if (sc->sc_type >= WM_T_82544)
   5051 		CSR_WRITE(sc, WMREG_WUC, 0);
   5052 #endif
   5053 }
   5054 
   5055 static void
   5056 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5057 {
   5058 	struct mbuf *m;
   5059 	int i;
   5060 
   5061 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5062 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5063 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5064 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5065 		    m->m_data, m->m_len, m->m_flags);
   5066 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5067 	    i, i == 1 ? "" : "s");
   5068 }
   5069 
   5070 /*
   5071  * wm_82547_txfifo_stall:
   5072  *
   5073  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5074  *	reset the FIFO pointers, and restart packet transmission.
   5075  */
   5076 static void
   5077 wm_82547_txfifo_stall(void *arg)
   5078 {
   5079 	struct wm_softc *sc = arg;
   5080 	struct wm_txqueue *txq = sc->sc_txq;
   5081 #ifndef WM_MPSAFE
   5082 	int s;
   5083 
   5084 	s = splnet();
   5085 #endif
   5086 	WM_TX_LOCK(txq);
   5087 
   5088 	if (sc->sc_stopping)
   5089 		goto out;
   5090 
   5091 	if (txq->txq_fifo_stall) {
   5092 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5093 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5094 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5095 			/*
   5096 			 * Packets have drained.  Stop transmitter, reset
   5097 			 * FIFO pointers, restart transmitter, and kick
   5098 			 * the packet queue.
   5099 			 */
   5100 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5101 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5102 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5103 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5104 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5105 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5106 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5107 			CSR_WRITE_FLUSH(sc);
   5108 
   5109 			txq->txq_fifo_head = 0;
   5110 			txq->txq_fifo_stall = 0;
   5111 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5112 		} else {
   5113 			/*
   5114 			 * Still waiting for packets to drain; try again in
   5115 			 * another tick.
   5116 			 */
   5117 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5118 		}
   5119 	}
   5120 
   5121 out:
   5122 	WM_TX_UNLOCK(txq);
   5123 #ifndef WM_MPSAFE
   5124 	splx(s);
   5125 #endif
   5126 }
   5127 
   5128 /*
   5129  * wm_82547_txfifo_bugchk:
   5130  *
   5131  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5132  *	prevent enqueueing a packet that would wrap around the end
    5133  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
   5134  *
   5135  *	We do this by checking the amount of space before the end
   5136  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5137  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5138  *	the internal FIFO pointers to the beginning, and restart
   5139  *	transmission on the interface.
   5140  */
   5141 #define	WM_FIFO_HDR		0x10
   5142 #define	WM_82547_PAD_LEN	0x3e0
   5143 static int
   5144 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5145 {
   5146 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5147 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5148 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5149 
   5150 	/* Just return if already stalled. */
   5151 	if (txq->txq_fifo_stall)
   5152 		return 1;
   5153 
   5154 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5155 		/* Stall only occurs in half-duplex mode. */
   5156 		goto send_packet;
   5157 	}
   5158 
   5159 	if (len >= WM_82547_PAD_LEN + space) {
   5160 		txq->txq_fifo_stall = 1;
   5161 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5162 		return 1;
   5163 	}
   5164 
   5165  send_packet:
   5166 	txq->txq_fifo_head += len;
   5167 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5168 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5169 
   5170 	return 0;
   5171 }
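/*
 * Worked example of the accounting above, with a hypothetical 8 KB FIFO
 * (txq_fifo_size = 0x2000) and txq_fifo_head = 0x1c00: a 1518-byte frame
 * rounds up to len = 0x600 and space = 0x400; since 0x600 <
 * WM_82547_PAD_LEN + 0x400 = 0x7e0, the packet is sent, and the head
 * advances to 0x2200, wrapping back to 0x200.
 */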
   5172 
   5173 static int
   5174 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5175 {
   5176 	int error;
   5177 
   5178 	/*
   5179 	 * Allocate the control data structures, and create and load the
   5180 	 * DMA map for it.
   5181 	 *
   5182 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5183 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5184 	 * both sets within the same 4G segment.
   5185 	 */
   5186 	if (sc->sc_type < WM_T_82544) {
   5187 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5188 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5189 	} else {
   5190 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5191 		txq->txq_desc_size = sizeof(txdescs_t);
   5192 	}
   5193 
   5194 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
   5195 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
   5196 		    &txq->txq_desc_rseg, 0)) != 0) {
   5197 		aprint_error_dev(sc->sc_dev,
   5198 		    "unable to allocate TX control data, error = %d\n",
   5199 		    error);
   5200 		goto fail_0;
   5201 	}
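	/*
	 * In the bus_dmamem_alloc(9) call above, PAGE_SIZE is the
	 * alignment and 0x100000000ULL the boundary argument: segments
	 * may not cross a 4 GB boundary, which is what enforces the
	 * same-4G-segment requirement described in the comment.
	 */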
   5202 
   5203 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5204 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5205 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5206 		aprint_error_dev(sc->sc_dev,
   5207 		    "unable to map TX control data, error = %d\n", error);
   5208 		goto fail_1;
   5209 	}
   5210 
   5211 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5212 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5213 		aprint_error_dev(sc->sc_dev,
   5214 		    "unable to create TX control data DMA map, error = %d\n",
   5215 		    error);
   5216 		goto fail_2;
   5217 	}
   5218 
   5219 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5220 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5221 		aprint_error_dev(sc->sc_dev,
   5222 		    "unable to load TX control data DMA map, error = %d\n",
   5223 		    error);
   5224 		goto fail_3;
   5225 	}
   5226 
   5227 	return 0;
   5228 
   5229  fail_3:
   5230 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5231  fail_2:
   5232 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5233 	    txq->txq_desc_size);
   5234  fail_1:
   5235 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5236  fail_0:
   5237 	return error;
   5238 }
   5239 
   5240 static void
   5241 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5242 {
   5243 
   5244 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5245 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5246 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5247 	    txq->txq_desc_size);
   5248 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5249 }
   5250 
   5251 static int
   5252 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5253 {
   5254 	int error;
   5255 
   5256 	/*
   5257 	 * Allocate the control data structures, and create and load the
   5258 	 * DMA map for it.
   5259 	 *
   5260 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5261 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5262 	 * both sets within the same 4G segment.
   5263 	 */
   5264 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5265 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
   5266 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
   5267 		    &rxq->rxq_desc_rseg, 0)) != 0) {
   5268 		aprint_error_dev(sc->sc_dev,
   5269 		    "unable to allocate RX control data, error = %d\n",
   5270 		    error);
   5271 		goto fail_0;
   5272 	}
   5273 
   5274 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5275 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5276 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5277 		aprint_error_dev(sc->sc_dev,
   5278 		    "unable to map RX control data, error = %d\n", error);
   5279 		goto fail_1;
   5280 	}
   5281 
   5282 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5283 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5284 		aprint_error_dev(sc->sc_dev,
   5285 		    "unable to create RX control data DMA map, error = %d\n",
   5286 		    error);
   5287 		goto fail_2;
   5288 	}
   5289 
   5290 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5291 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5292 		aprint_error_dev(sc->sc_dev,
   5293 		    "unable to load RX control data DMA map, error = %d\n",
   5294 		    error);
   5295 		goto fail_3;
   5296 	}
   5297 
   5298 	return 0;
   5299 
   5300  fail_3:
   5301 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5302  fail_2:
   5303 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5304 	    rxq->rxq_desc_size);
   5305  fail_1:
   5306 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5307  fail_0:
   5308 	return error;
   5309 }
   5310 
   5311 static void
   5312 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5313 {
   5314 
   5315 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5316 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5317 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5318 	    rxq->rxq_desc_size);
   5319 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5320 }
   5321 
   5322 
   5323 static int
   5324 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5325 {
   5326 	int i, error;
   5327 
   5328 	/* Create the transmit buffer DMA maps. */
   5329 	WM_TXQUEUELEN(txq) =
   5330 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5331 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5332 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5333 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5334 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5335 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5336 			aprint_error_dev(sc->sc_dev,
   5337 			    "unable to create Tx DMA map %d, error = %d\n",
   5338 			    i, error);
   5339 			goto fail;
   5340 		}
   5341 	}
   5342 
   5343 	return 0;
   5344 
   5345  fail:
   5346 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5347 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5348 			bus_dmamap_destroy(sc->sc_dmat,
   5349 			    txq->txq_soft[i].txs_dmamap);
   5350 	}
   5351 	return error;
   5352 }
   5353 
   5354 static void
   5355 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5356 {
   5357 	int i;
   5358 
   5359 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5360 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5361 			bus_dmamap_destroy(sc->sc_dmat,
   5362 			    txq->txq_soft[i].txs_dmamap);
   5363 	}
   5364 }
   5365 
   5366 static int
   5367 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5368 {
   5369 	int i, error;
   5370 
   5371 	/* Create the receive buffer DMA maps. */
   5372 	for (i = 0; i < WM_NRXDESC; i++) {
   5373 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5374 			    MCLBYTES, 0, 0,
   5375 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5376 			aprint_error_dev(sc->sc_dev,
   5377 			    "unable to create Rx DMA map %d error = %d\n",
   5378 			    i, error);
   5379 			goto fail;
   5380 		}
   5381 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5382 	}
   5383 
   5384 	return 0;
   5385 
   5386  fail:
   5387 	for (i = 0; i < WM_NRXDESC; i++) {
   5388 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5389 			bus_dmamap_destroy(sc->sc_dmat,
   5390 			    rxq->rxq_soft[i].rxs_dmamap);
   5391 	}
   5392 	return error;
   5393 }
   5394 
   5395 static void
   5396 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5397 {
   5398 	int i;
   5399 
   5400 	for (i = 0; i < WM_NRXDESC; i++) {
   5401 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5402 			bus_dmamap_destroy(sc->sc_dmat,
   5403 			    rxq->rxq_soft[i].rxs_dmamap);
   5404 	}
   5405 }
   5406 
   5407 /*
    5408  * wm_alloc_txrx_queues:
   5409  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5410  */
   5411 static int
   5412 wm_alloc_txrx_queues(struct wm_softc *sc)
   5413 {
   5414 	int i, error, tx_done, rx_done;
   5415 
   5416 	/*
   5417 	 * For transmission
   5418 	 */
   5419 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5420 	    KM_SLEEP);
   5421 	if (sc->sc_txq == NULL) {
   5422 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5423 		error = ENOMEM;
   5424 		goto fail_0;
   5425 	}
   5426 
   5427 	error = 0;
   5428 	tx_done = 0;
   5429 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5430 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5431 		txq->txq_sc = sc;
   5432 #ifdef WM_MPSAFE
   5433 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5434 #else
   5435 		txq->txq_lock = NULL;
   5436 #endif
   5437 		error = wm_alloc_tx_descs(sc, txq);
   5438 		if (error)
   5439 			break;
   5440 		error = wm_alloc_tx_buffer(sc, txq);
   5441 		if (error) {
   5442 			wm_free_tx_descs(sc, txq);
   5443 			break;
   5444 		}
   5445 		tx_done++;
   5446 	}
   5447 	if (error)
   5448 		goto fail_1;
   5449 
   5450 	/*
    5451 	 * For receive
   5452 	 */
   5453 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5454 	    KM_SLEEP);
   5455 	if (sc->sc_rxq == NULL) {
   5456 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5457 		error = ENOMEM;
   5458 		goto fail_1;
   5459 	}
   5460 
   5461 	error = 0;
   5462 	rx_done = 0;
   5463 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5464 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5465 		rxq->rxq_sc = sc;
   5466 #ifdef WM_MPSAFE
   5467 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5468 #else
   5469 		rxq->rxq_lock = NULL;
   5470 #endif
   5471 		error = wm_alloc_rx_descs(sc, rxq);
   5472 		if (error)
   5473 			break;
   5474 
   5475 		error = wm_alloc_rx_buffer(sc, rxq);
   5476 		if (error) {
   5477 			wm_free_rx_descs(sc, rxq);
   5478 			break;
   5479 		}
   5480 
   5481 		rx_done++;
   5482 	}
   5483 	if (error)
   5484 		goto fail_2;
   5485 
   5486 	return 0;
   5487 
   5488  fail_2:
   5489 	for (i = 0; i < rx_done; i++) {
   5490 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5491 		wm_free_rx_buffer(sc, rxq);
   5492 		wm_free_rx_descs(sc, rxq);
   5493 		if (rxq->rxq_lock)
   5494 			mutex_obj_free(rxq->rxq_lock);
   5495 	}
   5496 	kmem_free(sc->sc_rxq,
   5497 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5498  fail_1:
   5499 	for (i = 0; i < tx_done; i++) {
   5500 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5501 		wm_free_tx_buffer(sc, txq);
   5502 		wm_free_tx_descs(sc, txq);
   5503 		if (txq->txq_lock)
   5504 			mutex_obj_free(txq->txq_lock);
   5505 	}
   5506 	kmem_free(sc->sc_txq,
   5507 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5508  fail_0:
   5509 	return error;
   5510 }
   5511 
   5512 /*
    5513  * wm_free_txrx_queues:
   5514  *	Free {tx,rx}descs and {tx,rx} buffers
   5515  */
   5516 static void
   5517 wm_free_txrx_queues(struct wm_softc *sc)
   5518 {
   5519 	int i;
   5520 
   5521 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5522 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5523 		wm_free_rx_buffer(sc, rxq);
   5524 		wm_free_rx_descs(sc, rxq);
   5525 		if (rxq->rxq_lock)
   5526 			mutex_obj_free(rxq->rxq_lock);
   5527 	}
   5528 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5529 
   5530 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5531 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5532 		wm_free_tx_buffer(sc, txq);
   5533 		wm_free_tx_descs(sc, txq);
   5534 		if (txq->txq_lock)
   5535 			mutex_obj_free(txq->txq_lock);
   5536 	}
   5537 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5538 }
   5539 
   5540 static void
   5541 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5542 {
   5543 
   5544 	KASSERT(WM_TX_LOCKED(txq));
   5545 
   5546 	/* Initialize the transmit descriptor ring. */
   5547 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5548 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5549 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5550 	txq->txq_free = WM_NTXDESC(txq);
   5551 	txq->txq_next = 0;
   5552 }
   5553 
   5554 static void
   5555 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5556 {
   5557 
   5558 	KASSERT(WM_TX_LOCKED(txq));
   5559 
   5560 	if (sc->sc_type < WM_T_82543) {
   5561 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5562 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5563 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5564 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5565 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5566 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5567 	} else {
   5568 		int qid = txq->txq_id;
   5569 
   5570 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5571 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5572 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
   5573 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5574 
   5575 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5576 			/*
   5577 			 * Don't write TDT before TCTL.EN is set.
    5578 			 * See the datasheet.
   5579 			 */
   5580 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5581 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5582 			    | TXDCTL_WTHRESH(0));
   5583 		else {
    5584 			/* ITR / 4 (ITR is in 256 ns units, TIDV in 1.024 us units) */
   5585 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5586 			if (sc->sc_type >= WM_T_82540) {
    5587 				/* Should be the same value as TIDV */
   5588 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5589 			}
   5590 
   5591 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5592 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5593 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5594 		}
   5595 	}
   5596 }
   5597 
   5598 static void
   5599 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5600 {
   5601 	int i;
   5602 
   5603 	KASSERT(WM_TX_LOCKED(txq));
   5604 
   5605 	/* Initialize the transmit job descriptors. */
   5606 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5607 		txq->txq_soft[i].txs_mbuf = NULL;
   5608 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5609 	txq->txq_snext = 0;
   5610 	txq->txq_sdirty = 0;
   5611 }
   5612 
   5613 static void
   5614 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5615 {
   5616 
   5617 	KASSERT(WM_TX_LOCKED(txq));
   5618 
   5619 	/*
   5620 	 * Set up some register offsets that are different between
   5621 	 * the i82542 and the i82543 and later chips.
   5622 	 */
   5623 	if (sc->sc_type < WM_T_82543) {
   5624 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5625 	} else {
   5626 		txq->txq_tdt_reg = WMREG_TDT(0);
   5627 	}
   5628 
   5629 	wm_init_tx_descs(sc, txq);
   5630 	wm_init_tx_regs(sc, txq);
   5631 	wm_init_tx_buffer(sc, txq);
   5632 }
   5633 
   5634 static void
   5635 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5636 {
   5637 
   5638 	KASSERT(WM_RX_LOCKED(rxq));
   5639 
   5640 	/*
   5641 	 * Initialize the receive descriptor and receive job
   5642 	 * descriptor rings.
   5643 	 */
   5644 	if (sc->sc_type < WM_T_82543) {
   5645 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5646 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5647 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5648 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5649 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5650 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5651 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5652 
   5653 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5654 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5655 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5656 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5657 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5658 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5659 	} else {
   5660 		int qid = rxq->rxq_id;
   5661 
   5662 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5663 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5664 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5665 
   5666 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   5667 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5668 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5669 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5670 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
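			/*
			 * SRRCTL.BSIZEPKT is presumably in 1 KB units
			 * (hence the multiple-of-1K check above); with
			 * the usual 2 KB clusters this writes a value
			 * of 2.
			 */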
   5671 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5672 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5673 			    | RXDCTL_WTHRESH(1));
   5674 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5675 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5676 		} else {
   5677 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5678 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
    5679 			/* ITR / 4 (RDTR is in 1.024 us units, ITR in 256 ns units) */
   5680 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5681 			/* MUST be the same value as RDTR */
   5682 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5683 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5684 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5685 		}
   5686 	}
   5687 }
   5688 
   5689 static int
   5690 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5691 {
   5692 	struct wm_rxsoft *rxs;
   5693 	int error, i;
   5694 
   5695 	KASSERT(WM_RX_LOCKED(rxq));
   5696 
   5697 	for (i = 0; i < WM_NRXDESC; i++) {
   5698 		rxs = &rxq->rxq_soft[i];
   5699 		if (rxs->rxs_mbuf == NULL) {
   5700 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5701 				log(LOG_ERR, "%s: unable to allocate or map "
   5702 				    "rx buffer %d, error = %d\n",
   5703 				    device_xname(sc->sc_dev), i, error);
   5704 				/*
   5705 				 * XXX Should attempt to run with fewer receive
   5706 				 * XXX buffers instead of just failing.
   5707 				 */
   5708 				wm_rxdrain(rxq);
   5709 				return ENOMEM;
   5710 			}
   5711 		} else {
   5712 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5713 				wm_init_rxdesc(rxq, i);
   5714 			/*
    5715 			 * For 82575 and newer devices, the RX descriptors
    5716 			 * must be initialized after RCTL.EN is set in
    5717 			 * wm_set_filter().
   5718 			 */
   5719 		}
   5720 	}
   5721 	rxq->rxq_ptr = 0;
   5722 	rxq->rxq_discard = 0;
   5723 	WM_RXCHAIN_RESET(rxq);
   5724 
   5725 	return 0;
   5726 }
   5727 
   5728 static int
   5729 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5730 {
   5731 
   5732 	KASSERT(WM_RX_LOCKED(rxq));
   5733 
   5734 	/*
   5735 	 * Set up some register offsets that are different between
   5736 	 * the i82542 and the i82543 and later chips.
   5737 	 */
   5738 	if (sc->sc_type < WM_T_82543) {
   5739 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5740 	} else {
   5741 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5742 	}
   5743 
   5744 	wm_init_rx_regs(sc, rxq);
   5745 	return wm_init_rx_buffer(sc, rxq);
   5746 }
   5747 
   5748 /*
    5749  * wm_init_txrx_queues:
   5750  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5751  */
   5752 static int
   5753 wm_init_txrx_queues(struct wm_softc *sc)
   5754 {
   5755 	int i, error;
   5756 
   5757 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5758 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5759 		WM_TX_LOCK(txq);
   5760 		wm_init_tx_queue(sc, txq);
   5761 		WM_TX_UNLOCK(txq);
   5762 	}
   5763 
   5764 	error = 0;
   5765 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5766 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5767 		WM_RX_LOCK(rxq);
   5768 		error = wm_init_rx_queue(sc, rxq);
   5769 		WM_RX_UNLOCK(rxq);
   5770 		if (error)
   5771 			break;
   5772 	}
   5773 
   5774 	return error;
   5775 }
   5776 
   5777 /*
   5778  * wm_tx_offload:
   5779  *
   5780  *	Set up TCP/IP checksumming parameters for the
   5781  *	specified packet.
   5782  */
   5783 static int
   5784 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5785     uint8_t *fieldsp)
   5786 {
   5787 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5788 	struct mbuf *m0 = txs->txs_mbuf;
   5789 	struct livengood_tcpip_ctxdesc *t;
   5790 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5791 	uint32_t ipcse;
   5792 	struct ether_header *eh;
   5793 	int offset, iphl;
   5794 	uint8_t fields;
   5795 
   5796 	/*
   5797 	 * XXX It would be nice if the mbuf pkthdr had offset
   5798 	 * fields for the protocol headers.
   5799 	 */
   5800 
   5801 	eh = mtod(m0, struct ether_header *);
   5802 	switch (htons(eh->ether_type)) {
   5803 	case ETHERTYPE_IP:
   5804 	case ETHERTYPE_IPV6:
   5805 		offset = ETHER_HDR_LEN;
   5806 		break;
   5807 
   5808 	case ETHERTYPE_VLAN:
   5809 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5810 		break;
   5811 
   5812 	default:
   5813 		/*
   5814 		 * Don't support this protocol or encapsulation.
   5815 		 */
   5816 		*fieldsp = 0;
   5817 		*cmdp = 0;
   5818 		return 0;
   5819 	}
   5820 
   5821 	if ((m0->m_pkthdr.csum_flags &
   5822 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   5823 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5824 	} else {
   5825 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5826 	}
   5827 	ipcse = offset + iphl - 1;
   5828 
   5829 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5830 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5831 	seg = 0;
   5832 	fields = 0;
   5833 
   5834 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5835 		int hlen = offset + iphl;
   5836 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5837 
   5838 		if (__predict_false(m0->m_len <
   5839 				    (hlen + sizeof(struct tcphdr)))) {
   5840 			/*
   5841 			 * TCP/IP headers are not in the first mbuf; we need
   5842 			 * to do this the slow and painful way.  Let's just
   5843 			 * hope this doesn't happen very often.
   5844 			 */
   5845 			struct tcphdr th;
   5846 
   5847 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5848 
   5849 			m_copydata(m0, hlen, sizeof(th), &th);
   5850 			if (v4) {
   5851 				struct ip ip;
   5852 
   5853 				m_copydata(m0, offset, sizeof(ip), &ip);
   5854 				ip.ip_len = 0;
   5855 				m_copyback(m0,
   5856 				    offset + offsetof(struct ip, ip_len),
   5857 				    sizeof(ip.ip_len), &ip.ip_len);
   5858 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5859 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5860 			} else {
   5861 				struct ip6_hdr ip6;
   5862 
   5863 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5864 				ip6.ip6_plen = 0;
   5865 				m_copyback(m0,
   5866 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5867 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5868 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5869 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5870 			}
   5871 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5872 			    sizeof(th.th_sum), &th.th_sum);
   5873 
   5874 			hlen += th.th_off << 2;
   5875 		} else {
   5876 			/*
   5877 			 * TCP/IP headers are in the first mbuf; we can do
   5878 			 * this the easy way.
   5879 			 */
   5880 			struct tcphdr *th;
   5881 
   5882 			if (v4) {
   5883 				struct ip *ip =
   5884 				    (void *)(mtod(m0, char *) + offset);
   5885 				th = (void *)(mtod(m0, char *) + hlen);
   5886 
   5887 				ip->ip_len = 0;
   5888 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5889 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5890 			} else {
   5891 				struct ip6_hdr *ip6 =
   5892 				    (void *)(mtod(m0, char *) + offset);
   5893 				th = (void *)(mtod(m0, char *) + hlen);
   5894 
   5895 				ip6->ip6_plen = 0;
   5896 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5897 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5898 			}
   5899 			hlen += th->th_off << 2;
   5900 		}
   5901 
   5902 		if (v4) {
   5903 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5904 			cmdlen |= WTX_TCPIP_CMD_IP;
   5905 		} else {
   5906 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   5907 			ipcse = 0;
   5908 		}
   5909 		cmd |= WTX_TCPIP_CMD_TSE;
   5910 		cmdlen |= WTX_TCPIP_CMD_TSE |
   5911 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   5912 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   5913 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   5914 	}
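	/*
	 * What the TSO fixups above accomplish (the usual contract on
	 * these chips): ip_len/ip6_plen are zeroed because the hardware
	 * rewrites them per segment, and th_sum is seeded with only the
	 * pseudo-header sum, without a length, so the hardware can finish
	 * the checksum for each segment it emits.
	 */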
   5915 
   5916 	/*
   5917 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   5918 	 * offload feature, if we load the context descriptor, we
   5919 	 * MUST provide valid values for IPCSS and TUCSS fields.
   5920 	 */
   5921 
   5922 	ipcs = WTX_TCPIP_IPCSS(offset) |
   5923 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   5924 	    WTX_TCPIP_IPCSE(ipcse);
   5925 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   5926 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   5927 		fields |= WTX_IXSM;
   5928 	}
   5929 
   5930 	offset += iphl;
   5931 
   5932 	if (m0->m_pkthdr.csum_flags &
   5933 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   5934 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5935 		fields |= WTX_TXSM;
   5936 		tucs = WTX_TCPIP_TUCSS(offset) |
   5937 		    WTX_TCPIP_TUCSO(offset +
   5938 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   5939 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5940 	} else if ((m0->m_pkthdr.csum_flags &
   5941 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   5942 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5943 		fields |= WTX_TXSM;
   5944 		tucs = WTX_TCPIP_TUCSS(offset) |
   5945 		    WTX_TCPIP_TUCSO(offset +
   5946 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   5947 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5948 	} else {
   5949 		/* Just initialize it to a valid TCP context. */
   5950 		tucs = WTX_TCPIP_TUCSS(offset) |
   5951 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   5952 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5953 	}
   5954 
   5955 	/* Fill in the context descriptor. */
   5956 	t = (struct livengood_tcpip_ctxdesc *)
   5957 	    &txq->txq_descs[txq->txq_next];
   5958 	t->tcpip_ipcs = htole32(ipcs);
   5959 	t->tcpip_tucs = htole32(tucs);
   5960 	t->tcpip_cmdlen = htole32(cmdlen);
   5961 	t->tcpip_seg = htole32(seg);
   5962 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   5963 
   5964 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5965 	txs->txs_ndesc++;
   5966 
   5967 	*cmdp = cmd;
   5968 	*fieldsp = fields;
   5969 
   5970 	return 0;
   5971 }
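/*
 * Worked example for wm_tx_offload(): an untagged IPv4/TCP packet with a
 * 20-byte IP header gives offset = 14 and iphl = 20, hence IPCSS = 14,
 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, IPCSE = 33, TUCSS = 34
 * and TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50, which are the
 * classic on-the-wire header checksum offsets.
 */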
   5972 
   5973 /*
   5974  * wm_start:		[ifnet interface function]
   5975  *
   5976  *	Start packet transmission on the interface.
   5977  */
   5978 static void
   5979 wm_start(struct ifnet *ifp)
   5980 {
   5981 	struct wm_softc *sc = ifp->if_softc;
   5982 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5983 
   5984 	WM_TX_LOCK(txq);
   5985 	if (!sc->sc_stopping)
   5986 		wm_start_locked(ifp);
   5987 	WM_TX_UNLOCK(txq);
   5988 }
   5989 
   5990 static void
   5991 wm_start_locked(struct ifnet *ifp)
   5992 {
   5993 	struct wm_softc *sc = ifp->if_softc;
   5994 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5995 	struct mbuf *m0;
   5996 	struct m_tag *mtag;
   5997 	struct wm_txsoft *txs;
   5998 	bus_dmamap_t dmamap;
   5999 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6000 	bus_addr_t curaddr;
   6001 	bus_size_t seglen, curlen;
   6002 	uint32_t cksumcmd;
   6003 	uint8_t cksumfields;
   6004 
   6005 	KASSERT(WM_TX_LOCKED(txq));
   6006 
   6007 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6008 		return;
   6009 
   6010 	/* Remember the previous number of free descriptors. */
   6011 	ofree = txq->txq_free;
   6012 
   6013 	/*
   6014 	 * Loop through the send queue, setting up transmit descriptors
   6015 	 * until we drain the queue, or use up all available transmit
   6016 	 * descriptors.
   6017 	 */
   6018 	for (;;) {
   6019 		m0 = NULL;
   6020 
   6021 		/* Get a work queue entry. */
   6022 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6023 			wm_txeof(sc);
   6024 			if (txq->txq_sfree == 0) {
   6025 				DPRINTF(WM_DEBUG_TX,
   6026 				    ("%s: TX: no free job descriptors\n",
   6027 					device_xname(sc->sc_dev)));
   6028 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6029 				break;
   6030 			}
   6031 		}
   6032 
   6033 		/* Grab a packet off the queue. */
   6034 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6035 		if (m0 == NULL)
   6036 			break;
   6037 
   6038 		DPRINTF(WM_DEBUG_TX,
   6039 		    ("%s: TX: have packet to transmit: %p\n",
   6040 		    device_xname(sc->sc_dev), m0));
   6041 
   6042 		txs = &txq->txq_soft[txq->txq_snext];
   6043 		dmamap = txs->txs_dmamap;
   6044 
   6045 		use_tso = (m0->m_pkthdr.csum_flags &
   6046 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6047 
   6048 		/*
   6049 		 * So says the Linux driver:
   6050 		 * The controller does a simple calculation to make sure
   6051 		 * there is enough room in the FIFO before initiating the
   6052 		 * DMA for each buffer.  The calc is:
   6053 		 *	4 = ceil(buffer len / MSS)
   6054 		 * To make sure we don't overrun the FIFO, adjust the max
   6055 		 * buffer len if the MSS drops.
   6056 		 */
   6057 		dmamap->dm_maxsegsz =
   6058 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6059 		    ? m0->m_pkthdr.segsz << 2
   6060 		    : WTX_MAX_LEN;
   6061 
   6062 		/*
   6063 		 * Load the DMA map.  If this fails, the packet either
   6064 		 * didn't fit in the allotted number of segments, or we
   6065 		 * were short on resources.  For the too-many-segments
   6066 		 * case, we simply report an error and drop the packet,
   6067 		 * since we can't sanely copy a jumbo packet to a single
   6068 		 * buffer.
   6069 		 */
   6070 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6071 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6072 		if (error) {
   6073 			if (error == EFBIG) {
   6074 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6075 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6076 				    "DMA segments, dropping...\n",
   6077 				    device_xname(sc->sc_dev));
   6078 				wm_dump_mbuf_chain(sc, m0);
   6079 				m_freem(m0);
   6080 				continue;
   6081 			}
   6082 			/*  Short on resources, just stop for now. */
   6083 			DPRINTF(WM_DEBUG_TX,
   6084 			    ("%s: TX: dmamap load failed: %d\n",
   6085 			    device_xname(sc->sc_dev), error));
   6086 			break;
   6087 		}
   6088 
   6089 		segs_needed = dmamap->dm_nsegs;
   6090 		if (use_tso) {
   6091 			/* For sentinel descriptor; see below. */
   6092 			segs_needed++;
   6093 		}
   6094 
   6095 		/*
   6096 		 * Ensure we have enough descriptors free to describe
   6097 		 * the packet.  Note, we always reserve one descriptor
   6098 		 * at the end of the ring due to the semantics of the
   6099 		 * TDT register, plus one more in the event we need
   6100 		 * to load offload context.
   6101 		 */
   6102 		if (segs_needed > txq->txq_free - 2) {
   6103 			/*
   6104 			 * Not enough free descriptors to transmit this
   6105 			 * packet.  We haven't committed anything yet,
   6106 			 * so just unload the DMA map, put the packet
    6107 			 * back on the queue, and punt.  Notify the upper
   6108 			 * layer that there are no more slots left.
   6109 			 */
   6110 			DPRINTF(WM_DEBUG_TX,
   6111 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6112 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6113 			    segs_needed, txq->txq_free - 1));
   6114 			ifp->if_flags |= IFF_OACTIVE;
   6115 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6116 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6117 			break;
   6118 		}
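		/*
		 * (The permanent one-descriptor reserve exists because
		 * TDT == TDH is how the chip recognizes an empty ring;
		 * if every slot were filled, "full" would be
		 * indistinguishable from "empty".)
		 */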
   6119 
   6120 		/*
   6121 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6122 		 * once we know we can transmit the packet, since we
   6123 		 * do some internal FIFO space accounting here.
   6124 		 */
   6125 		if (sc->sc_type == WM_T_82547 &&
   6126 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6127 			DPRINTF(WM_DEBUG_TX,
   6128 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6129 			    device_xname(sc->sc_dev)));
   6130 			ifp->if_flags |= IFF_OACTIVE;
   6131 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6132 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6133 			break;
   6134 		}
   6135 
   6136 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6137 
   6138 		DPRINTF(WM_DEBUG_TX,
   6139 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6140 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6141 
   6142 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6143 
   6144 		/*
   6145 		 * Store a pointer to the packet so that we can free it
   6146 		 * later.
   6147 		 *
    6148 		 * Initially, we consider the number of descriptors the
    6149 		 * packet uses to be the number of DMA segments.  This may
    6150 		 * be incremented by 1 if we do checksum offload (a
    6151 		 * descriptor is used to set the checksum context).
   6152 		 */
   6153 		txs->txs_mbuf = m0;
   6154 		txs->txs_firstdesc = txq->txq_next;
   6155 		txs->txs_ndesc = segs_needed;
   6156 
   6157 		/* Set up offload parameters for this packet. */
   6158 		if (m0->m_pkthdr.csum_flags &
   6159 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6160 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6161 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6162 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6163 					  &cksumfields) != 0) {
   6164 				/* Error message already displayed. */
   6165 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6166 				continue;
   6167 			}
   6168 		} else {
   6169 			cksumcmd = 0;
   6170 			cksumfields = 0;
   6171 		}
   6172 
   6173 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6174 
   6175 		/* Sync the DMA map. */
   6176 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6177 		    BUS_DMASYNC_PREWRITE);
   6178 
   6179 		/* Initialize the transmit descriptor. */
   6180 		for (nexttx = txq->txq_next, seg = 0;
   6181 		     seg < dmamap->dm_nsegs; seg++) {
   6182 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6183 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6184 			     seglen != 0;
   6185 			     curaddr += curlen, seglen -= curlen,
   6186 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6187 				curlen = seglen;
   6188 
   6189 				/*
   6190 				 * So says the Linux driver:
   6191 				 * Work around for premature descriptor
   6192 				 * write-backs in TSO mode.  Append a
   6193 				 * 4-byte sentinel descriptor.
   6194 				 */
   6195 				if (use_tso &&
   6196 				    seg == dmamap->dm_nsegs - 1 &&
   6197 				    curlen > 8)
   6198 					curlen -= 4;
   6199 
   6200 				wm_set_dma_addr(
   6201 				    &txq->txq_descs[nexttx].wtx_addr,
   6202 				    curaddr);
   6203 				txq->txq_descs[nexttx].wtx_cmdlen =
   6204 				    htole32(cksumcmd | curlen);
   6205 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
   6206 				    0;
   6207 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
   6208 				    cksumfields;
   6209 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6210 				lasttx = nexttx;
   6211 
   6212 				DPRINTF(WM_DEBUG_TX,
   6213 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6214 				     "len %#04zx\n",
   6215 				    device_xname(sc->sc_dev), nexttx,
   6216 				    (uint64_t)curaddr, curlen));
   6217 			}
   6218 		}
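		/*
		 * (With TSO, shaving 4 bytes off the final segment above
		 * leaves seglen nonzero, so the inner loop runs one more
		 * time and emits a trailing 4-byte descriptor: that is
		 * the sentinel the comment refers to.)
		 */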
   6219 
   6220 		KASSERT(lasttx != -1);
   6221 
   6222 		/*
   6223 		 * Set up the command byte on the last descriptor of
   6224 		 * the packet.  If we're in the interrupt delay window,
   6225 		 * delay the interrupt.
   6226 		 */
   6227 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6228 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6229 
   6230 		/*
   6231 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6232 		 * up the descriptor to encapsulate the packet for us.
   6233 		 *
   6234 		 * This is only valid on the last descriptor of the packet.
   6235 		 */
   6236 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6237 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6238 			    htole32(WTX_CMD_VLE);
   6239 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6240 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6241 		}
   6242 
   6243 		txs->txs_lastdesc = lasttx;
   6244 
   6245 		DPRINTF(WM_DEBUG_TX,
   6246 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6247 		    device_xname(sc->sc_dev),
   6248 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6249 
   6250 		/* Sync the descriptors we're using. */
   6251 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6252 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6253 
   6254 		/* Give the packet to the chip. */
   6255 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6256 
   6257 		DPRINTF(WM_DEBUG_TX,
   6258 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6259 
   6260 		DPRINTF(WM_DEBUG_TX,
   6261 		    ("%s: TX: finished transmitting packet, job %d\n",
   6262 		    device_xname(sc->sc_dev), txq->txq_snext));
   6263 
   6264 		/* Advance the tx pointer. */
   6265 		txq->txq_free -= txs->txs_ndesc;
   6266 		txq->txq_next = nexttx;
   6267 
   6268 		txq->txq_sfree--;
   6269 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6270 
   6271 		/* Pass the packet to any BPF listeners. */
   6272 		bpf_mtap(ifp, m0);
   6273 	}
   6274 
   6275 	if (m0 != NULL) {
   6276 		ifp->if_flags |= IFF_OACTIVE;
   6277 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6278 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6279 		m_freem(m0);
   6280 	}
   6281 
   6282 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6283 		/* No more slots; notify upper layer. */
   6284 		ifp->if_flags |= IFF_OACTIVE;
   6285 	}
   6286 
   6287 	if (txq->txq_free != ofree) {
   6288 		/* Set a watchdog timer in case the chip flakes out. */
   6289 		ifp->if_timer = 5;
   6290 	}
   6291 }
   6292 
   6293 /*
   6294  * wm_nq_tx_offload:
   6295  *
   6296  *	Set up TCP/IP checksumming parameters for the
   6297  *	specified packet, for NEWQUEUE devices
   6298  */
   6299 static int
   6300 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6301     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6302 {
   6303 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6304 	struct mbuf *m0 = txs->txs_mbuf;
   6305 	struct m_tag *mtag;
   6306 	uint32_t vl_len, mssidx, cmdc;
   6307 	struct ether_header *eh;
   6308 	int offset, iphl;
   6309 
   6310 	/*
   6311 	 * XXX It would be nice if the mbuf pkthdr had offset
   6312 	 * fields for the protocol headers.
   6313 	 */
   6314 	*cmdlenp = 0;
   6315 	*fieldsp = 0;
   6316 
   6317 	eh = mtod(m0, struct ether_header *);
   6318 	switch (htons(eh->ether_type)) {
   6319 	case ETHERTYPE_IP:
   6320 	case ETHERTYPE_IPV6:
   6321 		offset = ETHER_HDR_LEN;
   6322 		break;
   6323 
   6324 	case ETHERTYPE_VLAN:
   6325 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6326 		break;
   6327 
   6328 	default:
   6329 		/* Don't support this protocol or encapsulation. */
   6330 		*do_csum = false;
   6331 		return 0;
   6332 	}
   6333 	*do_csum = true;
   6334 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6335 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6336 
   6337 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6338 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6339 
   6340 	if ((m0->m_pkthdr.csum_flags &
   6341 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   6342 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6343 	} else {
   6344 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6345 	}
   6346 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6347 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6348 
   6349 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6350 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6351 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6352 		*cmdlenp |= NQTX_CMD_VLE;
   6353 	}
   6354 
   6355 	mssidx = 0;
   6356 
   6357 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6358 		int hlen = offset + iphl;
   6359 		int tcp_hlen;
   6360 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6361 
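         		/*
         		 * For hardware TSO the IP total length is zeroed and
         		 * the TCP checksum field is seeded with the pseudo-
         		 * header checksum (computed without the length), so
         		 * the chip can fill in per-segment values as it
         		 * splits up the payload.
         		 */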
   6362 		if (__predict_false(m0->m_len <
   6363 				    (hlen + sizeof(struct tcphdr)))) {
   6364 			/*
   6365 			 * TCP/IP headers are not in the first mbuf; we need
   6366 			 * to do this the slow and painful way.  Let's just
   6367 			 * hope this doesn't happen very often.
   6368 			 */
   6369 			struct tcphdr th;
   6370 
   6371 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6372 
   6373 			m_copydata(m0, hlen, sizeof(th), &th);
   6374 			if (v4) {
   6375 				struct ip ip;
   6376 
   6377 				m_copydata(m0, offset, sizeof(ip), &ip);
   6378 				ip.ip_len = 0;
   6379 				m_copyback(m0,
   6380 				    offset + offsetof(struct ip, ip_len),
   6381 				    sizeof(ip.ip_len), &ip.ip_len);
   6382 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6383 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6384 			} else {
   6385 				struct ip6_hdr ip6;
   6386 
   6387 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6388 				ip6.ip6_plen = 0;
   6389 				m_copyback(m0,
   6390 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6391 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6392 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6393 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6394 			}
   6395 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6396 			    sizeof(th.th_sum), &th.th_sum);
   6397 
   6398 			tcp_hlen = th.th_off << 2;
   6399 		} else {
   6400 			/*
   6401 			 * TCP/IP headers are in the first mbuf; we can do
   6402 			 * this the easy way.
   6403 			 */
   6404 			struct tcphdr *th;
   6405 
   6406 			if (v4) {
   6407 				struct ip *ip =
   6408 				    (void *)(mtod(m0, char *) + offset);
   6409 				th = (void *)(mtod(m0, char *) + hlen);
   6410 
   6411 				ip->ip_len = 0;
   6412 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6413 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6414 			} else {
   6415 				struct ip6_hdr *ip6 =
   6416 				    (void *)(mtod(m0, char *) + offset);
   6417 				th = (void *)(mtod(m0, char *) + hlen);
   6418 
   6419 				ip6->ip6_plen = 0;
   6420 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6421 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6422 			}
   6423 			tcp_hlen = th->th_off << 2;
   6424 		}
   6425 		hlen += tcp_hlen;
   6426 		*cmdlenp |= NQTX_CMD_TSE;
   6427 
   6428 		if (v4) {
   6429 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6430 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6431 		} else {
   6432 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6433 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6434 		}
   6435 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6436 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6437 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6438 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6439 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6440 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6441 	} else {
   6442 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6443 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6444 	}
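         	/*
         	 * Note: above, PAYLEN counts only the TCP payload for TSO,
         	 * but the full packet length otherwise.
         	 */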
   6445 
   6446 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6447 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6448 		cmdc |= NQTXC_CMD_IP4;
   6449 	}
   6450 
   6451 	if (m0->m_pkthdr.csum_flags &
   6452 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6453 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6454 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6455 			cmdc |= NQTXC_CMD_TCP;
   6456 		} else {
   6457 			cmdc |= NQTXC_CMD_UDP;
   6458 		}
   6459 		cmdc |= NQTXC_CMD_IP4;
   6460 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6461 	}
   6462 	if (m0->m_pkthdr.csum_flags &
   6463 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6464 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6465 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6466 			cmdc |= NQTXC_CMD_TCP;
   6467 		} else {
   6468 			cmdc |= NQTXC_CMD_UDP;
   6469 		}
   6470 		cmdc |= NQTXC_CMD_IP6;
   6471 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6472 	}
   6473 
   6474 	/* Fill in the context descriptor. */
   6475 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6476 	    htole32(vl_len);
   6477 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6478 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6479 	    htole32(cmdc);
   6480 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6481 	    htole32(mssidx);
   6482 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6483 	DPRINTF(WM_DEBUG_TX,
   6484 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6485 	    txq->txq_next, 0, vl_len));
   6486 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6487 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6488 	txs->txs_ndesc++;
   6489 	return 0;
   6490 }
   6491 
   6492 /*
   6493  * wm_nq_start:		[ifnet interface function]
   6494  *
   6495  *	Start packet transmission on the interface for NEWQUEUE devices
   6496  */
   6497 static void
   6498 wm_nq_start(struct ifnet *ifp)
   6499 {
   6500 	struct wm_softc *sc = ifp->if_softc;
   6501 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6502 
   6503 	WM_TX_LOCK(txq);
   6504 	if (!sc->sc_stopping)
   6505 		wm_nq_start_locked(ifp);
   6506 	WM_TX_UNLOCK(txq);
   6507 }
   6508 
   6509 static void
   6510 wm_nq_start_locked(struct ifnet *ifp)
   6511 {
   6512 	struct wm_softc *sc = ifp->if_softc;
   6513 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6514 	struct mbuf *m0;
   6515 	struct m_tag *mtag;
   6516 	struct wm_txsoft *txs;
   6517 	bus_dmamap_t dmamap;
   6518 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6519 	bool do_csum, sent;
   6520 
   6521 	KASSERT(WM_TX_LOCKED(txq));
   6522 
   6523 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6524 		return;
   6525 
   6526 	sent = false;
   6527 
   6528 	/*
   6529 	 * Loop through the send queue, setting up transmit descriptors
   6530 	 * until we drain the queue, or use up all available transmit
   6531 	 * descriptors.
   6532 	 */
   6533 	for (;;) {
   6534 		m0 = NULL;
   6535 
   6536 		/* Get a work queue entry. */
   6537 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6538 			wm_txeof(sc);
   6539 			if (txq->txq_sfree == 0) {
   6540 				DPRINTF(WM_DEBUG_TX,
   6541 				    ("%s: TX: no free job descriptors\n",
   6542 					device_xname(sc->sc_dev)));
   6543 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6544 				break;
   6545 			}
   6546 		}
   6547 
   6548 		/* Grab a packet off the queue. */
   6549 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6550 		if (m0 == NULL)
   6551 			break;
   6552 
   6553 		DPRINTF(WM_DEBUG_TX,
   6554 		    ("%s: TX: have packet to transmit: %p\n",
   6555 		    device_xname(sc->sc_dev), m0));
   6556 
   6557 		txs = &txq->txq_soft[txq->txq_snext];
   6558 		dmamap = txs->txs_dmamap;
   6559 
   6560 		/*
   6561 		 * Load the DMA map.  If this fails, the packet either
   6562 		 * didn't fit in the allotted number of segments, or we
   6563 		 * were short on resources.  For the too-many-segments
   6564 		 * case, we simply report an error and drop the packet,
   6565 		 * since we can't sanely copy a jumbo packet to a single
   6566 		 * buffer.
   6567 		 */
   6568 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6569 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6570 		if (error) {
   6571 			if (error == EFBIG) {
   6572 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6573 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6574 				    "DMA segments, dropping...\n",
   6575 				    device_xname(sc->sc_dev));
   6576 				wm_dump_mbuf_chain(sc, m0);
   6577 				m_freem(m0);
   6578 				continue;
   6579 			}
   6580 			/* Short on resources, just stop for now. */
   6581 			DPRINTF(WM_DEBUG_TX,
   6582 			    ("%s: TX: dmamap load failed: %d\n",
   6583 			    device_xname(sc->sc_dev), error));
   6584 			break;
   6585 		}
   6586 
   6587 		segs_needed = dmamap->dm_nsegs;
   6588 
   6589 		/*
   6590 		 * Ensure we have enough descriptors free to describe
   6591 		 * the packet.  Note, we always reserve one descriptor
   6592 		 * at the end of the ring due to the semantics of the
   6593 		 * TDT register, plus one more in the event we need
   6594 		 * to load offload context.
   6595 		 */
   6596 		if (segs_needed > txq->txq_free - 2) {
   6597 			/*
   6598 			 * Not enough free descriptors to transmit this
   6599 			 * packet.  We haven't committed anything yet,
   6600 			 * so just unload the DMA map, put the packet
   6601 			 * pack on the queue, and punt.  Notify the upper
    6602 			 * back on the queue, and punt.  Notify the upper
   6603 			 */
   6604 			DPRINTF(WM_DEBUG_TX,
   6605 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6606 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6607 			    segs_needed, txq->txq_free - 1));
   6608 			ifp->if_flags |= IFF_OACTIVE;
   6609 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6610 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6611 			break;
   6612 		}
   6613 
   6614 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6615 
   6616 		DPRINTF(WM_DEBUG_TX,
   6617 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6618 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6619 
   6620 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6621 
   6622 		/*
   6623 		 * Store a pointer to the packet so that we can free it
   6624 		 * later.
   6625 		 *
   6626 		 * Initially, we consider the number of descriptors the
    6627 		 * packet uses to be the number of DMA segments.  This may be
   6628 		 * incremented by 1 if we do checksum offload (a descriptor
   6629 		 * is used to set the checksum context).
   6630 		 */
   6631 		txs->txs_mbuf = m0;
   6632 		txs->txs_firstdesc = txq->txq_next;
   6633 		txs->txs_ndesc = segs_needed;
   6634 
   6635 		/* Set up offload parameters for this packet. */
   6636 		uint32_t cmdlen, fields, dcmdlen;
   6637 		if (m0->m_pkthdr.csum_flags &
   6638 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6639 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6640 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6641 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6642 			    &do_csum) != 0) {
   6643 				/* Error message already displayed. */
   6644 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6645 				continue;
   6646 			}
   6647 		} else {
   6648 			do_csum = false;
   6649 			cmdlen = 0;
   6650 			fields = 0;
   6651 		}
   6652 
   6653 		/* Sync the DMA map. */
   6654 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6655 		    BUS_DMASYNC_PREWRITE);
   6656 
   6657 		/* Initialize the first transmit descriptor. */
   6658 		nexttx = txq->txq_next;
   6659 		if (!do_csum) {
   6660 			/* setup a legacy descriptor */
   6661 			wm_set_dma_addr(
   6662 			    &txq->txq_descs[nexttx].wtx_addr,
   6663 			    dmamap->dm_segs[0].ds_addr);
   6664 			txq->txq_descs[nexttx].wtx_cmdlen =
   6665 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6666 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6667 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6668 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6669 			    NULL) {
   6670 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6671 				    htole32(WTX_CMD_VLE);
   6672 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6673 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6674 			} else {
    6675 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6676 			}
   6677 			dcmdlen = 0;
   6678 		} else {
   6679 			/* setup an advanced data descriptor */
   6680 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6681 			    htole64(dmamap->dm_segs[0].ds_addr);
   6682 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6683 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
    6684 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
   6685 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6686 			    htole32(fields);
   6687 			DPRINTF(WM_DEBUG_TX,
   6688 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6689 			    device_xname(sc->sc_dev), nexttx,
   6690 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6691 			DPRINTF(WM_DEBUG_TX,
   6692 			    ("\t 0x%08x%08x\n", fields,
   6693 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6694 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6695 		}
   6696 
   6697 		lasttx = nexttx;
   6698 		nexttx = WM_NEXTTX(txq, nexttx);
    6699 		/*
    6700 		 * Fill in the next descriptors.  The layout is the same
    6701 		 * here for the legacy and advanced formats.
    6702 		 */
   6703 		for (seg = 1; seg < dmamap->dm_nsegs;
   6704 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6705 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6706 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6707 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6708 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6709 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6710 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6711 			lasttx = nexttx;
   6712 
   6713 			DPRINTF(WM_DEBUG_TX,
   6714 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6715 			     "len %#04zx\n",
   6716 			    device_xname(sc->sc_dev), nexttx,
   6717 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6718 			    dmamap->dm_segs[seg].ds_len));
   6719 		}
   6720 
   6721 		KASSERT(lasttx != -1);
   6722 
   6723 		/*
   6724 		 * Set up the command byte on the last descriptor of
   6725 		 * the packet.  If we're in the interrupt delay window,
   6726 		 * delay the interrupt.
   6727 		 */
   6728 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6729 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
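         		/*
         		 * WTX_CMD_RS asks the chip to write back descriptor
         		 * status; wm_txeof() polls the resulting DD bit to
         		 * find completed jobs.
         		 */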
   6730 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6731 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6732 
   6733 		txs->txs_lastdesc = lasttx;
   6734 
   6735 		DPRINTF(WM_DEBUG_TX,
   6736 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6737 		    device_xname(sc->sc_dev),
   6738 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6739 
   6740 		/* Sync the descriptors we're using. */
   6741 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6742 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6743 
   6744 		/* Give the packet to the chip. */
   6745 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6746 		sent = true;
   6747 
   6748 		DPRINTF(WM_DEBUG_TX,
   6749 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6750 
   6751 		DPRINTF(WM_DEBUG_TX,
   6752 		    ("%s: TX: finished transmitting packet, job %d\n",
   6753 		    device_xname(sc->sc_dev), txq->txq_snext));
   6754 
   6755 		/* Advance the tx pointer. */
   6756 		txq->txq_free -= txs->txs_ndesc;
   6757 		txq->txq_next = nexttx;
   6758 
   6759 		txq->txq_sfree--;
   6760 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6761 
   6762 		/* Pass the packet to any BPF listeners. */
   6763 		bpf_mtap(ifp, m0);
   6764 	}
   6765 
   6766 	if (m0 != NULL) {
   6767 		ifp->if_flags |= IFF_OACTIVE;
   6768 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6769 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6770 		m_freem(m0);
   6771 	}
   6772 
   6773 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6774 		/* No more slots; notify upper layer. */
   6775 		ifp->if_flags |= IFF_OACTIVE;
   6776 	}
   6777 
   6778 	if (sent) {
   6779 		/* Set a watchdog timer in case the chip flakes out. */
   6780 		ifp->if_timer = 5;
   6781 	}
   6782 }
   6783 
   6784 /* Interrupt */
   6785 
   6786 /*
   6787  * wm_txeof:
   6788  *
   6789  *	Helper; handle transmit interrupts.
   6790  */
   6791 static int
   6792 wm_txeof(struct wm_softc *sc)
   6793 {
   6794 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6795 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6796 	struct wm_txsoft *txs;
   6797 	bool processed = false;
   6798 	int count = 0;
   6799 	int i;
   6800 	uint8_t status;
   6801 
   6802 	if (sc->sc_stopping)
   6803 		return 0;
   6804 
   6805 	ifp->if_flags &= ~IFF_OACTIVE;
   6806 
   6807 	/*
   6808 	 * Go through the Tx list and free mbufs for those
   6809 	 * frames which have been transmitted.
   6810 	 */
   6811 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6812 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6813 		txs = &txq->txq_soft[i];
   6814 
   6815 		DPRINTF(WM_DEBUG_TX,
   6816 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   6817 
   6818 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6819 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6820 
   6821 		status =
   6822 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6823 		if ((status & WTX_ST_DD) == 0) {
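         			/*
         			 * This job isn't done yet; re-sync the
         			 * descriptor for a later read and stop
         			 * scanning.
         			 */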
   6824 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6825 			    BUS_DMASYNC_PREREAD);
   6826 			break;
   6827 		}
   6828 
   6829 		processed = true;
   6830 		count++;
   6831 		DPRINTF(WM_DEBUG_TX,
   6832 		    ("%s: TX: job %d done: descs %d..%d\n",
   6833 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6834 		    txs->txs_lastdesc));
   6835 
   6836 		/*
   6837 		 * XXX We should probably be using the statistics
   6838 		 * XXX registers, but I don't know if they exist
   6839 		 * XXX on chips before the i82544.
   6840 		 */
   6841 
   6842 #ifdef WM_EVENT_COUNTERS
   6843 		if (status & WTX_ST_TU)
   6844 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6845 #endif /* WM_EVENT_COUNTERS */
   6846 
   6847 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   6848 			ifp->if_oerrors++;
   6849 			if (status & WTX_ST_LC)
   6850 				log(LOG_WARNING, "%s: late collision\n",
   6851 				    device_xname(sc->sc_dev));
   6852 			else if (status & WTX_ST_EC) {
   6853 				ifp->if_collisions += 16;
   6854 				log(LOG_WARNING, "%s: excessive collisions\n",
   6855 				    device_xname(sc->sc_dev));
   6856 			}
   6857 		} else
   6858 			ifp->if_opackets++;
   6859 
   6860 		txq->txq_free += txs->txs_ndesc;
   6861 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6862 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6863 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6864 		m_freem(txs->txs_mbuf);
   6865 		txs->txs_mbuf = NULL;
   6866 	}
   6867 
   6868 	/* Update the dirty transmit buffer pointer. */
   6869 	txq->txq_sdirty = i;
   6870 	DPRINTF(WM_DEBUG_TX,
   6871 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6872 
   6873 	if (count != 0)
   6874 		rnd_add_uint32(&sc->rnd_source, count);
   6875 
   6876 	/*
   6877 	 * If there are no more pending transmissions, cancel the watchdog
   6878 	 * timer.
   6879 	 */
   6880 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6881 		ifp->if_timer = 0;
   6882 
   6883 	return processed;
   6884 }
   6885 
   6886 /*
   6887  * wm_rxeof:
   6888  *
   6889  *	Helper; handle receive interrupts.
   6890  */
   6891 static void
   6892 wm_rxeof(struct wm_rxqueue *rxq)
   6893 {
   6894 	struct wm_softc *sc = rxq->rxq_sc;
   6895 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6896 	struct wm_rxsoft *rxs;
   6897 	struct mbuf *m;
   6898 	int i, len;
   6899 	int count = 0;
   6900 	uint8_t status, errors;
   6901 	uint16_t vlantag;
   6902 
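         	/*
         	 * Walk the ring from the last processed descriptor until we
         	 * reach one the chip has not completed (DD bit clear).
         	 */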
   6903 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6904 		rxs = &rxq->rxq_soft[i];
   6905 
   6906 		DPRINTF(WM_DEBUG_RX,
   6907 		    ("%s: RX: checking descriptor %d\n",
   6908 		    device_xname(sc->sc_dev), i));
   6909 
   6910 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6911 
   6912 		status = rxq->rxq_descs[i].wrx_status;
   6913 		errors = rxq->rxq_descs[i].wrx_errors;
   6914 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6915 		vlantag = rxq->rxq_descs[i].wrx_special;
   6916 
   6917 		if ((status & WRX_ST_DD) == 0) {
   6918 			/* We have processed all of the receive descriptors. */
   6919 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   6920 			break;
   6921 		}
   6922 
   6923 		count++;
   6924 		if (__predict_false(rxq->rxq_discard)) {
   6925 			DPRINTF(WM_DEBUG_RX,
   6926 			    ("%s: RX: discarding contents of descriptor %d\n",
   6927 			    device_xname(sc->sc_dev), i));
   6928 			wm_init_rxdesc(rxq, i);
   6929 			if (status & WRX_ST_EOP) {
   6930 				/* Reset our state. */
   6931 				DPRINTF(WM_DEBUG_RX,
   6932 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6933 				    device_xname(sc->sc_dev)));
   6934 				rxq->rxq_discard = 0;
   6935 			}
   6936 			continue;
   6937 		}
   6938 
   6939 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6940 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6941 
   6942 		m = rxs->rxs_mbuf;
   6943 
   6944 		/*
   6945 		 * Add a new receive buffer to the ring, unless of
   6946 		 * course the length is zero. Treat the latter as a
   6947 		 * failed mapping.
   6948 		 */
   6949 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   6950 			/*
   6951 			 * Failed, throw away what we've done so
   6952 			 * far, and discard the rest of the packet.
   6953 			 */
   6954 			ifp->if_ierrors++;
   6955 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6956 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6957 			wm_init_rxdesc(rxq, i);
   6958 			if ((status & WRX_ST_EOP) == 0)
   6959 				rxq->rxq_discard = 1;
   6960 			if (rxq->rxq_head != NULL)
   6961 				m_freem(rxq->rxq_head);
   6962 			WM_RXCHAIN_RESET(rxq);
   6963 			DPRINTF(WM_DEBUG_RX,
   6964 			    ("%s: RX: Rx buffer allocation failed, "
   6965 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   6966 			    rxq->rxq_discard ? " (discard)" : ""));
   6967 			continue;
   6968 		}
   6969 
   6970 		m->m_len = len;
   6971 		rxq->rxq_len += len;
   6972 		DPRINTF(WM_DEBUG_RX,
   6973 		    ("%s: RX: buffer at %p len %d\n",
   6974 		    device_xname(sc->sc_dev), m->m_data, len));
   6975 
   6976 		/* If this is not the end of the packet, keep looking. */
   6977 		if ((status & WRX_ST_EOP) == 0) {
   6978 			WM_RXCHAIN_LINK(rxq, m);
   6979 			DPRINTF(WM_DEBUG_RX,
   6980 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   6981 			    device_xname(sc->sc_dev), rxq->rxq_len));
   6982 			continue;
   6983 		}
   6984 
    6985 		/*
    6986 		 * Okay, we have the entire packet now.  The chip
    6987 		 * includes the FCS except on I350, I354 and I21[01]
    6988 		 * (not all chips can be configured to strip it), so
    6989 		 * we need to trim it.  We may also need to adjust the
    6990 		 * length of the previous mbuf in the chain if the
    6991 		 * current mbuf is too short.  Due to an erratum, the
    6992 		 * RCTL_SECRC bit is always set on I350, so the FCS is
    6993 		 * already stripped there and we don't trim it.
    6994 		 */
   6995 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   6996 		    && (sc->sc_type != WM_T_I210)
   6997 		    && (sc->sc_type != WM_T_I211)) {
   6998 			if (m->m_len < ETHER_CRC_LEN) {
   6999 				rxq->rxq_tail->m_len
   7000 				    -= (ETHER_CRC_LEN - m->m_len);
   7001 				m->m_len = 0;
   7002 			} else
   7003 				m->m_len -= ETHER_CRC_LEN;
   7004 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7005 		} else
   7006 			len = rxq->rxq_len;
   7007 
   7008 		WM_RXCHAIN_LINK(rxq, m);
   7009 
   7010 		*rxq->rxq_tailp = NULL;
   7011 		m = rxq->rxq_head;
   7012 
   7013 		WM_RXCHAIN_RESET(rxq);
   7014 
   7015 		DPRINTF(WM_DEBUG_RX,
   7016 		    ("%s: RX: have entire packet, len -> %d\n",
   7017 		    device_xname(sc->sc_dev), len));
   7018 
   7019 		/* If an error occurred, update stats and drop the packet. */
   7020 		if (errors &
   7021 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7022 			if (errors & WRX_ER_SE)
   7023 				log(LOG_WARNING, "%s: symbol error\n",
   7024 				    device_xname(sc->sc_dev));
   7025 			else if (errors & WRX_ER_SEQ)
   7026 				log(LOG_WARNING, "%s: receive sequence error\n",
   7027 				    device_xname(sc->sc_dev));
   7028 			else if (errors & WRX_ER_CE)
   7029 				log(LOG_WARNING, "%s: CRC error\n",
   7030 				    device_xname(sc->sc_dev));
   7031 			m_freem(m);
   7032 			continue;
   7033 		}
   7034 
   7035 		/* No errors.  Receive the packet. */
   7036 		m->m_pkthdr.rcvif = ifp;
   7037 		m->m_pkthdr.len = len;
   7038 
   7039 		/*
   7040 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7041 		 * for us.  Associate the tag with the packet.
   7042 		 */
    7043 		/* XXX should check for I350 and I354 */
   7044 		if ((status & WRX_ST_VP) != 0) {
   7045 			VLAN_INPUT_TAG(ifp, m,
   7046 			    le16toh(vlantag),
   7047 			    continue);
   7048 		}
   7049 
   7050 		/* Set up checksum info for this packet. */
   7051 		if ((status & WRX_ST_IXSM) == 0) {
   7052 			if (status & WRX_ST_IPCS) {
   7053 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7054 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7055 				if (errors & WRX_ER_IPE)
   7056 					m->m_pkthdr.csum_flags |=
   7057 					    M_CSUM_IPv4_BAD;
   7058 			}
   7059 			if (status & WRX_ST_TCPCS) {
   7060 				/*
   7061 				 * Note: we don't know if this was TCP or UDP,
   7062 				 * so we just set both bits, and expect the
   7063 				 * upper layers to deal.
   7064 				 */
   7065 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7066 				m->m_pkthdr.csum_flags |=
   7067 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7068 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7069 				if (errors & WRX_ER_TCPE)
   7070 					m->m_pkthdr.csum_flags |=
   7071 					    M_CSUM_TCP_UDP_BAD;
   7072 			}
   7073 		}
   7074 
   7075 		ifp->if_ipackets++;
   7076 
   7077 		WM_RX_UNLOCK(rxq);
   7078 
   7079 		/* Pass this up to any BPF listeners. */
   7080 		bpf_mtap(ifp, m);
   7081 
   7082 		/* Pass it on. */
   7083 		(*ifp->if_input)(ifp, m);
   7084 
   7085 		WM_RX_LOCK(rxq);
   7086 
   7087 		if (sc->sc_stopping)
   7088 			break;
   7089 	}
   7090 
   7091 	/* Update the receive pointer. */
   7092 	rxq->rxq_ptr = i;
   7093 	if (count != 0)
   7094 		rnd_add_uint32(&sc->rnd_source, count);
   7095 
   7096 	DPRINTF(WM_DEBUG_RX,
   7097 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7098 }
   7099 
   7100 /*
   7101  * wm_linkintr_gmii:
   7102  *
   7103  *	Helper; handle link interrupts for GMII.
   7104  */
   7105 static void
   7106 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7107 {
   7108 
   7109 	KASSERT(WM_CORE_LOCKED(sc));
   7110 
   7111 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7112 		__func__));
   7113 
   7114 	if (icr & ICR_LSC) {
   7115 		DPRINTF(WM_DEBUG_LINK,
   7116 		    ("%s: LINK: LSC -> mii_pollstat\n",
   7117 			device_xname(sc->sc_dev)));
   7118 		mii_pollstat(&sc->sc_mii);
   7119 		if (sc->sc_type == WM_T_82543) {
   7120 			int miistatus, active;
   7121 
   7122 			/*
   7123 			 * With 82543, we need to force speed and
   7124 			 * duplex on the MAC equal to what the PHY
   7125 			 * speed and duplex configuration is.
   7126 			 */
   7127 			miistatus = sc->sc_mii.mii_media_status;
   7128 
   7129 			if (miistatus & IFM_ACTIVE) {
   7130 				active = sc->sc_mii.mii_media_active;
   7131 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7132 				switch (IFM_SUBTYPE(active)) {
   7133 				case IFM_10_T:
   7134 					sc->sc_ctrl |= CTRL_SPEED_10;
   7135 					break;
   7136 				case IFM_100_TX:
   7137 					sc->sc_ctrl |= CTRL_SPEED_100;
   7138 					break;
   7139 				case IFM_1000_T:
   7140 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7141 					break;
   7142 				default:
   7143 					/*
   7144 					 * fiber?
    7145 					 * Should not enter here.
   7146 					 */
   7147 					printf("unknown media (%x)\n",
   7148 					    active);
   7149 					break;
   7150 				}
   7151 				if (active & IFM_FDX)
   7152 					sc->sc_ctrl |= CTRL_FD;
   7153 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7154 			}
   7155 		} else if ((sc->sc_type == WM_T_ICH8)
   7156 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7157 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7158 		} else if (sc->sc_type == WM_T_PCH) {
   7159 			wm_k1_gig_workaround_hv(sc,
   7160 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7161 		}
   7162 
   7163 		if ((sc->sc_phytype == WMPHY_82578)
   7164 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7165 			== IFM_1000_T)) {
   7166 
   7167 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7168 				delay(200*1000); /* XXX too big */
   7169 
   7170 				/* Link stall fix for link up */
   7171 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7172 				    HV_MUX_DATA_CTRL,
   7173 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7174 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7175 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7176 				    HV_MUX_DATA_CTRL,
   7177 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7178 			}
   7179 		}
   7180 	} else if (icr & ICR_RXSEQ) {
   7181 		DPRINTF(WM_DEBUG_LINK,
   7182 		    ("%s: LINK Receive sequence error\n",
   7183 			device_xname(sc->sc_dev)));
   7184 	}
   7185 }
   7186 
   7187 /*
   7188  * wm_linkintr_tbi:
   7189  *
   7190  *	Helper; handle link interrupts for TBI mode.
   7191  */
   7192 static void
   7193 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7194 {
   7195 	uint32_t status;
   7196 
   7197 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7198 		__func__));
   7199 
   7200 	status = CSR_READ(sc, WMREG_STATUS);
   7201 	if (icr & ICR_LSC) {
   7202 		if (status & STATUS_LU) {
   7203 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7204 			    device_xname(sc->sc_dev),
   7205 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7206 			/*
   7207 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7208 			 * so we should update sc->sc_ctrl
   7209 			 */
   7210 
   7211 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7212 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7213 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7214 			if (status & STATUS_FD)
   7215 				sc->sc_tctl |=
   7216 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7217 			else
   7218 				sc->sc_tctl |=
   7219 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7220 			if (sc->sc_ctrl & CTRL_TFCE)
   7221 				sc->sc_fcrtl |= FCRTL_XONE;
   7222 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7223 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7224 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7225 				      sc->sc_fcrtl);
   7226 			sc->sc_tbi_linkup = 1;
   7227 		} else {
   7228 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7229 			    device_xname(sc->sc_dev)));
   7230 			sc->sc_tbi_linkup = 0;
   7231 		}
   7232 		/* Update LED */
   7233 		wm_tbi_serdes_set_linkled(sc);
   7234 	} else if (icr & ICR_RXSEQ) {
   7235 		DPRINTF(WM_DEBUG_LINK,
   7236 		    ("%s: LINK: Receive sequence error\n",
   7237 		    device_xname(sc->sc_dev)));
   7238 	}
   7239 }
   7240 
   7241 /*
   7242  * wm_linkintr_serdes:
   7243  *
    7244  *	Helper; handle link interrupts for SERDES mode.
   7245  */
   7246 static void
   7247 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7248 {
   7249 	struct mii_data *mii = &sc->sc_mii;
   7250 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7251 	uint32_t pcs_adv, pcs_lpab, reg;
   7252 
   7253 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7254 		__func__));
   7255 
   7256 	if (icr & ICR_LSC) {
   7257 		/* Check PCS */
   7258 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7259 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7260 			mii->mii_media_status |= IFM_ACTIVE;
   7261 			sc->sc_tbi_linkup = 1;
   7262 		} else {
   7263 			mii->mii_media_status |= IFM_NONE;
   7264 			sc->sc_tbi_linkup = 0;
   7265 			wm_tbi_serdes_set_linkled(sc);
   7266 			return;
   7267 		}
   7268 		mii->mii_media_active |= IFM_1000_SX;
   7269 		if ((reg & PCS_LSTS_FDX) != 0)
   7270 			mii->mii_media_active |= IFM_FDX;
   7271 		else
   7272 			mii->mii_media_active |= IFM_HDX;
   7273 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7274 			/* Check flow */
   7275 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7276 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7277 				DPRINTF(WM_DEBUG_LINK,
   7278 				    ("XXX LINKOK but not ACOMP\n"));
   7279 				return;
   7280 			}
   7281 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7282 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7283 			DPRINTF(WM_DEBUG_LINK,
   7284 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7285 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7286 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7287 				mii->mii_media_active |= IFM_FLOW
   7288 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7289 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7290 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7291 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7292 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7293 				mii->mii_media_active |= IFM_FLOW
   7294 				    | IFM_ETH_TXPAUSE;
   7295 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7296 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7297 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7298 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7299 				mii->mii_media_active |= IFM_FLOW
   7300 				    | IFM_ETH_RXPAUSE;
   7301 		}
   7302 		/* Update LED */
   7303 		wm_tbi_serdes_set_linkled(sc);
   7304 	} else {
   7305 		DPRINTF(WM_DEBUG_LINK,
   7306 		    ("%s: LINK: Receive sequence error\n",
   7307 		    device_xname(sc->sc_dev)));
   7308 	}
   7309 }
   7310 
   7311 /*
   7312  * wm_linkintr:
   7313  *
   7314  *	Helper; handle link interrupts.
   7315  */
   7316 static void
   7317 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7318 {
   7319 
   7320 	KASSERT(WM_CORE_LOCKED(sc));
   7321 
   7322 	if (sc->sc_flags & WM_F_HAS_MII)
   7323 		wm_linkintr_gmii(sc, icr);
   7324 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7325 	    && (sc->sc_type >= WM_T_82575))
   7326 		wm_linkintr_serdes(sc, icr);
   7327 	else
   7328 		wm_linkintr_tbi(sc, icr);
   7329 }
   7330 
   7331 /*
   7332  * wm_intr_legacy:
   7333  *
   7334  *	Interrupt service routine for INTx and MSI.
   7335  */
   7336 static int
   7337 wm_intr_legacy(void *arg)
   7338 {
   7339 	struct wm_softc *sc = arg;
   7340 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7341 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7342 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7343 	uint32_t icr, rndval = 0;
   7344 	int handled = 0;
   7345 
   7346 	DPRINTF(WM_DEBUG_TX,
   7347 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
   7348 	while (1 /* CONSTCOND */) {
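         		/*
         		 * Reading ICR acknowledges (clears) the asserted
         		 * interrupt causes, so keep reading until none of the
         		 * causes we care about remain set.
         		 */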
   7349 		icr = CSR_READ(sc, WMREG_ICR);
   7350 		if ((icr & sc->sc_icr) == 0)
   7351 			break;
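         		/* Stash the first ICR value for the entropy pool below. */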
   7352 		if (rndval == 0)
   7353 			rndval = icr;
   7354 
   7355 		WM_RX_LOCK(rxq);
   7356 
   7357 		if (sc->sc_stopping) {
   7358 			WM_RX_UNLOCK(rxq);
   7359 			break;
   7360 		}
   7361 
   7362 		handled = 1;
   7363 
   7364 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7365 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   7366 			DPRINTF(WM_DEBUG_RX,
   7367 			    ("%s: RX: got Rx intr 0x%08x\n",
   7368 			    device_xname(sc->sc_dev),
   7369 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   7370 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7371 		}
   7372 #endif
   7373 		wm_rxeof(rxq);
   7374 
   7375 		WM_RX_UNLOCK(rxq);
   7376 		WM_TX_LOCK(txq);
   7377 
   7378 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7379 		if (icr & ICR_TXDW) {
   7380 			DPRINTF(WM_DEBUG_TX,
   7381 			    ("%s: TX: got TXDW interrupt\n",
   7382 			    device_xname(sc->sc_dev)));
   7383 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7384 		}
   7385 #endif
   7386 		wm_txeof(sc);
   7387 
   7388 		WM_TX_UNLOCK(txq);
   7389 		WM_CORE_LOCK(sc);
   7390 
   7391 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   7392 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7393 			wm_linkintr(sc, icr);
   7394 		}
   7395 
   7396 		WM_CORE_UNLOCK(sc);
   7397 
   7398 		if (icr & ICR_RXO) {
   7399 #if defined(WM_DEBUG)
   7400 			log(LOG_WARNING, "%s: Receive overrun\n",
   7401 			    device_xname(sc->sc_dev));
   7402 #endif /* defined(WM_DEBUG) */
   7403 		}
   7404 	}
   7405 
   7406 	rnd_add_uint32(&sc->rnd_source, rndval);
   7407 
   7408 	if (handled) {
   7409 		/* Try to get more packets going. */
   7410 		ifp->if_start(ifp);
   7411 	}
   7412 
   7413 	return handled;
   7414 }
   7415 
   7416 /*
   7417  * wm_txintr_msix:
   7418  *
   7419  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7420  */
   7421 static int
   7422 wm_txintr_msix(void *arg)
   7423 {
   7424 	struct wm_txqueue *txq = arg;
   7425 	struct wm_softc *sc = txq->txq_sc;
   7426 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7427 	int handled = 0;
   7428 
   7429 	DPRINTF(WM_DEBUG_TX,
   7430 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7431 
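         	/*
         	 * Mask this queue's interrupt while servicing it; it is
         	 * re-enabled below once the ring has been processed.
         	 */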
   7432 	if (sc->sc_type == WM_T_82574)
   7433 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7434 	else if (sc->sc_type == WM_T_82575)
   7435 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7436 	else
   7437 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
   7438 
   7439 	WM_TX_LOCK(txq);
   7440 
   7441 	if (sc->sc_stopping)
   7442 		goto out;
   7443 
   7444 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7445 	handled = wm_txeof(sc);
   7446 
   7447 out:
   7448 	WM_TX_UNLOCK(txq);
   7449 
   7450 	if (sc->sc_type == WM_T_82574)
   7451 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7452 	else if (sc->sc_type == WM_T_82575)
   7453 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7454 	else
   7455 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7456 
   7457 	if (handled) {
   7458 		/* Try to get more packets going. */
   7459 		ifp->if_start(ifp);
   7460 	}
   7461 
   7462 	return handled;
   7463 }
   7464 
   7465 /*
   7466  * wm_rxintr_msix:
   7467  *
   7468  *	Interrupt service routine for RX interrupt for MSI-X.
   7469  */
   7470 static int
   7471 wm_rxintr_msix(void *arg)
   7472 {
   7473 	struct wm_rxqueue *rxq = arg;
   7474 	struct wm_softc *sc = rxq->rxq_sc;
   7475 
   7476 	DPRINTF(WM_DEBUG_RX,
   7477 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7478 
   7479 	if (sc->sc_type == WM_T_82574)
   7480 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
   7481 	else if (sc->sc_type == WM_T_82575)
   7482 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7483 	else
   7484 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7485 
   7486 	WM_RX_LOCK(rxq);
   7487 
   7488 	if (sc->sc_stopping)
   7489 		goto out;
   7490 
   7491 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7492 	wm_rxeof(rxq);
   7493 
   7494 out:
   7495 	WM_RX_UNLOCK(rxq);
   7496 
   7497 	if (sc->sc_type == WM_T_82574)
   7498 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7499 	else if (sc->sc_type == WM_T_82575)
   7500 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7501 	else
   7502 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7503 
   7504 	return 1;
   7505 }
   7506 
   7507 /*
   7508  * wm_linkintr_msix:
   7509  *
   7510  *	Interrupt service routine for link status change for MSI-X.
   7511  */
   7512 static int
   7513 wm_linkintr_msix(void *arg)
   7514 {
   7515 	struct wm_softc *sc = arg;
   7516 	uint32_t reg;
   7517 
   7518 	DPRINTF(WM_DEBUG_LINK,
   7519 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7520 
   7521 	reg = CSR_READ(sc, WMREG_ICR);
   7522 	WM_CORE_LOCK(sc);
   7523 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7524 		goto out;
   7525 
   7526 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7527 	wm_linkintr(sc, ICR_LSC);
   7528 
   7529 out:
   7530 	WM_CORE_UNLOCK(sc);
   7531 
   7532 	if (sc->sc_type == WM_T_82574)
   7533 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
   7534 	else if (sc->sc_type == WM_T_82575)
   7535 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7536 	else
   7537 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7538 
   7539 	return 1;
   7540 }
   7541 
   7542 /*
   7543  * Media related.
   7544  * GMII, SGMII, TBI (and SERDES)
   7545  */
   7546 
   7547 /* Common */
   7548 
   7549 /*
   7550  * wm_tbi_serdes_set_linkled:
   7551  *
   7552  *	Update the link LED on TBI and SERDES devices.
   7553  */
   7554 static void
   7555 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7556 {
   7557 
   7558 	if (sc->sc_tbi_linkup)
   7559 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7560 	else
   7561 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7562 
   7563 	/* 82540 or newer devices are active low */
   7564 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
   7565 
   7566 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7567 }
   7568 
   7569 /* GMII related */
   7570 
   7571 /*
   7572  * wm_gmii_reset:
   7573  *
   7574  *	Reset the PHY.
   7575  */
   7576 static void
   7577 wm_gmii_reset(struct wm_softc *sc)
   7578 {
   7579 	uint32_t reg;
   7580 	int rv;
   7581 
   7582 	/* get phy semaphore */
   7583 	switch (sc->sc_type) {
   7584 	case WM_T_82571:
   7585 	case WM_T_82572:
   7586 	case WM_T_82573:
   7587 	case WM_T_82574:
   7588 	case WM_T_82583:
   7589 		 /* XXX should get sw semaphore, too */
   7590 		rv = wm_get_swsm_semaphore(sc);
   7591 		break;
   7592 	case WM_T_82575:
   7593 	case WM_T_82576:
   7594 	case WM_T_82580:
   7595 	case WM_T_I350:
   7596 	case WM_T_I354:
   7597 	case WM_T_I210:
   7598 	case WM_T_I211:
   7599 	case WM_T_80003:
   7600 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7601 		break;
   7602 	case WM_T_ICH8:
   7603 	case WM_T_ICH9:
   7604 	case WM_T_ICH10:
   7605 	case WM_T_PCH:
   7606 	case WM_T_PCH2:
   7607 	case WM_T_PCH_LPT:
   7608 		rv = wm_get_swfwhw_semaphore(sc);
   7609 		break;
   7610 	default:
    7611 		/* nothing to do */
   7612 		rv = 0;
   7613 		break;
   7614 	}
   7615 	if (rv != 0) {
   7616 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7617 		    __func__);
   7618 		return;
   7619 	}
   7620 
   7621 	switch (sc->sc_type) {
   7622 	case WM_T_82542_2_0:
   7623 	case WM_T_82542_2_1:
   7624 		/* null */
   7625 		break;
   7626 	case WM_T_82543:
   7627 		/*
   7628 		 * With 82543, we need to force speed and duplex on the MAC
   7629 		 * equal to what the PHY speed and duplex configuration is.
   7630 		 * In addition, we need to perform a hardware reset on the PHY
   7631 		 * to take it out of reset.
   7632 		 */
   7633 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7634 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7635 
   7636 		/* The PHY reset pin is active-low. */
   7637 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7638 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7639 		    CTRL_EXT_SWDPIN(4));
   7640 		reg |= CTRL_EXT_SWDPIO(4);
   7641 
   7642 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7643 		CSR_WRITE_FLUSH(sc);
   7644 		delay(10*1000);
   7645 
   7646 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7647 		CSR_WRITE_FLUSH(sc);
   7648 		delay(150);
   7649 #if 0
   7650 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7651 #endif
   7652 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7653 		break;
   7654 	case WM_T_82544:	/* reset 10000us */
   7655 	case WM_T_82540:
   7656 	case WM_T_82545:
   7657 	case WM_T_82545_3:
   7658 	case WM_T_82546:
   7659 	case WM_T_82546_3:
   7660 	case WM_T_82541:
   7661 	case WM_T_82541_2:
   7662 	case WM_T_82547:
   7663 	case WM_T_82547_2:
   7664 	case WM_T_82571:	/* reset 100us */
   7665 	case WM_T_82572:
   7666 	case WM_T_82573:
   7667 	case WM_T_82574:
   7668 	case WM_T_82575:
   7669 	case WM_T_82576:
   7670 	case WM_T_82580:
   7671 	case WM_T_I350:
   7672 	case WM_T_I354:
   7673 	case WM_T_I210:
   7674 	case WM_T_I211:
   7675 	case WM_T_82583:
   7676 	case WM_T_80003:
   7677 		/* generic reset */
   7678 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7679 		CSR_WRITE_FLUSH(sc);
   7680 		delay(20000);
   7681 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7682 		CSR_WRITE_FLUSH(sc);
   7683 		delay(20000);
   7684 
   7685 		if ((sc->sc_type == WM_T_82541)
   7686 		    || (sc->sc_type == WM_T_82541_2)
   7687 		    || (sc->sc_type == WM_T_82547)
   7688 		    || (sc->sc_type == WM_T_82547_2)) {
    7689 			/* workarounds for IGP are done in igp_reset() */
   7690 			/* XXX add code to set LED after phy reset */
   7691 		}
   7692 		break;
   7693 	case WM_T_ICH8:
   7694 	case WM_T_ICH9:
   7695 	case WM_T_ICH10:
   7696 	case WM_T_PCH:
   7697 	case WM_T_PCH2:
   7698 	case WM_T_PCH_LPT:
   7699 		/* generic reset */
   7700 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7701 		CSR_WRITE_FLUSH(sc);
   7702 		delay(100);
   7703 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7704 		CSR_WRITE_FLUSH(sc);
   7705 		delay(150);
   7706 		break;
   7707 	default:
   7708 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7709 		    __func__);
   7710 		break;
   7711 	}
   7712 
   7713 	/* release PHY semaphore */
   7714 	switch (sc->sc_type) {
   7715 	case WM_T_82571:
   7716 	case WM_T_82572:
   7717 	case WM_T_82573:
   7718 	case WM_T_82574:
   7719 	case WM_T_82583:
   7720 		 /* XXX should put sw semaphore, too */
   7721 		wm_put_swsm_semaphore(sc);
   7722 		break;
   7723 	case WM_T_82575:
   7724 	case WM_T_82576:
   7725 	case WM_T_82580:
   7726 	case WM_T_I350:
   7727 	case WM_T_I354:
   7728 	case WM_T_I210:
   7729 	case WM_T_I211:
   7730 	case WM_T_80003:
   7731 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7732 		break;
   7733 	case WM_T_ICH8:
   7734 	case WM_T_ICH9:
   7735 	case WM_T_ICH10:
   7736 	case WM_T_PCH:
   7737 	case WM_T_PCH2:
   7738 	case WM_T_PCH_LPT:
   7739 		wm_put_swfwhw_semaphore(sc);
   7740 		break;
   7741 	default:
    7742 		/* nothing to do */
   7743 		rv = 0;
   7744 		break;
   7745 	}
   7746 
   7747 	/* get_cfg_done */
   7748 	wm_get_cfg_done(sc);
   7749 
   7750 	/* extra setup */
   7751 	switch (sc->sc_type) {
   7752 	case WM_T_82542_2_0:
   7753 	case WM_T_82542_2_1:
   7754 	case WM_T_82543:
   7755 	case WM_T_82544:
   7756 	case WM_T_82540:
   7757 	case WM_T_82545:
   7758 	case WM_T_82545_3:
   7759 	case WM_T_82546:
   7760 	case WM_T_82546_3:
   7761 	case WM_T_82541_2:
   7762 	case WM_T_82547_2:
   7763 	case WM_T_82571:
   7764 	case WM_T_82572:
   7765 	case WM_T_82573:
   7766 	case WM_T_82574:
   7767 	case WM_T_82575:
   7768 	case WM_T_82576:
   7769 	case WM_T_82580:
   7770 	case WM_T_I350:
   7771 	case WM_T_I354:
   7772 	case WM_T_I210:
   7773 	case WM_T_I211:
   7774 	case WM_T_82583:
   7775 	case WM_T_80003:
   7776 		/* null */
   7777 		break;
   7778 	case WM_T_82541:
   7779 	case WM_T_82547:
    7780 		/* XXX Actively configure LED after PHY reset */
   7781 		break;
   7782 	case WM_T_ICH8:
   7783 	case WM_T_ICH9:
   7784 	case WM_T_ICH10:
   7785 	case WM_T_PCH:
   7786 	case WM_T_PCH2:
   7787 	case WM_T_PCH_LPT:
    7788 		/* Allow time for h/w to get to a quiescent state after reset */
   7789 		delay(10*1000);
   7790 
   7791 		if (sc->sc_type == WM_T_PCH)
   7792 			wm_hv_phy_workaround_ich8lan(sc);
   7793 
   7794 		if (sc->sc_type == WM_T_PCH2)
   7795 			wm_lv_phy_workaround_ich8lan(sc);
   7796 
   7797 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7798 			/*
   7799 			 * dummy read to clear the phy wakeup bit after lcd
   7800 			 * reset
   7801 			 */
   7802 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7803 		}
   7804 
   7805 		/*
    7806 		 * XXX Configure the LCD with the extended configuration region
   7807 		 * in NVM
   7808 		 */
   7809 
   7810 		/* Configure the LCD with the OEM bits in NVM */
   7811 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   7812 		    || (sc->sc_type == WM_T_PCH_LPT)) {
   7813 			/*
   7814 			 * Disable LPLU.
   7815 			 * XXX It seems that 82567 has LPLU, too.
   7816 			 */
   7817 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
    7818 			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   7819 			reg |= HV_OEM_BITS_ANEGNOW;
   7820 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   7821 		}
   7822 		break;
   7823 	default:
   7824 		panic("%s: unknown type\n", __func__);
   7825 		break;
   7826 	}
   7827 }
   7828 
   7829 /*
   7830  * wm_get_phy_id_82575:
   7831  *
   7832  * Return PHY ID. Return -1 if it failed.
   7833  */
   7834 static int
   7835 wm_get_phy_id_82575(struct wm_softc *sc)
   7836 {
   7837 	uint32_t reg;
   7838 	int phyid = -1;
   7839 
   7840 	/* XXX */
   7841 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7842 		return -1;
   7843 
   7844 	if (wm_sgmii_uses_mdio(sc)) {
   7845 		switch (sc->sc_type) {
   7846 		case WM_T_82575:
   7847 		case WM_T_82576:
   7848 			reg = CSR_READ(sc, WMREG_MDIC);
   7849 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7850 			break;
   7851 		case WM_T_82580:
   7852 		case WM_T_I350:
   7853 		case WM_T_I354:
   7854 		case WM_T_I210:
   7855 		case WM_T_I211:
   7856 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7857 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7858 			break;
   7859 		default:
   7860 			return -1;
   7861 		}
   7862 	}
   7863 
   7864 	return phyid;
   7865 }
   7866 
   7867 
   7868 /*
   7869  * wm_gmii_mediainit:
   7870  *
   7871  *	Initialize media for use on 1000BASE-T devices.
   7872  */
   7873 static void
   7874 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7875 {
   7876 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7877 	struct mii_data *mii = &sc->sc_mii;
   7878 	uint32_t reg;
   7879 
   7880 	/* We have GMII. */
   7881 	sc->sc_flags |= WM_F_HAS_MII;
   7882 
   7883 	if (sc->sc_type == WM_T_80003)
   7884 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7885 	else
   7886 		sc->sc_tipg = TIPG_1000T_DFLT;
   7887 
   7888 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7889 	if ((sc->sc_type == WM_T_82580)
   7890 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7891 	    || (sc->sc_type == WM_T_I211)) {
   7892 		reg = CSR_READ(sc, WMREG_PHPM);
   7893 		reg &= ~PHPM_GO_LINK_D;
   7894 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7895 	}
   7896 
   7897 	/*
   7898 	 * Let the chip set speed/duplex on its own based on
   7899 	 * signals from the PHY.
   7900 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7901 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7902 	 */
   7903 	sc->sc_ctrl |= CTRL_SLU;
   7904 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7905 
   7906 	/* Initialize our media structures and probe the GMII. */
   7907 	mii->mii_ifp = ifp;
   7908 
   7909 	/*
   7910 	 * Determine the PHY access method.
   7911 	 *
   7912 	 *  For SGMII, use SGMII specific method.
   7913 	 *
   7914 	 *  For some devices, we can determine the PHY access method
   7915 	 * from sc_type.
   7916 	 *
   7917 	 *  For ICH and PCH variants, it's difficult to determine the PHY
   7918 	 * access  method by sc_type, so use the PCI product ID for some
   7919 	 * devices.
   7920 	 * For other ICH8 variants, try to use igp's method. If the PHY
   7921 	 * can't detect, then use bm's method.
   7922 	 */
   7923 	switch (prodid) {
   7924 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7925 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7926 		/* 82577 */
   7927 		sc->sc_phytype = WMPHY_82577;
   7928 		break;
   7929 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7930 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7931 		/* 82578 */
   7932 		sc->sc_phytype = WMPHY_82578;
   7933 		break;
   7934 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7935 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7936 		/* 82579 */
   7937 		sc->sc_phytype = WMPHY_82579;
   7938 		break;
   7939 	case PCI_PRODUCT_INTEL_82801I_BM:
   7940 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7941 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7942 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7943 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7944 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7945 		/* 82567 */
   7946 		sc->sc_phytype = WMPHY_BM;
   7947 		mii->mii_readreg = wm_gmii_bm_readreg;
   7948 		mii->mii_writereg = wm_gmii_bm_writereg;
   7949 		break;
   7950 	default:
   7951 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   7952 		    && !wm_sgmii_uses_mdio(sc)){
   7953 			/* SGMII */
   7954 			mii->mii_readreg = wm_sgmii_readreg;
   7955 			mii->mii_writereg = wm_sgmii_writereg;
   7956 		} else if (sc->sc_type >= WM_T_80003) {
   7957 			/* 80003 */
   7958 			mii->mii_readreg = wm_gmii_i80003_readreg;
   7959 			mii->mii_writereg = wm_gmii_i80003_writereg;
   7960 		} else if (sc->sc_type >= WM_T_I210) {
   7961 			/* I210 and I211 */
   7962 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   7963 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   7964 		} else if (sc->sc_type >= WM_T_82580) {
   7965 			/* 82580, I350 and I354 */
   7966 			sc->sc_phytype = WMPHY_82580;
   7967 			mii->mii_readreg = wm_gmii_82580_readreg;
   7968 			mii->mii_writereg = wm_gmii_82580_writereg;
   7969 		} else if (sc->sc_type >= WM_T_82544) {
    7970 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   7971 			mii->mii_readreg = wm_gmii_i82544_readreg;
   7972 			mii->mii_writereg = wm_gmii_i82544_writereg;
   7973 		} else {
   7974 			mii->mii_readreg = wm_gmii_i82543_readreg;
   7975 			mii->mii_writereg = wm_gmii_i82543_writereg;
   7976 		}
   7977 		break;
   7978 	}
   7979 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   7980 		/* All PCH* use _hv_ */
   7981 		mii->mii_readreg = wm_gmii_hv_readreg;
   7982 		mii->mii_writereg = wm_gmii_hv_writereg;
   7983 	}
   7984 	mii->mii_statchg = wm_gmii_statchg;
   7985 
   7986 	wm_gmii_reset(sc);
   7987 
   7988 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7989 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   7990 	    wm_gmii_mediastatus);
   7991 
   7992 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   7993 	    || (sc->sc_type == WM_T_82580)
   7994 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   7995 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   7996 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   7997 			/* Attach only one port */
   7998 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   7999 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8000 		} else {
   8001 			int i, id;
   8002 			uint32_t ctrl_ext;
   8003 
   8004 			id = wm_get_phy_id_82575(sc);
   8005 			if (id != -1) {
   8006 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8007 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8008 			}
   8009 			if ((id == -1)
   8010 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8011 				/* Power on sgmii phy if it is disabled */
   8012 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8013 				CSR_WRITE(sc, WMREG_CTRL_EXT,
    8014 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
   8015 				CSR_WRITE_FLUSH(sc);
   8016 				delay(300*1000); /* XXX too long */
   8017 
    8018 				/* Try PHY addresses 1 through 7 */
   8019 				for (i = 1; i < 8; i++)
   8020 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8021 					    0xffffffff, i, MII_OFFSET_ANY,
   8022 					    MIIF_DOPAUSE);
   8023 
   8024 				/* restore previous sfp cage power state */
   8025 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8026 			}
   8027 		}
   8028 	} else {
   8029 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8030 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8031 	}
   8032 
   8033 	/*
   8034 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8035 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8036 	 */
   8037 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8038 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8039 		wm_set_mdio_slow_mode_hv(sc);
   8040 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8041 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8042 	}
   8043 
   8044 	/*
   8045 	 * (For ICH8 variants)
   8046 	 * If PHY detection failed, use BM's r/w function and retry.
   8047 	 */
   8048 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8049 		/* if failed, retry with *_bm_* */
   8050 		mii->mii_readreg = wm_gmii_bm_readreg;
   8051 		mii->mii_writereg = wm_gmii_bm_writereg;
   8052 
   8053 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8054 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8055 	}
   8056 
   8057 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8058 		/* No PHY was found */
   8059 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   8060 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   8061 		sc->sc_phytype = WMPHY_NONE;
   8062 	} else {
   8063 		/*
   8064 		 * PHY Found!
   8065 		 * Check PHY type.
   8066 		 */
   8067 		uint32_t model;
   8068 		struct mii_softc *child;
   8069 
   8070 		child = LIST_FIRST(&mii->mii_phys);
   8071 		if (device_is_a(child->mii_dev, "igphy")) {
   8072 			struct igphy_softc *isc = (struct igphy_softc *)child;
   8073 
   8074 			model = isc->sc_mii.mii_mpd_model;
   8075 			if (model == MII_MODEL_yyINTEL_I82566)
   8076 				sc->sc_phytype = WMPHY_IGP_3;
   8077 		}
   8078 
   8079 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8080 	}
   8081 }
   8082 
   8083 /*
   8084  * wm_gmii_mediachange:	[ifmedia interface function]
   8085  *
   8086  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8087  */
   8088 static int
   8089 wm_gmii_mediachange(struct ifnet *ifp)
   8090 {
   8091 	struct wm_softc *sc = ifp->if_softc;
   8092 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8093 	int rc;
   8094 
   8095 	if ((ifp->if_flags & IFF_UP) == 0)
   8096 		return 0;
   8097 
   8098 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8099 	sc->sc_ctrl |= CTRL_SLU;
   8100 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8101 	    || (sc->sc_type > WM_T_82543)) {
   8102 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8103 	} else {
   8104 		sc->sc_ctrl &= ~CTRL_ASDE;
   8105 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8106 		if (ife->ifm_media & IFM_FDX)
   8107 			sc->sc_ctrl |= CTRL_FD;
   8108 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8109 		case IFM_10_T:
   8110 			sc->sc_ctrl |= CTRL_SPEED_10;
   8111 			break;
   8112 		case IFM_100_TX:
   8113 			sc->sc_ctrl |= CTRL_SPEED_100;
   8114 			break;
   8115 		case IFM_1000_T:
   8116 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8117 			break;
   8118 		default:
   8119 			panic("wm_gmii_mediachange: bad media 0x%x",
   8120 			    ife->ifm_media);
   8121 		}
   8122 	}
   8123 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8124 	if (sc->sc_type <= WM_T_82543)
   8125 		wm_gmii_reset(sc);
   8126 
   8127 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8128 		return 0;
   8129 	return rc;
   8130 }
   8131 
   8132 /*
   8133  * wm_gmii_mediastatus:	[ifmedia interface function]
   8134  *
   8135  *	Get the current interface media status on a 1000BASE-T device.
   8136  */
   8137 static void
   8138 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8139 {
   8140 	struct wm_softc *sc = ifp->if_softc;
   8141 
   8142 	ether_mediastatus(ifp, ifmr);
   8143 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8144 	    | sc->sc_flowflags;
   8145 }
   8146 
   8147 #define	MDI_IO		CTRL_SWDPIN(2)
   8148 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8149 #define	MDI_CLK		CTRL_SWDPIN(3)
   8150 
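         /*
          * Bit-bang MDIO on the i82543: MDI_IO carries each data bit,
          * MDI_DIR drives the pin direction (host -> PHY) and MDI_CLK
          * clocks the bit, with ~10us of delay around each clock edge.
          */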
   8151 static void
   8152 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8153 {
   8154 	uint32_t i, v;
   8155 
   8156 	v = CSR_READ(sc, WMREG_CTRL);
   8157 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8158 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8159 
   8160 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8161 		if (data & i)
   8162 			v |= MDI_IO;
   8163 		else
   8164 			v &= ~MDI_IO;
   8165 		CSR_WRITE(sc, WMREG_CTRL, v);
   8166 		CSR_WRITE_FLUSH(sc);
   8167 		delay(10);
   8168 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8169 		CSR_WRITE_FLUSH(sc);
   8170 		delay(10);
   8171 		CSR_WRITE(sc, WMREG_CTRL, v);
   8172 		CSR_WRITE_FLUSH(sc);
   8173 		delay(10);
   8174 	}
   8175 }
   8176 
   8177 static uint32_t
   8178 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8179 {
   8180 	uint32_t v, i, data = 0;
   8181 
   8182 	v = CSR_READ(sc, WMREG_CTRL);
   8183 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8184 	v |= CTRL_SWDPIO(3);
   8185 
   8186 	CSR_WRITE(sc, WMREG_CTRL, v);
   8187 	CSR_WRITE_FLUSH(sc);
   8188 	delay(10);
   8189 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8190 	CSR_WRITE_FLUSH(sc);
   8191 	delay(10);
   8192 	CSR_WRITE(sc, WMREG_CTRL, v);
   8193 	CSR_WRITE_FLUSH(sc);
   8194 	delay(10);
   8195 
   8196 	for (i = 0; i < 16; i++) {
   8197 		data <<= 1;
   8198 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8199 		CSR_WRITE_FLUSH(sc);
   8200 		delay(10);
   8201 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8202 			data |= 1;
   8203 		CSR_WRITE(sc, WMREG_CTRL, v);
   8204 		CSR_WRITE_FLUSH(sc);
   8205 		delay(10);
   8206 	}
   8207 
   8208 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8209 	CSR_WRITE_FLUSH(sc);
   8210 	delay(10);
   8211 	CSR_WRITE(sc, WMREG_CTRL, v);
   8212 	CSR_WRITE_FLUSH(sc);
   8213 	delay(10);
   8214 
   8215 	return data;
   8216 }
   8217 
   8218 #undef MDI_IO
   8219 #undef MDI_DIR
   8220 #undef MDI_CLK
   8221 
   8222 /*
   8223  * wm_gmii_i82543_readreg:	[mii interface function]
   8224  *
   8225  *	Read a PHY register on the GMII (i82543 version).
   8226  */
   8227 static int
   8228 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8229 {
   8230 	struct wm_softc *sc = device_private(self);
   8231 	int rv;
   8232 
   8233 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8234 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8235 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8236 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8237 
   8238 	DPRINTF(WM_DEBUG_GMII,
   8239 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8240 	    device_xname(sc->sc_dev), phy, reg, rv));
   8241 
   8242 	return rv;
   8243 }
   8244 
   8245 /*
   8246  * wm_gmii_i82543_writereg:	[mii interface function]
   8247  *
   8248  *	Write a PHY register on the GMII (i82543 version).
   8249  */
   8250 static void
   8251 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8252 {
   8253 	struct wm_softc *sc = device_private(self);
   8254 
   8255 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8256 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8257 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8258 	    (MII_COMMAND_START << 30), 32);
   8259 }
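         
         /*
          * The two routines above bit-bang IEEE 802.3 clause 22 management
          * frames: a preamble of 32 ones, start bits, a read or write
          * opcode, 5 bits of PHY address, 5 bits of register address, a
          * turnaround, and 16 bits of data, most significant bit first.
          */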
   8260 
   8261 /*
   8262  * wm_gmii_i82544_readreg:	[mii interface function]
   8263  *
   8264  *	Read a PHY register on the GMII.
   8265  */
   8266 static int
   8267 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8268 {
   8269 	struct wm_softc *sc = device_private(self);
   8270 	uint32_t mdic = 0;
   8271 	int i, rv;
   8272 
   8273 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8274 	    MDIC_REGADD(reg));
   8275 
   8276 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8277 		mdic = CSR_READ(sc, WMREG_MDIC);
   8278 		if (mdic & MDIC_READY)
   8279 			break;
   8280 		delay(50);
   8281 	}
   8282 
   8283 	if ((mdic & MDIC_READY) == 0) {
   8284 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8285 		    device_xname(sc->sc_dev), phy, reg);
   8286 		rv = 0;
   8287 	} else if (mdic & MDIC_E) {
   8288 #if 0 /* This is normal if no PHY is present. */
   8289 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8290 		    device_xname(sc->sc_dev), phy, reg);
   8291 #endif
   8292 		rv = 0;
   8293 	} else {
   8294 		rv = MDIC_DATA(mdic);
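         		/* All-ones likely means no PHY is present at this address */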
   8295 		if (rv == 0xffff)
   8296 			rv = 0;
   8297 	}
   8298 
   8299 	return rv;
   8300 }
   8301 
   8302 /*
   8303  * wm_gmii_i82544_writereg:	[mii interface function]
   8304  *
   8305  *	Write a PHY register on the GMII.
   8306  */
   8307 static void
   8308 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8309 {
   8310 	struct wm_softc *sc = device_private(self);
   8311 	uint32_t mdic = 0;
   8312 	int i;
   8313 
   8314 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8315 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8316 
   8317 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8318 		mdic = CSR_READ(sc, WMREG_MDIC);
   8319 		if (mdic & MDIC_READY)
   8320 			break;
   8321 		delay(50);
   8322 	}
   8323 
   8324 	if ((mdic & MDIC_READY) == 0)
   8325 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8326 		    device_xname(sc->sc_dev), phy, reg);
   8327 	else if (mdic & MDIC_E)
   8328 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8329 		    device_xname(sc->sc_dev), phy, reg);
   8330 }
   8331 
   8332 /*
   8333  * wm_gmii_i80003_readreg:	[mii interface function]
   8334  *
    8335  *	Read a PHY register on the Kumeran interface (80003).
    8336  * This could be handled by the PHY layer if we didn't have to lock the
    8337  * resource ...
   8338  */
   8339 static int
   8340 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8341 {
   8342 	struct wm_softc *sc = device_private(self);
   8343 	int sem;
   8344 	int rv;
   8345 
   8346 	if (phy != 1) /* only one PHY on kumeran bus */
   8347 		return 0;
   8348 
   8349 	sem = swfwphysem[sc->sc_funcid];
   8350 	if (wm_get_swfw_semaphore(sc, sem)) {
   8351 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8352 		    __func__);
   8353 		return 0;
   8354 	}
   8355 
   8356 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8357 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8358 		    reg >> GG82563_PAGE_SHIFT);
   8359 	} else {
   8360 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8361 		    reg >> GG82563_PAGE_SHIFT);
   8362 	}
    8363 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8364 	delay(200);
   8365 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8366 	delay(200);
   8367 
   8368 	wm_put_swfw_semaphore(sc, sem);
   8369 	return rv;
   8370 }
   8371 
   8372 /*
   8373  * wm_gmii_i80003_writereg:	[mii interface function]
   8374  *
    8375  *	Write a PHY register on the Kumeran interface (80003).
    8376  * This could be handled by the PHY layer if we didn't have to lock the
    8377  * resource ...
   8378  */
   8379 static void
   8380 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8381 {
   8382 	struct wm_softc *sc = device_private(self);
   8383 	int sem;
   8384 
   8385 	if (phy != 1) /* only one PHY on kumeran bus */
   8386 		return;
   8387 
   8388 	sem = swfwphysem[sc->sc_funcid];
   8389 	if (wm_get_swfw_semaphore(sc, sem)) {
   8390 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8391 		    __func__);
   8392 		return;
   8393 	}
   8394 
   8395 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8396 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8397 		    reg >> GG82563_PAGE_SHIFT);
   8398 	} else {
   8399 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8400 		    reg >> GG82563_PAGE_SHIFT);
   8401 	}
    8402 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
   8403 	delay(200);
   8404 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8405 	delay(200);
   8406 
   8407 	wm_put_swfw_semaphore(sc, sem);
   8408 }
   8409 
   8410 /*
   8411  * wm_gmii_bm_readreg:	[mii interface function]
   8412  *
    8413  *	Read a PHY register on the BM PHY (ICH8 and newer).
    8414  * This could be handled by the PHY layer if we didn't have to lock the
    8415  * resource ...
   8416  */
   8417 static int
   8418 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8419 {
   8420 	struct wm_softc *sc = device_private(self);
   8421 	int sem;
   8422 	int rv;
   8423 
   8424 	sem = swfwphysem[sc->sc_funcid];
   8425 	if (wm_get_swfw_semaphore(sc, sem)) {
   8426 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8427 		    __func__);
   8428 		return 0;
   8429 	}
   8430 
   8431 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8432 		if (phy == 1)
   8433 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8434 			    reg);
   8435 		else
   8436 			wm_gmii_i82544_writereg(self, phy,
   8437 			    GG82563_PHY_PAGE_SELECT,
   8438 			    reg >> GG82563_PAGE_SHIFT);
   8439 	}
   8440 
   8441 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8442 	wm_put_swfw_semaphore(sc, sem);
   8443 	return rv;
   8444 }
   8445 
   8446 /*
   8447  * wm_gmii_bm_writereg:	[mii interface function]
   8448  *
    8449  *	Write a PHY register on the BM PHY (ICH8 and newer).
    8450  * This could be handled by the PHY layer if we didn't have to lock the
    8451  * resource ...
   8452  */
   8453 static void
   8454 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8455 {
   8456 	struct wm_softc *sc = device_private(self);
   8457 	int sem;
   8458 
   8459 	sem = swfwphysem[sc->sc_funcid];
   8460 	if (wm_get_swfw_semaphore(sc, sem)) {
   8461 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8462 		    __func__);
   8463 		return;
   8464 	}
   8465 
   8466 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8467 		if (phy == 1)
   8468 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8469 			    reg);
   8470 		else
   8471 			wm_gmii_i82544_writereg(self, phy,
   8472 			    GG82563_PHY_PAGE_SELECT,
   8473 			    reg >> GG82563_PAGE_SHIFT);
   8474 	}
   8475 
   8476 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8477 	wm_put_swfw_semaphore(sc, sem);
   8478 }
   8479 
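         /*
          * wm_access_phy_wakeup_reg_bm:
          *
          *	Read or write a BM PHY wakeup register (page 800).  Enable the
          *	wakeup-register window via page 769, select page 800, issue the
          *	address and data opcodes, then restore the original state.
          */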
   8480 static void
   8481 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8482 {
   8483 	struct wm_softc *sc = device_private(self);
   8484 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8485 	uint16_t wuce;
   8486 
   8487 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8488 	if (sc->sc_type == WM_T_PCH) {
    8489 		/* XXX the e1000 driver does nothing here... why? */
   8490 	}
   8491 
   8492 	/* Set page 769 */
   8493 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8494 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8495 
   8496 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8497 
   8498 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8499 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8500 	    wuce | BM_WUC_ENABLE_BIT);
   8501 
   8502 	/* Select page 800 */
   8503 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8504 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8505 
   8506 	/* Write page 800 */
   8507 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8508 
   8509 	if (rd)
   8510 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8511 	else
   8512 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8513 
   8514 	/* Set page 769 */
   8515 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8516 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8517 
   8518 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8519 }
   8520 
   8521 /*
   8522  * wm_gmii_hv_readreg:	[mii interface function]
   8523  *
    8524  *	Read a PHY register on the HV PHY (PCH and newer).
    8525  * This could be handled by the PHY layer if we didn't have to lock the
    8526  * resource ...
   8527  */
   8528 static int
   8529 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8530 {
   8531 	struct wm_softc *sc = device_private(self);
   8532 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8533 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8534 	uint16_t val;
   8535 	int rv;
   8536 
   8537 	if (wm_get_swfwhw_semaphore(sc)) {
   8538 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8539 		    __func__);
   8540 		return 0;
   8541 	}
   8542 
   8543 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8544 	if (sc->sc_phytype == WMPHY_82577) {
   8545 		/* XXX must write */
   8546 	}
   8547 
   8548 	/* Page 800 works differently than the rest so it has its own func */
   8549 	if (page == BM_WUC_PAGE) {
    8550 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
         		wm_put_swfwhw_semaphore(sc);
    8551 		return val;
   8552 	}
   8553 
   8554 	/*
   8555 	 * Lower than page 768 works differently than the rest so it has its
   8556 	 * own func
   8557 	 */
   8558 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8559 		printf("gmii_hv_readreg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8560 		return 0;
   8561 	}
   8562 
   8563 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8564 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8565 		    page << BME1000_PAGE_SHIFT);
   8566 	}
   8567 
   8568 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8569 	wm_put_swfwhw_semaphore(sc);
   8570 	return rv;
   8571 }
   8572 
   8573 /*
   8574  * wm_gmii_hv_writereg:	[mii interface function]
   8575  *
    8576  *	Write a PHY register on the HV PHY (PCH and newer).
    8577  * This could be handled by the PHY layer if we didn't have to lock the
    8578  * resource ...
   8579  */
   8580 static void
   8581 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8582 {
   8583 	struct wm_softc *sc = device_private(self);
   8584 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8585 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8586 
   8587 	if (wm_get_swfwhw_semaphore(sc)) {
   8588 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8589 		    __func__);
   8590 		return;
   8591 	}
   8592 
   8593 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8594 
   8595 	/* Page 800 works differently than the rest so it has its own func */
   8596 	if (page == BM_WUC_PAGE) {
   8597 		uint16_t tmp;
   8598 
   8599 		tmp = val;
    8600 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
         		wm_put_swfwhw_semaphore(sc);
    8601 		return;
   8602 	}
   8603 
   8604 	/*
   8605 	 * Lower than page 768 works differently than the rest so it has its
   8606 	 * own func
   8607 	 */
   8608 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
    8609 		printf("gmii_hv_writereg!!!\n");
         		wm_put_swfwhw_semaphore(sc);
    8610 		return;
   8611 	}
   8612 
   8613 	/*
   8614 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8615 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8616 	 */
   8617 
   8618 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8619 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8620 		    page << BME1000_PAGE_SHIFT);
   8621 	}
   8622 
   8623 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8624 	wm_put_swfwhw_semaphore(sc);
   8625 }
   8626 
   8627 /*
   8628  * wm_gmii_82580_readreg:	[mii interface function]
   8629  *
   8630  *	Read a PHY register on the 82580 and I350.
   8631  * This could be handled by the PHY layer if we didn't have to lock the
    8632  * resource ...
   8633  */
   8634 static int
   8635 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8636 {
   8637 	struct wm_softc *sc = device_private(self);
   8638 	int sem;
   8639 	int rv;
   8640 
   8641 	sem = swfwphysem[sc->sc_funcid];
   8642 	if (wm_get_swfw_semaphore(sc, sem)) {
   8643 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8644 		    __func__);
   8645 		return 0;
   8646 	}
   8647 
   8648 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8649 
   8650 	wm_put_swfw_semaphore(sc, sem);
   8651 	return rv;
   8652 }
   8653 
   8654 /*
   8655  * wm_gmii_82580_writereg:	[mii interface function]
   8656  *
   8657  *	Write a PHY register on the 82580 and I350.
   8658  * This could be handled by the PHY layer if we didn't have to lock the
    8659  * resource ...
   8660  */
   8661 static void
   8662 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8663 {
   8664 	struct wm_softc *sc = device_private(self);
   8665 	int sem;
   8666 
   8667 	sem = swfwphysem[sc->sc_funcid];
   8668 	if (wm_get_swfw_semaphore(sc, sem)) {
   8669 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8670 		    __func__);
   8671 		return;
   8672 	}
   8673 
   8674 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8675 
   8676 	wm_put_swfw_semaphore(sc, sem);
   8677 }
   8678 
   8679 /*
   8680  * wm_gmii_gs40g_readreg:	[mii interface function]
   8681  *
    8682  *	Read a PHY register on the I210 and I211.
    8683  * This could be handled by the PHY layer if we didn't have to lock the
    8684  * resource ...
   8685  */
   8686 static int
   8687 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8688 {
   8689 	struct wm_softc *sc = device_private(self);
   8690 	int sem;
   8691 	int page, offset;
   8692 	int rv;
   8693 
   8694 	/* Acquire semaphore */
   8695 	sem = swfwphysem[sc->sc_funcid];
   8696 	if (wm_get_swfw_semaphore(sc, sem)) {
   8697 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8698 		    __func__);
   8699 		return 0;
   8700 	}
   8701 
   8702 	/* Page select */
   8703 	page = reg >> GS40G_PAGE_SHIFT;
   8704 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8705 
   8706 	/* Read reg */
   8707 	offset = reg & GS40G_OFFSET_MASK;
   8708 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8709 
   8710 	wm_put_swfw_semaphore(sc, sem);
   8711 	return rv;
   8712 }
   8713 
   8714 /*
   8715  * wm_gmii_gs40g_writereg:	[mii interface function]
   8716  *
   8717  *	Write a PHY register on the I210 and I211.
   8718  * This could be handled by the PHY layer if we didn't have to lock the
    8719  * resource ...
   8720  */
   8721 static void
   8722 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8723 {
   8724 	struct wm_softc *sc = device_private(self);
   8725 	int sem;
   8726 	int page, offset;
   8727 
   8728 	/* Acquire semaphore */
   8729 	sem = swfwphysem[sc->sc_funcid];
   8730 	if (wm_get_swfw_semaphore(sc, sem)) {
   8731 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8732 		    __func__);
   8733 		return;
   8734 	}
   8735 
   8736 	/* Page select */
   8737 	page = reg >> GS40G_PAGE_SHIFT;
   8738 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8739 
   8740 	/* Write reg */
   8741 	offset = reg & GS40G_OFFSET_MASK;
   8742 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8743 
   8744 	/* Release semaphore */
   8745 	wm_put_swfw_semaphore(sc, sem);
   8746 }
   8747 
   8748 /*
   8749  * wm_gmii_statchg:	[mii interface function]
   8750  *
   8751  *	Callback from MII layer when media changes.
   8752  */
   8753 static void
   8754 wm_gmii_statchg(struct ifnet *ifp)
   8755 {
   8756 	struct wm_softc *sc = ifp->if_softc;
   8757 	struct mii_data *mii = &sc->sc_mii;
   8758 
   8759 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8760 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8761 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8762 
   8763 	/*
   8764 	 * Get flow control negotiation result.
   8765 	 */
   8766 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8767 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8768 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8769 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8770 	}
   8771 
   8772 	if (sc->sc_flowflags & IFM_FLOW) {
   8773 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8774 			sc->sc_ctrl |= CTRL_TFCE;
   8775 			sc->sc_fcrtl |= FCRTL_XONE;
   8776 		}
   8777 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8778 			sc->sc_ctrl |= CTRL_RFCE;
   8779 	}
   8780 
   8781 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8782 		DPRINTF(WM_DEBUG_LINK,
   8783 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8784 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8785 	} else {
   8786 		DPRINTF(WM_DEBUG_LINK,
   8787 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8788 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8789 	}
   8790 
   8791 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8792 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8793 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8794 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8795 	if (sc->sc_type == WM_T_80003) {
   8796 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8797 		case IFM_1000_T:
   8798 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8799 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8800 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8801 			break;
   8802 		default:
   8803 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8804 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8805 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8806 			break;
   8807 		}
   8808 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8809 	}
   8810 }
   8811 
   8812 /*
   8813  * wm_kmrn_readreg:
   8814  *
   8815  *	Read a kumeran register
   8816  */
   8817 static int
   8818 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8819 {
   8820 	int rv;
   8821 
   8822 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8823 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8824 			aprint_error_dev(sc->sc_dev,
   8825 			    "%s: failed to get semaphore\n", __func__);
   8826 			return 0;
   8827 		}
   8828 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8829 		if (wm_get_swfwhw_semaphore(sc)) {
   8830 			aprint_error_dev(sc->sc_dev,
   8831 			    "%s: failed to get semaphore\n", __func__);
   8832 			return 0;
   8833 		}
   8834 	}
   8835 
   8836 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8837 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8838 	    KUMCTRLSTA_REN);
   8839 	CSR_WRITE_FLUSH(sc);
   8840 	delay(2);
   8841 
   8842 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8843 
   8844 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8845 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8846 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8847 		wm_put_swfwhw_semaphore(sc);
   8848 
   8849 	return rv;
   8850 }
   8851 
   8852 /*
   8853  * wm_kmrn_writereg:
   8854  *
   8855  *	Write a kumeran register
   8856  */
   8857 static void
   8858 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8859 {
   8860 
   8861 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8862 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8863 			aprint_error_dev(sc->sc_dev,
   8864 			    "%s: failed to get semaphore\n", __func__);
   8865 			return;
   8866 		}
   8867 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8868 		if (wm_get_swfwhw_semaphore(sc)) {
   8869 			aprint_error_dev(sc->sc_dev,
   8870 			    "%s: failed to get semaphore\n", __func__);
   8871 			return;
   8872 		}
   8873 	}
   8874 
   8875 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8876 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8877 	    (val & KUMCTRLSTA_MASK));
   8878 
   8879 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8880 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8881 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8882 		wm_put_swfwhw_semaphore(sc);
   8883 }
   8884 
   8885 /* SGMII related */
   8886 
   8887 /*
   8888  * wm_sgmii_uses_mdio
   8889  *
   8890  * Check whether the transaction is to the internal PHY or the external
   8891  * MDIO interface. Return true if it's MDIO.
   8892  */
   8893 static bool
   8894 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8895 {
   8896 	uint32_t reg;
   8897 	bool ismdio = false;
   8898 
   8899 	switch (sc->sc_type) {
   8900 	case WM_T_82575:
   8901 	case WM_T_82576:
   8902 		reg = CSR_READ(sc, WMREG_MDIC);
   8903 		ismdio = ((reg & MDIC_DEST) != 0);
   8904 		break;
   8905 	case WM_T_82580:
   8906 	case WM_T_I350:
   8907 	case WM_T_I354:
   8908 	case WM_T_I210:
   8909 	case WM_T_I211:
   8910 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8911 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8912 		break;
   8913 	default:
   8914 		break;
   8915 	}
   8916 
   8917 	return ismdio;
   8918 }
   8919 
   8920 /*
   8921  * wm_sgmii_readreg:	[mii interface function]
   8922  *
   8923  *	Read a PHY register on the SGMII
   8924  * This could be handled by the PHY layer if we didn't have to lock the
    8925  * resource ...
   8926  */
   8927 static int
   8928 wm_sgmii_readreg(device_t self, int phy, int reg)
   8929 {
   8930 	struct wm_softc *sc = device_private(self);
   8931 	uint32_t i2ccmd;
   8932 	int i, rv;
   8933 
   8934 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8935 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8936 		    __func__);
   8937 		return 0;
   8938 	}
   8939 
   8940 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8941 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8942 	    | I2CCMD_OPCODE_READ;
   8943 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8944 
   8945 	/* Poll the ready bit */
   8946 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8947 		delay(50);
   8948 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8949 		if (i2ccmd & I2CCMD_READY)
   8950 			break;
   8951 	}
   8952 	if ((i2ccmd & I2CCMD_READY) == 0)
   8953 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8954 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8955 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8956 
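         	/* Swap the data bytes back from I2C wire order to host order */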
   8957 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   8958 
   8959 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8960 	return rv;
   8961 }
   8962 
   8963 /*
   8964  * wm_sgmii_writereg:	[mii interface function]
   8965  *
   8966  *	Write a PHY register on the SGMII.
    8967  * resource ...
   8968  * ressource ...
   8969  */
   8970 static void
   8971 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   8972 {
   8973 	struct wm_softc *sc = device_private(self);
   8974 	uint32_t i2ccmd;
   8975 	int i;
   8976 	int val_swapped;
   8977 
   8978 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8979 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8980 		    __func__);
   8981 		return;
   8982 	}
   8983 	/* Swap the data bytes for the I2C interface */
   8984 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   8985 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8986 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8987 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   8988 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8989 
   8990 	/* Poll the ready bit */
   8991 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8992 		delay(50);
   8993 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8994 		if (i2ccmd & I2CCMD_READY)
   8995 			break;
   8996 	}
   8997 	if ((i2ccmd & I2CCMD_READY) == 0)
   8998 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   8999 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9000 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9001 
    9002 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9003 }
   9004 
   9005 /* TBI related */
   9006 
   9007 /*
   9008  * wm_tbi_mediainit:
   9009  *
   9010  *	Initialize media for use on 1000BASE-X devices.
   9011  */
   9012 static void
   9013 wm_tbi_mediainit(struct wm_softc *sc)
   9014 {
   9015 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9016 	const char *sep = "";
   9017 
   9018 	if (sc->sc_type < WM_T_82543)
   9019 		sc->sc_tipg = TIPG_WM_DFLT;
   9020 	else
   9021 		sc->sc_tipg = TIPG_LG_DFLT;
   9022 
   9023 	sc->sc_tbi_serdes_anegticks = 5;
   9024 
   9025 	/* Initialize our media structures */
   9026 	sc->sc_mii.mii_ifp = ifp;
   9027 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9028 
   9029 	if ((sc->sc_type >= WM_T_82575)
   9030 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9031 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9032 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9033 	else
   9034 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9035 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9036 
   9037 	/*
   9038 	 * SWD Pins:
   9039 	 *
   9040 	 *	0 = Link LED (output)
   9041 	 *	1 = Loss Of Signal (input)
   9042 	 */
   9043 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9044 
   9045 	/* XXX Perhaps this is only for TBI */
   9046 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9047 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9048 
   9049 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9050 		sc->sc_ctrl &= ~CTRL_LRST;
   9051 
   9052 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9053 
   9054 #define	ADD(ss, mm, dd)							\
   9055 do {									\
   9056 	aprint_normal("%s%s", sep, ss);					\
   9057 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   9058 	sep = ", ";							\
   9059 } while (/*CONSTCOND*/0)
   9060 
   9061 	aprint_normal_dev(sc->sc_dev, "");
   9062 
   9063 	/* Only 82545 is LX */
   9064 	if (sc->sc_type == WM_T_82545) {
   9065 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9066 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   9067 	} else {
   9068 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9069 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   9070 	}
   9071 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   9072 	aprint_normal("\n");
   9073 
   9074 #undef ADD
   9075 
   9076 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9077 }
   9078 
   9079 /*
   9080  * wm_tbi_mediachange:	[ifmedia interface function]
   9081  *
   9082  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9083  */
   9084 static int
   9085 wm_tbi_mediachange(struct ifnet *ifp)
   9086 {
   9087 	struct wm_softc *sc = ifp->if_softc;
   9088 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9089 	uint32_t status;
   9090 	int i;
   9091 
   9092 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9093 		/* XXX need some work for >= 82571 and < 82575 */
   9094 		if (sc->sc_type < WM_T_82575)
   9095 			return 0;
   9096 	}
   9097 
   9098 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9099 	    || (sc->sc_type >= WM_T_82575))
   9100 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9101 
   9102 	sc->sc_ctrl &= ~CTRL_LRST;
   9103 	sc->sc_txcw = TXCW_ANE;
   9104 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9105 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9106 	else if (ife->ifm_media & IFM_FDX)
   9107 		sc->sc_txcw |= TXCW_FD;
   9108 	else
   9109 		sc->sc_txcw |= TXCW_HD;
   9110 
   9111 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9112 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9113 
   9114 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9115 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9116 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9117 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9118 	CSR_WRITE_FLUSH(sc);
   9119 	delay(1000);
   9120 
   9121 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
    9122 	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
   9123 
   9124 	/*
   9125 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
   9126 	 * optics detect a signal, 0 if they don't.
   9127 	 */
   9128 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9129 		/* Have signal; wait for the link to come up. */
   9130 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9131 			delay(10000);
   9132 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9133 				break;
   9134 		}
   9135 
   9136 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9137 			    device_xname(sc->sc_dev),i));
   9138 
   9139 		status = CSR_READ(sc, WMREG_STATUS);
   9140 		DPRINTF(WM_DEBUG_LINK,
   9141 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9142 			device_xname(sc->sc_dev),status, STATUS_LU));
   9143 		if (status & STATUS_LU) {
   9144 			/* Link is up. */
   9145 			DPRINTF(WM_DEBUG_LINK,
   9146 			    ("%s: LINK: set media -> link up %s\n",
   9147 			    device_xname(sc->sc_dev),
   9148 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9149 
   9150 			/*
   9151 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   9152 			 * so we should update sc->sc_ctrl
   9153 			 */
   9154 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9155 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9156 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9157 			if (status & STATUS_FD)
   9158 				sc->sc_tctl |=
   9159 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9160 			else
   9161 				sc->sc_tctl |=
   9162 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9163 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9164 				sc->sc_fcrtl |= FCRTL_XONE;
   9165 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9166 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9167 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9168 				      sc->sc_fcrtl);
   9169 			sc->sc_tbi_linkup = 1;
   9170 		} else {
   9171 			if (i == WM_LINKUP_TIMEOUT)
   9172 				wm_check_for_link(sc);
   9173 			/* Link is down. */
   9174 			DPRINTF(WM_DEBUG_LINK,
   9175 			    ("%s: LINK: set media -> link down\n",
   9176 			    device_xname(sc->sc_dev)));
   9177 			sc->sc_tbi_linkup = 0;
   9178 		}
   9179 	} else {
   9180 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9181 		    device_xname(sc->sc_dev)));
   9182 		sc->sc_tbi_linkup = 0;
   9183 	}
   9184 
   9185 	wm_tbi_serdes_set_linkled(sc);
   9186 
   9187 	return 0;
   9188 }
   9189 
   9190 /*
   9191  * wm_tbi_mediastatus:	[ifmedia interface function]
   9192  *
   9193  *	Get the current interface media status on a 1000BASE-X device.
   9194  */
   9195 static void
   9196 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9197 {
   9198 	struct wm_softc *sc = ifp->if_softc;
   9199 	uint32_t ctrl, status;
   9200 
   9201 	ifmr->ifm_status = IFM_AVALID;
   9202 	ifmr->ifm_active = IFM_ETHER;
   9203 
   9204 	status = CSR_READ(sc, WMREG_STATUS);
   9205 	if ((status & STATUS_LU) == 0) {
   9206 		ifmr->ifm_active |= IFM_NONE;
   9207 		return;
   9208 	}
   9209 
   9210 	ifmr->ifm_status |= IFM_ACTIVE;
   9211 	/* Only 82545 is LX */
   9212 	if (sc->sc_type == WM_T_82545)
   9213 		ifmr->ifm_active |= IFM_1000_LX;
   9214 	else
   9215 		ifmr->ifm_active |= IFM_1000_SX;
   9216 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9217 		ifmr->ifm_active |= IFM_FDX;
   9218 	else
   9219 		ifmr->ifm_active |= IFM_HDX;
   9220 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9221 	if (ctrl & CTRL_RFCE)
   9222 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9223 	if (ctrl & CTRL_TFCE)
   9224 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9225 }
   9226 
    9227 /*
          * wm_check_for_link:
          *
          *	Check for link on TBI devices; force link up or fall back to
          *	autonegotiation as required.  XXX TBI only.
          */
   9228 static int
   9229 wm_check_for_link(struct wm_softc *sc)
   9230 {
   9231 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9232 	uint32_t rxcw;
   9233 	uint32_t ctrl;
   9234 	uint32_t status;
   9235 	uint32_t sig;
   9236 
   9237 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9238 		/* XXX need some work for >= 82571 */
   9239 		if (sc->sc_type >= WM_T_82571) {
   9240 			sc->sc_tbi_linkup = 1;
   9241 			return 0;
   9242 		}
   9243 	}
   9244 
   9245 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9246 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9247 	status = CSR_READ(sc, WMREG_STATUS);
   9248 
   9249 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9250 
    9251 	DPRINTF(WM_DEBUG_LINK,
    9252 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
    9253 	    device_xname(sc->sc_dev), __func__,
    9254 	    ((ctrl & CTRL_SWDPIN(1)) == sig),
    9255 	    ((status & STATUS_LU) != 0),
    9256 	    ((rxcw & RXCW_C) != 0)));
   9257 
   9258 	/*
   9259 	 * SWDPIN   LU RXCW
   9260 	 *      0    0    0
   9261 	 *      0    0    1	(should not happen)
   9262 	 *      0    1    0	(should not happen)
   9263 	 *      0    1    1	(should not happen)
   9264 	 *      1    0    0	Disable autonego and force linkup
   9265 	 *      1    0    1	got /C/ but not linkup yet
   9266 	 *      1    1    0	(linkup)
   9267 	 *      1    1    1	If IFM_AUTO, back to autonego
   9268 	 *
   9269 	 */
   9270 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9271 	    && ((status & STATUS_LU) == 0)
   9272 	    && ((rxcw & RXCW_C) == 0)) {
   9273 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9274 			__func__));
   9275 		sc->sc_tbi_linkup = 0;
   9276 		/* Disable auto-negotiation in the TXCW register */
   9277 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9278 
   9279 		/*
   9280 		 * Force link-up and also force full-duplex.
   9281 		 *
    9282 		 * NOTE: the hardware updates TFCE and RFCE in CTRL automatically,
   9283 		 * so we should update sc->sc_ctrl
   9284 		 */
   9285 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9286 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9287 	} else if (((status & STATUS_LU) != 0)
   9288 	    && ((rxcw & RXCW_C) != 0)
   9289 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9290 		sc->sc_tbi_linkup = 1;
   9291 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9292 			__func__));
   9293 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9294 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9295 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9296 	    && ((rxcw & RXCW_C) != 0)) {
   9297 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9298 	} else {
   9299 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9300 			status));
   9301 	}
   9302 
   9303 	return 0;
   9304 }
   9305 
   9306 /*
   9307  * wm_tbi_tick:
   9308  *
   9309  *	Check the link on TBI devices.
   9310  *	This function acts as mii_tick().
   9311  */
   9312 static void
   9313 wm_tbi_tick(struct wm_softc *sc)
   9314 {
   9315 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
   9316 	struct mii_data *mii = &sc->sc_mii;
   9317 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9318 	uint32_t status;
   9319 
   9320 	KASSERT(WM_TX_LOCKED(txq));
   9321 
   9322 	status = CSR_READ(sc, WMREG_STATUS);
   9323 
   9324 	/* XXX is this needed? */
   9325 	(void)CSR_READ(sc, WMREG_RXCW);
   9326 	(void)CSR_READ(sc, WMREG_CTRL);
   9327 
   9328 	/* set link status */
   9329 	if ((status & STATUS_LU) == 0) {
   9330 		DPRINTF(WM_DEBUG_LINK,
   9331 		    ("%s: LINK: checklink -> down\n",
   9332 			device_xname(sc->sc_dev)));
   9333 		sc->sc_tbi_linkup = 0;
   9334 	} else if (sc->sc_tbi_linkup == 0) {
   9335 		DPRINTF(WM_DEBUG_LINK,
   9336 		    ("%s: LINK: checklink -> up %s\n",
   9337 			device_xname(sc->sc_dev),
   9338 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9339 		sc->sc_tbi_linkup = 1;
   9340 		sc->sc_tbi_serdes_ticks = 0;
   9341 	}
   9342 
   9343 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9344 		goto setled;
   9345 
   9346 	if ((status & STATUS_LU) == 0) {
   9347 		sc->sc_tbi_linkup = 0;
   9348 		/* If the timer expired, retry autonegotiation */
   9349 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9350 		    && (++sc->sc_tbi_serdes_ticks
   9351 			>= sc->sc_tbi_serdes_anegticks)) {
   9352 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9353 			sc->sc_tbi_serdes_ticks = 0;
   9354 			/*
   9355 			 * Reset the link, and let autonegotiation do
   9356 			 * its thing
   9357 			 */
   9358 			sc->sc_ctrl |= CTRL_LRST;
   9359 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9360 			CSR_WRITE_FLUSH(sc);
   9361 			delay(1000);
   9362 			sc->sc_ctrl &= ~CTRL_LRST;
   9363 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9364 			CSR_WRITE_FLUSH(sc);
   9365 			delay(1000);
   9366 			CSR_WRITE(sc, WMREG_TXCW,
   9367 			    sc->sc_txcw & ~TXCW_ANE);
   9368 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9369 		}
   9370 	}
   9371 
   9372 setled:
   9373 	wm_tbi_serdes_set_linkled(sc);
   9374 }
   9375 
   9376 /* SERDES related */
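         /*
          * wm_serdes_power_up_link_82575:
          *
          *	Power up the SERDES/SGMII link on 82575 and newer parts by
          *	enabling the PCS and clearing CTRL_EXT SWDPIN 3 (SFP cage
          *	power-down).
          */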
   9377 static void
   9378 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9379 {
   9380 	uint32_t reg;
   9381 
   9382 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9383 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9384 		return;
   9385 
   9386 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9387 	reg |= PCS_CFG_PCS_EN;
   9388 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9389 
   9390 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9391 	reg &= ~CTRL_EXT_SWDPIN(3);
   9392 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9393 	CSR_WRITE_FLUSH(sc);
   9394 }
   9395 
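         /*
          * wm_serdes_mediachange:	[ifmedia interface function]
          *
          *	Set hardware to newly-selected media on a SERDES device.
          */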
   9396 static int
   9397 wm_serdes_mediachange(struct ifnet *ifp)
   9398 {
   9399 	struct wm_softc *sc = ifp->if_softc;
   9400 	bool pcs_autoneg = true; /* XXX */
   9401 	uint32_t ctrl_ext, pcs_lctl, reg;
   9402 
   9403 	/* XXX Currently, this function is not called on 8257[12] */
   9404 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9405 	    || (sc->sc_type >= WM_T_82575))
   9406 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9407 
   9408 	wm_serdes_power_up_link_82575(sc);
   9409 
   9410 	sc->sc_ctrl |= CTRL_SLU;
   9411 
   9412 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9413 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9414 
   9415 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9416 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9417 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9418 	case CTRL_EXT_LINK_MODE_SGMII:
   9419 		pcs_autoneg = true;
   9420 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9421 		break;
   9422 	case CTRL_EXT_LINK_MODE_1000KX:
   9423 		pcs_autoneg = false;
   9424 		/* FALLTHROUGH */
   9425 	default:
   9426 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   9427 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9428 				pcs_autoneg = false;
   9429 		}
   9430 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9431 		    | CTRL_FRCFDX;
   9432 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9433 	}
   9434 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9435 
   9436 	if (pcs_autoneg) {
   9437 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9438 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9439 
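         		/* Advertise both symmetric and asymmetric PAUSE */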
   9440 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9441 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9442 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9443 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9444 	} else
   9445 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9446 
   9447 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
   9448 
    9449 
   9451 }
   9452 
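         /*
          * wm_serdes_mediastatus:	[ifmedia interface function]
          *
          *	Get the current interface media status on a SERDES device.
          */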
   9453 static void
   9454 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9455 {
   9456 	struct wm_softc *sc = ifp->if_softc;
   9457 	struct mii_data *mii = &sc->sc_mii;
   9458 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9459 	uint32_t pcs_adv, pcs_lpab, reg;
   9460 
   9461 	ifmr->ifm_status = IFM_AVALID;
   9462 	ifmr->ifm_active = IFM_ETHER;
   9463 
   9464 	/* Check PCS */
   9465 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9466 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9467 		ifmr->ifm_active |= IFM_NONE;
   9468 		sc->sc_tbi_linkup = 0;
   9469 		goto setled;
   9470 	}
   9471 
   9472 	sc->sc_tbi_linkup = 1;
   9473 	ifmr->ifm_status |= IFM_ACTIVE;
   9474 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9475 	if ((reg & PCS_LSTS_FDX) != 0)
   9476 		ifmr->ifm_active |= IFM_FDX;
   9477 	else
   9478 		ifmr->ifm_active |= IFM_HDX;
   9479 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9480 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9481 		/* Check flow */
   9482 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9483 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9484 			printf("XXX LINKOK but not ACOMP\n");
   9485 			goto setled;
   9486 		}
   9487 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9488 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    9489 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   9490 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9491 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9492 			mii->mii_media_active |= IFM_FLOW
   9493 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9494 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9495 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9496 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9497 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9498 			mii->mii_media_active |= IFM_FLOW
   9499 			    | IFM_ETH_TXPAUSE;
   9500 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9501 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9502 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9503 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9504 			mii->mii_media_active |= IFM_FLOW
   9505 			    | IFM_ETH_RXPAUSE;
   9506 		} else {
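         			/* No recognized PAUSE combination: no flow control */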
   9507 		}
   9508 	}
   9509 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9510 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9511 setled:
   9512 	wm_tbi_serdes_set_linkled(sc);
   9513 }
   9514 
   9515 /*
   9516  * wm_serdes_tick:
   9517  *
   9518  *	Check the link on serdes devices.
   9519  */
   9520 static void
   9521 wm_serdes_tick(struct wm_softc *sc)
   9522 {
   9523 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
   9524 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9525 	struct mii_data *mii = &sc->sc_mii;
   9526 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9527 	uint32_t reg;
   9528 
   9529 	KASSERT(WM_TX_LOCKED(txq));
   9530 
   9531 	mii->mii_media_status = IFM_AVALID;
   9532 	mii->mii_media_active = IFM_ETHER;
   9533 
   9534 	/* Check PCS */
   9535 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9536 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9537 		mii->mii_media_status |= IFM_ACTIVE;
   9538 		sc->sc_tbi_linkup = 1;
   9539 		sc->sc_tbi_serdes_ticks = 0;
   9540 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9541 		if ((reg & PCS_LSTS_FDX) != 0)
   9542 			mii->mii_media_active |= IFM_FDX;
   9543 		else
   9544 			mii->mii_media_active |= IFM_HDX;
   9545 	} else {
    9546 		mii->mii_media_active |= IFM_NONE;
    9547 		sc->sc_tbi_linkup = 0;
    9548 		/* If the timer expired, retry autonegotiation */
   9549 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9550 		    && (++sc->sc_tbi_serdes_ticks
   9551 			>= sc->sc_tbi_serdes_anegticks)) {
   9552 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9553 			sc->sc_tbi_serdes_ticks = 0;
   9554 			/* XXX */
   9555 			wm_serdes_mediachange(ifp);
   9556 		}
   9557 	}
   9558 
   9559 	wm_tbi_serdes_set_linkled(sc);
   9560 }
   9561 
   9562 /* SFP related */
   9563 
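         /*
          * wm_sfp_read_data_byte:
          *
          *	Read one byte from the SFP module's ID EEPROM over the chip's
          *	I2C interface.
          */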
   9564 static int
   9565 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9566 {
   9567 	uint32_t i2ccmd;
   9568 	int i;
   9569 
   9570 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9571 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9572 
   9573 	/* Poll the ready bit */
   9574 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9575 		delay(50);
   9576 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9577 		if (i2ccmd & I2CCMD_READY)
   9578 			break;
   9579 	}
   9580 	if ((i2ccmd & I2CCMD_READY) == 0)
   9581 		return -1;
   9582 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9583 		return -1;
   9584 
   9585 	*data = i2ccmd & 0x00ff;
   9586 
   9587 	return 0;
   9588 }
   9589 
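         /*
          * wm_sfp_get_media_type:
          *
          *	Identify the attached SFP module and derive the media type
          *	from its Ethernet compliance flags.
          */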
   9590 static uint32_t
   9591 wm_sfp_get_media_type(struct wm_softc *sc)
   9592 {
   9593 	uint32_t ctrl_ext;
   9594 	uint8_t val = 0;
   9595 	int timeout = 3;
   9596 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9597 	int rv = -1;
   9598 
   9599 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9600 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9601 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9602 	CSR_WRITE_FLUSH(sc);
   9603 
   9604 	/* Read SFP module data */
   9605 	while (timeout) {
   9606 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9607 		if (rv == 0)
   9608 			break;
   9609 		delay(100*1000); /* XXX too big */
   9610 		timeout--;
   9611 	}
   9612 	if (rv != 0)
   9613 		goto out;
   9614 	switch (val) {
   9615 	case SFF_SFP_ID_SFF:
   9616 		aprint_normal_dev(sc->sc_dev,
   9617 		    "Module/Connector soldered to board\n");
   9618 		break;
   9619 	case SFF_SFP_ID_SFP:
   9620 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9621 		break;
   9622 	case SFF_SFP_ID_UNKNOWN:
   9623 		goto out;
   9624 	default:
   9625 		break;
   9626 	}
   9627 
   9628 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9629 	if (rv != 0) {
   9630 		goto out;
   9631 	}
   9632 
   9633 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9634 		mediatype = WM_MEDIATYPE_SERDES;
   9635 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9636 		sc->sc_flags |= WM_F_SGMII;
   9637 		mediatype = WM_MEDIATYPE_COPPER;
   9638 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9639 		sc->sc_flags |= WM_F_SGMII;
   9640 		mediatype = WM_MEDIATYPE_SERDES;
   9641 	}
   9642 
   9643 out:
   9644 	/* Restore I2C interface setting */
   9645 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9646 
   9647 	return mediatype;
   9648 }
         
    9649 /*
   9650  * NVM related.
   9651  * Microwire, SPI (w/wo EERD) and Flash.
   9652  */
   9653 
   9654 /* Both spi and uwire */
   9655 
   9656 /*
   9657  * wm_eeprom_sendbits:
   9658  *
   9659  *	Send a series of bits to the EEPROM.
   9660  */
   9661 static void
   9662 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9663 {
   9664 	uint32_t reg;
   9665 	int x;
   9666 
   9667 	reg = CSR_READ(sc, WMREG_EECD);
   9668 
   9669 	for (x = nbits; x > 0; x--) {
   9670 		if (bits & (1U << (x - 1)))
   9671 			reg |= EECD_DI;
   9672 		else
   9673 			reg &= ~EECD_DI;
   9674 		CSR_WRITE(sc, WMREG_EECD, reg);
   9675 		CSR_WRITE_FLUSH(sc);
   9676 		delay(2);
   9677 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9678 		CSR_WRITE_FLUSH(sc);
   9679 		delay(2);
   9680 		CSR_WRITE(sc, WMREG_EECD, reg);
   9681 		CSR_WRITE_FLUSH(sc);
   9682 		delay(2);
   9683 	}
   9684 }
   9685 
   9686 /*
   9687  * wm_eeprom_recvbits:
   9688  *
   9689  *	Receive a series of bits from the EEPROM.
   9690  */
   9691 static void
   9692 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9693 {
   9694 	uint32_t reg, val;
   9695 	int x;
   9696 
   9697 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9698 
   9699 	val = 0;
   9700 	for (x = nbits; x > 0; x--) {
   9701 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9702 		CSR_WRITE_FLUSH(sc);
   9703 		delay(2);
   9704 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9705 			val |= (1U << (x - 1));
   9706 		CSR_WRITE(sc, WMREG_EECD, reg);
   9707 		CSR_WRITE_FLUSH(sc);
   9708 		delay(2);
   9709 	}
   9710 	*valp = val;
   9711 }
   9712 
   9713 /* Microwire */
   9714 
   9715 /*
   9716  * wm_nvm_read_uwire:
   9717  *
   9718  *	Read a word from the EEPROM using the MicroWire protocol.
   9719  */
   9720 static int
   9721 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9722 {
   9723 	uint32_t reg, val;
   9724 	int i;
   9725 
   9726 	for (i = 0; i < wordcnt; i++) {
   9727 		/* Clear SK and DI. */
   9728 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9729 		CSR_WRITE(sc, WMREG_EECD, reg);
   9730 
   9731 		/*
   9732 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9733 		 * and Xen.
   9734 		 *
   9735 		 * We use this workaround only for 82540 because qemu's
   9736 		 * e1000 act as 82540.
   9737 		 */
   9738 		if (sc->sc_type == WM_T_82540) {
   9739 			reg |= EECD_SK;
   9740 			CSR_WRITE(sc, WMREG_EECD, reg);
   9741 			reg &= ~EECD_SK;
   9742 			CSR_WRITE(sc, WMREG_EECD, reg);
   9743 			CSR_WRITE_FLUSH(sc);
   9744 			delay(2);
   9745 		}
   9746 		/* XXX: end of workaround */
   9747 
   9748 		/* Set CHIP SELECT. */
   9749 		reg |= EECD_CS;
   9750 		CSR_WRITE(sc, WMREG_EECD, reg);
   9751 		CSR_WRITE_FLUSH(sc);
   9752 		delay(2);
   9753 
   9754 		/* Shift in the READ command. */
   9755 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9756 
   9757 		/* Shift in address. */
   9758 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9759 
   9760 		/* Shift out the data. */
   9761 		wm_eeprom_recvbits(sc, &val, 16);
   9762 		data[i] = val & 0xffff;
   9763 
   9764 		/* Clear CHIP SELECT. */
   9765 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9766 		CSR_WRITE(sc, WMREG_EECD, reg);
   9767 		CSR_WRITE_FLUSH(sc);
   9768 		delay(2);
   9769 	}
   9770 
   9771 	return 0;
   9772 }
   9773 
   9774 /* SPI */
   9775 
   9776 /*
   9777  * Set SPI and FLASH related information from the EECD register.
   9778  * For 82541 and 82547, the word size is taken from EEPROM.
   9779  */
   9780 static int
   9781 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9782 {
   9783 	int size;
   9784 	uint32_t reg;
   9785 	uint16_t data;
   9786 
   9787 	reg = CSR_READ(sc, WMREG_EECD);
   9788 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9789 
   9790 	/* Read the size of NVM from EECD by default */
   9791 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9792 	switch (sc->sc_type) {
   9793 	case WM_T_82541:
   9794 	case WM_T_82541_2:
   9795 	case WM_T_82547:
   9796 	case WM_T_82547_2:
   9797 		/* Set dummy value to access EEPROM */
   9798 		sc->sc_nvm_wordsize = 64;
   9799 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9800 		reg = data;
   9801 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9802 		if (size == 0)
   9803 			size = 6; /* 64 word size */
   9804 		else
   9805 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9806 		break;
   9807 	case WM_T_80003:
   9808 	case WM_T_82571:
   9809 	case WM_T_82572:
   9810 	case WM_T_82573: /* SPI case */
   9811 	case WM_T_82574: /* SPI case */
   9812 	case WM_T_82583: /* SPI case */
   9813 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9814 		if (size > 14)
   9815 			size = 14;
   9816 		break;
   9817 	case WM_T_82575:
   9818 	case WM_T_82576:
   9819 	case WM_T_82580:
   9820 	case WM_T_I350:
   9821 	case WM_T_I354:
   9822 	case WM_T_I210:
   9823 	case WM_T_I211:
   9824 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9825 		if (size > 15)
   9826 			size = 15;
   9827 		break;
   9828 	default:
   9829 		aprint_error_dev(sc->sc_dev,
   9830 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9831 		return -1;
   9833 	}
   9834 
   9835 	sc->sc_nvm_wordsize = 1 << size;
   9836 
   9837 	return 0;
   9838 }
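
/*
 * Worked example (illustrative): if the EECD size field reads 2 on an
 * 82571, the code above computes size = 2 + NVM_WORD_SIZE_BASE_SHIFT.
 * Assuming the customary base shift of 6 (the "64 word size" case
 * above suggests 1 << 6), that gives:
 */
#if 0
	size = 2 + NVM_WORD_SIZE_BASE_SHIFT;	/* 2 + 6 = 8 */
	sc->sc_nvm_wordsize = 1 << size;	/* 256 words */
#endif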
   9839 
   9840 /*
   9841  * wm_nvm_ready_spi:
   9842  *
   9843  *	Wait for a SPI EEPROM to be ready for commands.
   9844  */
   9845 static int
   9846 wm_nvm_ready_spi(struct wm_softc *sc)
   9847 {
   9848 	uint32_t val;
   9849 	int usec;
   9850 
   9851 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9852 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9853 		wm_eeprom_recvbits(sc, &val, 8);
   9854 		if ((val & SPI_SR_RDY) == 0)
   9855 			break;
   9856 	}
   9857 	if (usec >= SPI_MAX_RETRIES) {
   9858 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   9859 		return 1;
   9860 	}
   9861 	return 0;
   9862 }
   9863 
   9864 /*
   9865  * wm_nvm_read_spi:
   9866  *
    9867  *	Read a word from the EEPROM using the SPI protocol.
   9868  */
   9869 static int
   9870 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9871 {
   9872 	uint32_t reg, val;
   9873 	int i;
   9874 	uint8_t opc;
   9875 
   9876 	/* Clear SK and CS. */
   9877 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9878 	CSR_WRITE(sc, WMREG_EECD, reg);
   9879 	CSR_WRITE_FLUSH(sc);
   9880 	delay(2);
   9881 
   9882 	if (wm_nvm_ready_spi(sc))
   9883 		return 1;
   9884 
   9885 	/* Toggle CS to flush commands. */
   9886 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9887 	CSR_WRITE_FLUSH(sc);
   9888 	delay(2);
   9889 	CSR_WRITE(sc, WMREG_EECD, reg);
   9890 	CSR_WRITE_FLUSH(sc);
   9891 	delay(2);
   9892 
   9893 	opc = SPI_OPC_READ;
   9894 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9895 		opc |= SPI_OPC_A8;
   9896 
   9897 	wm_eeprom_sendbits(sc, opc, 8);
   9898 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9899 
   9900 	for (i = 0; i < wordcnt; i++) {
   9901 		wm_eeprom_recvbits(sc, &val, 16);
   9902 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9903 	}
   9904 
   9905 	/* Raise CS and clear SK. */
   9906 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9907 	CSR_WRITE(sc, WMREG_EECD, reg);
   9908 	CSR_WRITE_FLUSH(sc);
   9909 	delay(2);
   9910 
   9911 	return 0;
   9912 }
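
/*
 * Byte-order note (illustrative sketch): the SPI part shifts each word
 * out MSB-first, so wm_eeprom_recvbits() returns the two bytes swapped
 * with respect to the NVM's little-endian word layout; the swap above
 * undoes that.  For example:
 */
#if 0
	val = 0x3412;	/* the EEPROM sent 0x34, then 0x12 */
	data[0] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);	/* 0x1234 */
#endif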
   9913 
    9914 /* Reading via the EERD register */
   9915 
   9916 static int
   9917 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9918 {
   9919 	uint32_t attempts = 100000;
   9920 	uint32_t i, reg = 0;
   9921 	int32_t done = -1;
   9922 
   9923 	for (i = 0; i < attempts; i++) {
   9924 		reg = CSR_READ(sc, rw);
   9925 
   9926 		if (reg & EERD_DONE) {
   9927 			done = 0;
   9928 			break;
   9929 		}
   9930 		delay(5);
   9931 	}
   9932 
   9933 	return done;
   9934 }
   9935 
   9936 static int
   9937 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9938     uint16_t *data)
   9939 {
   9940 	int i, eerd = 0;
   9941 	int error = 0;
   9942 
   9943 	for (i = 0; i < wordcnt; i++) {
   9944 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9945 
   9946 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9947 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9948 		if (error != 0)
   9949 			break;
   9950 
   9951 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9952 	}
   9953 
   9954 	return error;
   9955 }
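
/*
 * Sketch (illustrative): a single-word EERD read programs the word
 * address and the START bit, polls for DONE, then pulls the data out
 * of the same register, e.g. for word 5:
 */
#if 0
	CSR_WRITE(sc, WMREG_EERD, (5 << EERD_ADDR_SHIFT) | EERD_START);
	while ((CSR_READ(sc, WMREG_EERD) & EERD_DONE) == 0)
		delay(5);
	word = CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT;
#endif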
   9956 
   9957 /* Flash */
   9958 
   9959 static int
   9960 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   9961 {
   9962 	uint32_t eecd;
   9963 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   9964 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   9965 	uint8_t sig_byte = 0;
   9966 
   9967 	switch (sc->sc_type) {
   9968 	case WM_T_ICH8:
   9969 	case WM_T_ICH9:
   9970 		eecd = CSR_READ(sc, WMREG_EECD);
   9971 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   9972 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   9973 			return 0;
   9974 		}
   9975 		/* FALLTHROUGH */
   9976 	default:
   9977 		/* Default to 0 */
   9978 		*bank = 0;
   9979 
   9980 		/* Check bank 0 */
   9981 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   9982 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9983 			*bank = 0;
   9984 			return 0;
   9985 		}
   9986 
   9987 		/* Check bank 1 */
   9988 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   9989 		    &sig_byte);
   9990 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9991 			*bank = 1;
   9992 			return 0;
   9993 		}
   9994 	}
   9995 
   9996 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   9997 		device_xname(sc->sc_dev)));
   9998 	return -1;
   9999 }
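
/*
 * Offset arithmetic used above (illustrative): the signature is the
 * high byte of word ICH_NVM_SIG_WORD of each bank, hence "* 2 + 1",
 * and bank 1 starts sc_ich8_flash_bank_size words (twice that in
 * bytes) after bank 0:
 */
#if 0
	act_offset = ICH_NVM_SIG_WORD * 2 + 1;		/* bank 0 signature */
	act_offset += sc->sc_ich8_flash_bank_size * 2;	/* bank 1 signature */
#endif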
   10000 
   10001 /******************************************************************************
   10002  * This function does initial flash setup so that a new read/write/erase cycle
   10003  * can be started.
   10004  *
   10005  * sc - The pointer to the hw structure
   10006  ****************************************************************************/
   10007 static int32_t
   10008 wm_ich8_cycle_init(struct wm_softc *sc)
   10009 {
   10010 	uint16_t hsfsts;
   10011 	int32_t error = 1;
   10012 	int32_t i     = 0;
   10013 
   10014 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10015 
    10016 	/* Check the Flash Descriptor Valid bit in HW status */
   10017 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10018 		return error;
   10019 	}
   10020 
    10021 	/* Clear FCERR and DAEL in HW status by writing a 1 to each */
   10023 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10024 
   10025 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10026 
    10027 	/*
    10028 	 * Either we should have a hardware SPI cycle-in-progress bit to
    10029 	 * check against in order to start a new cycle, or the FDONE bit
    10030 	 * should be changed in the hardware so that it reads 1 after a
    10031 	 * hardware reset, which could then be used to tell whether a
    10032 	 * cycle is in progress or has completed.  We should also have a
    10033 	 * software semaphore guarding FDONE or the cycle-in-progress bit
    10034 	 * so that accesses by two threads are serialized, or some other
    10035 	 * way to keep two threads from starting a cycle at the same time.
    10036 	 */
   10037 
   10038 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10039 		/*
   10040 		 * There is no cycle running at present, so we can start a
   10041 		 * cycle
   10042 		 */
   10043 
   10044 		/* Begin by setting Flash Cycle Done. */
   10045 		hsfsts |= HSFSTS_DONE;
   10046 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10047 		error = 0;
   10048 	} else {
   10049 		/*
    10050 		 * Otherwise, poll for some time so the current cycle has a
    10051 		 * chance to end before giving up.
   10052 		 */
   10053 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10054 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10055 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10056 				error = 0;
   10057 				break;
   10058 			}
   10059 			delay(1);
   10060 		}
   10061 		if (error == 0) {
   10062 			/*
    10063 			 * The previous cycle ended within the timeout; now
    10064 			 * set the Flash Cycle Done bit.
   10065 			 */
   10066 			hsfsts |= HSFSTS_DONE;
   10067 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10068 		}
   10069 	}
   10070 	return error;
   10071 }
   10072 
   10073 /******************************************************************************
   10074  * This function starts a flash cycle and waits for its completion
   10075  *
   10076  * sc - The pointer to the hw structure
   10077  ****************************************************************************/
   10078 static int32_t
   10079 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10080 {
   10081 	uint16_t hsflctl;
   10082 	uint16_t hsfsts;
   10083 	int32_t error = 1;
   10084 	uint32_t i = 0;
   10085 
   10086 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10087 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10088 	hsflctl |= HSFCTL_GO;
   10089 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10090 
   10091 	/* Wait till FDONE bit is set to 1 */
   10092 	do {
   10093 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10094 		if (hsfsts & HSFSTS_DONE)
   10095 			break;
   10096 		delay(1);
   10097 		i++;
   10098 	} while (i < timeout);
    10099 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
   10100 		error = 0;
   10101 
   10102 	return error;
   10103 }
   10104 
   10105 /******************************************************************************
   10106  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   10107  *
   10108  * sc - The pointer to the hw structure
   10109  * index - The index of the byte or word to read.
   10110  * size - Size of data to read, 1=byte 2=word
   10111  * data - Pointer to the word to store the value read.
   10112  *****************************************************************************/
   10113 static int32_t
   10114 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10115     uint32_t size, uint16_t *data)
   10116 {
   10117 	uint16_t hsfsts;
   10118 	uint16_t hsflctl;
   10119 	uint32_t flash_linear_address;
   10120 	uint32_t flash_data = 0;
   10121 	int32_t error = 1;
   10122 	int32_t count = 0;
   10123 
    10124 	if (size < 1 || size > 2 || data == NULL ||
   10125 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10126 		return error;
   10127 
   10128 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10129 	    sc->sc_ich8_flash_base;
   10130 
   10131 	do {
   10132 		delay(1);
   10133 		/* Steps */
   10134 		error = wm_ich8_cycle_init(sc);
   10135 		if (error)
   10136 			break;
   10137 
   10138 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10139 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10140 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10141 		    & HSFCTL_BCOUNT_MASK;
   10142 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10143 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10144 
   10145 		/*
   10146 		 * Write the last 24 bits of index into Flash Linear address
   10147 		 * field in Flash Address
   10148 		 */
   10149 		/* TODO: TBD maybe check the index against the size of flash */
   10150 
   10151 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10152 
   10153 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10154 
   10155 		/*
    10156 		 * Check if FCERR is set to 1; if so, clear it and retry the
    10157 		 * whole sequence a few more times.  Otherwise read in (shift
    10158 		 * in) Flash Data0; the data is returned least significant
    10159 		 * byte first.
   10160 		 */
   10161 		if (error == 0) {
   10162 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10163 			if (size == 1)
   10164 				*data = (uint8_t)(flash_data & 0x000000FF);
   10165 			else if (size == 2)
   10166 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10167 			break;
   10168 		} else {
   10169 			/*
   10170 			 * If we've gotten here, then things are probably
   10171 			 * completely hosed, but if the error condition is
    10172 			 * detected, it won't hurt to retry, up to
    10173 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10174 			 */
   10175 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10176 			if (hsfsts & HSFSTS_ERR) {
   10177 				/* Repeat for some time before giving up. */
   10178 				continue;
   10179 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10180 				break;
   10181 		}
   10182 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10183 
   10184 	return error;
   10185 }
   10186 
   10187 /******************************************************************************
   10188  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10189  *
   10190  * sc - pointer to wm_hw structure
   10191  * index - The index of the byte to read.
   10192  * data - Pointer to a byte to store the value read.
   10193  *****************************************************************************/
   10194 static int32_t
   10195 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10196 {
   10197 	int32_t status;
   10198 	uint16_t word = 0;
   10199 
   10200 	status = wm_read_ich8_data(sc, index, 1, &word);
   10201 	if (status == 0)
   10202 		*data = (uint8_t)word;
   10203 	else
   10204 		*data = 0;
   10205 
   10206 	return status;
   10207 }
   10208 
   10209 /******************************************************************************
   10210  * Reads a word from the NVM using the ICH8 flash access registers.
   10211  *
   10212  * sc - pointer to wm_hw structure
   10213  * index - The starting byte index of the word to read.
   10214  * data - Pointer to a word to store the value read.
   10215  *****************************************************************************/
   10216 static int32_t
   10217 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10218 {
   10219 	int32_t status;
   10220 
   10221 	status = wm_read_ich8_data(sc, index, 2, data);
   10222 	return status;
   10223 }
   10224 
   10225 /******************************************************************************
   10226  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10227  * register.
   10228  *
   10229  * sc - Struct containing variables accessed by shared code
   10230  * offset - offset of word in the EEPROM to read
   10231  * data - word read from the EEPROM
   10232  * words - number of words to read
   10233  *****************************************************************************/
   10234 static int
   10235 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10236 {
   10237 	int32_t  error = 0;
   10238 	uint32_t flash_bank = 0;
   10239 	uint32_t act_offset = 0;
   10240 	uint32_t bank_offset = 0;
   10241 	uint16_t word = 0;
   10242 	uint16_t i = 0;
   10243 
   10244 	/*
   10245 	 * We need to know which is the valid flash bank.  In the event
   10246 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10247 	 * managing flash_bank.  So it cannot be trusted and needs
   10248 	 * to be updated with each read.
   10249 	 */
   10250 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10251 	if (error) {
   10252 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10253 			device_xname(sc->sc_dev)));
   10254 		flash_bank = 0;
   10255 	}
   10256 
   10257 	/*
    10258 	 * If we're on bank 1, adjust the byte offset: the bank size is
    10259 	 * counted in words, hence the "* 2"
   10260 	 */
   10261 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
   10262 
   10263 	error = wm_get_swfwhw_semaphore(sc);
   10264 	if (error) {
   10265 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10266 		    __func__);
   10267 		return error;
   10268 	}
   10269 
   10270 	for (i = 0; i < words; i++) {
   10271 		/* The NVM part needs a byte offset, hence * 2 */
   10272 		act_offset = bank_offset + ((offset + i) * 2);
   10273 		error = wm_read_ich8_word(sc, act_offset, &word);
   10274 		if (error) {
   10275 			aprint_error_dev(sc->sc_dev,
   10276 			    "%s: failed to read NVM\n", __func__);
   10277 			break;
   10278 		}
   10279 		data[i] = word;
   10280 	}
   10281 
   10282 	wm_put_swfwhw_semaphore(sc);
   10283 	return error;
   10284 }
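
/*
 * Worked example (illustrative): with a 4096-word flash bank and
 * bank 1 valid, NVM word 3 is fetched from byte offset
 * 1 * (4096 * 2) + 3 * 2 = 8198 within the flash region:
 */
#if 0
	bank_offset = 1 * (4096 * 2);		/* 8192 */
	act_offset = bank_offset + (3 * 2);	/* 8198 */
#endif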
   10285 
   10286 /* iNVM */
   10287 
   10288 static int
   10289 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10290 {
    10291 	int32_t  rv = -1;	/* -1 until the requested word is found */
   10292 	uint32_t invm_dword;
   10293 	uint16_t i;
   10294 	uint8_t record_type, word_address;
   10295 
   10296 	for (i = 0; i < INVM_SIZE; i++) {
   10297 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10298 		/* Get record type */
   10299 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10300 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10301 			break;
   10302 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10303 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10304 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10305 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10306 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10307 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10308 			if (word_address == address) {
   10309 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10310 				rv = 0;
   10311 				break;
   10312 			}
   10313 		}
   10314 	}
   10315 
   10316 	return rv;
   10317 }
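
/*
 * Record layout sketch (illustrative): each iNVM dword carries a
 * record type; a word-autoload record also carries a word address and
 * 16 bits of data, extracted with the macros used above:
 */
#if 0
	invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(0));
	if (INVM_DWORD_TO_RECORD_TYPE(invm_dword)
	    == INVM_WORD_AUTOLOAD_STRUCTURE) {
		word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
		*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
	}
#endif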
   10318 
   10319 static int
   10320 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10321 {
   10322 	int rv = 0;
   10323 	int i;
   10324 
   10325 	for (i = 0; i < words; i++) {
   10326 		switch (offset + i) {
   10327 		case NVM_OFF_MACADDR:
   10328 		case NVM_OFF_MACADDR1:
   10329 		case NVM_OFF_MACADDR2:
   10330 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10331 			if (rv != 0) {
   10332 				data[i] = 0xffff;
   10333 				rv = -1;
   10334 			}
   10335 			break;
   10336 		case NVM_OFF_CFG2:
   10337 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10338 			if (rv != 0) {
   10339 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10340 				rv = 0;
   10341 			}
   10342 			break;
   10343 		case NVM_OFF_CFG4:
   10344 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10345 			if (rv != 0) {
   10346 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10347 				rv = 0;
   10348 			}
   10349 			break;
   10350 		case NVM_OFF_LED_1_CFG:
   10351 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10352 			if (rv != 0) {
   10353 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10354 				rv = 0;
   10355 			}
   10356 			break;
   10357 		case NVM_OFF_LED_0_2_CFG:
   10358 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10359 			if (rv != 0) {
   10360 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10361 				rv = 0;
   10362 			}
   10363 			break;
   10364 		case NVM_OFF_ID_LED_SETTINGS:
   10365 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10366 			if (rv != 0) {
   10367 				*data = ID_LED_RESERVED_FFFF;
   10368 				rv = 0;
   10369 			}
   10370 			break;
   10371 		default:
    10372 			DPRINTF(WM_DEBUG_NVM,
    10373 			    ("NVM word 0x%02x is not mapped.\n", offset + i));
    10374 			data[i] = NVM_RESERVED_WORD;
   10375 			break;
   10376 		}
   10377 	}
   10378 
   10379 	return rv;
   10380 }
   10381 
    10382 /* Locking, NVM type detection, checksum validation, version check and read */
   10383 
   10384 /*
   10385  * wm_nvm_acquire:
   10386  *
   10387  *	Perform the EEPROM handshake required on some chips.
   10388  */
   10389 static int
   10390 wm_nvm_acquire(struct wm_softc *sc)
   10391 {
   10392 	uint32_t reg;
   10393 	int x;
   10394 	int ret = 0;
   10395 
    10396 	/* Flash-type NVM needs no handshake; always succeed. */
   10397 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10398 		return 0;
   10399 
   10400 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10401 		ret = wm_get_swfwhw_semaphore(sc);
   10402 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10403 		/* This will also do wm_get_swsm_semaphore() if needed */
   10404 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10405 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10406 		ret = wm_get_swsm_semaphore(sc);
   10407 	}
   10408 
   10409 	if (ret) {
   10410 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10411 			__func__);
   10412 		return 1;
   10413 	}
   10414 
   10415 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10416 		reg = CSR_READ(sc, WMREG_EECD);
   10417 
   10418 		/* Request EEPROM access. */
   10419 		reg |= EECD_EE_REQ;
   10420 		CSR_WRITE(sc, WMREG_EECD, reg);
   10421 
   10422 		/* ..and wait for it to be granted. */
   10423 		for (x = 0; x < 1000; x++) {
   10424 			reg = CSR_READ(sc, WMREG_EECD);
   10425 			if (reg & EECD_EE_GNT)
   10426 				break;
   10427 			delay(5);
   10428 		}
   10429 		if ((reg & EECD_EE_GNT) == 0) {
   10430 			aprint_error_dev(sc->sc_dev,
   10431 			    "could not acquire EEPROM GNT\n");
   10432 			reg &= ~EECD_EE_REQ;
   10433 			CSR_WRITE(sc, WMREG_EECD, reg);
   10434 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10435 				wm_put_swfwhw_semaphore(sc);
   10436 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10437 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10438 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10439 				wm_put_swsm_semaphore(sc);
   10440 			return 1;
   10441 		}
   10442 	}
   10443 
   10444 	return 0;
   10445 }
   10446 
   10447 /*
   10448  * wm_nvm_release:
   10449  *
   10450  *	Release the EEPROM mutex.
   10451  */
   10452 static void
   10453 wm_nvm_release(struct wm_softc *sc)
   10454 {
   10455 	uint32_t reg;
   10456 
    10457 	/* Flash-type NVM needs no handshake; nothing to release. */
   10458 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10459 		return;
   10460 
   10461 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10462 		reg = CSR_READ(sc, WMREG_EECD);
   10463 		reg &= ~EECD_EE_REQ;
   10464 		CSR_WRITE(sc, WMREG_EECD, reg);
   10465 	}
   10466 
   10467 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10468 		wm_put_swfwhw_semaphore(sc);
   10469 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10470 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10471 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10472 		wm_put_swsm_semaphore(sc);
   10473 }
   10474 
   10475 static int
   10476 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10477 {
   10478 	uint32_t eecd = 0;
   10479 
   10480 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10481 	    || sc->sc_type == WM_T_82583) {
   10482 		eecd = CSR_READ(sc, WMREG_EECD);
   10483 
   10484 		/* Isolate bits 15 & 16 */
   10485 		eecd = ((eecd >> 15) & 0x03);
   10486 
   10487 		/* If both bits are set, device is Flash type */
   10488 		if (eecd == 0x03)
   10489 			return 0;
   10490 	}
   10491 	return 1;
   10492 }
   10493 
   10494 static int
   10495 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10496 {
   10497 	uint32_t eec;
   10498 
   10499 	eec = CSR_READ(sc, WMREG_EEC);
   10500 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10501 		return 1;
   10502 
   10503 	return 0;
   10504 }
   10505 
   10506 /*
   10507  * wm_nvm_validate_checksum
   10508  *
   10509  * The checksum is defined as the sum of the first 64 (16 bit) words.
   10510  */
   10511 static int
   10512 wm_nvm_validate_checksum(struct wm_softc *sc)
   10513 {
   10514 	uint16_t checksum;
   10515 	uint16_t eeprom_data;
   10516 #ifdef WM_DEBUG
   10517 	uint16_t csum_wordaddr, valid_checksum;
   10518 #endif
   10519 	int i;
   10520 
   10521 	checksum = 0;
   10522 
   10523 	/* Don't check for I211 */
   10524 	if (sc->sc_type == WM_T_I211)
   10525 		return 0;
   10526 
   10527 #ifdef WM_DEBUG
   10528 	if (sc->sc_type == WM_T_PCH_LPT) {
   10529 		csum_wordaddr = NVM_OFF_COMPAT;
   10530 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10531 	} else {
   10532 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10533 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10534 	}
   10535 
   10536 	/* Dump EEPROM image for debug */
   10537 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10538 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10539 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10540 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10541 		if ((eeprom_data & valid_checksum) == 0) {
   10542 			DPRINTF(WM_DEBUG_NVM,
   10543 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10544 				device_xname(sc->sc_dev), eeprom_data,
   10545 				    valid_checksum));
   10546 		}
   10547 	}
   10548 
   10549 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10550 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10551 		for (i = 0; i < NVM_SIZE; i++) {
   10552 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10553 				printf("XXXX ");
   10554 			else
   10555 				printf("%04hx ", eeprom_data);
   10556 			if (i % 8 == 7)
   10557 				printf("\n");
   10558 		}
   10559 	}
   10560 
   10561 #endif /* WM_DEBUG */
   10562 
   10563 	for (i = 0; i < NVM_SIZE; i++) {
   10564 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10565 			return 1;
   10566 		checksum += eeprom_data;
   10567 	}
   10568 
   10569 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10570 #ifdef WM_DEBUG
   10571 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10572 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10573 #endif
   10574 	}
   10575 
   10576 	return 0;
   10577 }
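
/*
 * Sketch (illustrative; this driver does not write the NVM here): to
 * make the first NVM_SIZE words sum to NVM_CHECKSUM, the checksum
 * word (conventionally the last of those words) would be computed as:
 */
#if 0
	checksum = 0;
	for (i = 0; i < NVM_SIZE - 1; i++) {
		wm_nvm_read(sc, i, 1, &eeprom_data);
		checksum += eeprom_data;
	}
	eeprom_data = (uint16_t)NVM_CHECKSUM - checksum;
	/* ... then write eeprom_data to the checksum word ... */
#endif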
   10578 
   10579 static void
   10580 wm_nvm_version_invm(struct wm_softc *sc)
   10581 {
   10582 	uint32_t dword;
   10583 
   10584 	/*
    10585 	 * Linux's code to decode the version is very strange, so we
    10586 	 * don't follow that algorithm and just use word 61 as the
    10587 	 * document describes.  It may not be perfect, though...
   10588 	 *
   10589 	 * Example:
   10590 	 *
   10591 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10592 	 */
   10593 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10594 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10595 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10596 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10597 }
   10598 
   10599 static void
   10600 wm_nvm_version(struct wm_softc *sc)
   10601 {
   10602 	uint16_t major, minor, build, patch;
   10603 	uint16_t uid0, uid1;
   10604 	uint16_t nvm_data;
   10605 	uint16_t off;
   10606 	bool check_version = false;
   10607 	bool check_optionrom = false;
   10608 	bool have_build = false;
   10609 
   10610 	/*
   10611 	 * Version format:
   10612 	 *
   10613 	 * XYYZ
   10614 	 * X0YZ
   10615 	 * X0YY
   10616 	 *
   10617 	 * Example:
   10618 	 *
   10619 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10620 	 *	82571	0x50a6	5.10.6?
   10621 	 *	82572	0x506a	5.6.10?
   10622 	 *	82572EI	0x5069	5.6.9?
   10623 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10624 	 *		0x2013	2.1.3?
    10625 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10626 	 */
   10627 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10628 	switch (sc->sc_type) {
   10629 	case WM_T_82571:
   10630 	case WM_T_82572:
   10631 	case WM_T_82574:
   10632 	case WM_T_82583:
   10633 		check_version = true;
   10634 		check_optionrom = true;
   10635 		have_build = true;
   10636 		break;
   10637 	case WM_T_82575:
   10638 	case WM_T_82576:
   10639 	case WM_T_82580:
   10640 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10641 			check_version = true;
   10642 		break;
   10643 	case WM_T_I211:
   10644 		wm_nvm_version_invm(sc);
   10645 		goto printver;
   10646 	case WM_T_I210:
   10647 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10648 			wm_nvm_version_invm(sc);
   10649 			goto printver;
   10650 		}
   10651 		/* FALLTHROUGH */
   10652 	case WM_T_I350:
   10653 	case WM_T_I354:
   10654 		check_version = true;
   10655 		check_optionrom = true;
   10656 		break;
   10657 	default:
   10658 		return;
   10659 	}
   10660 	if (check_version) {
   10661 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10662 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10663 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10664 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10665 			build = nvm_data & NVM_BUILD_MASK;
   10666 			have_build = true;
   10667 		} else
   10668 			minor = nvm_data & 0x00ff;
   10669 
   10670 		/* Decimal */
   10671 		minor = (minor / 16) * 10 + (minor % 16);
   10672 		sc->sc_nvm_ver_major = major;
   10673 		sc->sc_nvm_ver_minor = minor;
   10674 
   10675 printver:
   10676 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10677 		    sc->sc_nvm_ver_minor);
   10678 		if (have_build) {
   10679 			sc->sc_nvm_ver_build = build;
   10680 			aprint_verbose(".%d", build);
   10681 		}
   10682 	}
   10683 	if (check_optionrom) {
   10684 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10685 		/* Option ROM Version */
   10686 		if ((off != 0x0000) && (off != 0xffff)) {
   10687 			off += NVM_COMBO_VER_OFF;
   10688 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10689 			wm_nvm_read(sc, off, 1, &uid0);
   10690 			if ((uid0 != 0) && (uid0 != 0xffff)
   10691 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10692 				/* 16bits */
   10693 				major = uid0 >> 8;
   10694 				build = (uid0 << 8) | (uid1 >> 8);
   10695 				patch = uid1 & 0x00ff;
   10696 				aprint_verbose(", option ROM Version %d.%d.%d",
   10697 				    major, build, patch);
   10698 			}
   10699 		}
   10700 	}
   10701 
   10702 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10703 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10704 }
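
/*
 * Worked example (illustrative) for the XYYZ format above: with
 * nvm_data = 0x50a2 and the mask/shift values used in this driver,
 * major = 5, minor = 0xa -> 10 after the decimal conversion and
 * build = 2, i.e. version 5.10.2:
 */
#if 0
	major = (0x50a2 & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;	/* 5 */
	minor = (0x50a2 & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;	/* 0xa */
	minor = (minor / 16) * 10 + (minor % 16);		/* 10 */
	build = 0x50a2 & NVM_BUILD_MASK;			/* 2 */
#endif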
   10705 
   10706 /*
   10707  * wm_nvm_read:
   10708  *
   10709  *	Read data from the serial EEPROM.
   10710  */
   10711 static int
   10712 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10713 {
   10714 	int rv;
   10715 
   10716 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10717 		return 1;
   10718 
   10719 	if (wm_nvm_acquire(sc))
   10720 		return 1;
   10721 
   10722 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10723 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10724 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10725 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10726 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10727 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10728 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10729 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10730 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10731 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10732 	else
   10733 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10734 
   10735 	wm_nvm_release(sc);
   10736 	return rv;
   10737 }
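
/*
 * Typical use (illustrative sketch): reading the three words that hold
 * the primary MAC address.  The helper below is hypothetical and not
 * part of the driver.
 */
#if 0
static int
wm_example_read_macaddr(struct wm_softc *sc, uint8_t enaddr[6])
{
	uint16_t words[3];
	int i;

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, words))
		return 1;
	for (i = 0; i < 3; i++) {
		/* Each NVM word stores two address bytes, low byte first. */
		enaddr[2 * i + 0] = words[i] & 0xff;
		enaddr[2 * i + 1] = (words[i] >> 8) & 0xff;
	}
	return 0;
}
#endif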
   10738 
   10739 /*
   10740  * Hardware semaphores.
    10741  * Very complex...
   10742  */
   10743 
   10744 static int
   10745 wm_get_swsm_semaphore(struct wm_softc *sc)
   10746 {
   10747 	int32_t timeout;
   10748 	uint32_t swsm;
   10749 
   10750 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10751 		/* Get the SW semaphore. */
   10752 		timeout = sc->sc_nvm_wordsize + 1;
   10753 		while (timeout) {
   10754 			swsm = CSR_READ(sc, WMREG_SWSM);
   10755 
   10756 			if ((swsm & SWSM_SMBI) == 0)
   10757 				break;
   10758 
   10759 			delay(50);
   10760 			timeout--;
   10761 		}
   10762 
   10763 		if (timeout == 0) {
   10764 			aprint_error_dev(sc->sc_dev,
   10765 			    "could not acquire SWSM SMBI\n");
   10766 			return 1;
   10767 		}
   10768 	}
   10769 
   10770 	/* Get the FW semaphore. */
   10771 	timeout = sc->sc_nvm_wordsize + 1;
   10772 	while (timeout) {
   10773 		swsm = CSR_READ(sc, WMREG_SWSM);
   10774 		swsm |= SWSM_SWESMBI;
   10775 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10776 		/* If we managed to set the bit we got the semaphore. */
   10777 		swsm = CSR_READ(sc, WMREG_SWSM);
   10778 		if (swsm & SWSM_SWESMBI)
   10779 			break;
   10780 
   10781 		delay(50);
   10782 		timeout--;
   10783 	}
   10784 
   10785 	if (timeout == 0) {
   10786 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   10787 		/* Release semaphores */
   10788 		wm_put_swsm_semaphore(sc);
   10789 		return 1;
   10790 	}
   10791 	return 0;
   10792 }
   10793 
   10794 static void
   10795 wm_put_swsm_semaphore(struct wm_softc *sc)
   10796 {
   10797 	uint32_t swsm;
   10798 
   10799 	swsm = CSR_READ(sc, WMREG_SWSM);
   10800 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10801 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10802 }
   10803 
   10804 static int
   10805 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10806 {
   10807 	uint32_t swfw_sync;
   10808 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10809 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
    10810 	int timeout;
   10811 
   10812 	for (timeout = 0; timeout < 200; timeout++) {
   10813 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10814 			if (wm_get_swsm_semaphore(sc)) {
   10815 				aprint_error_dev(sc->sc_dev,
   10816 				    "%s: failed to get semaphore\n",
   10817 				    __func__);
   10818 				return 1;
   10819 			}
   10820 		}
   10821 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10822 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10823 			swfw_sync |= swmask;
   10824 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10825 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10826 				wm_put_swsm_semaphore(sc);
   10827 			return 0;
   10828 		}
   10829 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10830 			wm_put_swsm_semaphore(sc);
   10831 		delay(5000);
   10832 	}
   10833 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10834 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10835 	return 1;
   10836 }
   10837 
   10838 static void
   10839 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10840 {
   10841 	uint32_t swfw_sync;
   10842 
   10843 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10844 		while (wm_get_swsm_semaphore(sc) != 0)
   10845 			continue;
   10846 	}
   10847 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10848 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10849 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10850 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10851 		wm_put_swsm_semaphore(sc);
   10852 }
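
/*
 * Usage pattern (illustrative sketch): take the software/firmware
 * semaphore for a resource, touch the shared resource, then release
 * it.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM) == 0) {
		/* ... access the EEPROM or other shared resource ... */
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	}
#endif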
   10853 
   10854 static int
   10855 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10856 {
   10857 	uint32_t ext_ctrl;
    10858 	int timeout;
   10859 
   10860 	for (timeout = 0; timeout < 200; timeout++) {
   10861 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10862 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10863 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10864 
   10865 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10866 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10867 			return 0;
   10868 		delay(5000);
   10869 	}
   10870 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10871 	    device_xname(sc->sc_dev), ext_ctrl);
   10872 	return 1;
   10873 }
   10874 
   10875 static void
   10876 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10877 {
   10878 	uint32_t ext_ctrl;
   10879 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10880 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10881 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10882 }
   10883 
   10884 static int
   10885 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10886 {
   10887 	int i = 0;
   10888 	uint32_t reg;
   10889 
   10890 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10891 	do {
   10892 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10893 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10894 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10895 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10896 			break;
   10897 		delay(2*1000);
   10898 		i++;
   10899 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10900 
   10901 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10902 		wm_put_hw_semaphore_82573(sc);
   10903 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10904 		    device_xname(sc->sc_dev));
   10905 		return -1;
   10906 	}
   10907 
   10908 	return 0;
   10909 }
   10910 
   10911 static void
   10912 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10913 {
   10914 	uint32_t reg;
   10915 
   10916 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10917 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10918 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10919 }
   10920 
   10921 /*
   10922  * Management mode and power management related subroutines.
   10923  * BMC, AMT, suspend/resume and EEE.
   10924  */
   10925 
   10926 static int
   10927 wm_check_mng_mode(struct wm_softc *sc)
   10928 {
   10929 	int rv;
   10930 
   10931 	switch (sc->sc_type) {
   10932 	case WM_T_ICH8:
   10933 	case WM_T_ICH9:
   10934 	case WM_T_ICH10:
   10935 	case WM_T_PCH:
   10936 	case WM_T_PCH2:
   10937 	case WM_T_PCH_LPT:
   10938 		rv = wm_check_mng_mode_ich8lan(sc);
   10939 		break;
   10940 	case WM_T_82574:
   10941 	case WM_T_82583:
   10942 		rv = wm_check_mng_mode_82574(sc);
   10943 		break;
   10944 	case WM_T_82571:
   10945 	case WM_T_82572:
   10946 	case WM_T_82573:
   10947 	case WM_T_80003:
   10948 		rv = wm_check_mng_mode_generic(sc);
   10949 		break;
   10950 	default:
    10951 		/* nothing to do */
   10952 		rv = 0;
   10953 		break;
   10954 	}
   10955 
   10956 	return rv;
   10957 }
   10958 
   10959 static int
   10960 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   10961 {
   10962 	uint32_t fwsm;
   10963 
   10964 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10965 
   10966 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   10967 		return 1;
   10968 
   10969 	return 0;
   10970 }
   10971 
   10972 static int
   10973 wm_check_mng_mode_82574(struct wm_softc *sc)
   10974 {
   10975 	uint16_t data;
   10976 
   10977 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10978 
   10979 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   10980 		return 1;
   10981 
   10982 	return 0;
   10983 }
   10984 
   10985 static int
   10986 wm_check_mng_mode_generic(struct wm_softc *sc)
   10987 {
   10988 	uint32_t fwsm;
   10989 
   10990 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10991 
   10992 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   10993 		return 1;
   10994 
   10995 	return 0;
   10996 }
   10997 
   10998 static int
   10999 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11000 {
   11001 	uint32_t manc, fwsm, factps;
   11002 
   11003 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11004 		return 0;
   11005 
   11006 	manc = CSR_READ(sc, WMREG_MANC);
   11007 
   11008 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11009 		device_xname(sc->sc_dev), manc));
   11010 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11011 		return 0;
   11012 
   11013 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11014 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11015 		factps = CSR_READ(sc, WMREG_FACTPS);
   11016 		if (((factps & FACTPS_MNGCG) == 0)
   11017 		    && ((fwsm & FWSM_MODE_MASK)
   11018 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   11019 			return 1;
    11020 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11021 		uint16_t data;
   11022 
   11023 		factps = CSR_READ(sc, WMREG_FACTPS);
   11024 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11025 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11026 			device_xname(sc->sc_dev), factps, data));
   11027 		if (((factps & FACTPS_MNGCG) == 0)
   11028 		    && ((data & NVM_CFG2_MNGM_MASK)
   11029 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11030 			return 1;
   11031 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11032 	    && ((manc & MANC_ASF_EN) == 0))
   11033 		return 1;
   11034 
   11035 	return 0;
   11036 }
   11037 
   11038 static int
   11039 wm_check_reset_block(struct wm_softc *sc)
   11040 {
   11041 	uint32_t reg;
   11042 
   11043 	switch (sc->sc_type) {
   11044 	case WM_T_ICH8:
   11045 	case WM_T_ICH9:
   11046 	case WM_T_ICH10:
   11047 	case WM_T_PCH:
   11048 	case WM_T_PCH2:
   11049 	case WM_T_PCH_LPT:
   11050 		reg = CSR_READ(sc, WMREG_FWSM);
   11051 		if ((reg & FWSM_RSPCIPHY) != 0)
   11052 			return 0;
   11053 		else
   11054 			return -1;
   11055 		break;
   11056 	case WM_T_82571:
   11057 	case WM_T_82572:
   11058 	case WM_T_82573:
   11059 	case WM_T_82574:
   11060 	case WM_T_82583:
   11061 	case WM_T_80003:
   11062 		reg = CSR_READ(sc, WMREG_MANC);
   11063 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11064 			return -1;
   11065 		else
   11066 			return 0;
   11067 		break;
   11068 	default:
   11069 		/* no problem */
   11070 		break;
   11071 	}
   11072 
   11073 	return 0;
   11074 }
   11075 
   11076 static void
   11077 wm_get_hw_control(struct wm_softc *sc)
   11078 {
   11079 	uint32_t reg;
   11080 
   11081 	switch (sc->sc_type) {
   11082 	case WM_T_82573:
   11083 		reg = CSR_READ(sc, WMREG_SWSM);
   11084 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11085 		break;
   11086 	case WM_T_82571:
   11087 	case WM_T_82572:
   11088 	case WM_T_82574:
   11089 	case WM_T_82583:
   11090 	case WM_T_80003:
   11091 	case WM_T_ICH8:
   11092 	case WM_T_ICH9:
   11093 	case WM_T_ICH10:
   11094 	case WM_T_PCH:
   11095 	case WM_T_PCH2:
   11096 	case WM_T_PCH_LPT:
   11097 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11098 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11099 		break;
   11100 	default:
   11101 		break;
   11102 	}
   11103 }
   11104 
   11105 static void
   11106 wm_release_hw_control(struct wm_softc *sc)
   11107 {
   11108 	uint32_t reg;
   11109 
   11110 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11111 		return;
   11112 
   11113 	if (sc->sc_type == WM_T_82573) {
   11114 		reg = CSR_READ(sc, WMREG_SWSM);
   11115 		reg &= ~SWSM_DRV_LOAD;
    11116 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11117 	} else {
   11118 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11119 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11120 	}
   11121 }
   11122 
   11123 static void
   11124 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   11125 {
   11126 	uint32_t reg;
   11127 
   11128 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11129 
   11130 	if (on != 0)
   11131 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11132 	else
   11133 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11134 
   11135 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11136 }
   11137 
   11138 static void
   11139 wm_smbustopci(struct wm_softc *sc)
   11140 {
   11141 	uint32_t fwsm;
   11142 
   11143 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11144 	if (((fwsm & FWSM_FW_VALID) == 0)
    11145 	    && (wm_check_reset_block(sc) == 0)) {
   11146 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11147 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11148 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11149 		CSR_WRITE_FLUSH(sc);
   11150 		delay(10);
   11151 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11152 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11153 		CSR_WRITE_FLUSH(sc);
   11154 		delay(50*1000);
   11155 
   11156 		/*
   11157 		 * Gate automatic PHY configuration by hardware on non-managed
   11158 		 * 82579
   11159 		 */
   11160 		if (sc->sc_type == WM_T_PCH2)
   11161 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   11162 	}
   11163 }
   11164 
   11165 static void
   11166 wm_init_manageability(struct wm_softc *sc)
   11167 {
   11168 
   11169 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11170 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11171 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11172 
   11173 		/* Disable hardware interception of ARP */
   11174 		manc &= ~MANC_ARP_EN;
   11175 
   11176 		/* Enable receiving management packets to the host */
   11177 		if (sc->sc_type >= WM_T_82571) {
   11178 			manc |= MANC_EN_MNG2HOST;
    11179 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11180 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11181 		}
   11182 
   11183 		CSR_WRITE(sc, WMREG_MANC, manc);
   11184 	}
   11185 }
   11186 
   11187 static void
   11188 wm_release_manageability(struct wm_softc *sc)
   11189 {
   11190 
   11191 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11192 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11193 
   11194 		manc |= MANC_ARP_EN;
   11195 		if (sc->sc_type >= WM_T_82571)
   11196 			manc &= ~MANC_EN_MNG2HOST;
   11197 
   11198 		CSR_WRITE(sc, WMREG_MANC, manc);
   11199 	}
   11200 }
   11201 
   11202 static void
   11203 wm_get_wakeup(struct wm_softc *sc)
   11204 {
   11205 
   11206 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11207 	switch (sc->sc_type) {
   11208 	case WM_T_82573:
   11209 	case WM_T_82583:
   11210 		sc->sc_flags |= WM_F_HAS_AMT;
   11211 		/* FALLTHROUGH */
   11212 	case WM_T_80003:
   11213 	case WM_T_82541:
   11214 	case WM_T_82547:
   11215 	case WM_T_82571:
   11216 	case WM_T_82572:
   11217 	case WM_T_82574:
   11218 	case WM_T_82575:
   11219 	case WM_T_82576:
   11220 	case WM_T_82580:
   11221 	case WM_T_I350:
   11222 	case WM_T_I354:
   11223 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   11224 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11225 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11226 		break;
   11227 	case WM_T_ICH8:
   11228 	case WM_T_ICH9:
   11229 	case WM_T_ICH10:
   11230 	case WM_T_PCH:
   11231 	case WM_T_PCH2:
   11232 	case WM_T_PCH_LPT:
   11233 		sc->sc_flags |= WM_F_HAS_AMT;
   11234 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11235 		break;
   11236 	default:
   11237 		break;
   11238 	}
   11239 
   11240 	/* 1: HAS_MANAGE */
   11241 	if (wm_enable_mng_pass_thru(sc) != 0)
   11242 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11243 
   11244 #ifdef WM_DEBUG
   11245 	printf("\n");
   11246 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11247 		printf("HAS_AMT,");
   11248 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11249 		printf("ARC_SUBSYS_VALID,");
   11250 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11251 		printf("ASF_FIRMWARE_PRES,");
   11252 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11253 		printf("HAS_MANAGE,");
   11254 	printf("\n");
   11255 #endif
   11256 	/*
    11257 	 * Note that the WOL flags are set after the EEPROM stuff has
    11258 	 * been reset.
   11259 	 */
   11260 }
   11261 
   11262 #ifdef WM_WOL
   11263 /* WOL in the newer chipset interfaces (pchlan) */
   11264 static void
   11265 wm_enable_phy_wakeup(struct wm_softc *sc)
   11266 {
   11267 #if 0
   11268 	uint16_t preg;
   11269 
   11270 	/* Copy MAC RARs to PHY RARs */
   11271 
   11272 	/* Copy MAC MTA to PHY MTA */
   11273 
   11274 	/* Configure PHY Rx Control register */
   11275 
   11276 	/* Enable PHY wakeup in MAC register */
   11277 
   11278 	/* Configure and enable PHY wakeup in PHY registers */
   11279 
   11280 	/* Activate PHY wakeup */
   11281 
   11282 	/* XXX */
   11283 #endif
   11284 }
   11285 
   11286 /* Power down workaround on D3 */
   11287 static void
   11288 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11289 {
   11290 	uint32_t reg;
   11291 	int i;
   11292 
   11293 	for (i = 0; i < 2; i++) {
   11294 		/* Disable link */
   11295 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11296 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11297 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11298 
   11299 		/*
   11300 		 * Call gig speed drop workaround on Gig disable before
   11301 		 * accessing any PHY registers
   11302 		 */
   11303 		if (sc->sc_type == WM_T_ICH8)
   11304 			wm_gig_downshift_workaround_ich8lan(sc);
   11305 
   11306 		/* Write VR power-down enable */
   11307 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11308 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11309 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11310 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11311 
   11312 		/* Read it back and test */
   11313 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11314 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11315 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11316 			break;
   11317 
   11318 		/* Issue PHY reset and repeat at most one more time */
   11319 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11320 	}
   11321 }
   11322 
   11323 static void
   11324 wm_enable_wakeup(struct wm_softc *sc)
   11325 {
   11326 	uint32_t reg, pmreg;
   11327 	pcireg_t pmode;
   11328 
   11329 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11330 		&pmreg, NULL) == 0)
   11331 		return;
   11332 
   11333 	/* Advertise the wakeup capability */
   11334 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11335 	    | CTRL_SWDPIN(3));
   11336 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11337 
   11338 	/* ICH workaround */
   11339 	switch (sc->sc_type) {
   11340 	case WM_T_ICH8:
   11341 	case WM_T_ICH9:
   11342 	case WM_T_ICH10:
   11343 	case WM_T_PCH:
   11344 	case WM_T_PCH2:
   11345 	case WM_T_PCH_LPT:
   11346 		/* Disable gig during WOL */
   11347 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11348 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11349 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11350 		if (sc->sc_type == WM_T_PCH)
   11351 			wm_gmii_reset(sc);
   11352 
   11353 		/* Power down workaround */
   11354 		if (sc->sc_phytype == WMPHY_82577) {
   11355 			struct mii_softc *child;
   11356 
   11357 			/* Assume that the PHY is copper */
   11358 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11359 			if (child->mii_mpd_rev <= 2)
   11360 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11361 				    (768 << 5) | 25, 0x0444); /* magic num */
   11362 		}
   11363 		break;
   11364 	default:
   11365 		break;
   11366 	}
   11367 
   11368 	/* Keep the laser running on fiber adapters */
   11369 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11370 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11371 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11372 		reg |= CTRL_EXT_SWDPIN(3);
   11373 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11374 	}
   11375 
   11376 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11377 #if 0	/* for the multicast packet */
   11378 	reg |= WUFC_MC;
   11379 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11380 #endif
   11381 
   11382 	if (sc->sc_type == WM_T_PCH) {
   11383 		wm_enable_phy_wakeup(sc);
   11384 	} else {
   11385 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11386 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11387 	}
   11388 
   11389 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11390 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11391 		|| (sc->sc_type == WM_T_PCH2))
   11392 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11393 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11394 
   11395 	/* Request PME */
   11396 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11397 #if 0
   11398 	/* Disable WOL */
   11399 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11400 #else
   11401 	/* For WOL */
   11402 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11403 #endif
   11404 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11405 }
   11406 #endif /* WM_WOL */
   11407 
   11408 /* EEE */
   11409 
   11410 static void
   11411 wm_set_eee_i350(struct wm_softc *sc)
   11412 {
   11413 	uint32_t ipcnfg, eeer;
   11414 
   11415 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11416 	eeer = CSR_READ(sc, WMREG_EEER);
   11417 
   11418 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11419 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11420 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11421 		    | EEER_LPI_FC);
   11422 	} else {
   11423 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11424 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11425 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11426 		    | EEER_LPI_FC);
   11427 	}
   11428 
   11429 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11430 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11431 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11432 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11433 }
   11434 
   11435 /*
   11436  * Workarounds (mainly PHY related).
   11437  * Basically, PHY's workarounds are in the PHY drivers.
   11438  */
   11439 
   11440 /* Work-around for 82566 Kumeran PCS lock loss */
   11441 static void
   11442 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11443 {
   11444 	int miistatus, active, i;
   11445 	int reg;
   11446 
   11447 	miistatus = sc->sc_mii.mii_media_status;
   11448 
   11449 	/* If the link is not up, do nothing */
    11450 	if ((miistatus & IFM_ACTIVE) == 0)
   11451 		return;
   11452 
   11453 	active = sc->sc_mii.mii_media_active;
   11454 
   11455 	/* Nothing to do if the link is other than 1Gbps */
   11456 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11457 		return;
   11458 
   11459 	for (i = 0; i < 10; i++) {
   11460 		/* read twice */
   11461 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11462 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
    11463 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11464 			goto out;	/* GOOD! */
   11465 
   11466 		/* Reset the PHY */
   11467 		wm_gmii_reset(sc);
   11468 		delay(5*1000);
   11469 	}
   11470 
   11471 	/* Disable GigE link negotiation */
   11472 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11473 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11474 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11475 
   11476 	/*
   11477 	 * Call gig speed drop workaround on Gig disable before accessing
   11478 	 * any PHY registers.
   11479 	 */
   11480 	wm_gig_downshift_workaround_ich8lan(sc);
   11481 
   11482 out:
   11483 	return;
   11484 }
   11485 
   11486 /* WOL from S5 stops working */
   11487 static void
   11488 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11489 {
   11490 	uint16_t kmrn_reg;
   11491 
   11492 	/* Only for igp3 */
   11493 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11494 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11495 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11496 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11497 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11498 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11499 	}
   11500 }
   11501 
   11502 /*
   11503  * Workaround for pch's PHYs
   11504  * XXX should be moved to new PHY driver?
   11505  */
   11506 static void
   11507 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11508 {
   11509 	if (sc->sc_phytype == WMPHY_82577)
   11510 		wm_set_mdio_slow_mode_hv(sc);
   11511 
   11512 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
   11513 
   11514 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
   11515 
   11516 	/* 82578 */
   11517 	if (sc->sc_phytype == WMPHY_82578) {
   11518 		/* PCH rev. < 3 */
   11519 		if (sc->sc_rev < 3) {
   11520 			/* XXX 6 bit shift? Why? Is it page2? */
   11521 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
   11522 			    0x66c0);
   11523 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
   11524 			    0xffff);
   11525 		}
   11526 
   11527 		/* XXX phy rev. < 2 */
   11528 	}
   11529 
   11530 	/* Select page 0 */
   11531 
   11532 	/* XXX acquire semaphore */
   11533 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
   11534 	/* XXX release semaphore */
   11535 
   11536 	/*
   11537 	 * Configure the K1 Si workaround during phy reset assuming there is
   11538 	 * link so that it disables K1 if link is in 1Gbps.
   11539 	 */
   11540 	wm_k1_gig_workaround_hv(sc, 1);
   11541 }
   11542 
   11543 static void
   11544 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
   11545 {
   11546 
   11547 	wm_set_mdio_slow_mode_hv(sc);
   11548 }
   11549 
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

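/*
 * Slow MDIO mode stretches the MDIO access timing; the reference code
 * enables it before the first PHY access on these parts, presumably
 * because the default (fast) timing is not reliable there.
 */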
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

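/*
 * After flipping the K1 enable bit in the Kumeran K1_CONFIG register,
 * the MAC side is bounced to make the change take effect: the speed
 * bits are cleared, forced-speed and speed-bypass are asserted for
 * 20us, then the original CTRL/CTRL_EXT values are restored.  The 20us
 * delays match Intel's reference sequence.
 */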
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

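/*
 * The magic (offset, value) pairs below were lifted from the FreeBSD
 * driver without further documentation; wm_82575_write_8bit_ctlr_reg()
 * is assumed to perform the indirect 8-bit write into the given
 * controller register block.
 */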
/* special case - for 82575 - need to do manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * remark: this is untested code - we have no board without EEPROM
	 *  same setup as mentioned in the FreeBSD driver for the i82575
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

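/*
 * On 82580 ports in SGMII mode, a reset loses whether MDIO traffic is
 * routed to the internal or an external PHY, and whether the MDIO pins
 * are shared; this restores MDICNFG_DEST and MDICNFG_COM_MDIO from the
 * per-port CFG3 word in the NVM.
 */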
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

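/*
 * With the slow-clock errata the internal PHY can come up with an
 * unconfigured PLL.  The loop below polls the GS40G PLL frequency
 * register and, while it reads back as unconfigured, resets the PHY,
 * pushes a patched iNVM autoload word via EEARBC and bounces the
 * device through D3hot/D0 so that the PLL relocks; the original WUC,
 * MDICNFG and autoload values are restored afterwards.
 */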
/*
 * I210 Errata 25 and I211 Errata 10
 * Slow System Clock.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	int pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}