      1 /*	$NetBSD: if_wm.c,v 1.382 2015/12/13 19:06:43 christos Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *	This product includes software developed for the NetBSD Project by
     20  *	Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*******************************************************************************
     39 
     40   Copyright (c) 2001-2005, Intel Corporation
     41   All rights reserved.
     42 
     43   Redistribution and use in source and binary forms, with or without
     44   modification, are permitted provided that the following conditions are met:
     45 
     46    1. Redistributions of source code must retain the above copyright notice,
     47       this list of conditions and the following disclaimer.
     48 
     49    2. Redistributions in binary form must reproduce the above copyright
     50       notice, this list of conditions and the following disclaimer in the
     51       documentation and/or other materials provided with the distribution.
     52 
     53    3. Neither the name of the Intel Corporation nor the names of its
     54       contributors may be used to endorse or promote products derived from
     55       this software without specific prior written permission.
     56 
     57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
     58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
     61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     67   POSSIBILITY OF SUCH DAMAGE.
     68 
     69 *******************************************************************************/
     70 /*
     71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
     72  *
     73  * TODO (in order of importance):
     74  *
     75  *	- Check XXX'ed comments
     76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
     77  *	- TX Multi queue
      78  *	- EEE (Energy Efficient Ethernet)
     79  *	- Virtual Function
     80  *	- Set LED correctly (based on contents in EEPROM)
     81  *	- Rework how parameters are loaded from the EEPROM.
     82  *	- Image Unique ID
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.382 2015/12/13 19:06:43 christos Exp $");
     87 
     88 #ifdef _KERNEL_OPT
     89 #include "opt_net_mpsafe.h"
     90 #endif
     91 
     92 #include <sys/param.h>
     93 #include <sys/systm.h>
     94 #include <sys/callout.h>
     95 #include <sys/mbuf.h>
     96 #include <sys/malloc.h>
     97 #include <sys/kmem.h>
     98 #include <sys/kernel.h>
     99 #include <sys/socket.h>
    100 #include <sys/ioctl.h>
    101 #include <sys/errno.h>
    102 #include <sys/device.h>
    103 #include <sys/queue.h>
    104 #include <sys/syslog.h>
    105 #include <sys/interrupt.h>
    106 
    107 #include <sys/rndsource.h>
    108 
    109 #include <net/if.h>
    110 #include <net/if_dl.h>
    111 #include <net/if_media.h>
    112 #include <net/if_ether.h>
    113 
    114 #include <net/bpf.h>
    115 
    116 #include <netinet/in.h>			/* XXX for struct ip */
    117 #include <netinet/in_systm.h>		/* XXX for struct ip */
    118 #include <netinet/ip.h>			/* XXX for struct ip */
    119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
    120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
    121 
    122 #include <sys/bus.h>
    123 #include <sys/intr.h>
    124 #include <machine/endian.h>
    125 
    126 #include <dev/mii/mii.h>
    127 #include <dev/mii/miivar.h>
    128 #include <dev/mii/miidevs.h>
    129 #include <dev/mii/mii_bitbang.h>
    130 #include <dev/mii/ikphyreg.h>
    131 #include <dev/mii/igphyreg.h>
    132 #include <dev/mii/igphyvar.h>
    133 #include <dev/mii/inbmphyreg.h>
    134 
    135 #include <dev/pci/pcireg.h>
    136 #include <dev/pci/pcivar.h>
    137 #include <dev/pci/pcidevs.h>
    138 
    139 #include <dev/pci/if_wmreg.h>
    140 #include <dev/pci/if_wmvar.h>
    141 
    142 #ifdef WM_DEBUG
    143 #define	WM_DEBUG_LINK		0x01
    144 #define	WM_DEBUG_TX		0x02
    145 #define	WM_DEBUG_RX		0x04
    146 #define	WM_DEBUG_GMII		0x08
    147 #define	WM_DEBUG_MANAGE		0x10
    148 #define	WM_DEBUG_NVM		0x20
    149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
    151 
     152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (/*CONSTCOND*/0)
    153 #else
    154 #define	DPRINTF(x, y)	/* nothing */
    155 #endif /* WM_DEBUG */
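
/*
 * Illustrative usage sketch: DPRINTF()'s second argument must be a
 * fully parenthesized printf() argument list, e.g.
 *
 *	DPRINTF(WM_DEBUG_LINK,
 *	    ("%s: link state changed\n", device_xname(sc->sc_dev)));
 */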
    156 
    157 #ifdef NET_MPSAFE
    158 #define WM_MPSAFE	1
    159 #endif
    160 
    161 /*
     162  * The maximum number of interrupts supported by this driver.
    163  */
    164 #define WM_MAX_NTXINTR		16
    165 #define WM_MAX_NRXINTR		16
    166 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
    167 
    168 /*
    169  * Transmit descriptor list size.  Due to errata, we can only have
    170  * 256 hardware descriptors in the ring on < 82544, but we use 4096
    171  * on >= 82544.  We tell the upper layers that they can queue a lot
    172  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
    173  * of them at a time.
    174  *
    175  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
    176  * chains containing many small mbufs have been observed in zero-copy
    177  * situations with jumbo frames.
    178  */
    179 #define	WM_NTXSEGS		256
    180 #define	WM_IFQUEUELEN		256
    181 #define	WM_TXQUEUELEN_MAX	64
    182 #define	WM_TXQUEUELEN_MAX_82547	16
    183 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
    184 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
    185 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
    186 #define	WM_NTXDESC_82542	256
    187 #define	WM_NTXDESC_82544	4096
    188 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
    189 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
    190 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
    191 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
    192 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
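
/*
 * Since WM_NTXDESC() and WM_TXQUEUELEN() are powers of two, the
 * WM_NEXTTX()/WM_NEXTTXS() macros wrap ring indices with a cheap
 * AND instead of a modulo.  A minimal producer-side sketch
 * (illustrative only; "txq" is an initialized struct wm_txqueue
 * and "ndesc" the number of slots to fill):
 *
 *	int i, idx = txq->txq_next;
 *
 *	for (i = 0; i < ndesc; i++) {
 *		... fill txq->txq_descs[idx] ...
 *		idx = WM_NEXTTX(txq, idx);
 *	}
 *	txq->txq_next = idx;
 */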
    193 
    194 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
    195 
    196 /*
    197  * Receive descriptor list size.  We have one Rx buffer for normal
    198  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
    199  * packet.  We allocate 256 receive descriptors, each with a 2k
    200  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
    201  */
    202 #define	WM_NRXDESC		256
    203 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
    204 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
    205 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
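
/*
 * Worked example (illustrative): a typical 9018-byte jumbo frame
 * split across 2048-byte (MCLBYTES) buffers needs 5 descriptors
 * (5 * 2048 = 10240 >= 9018), so the 256-entry ring holds roughly
 * 256 / 5 = 51 such packets at once, matching the comment above.
 */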
    206 
    207 typedef union txdescs {
    208 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
    209 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
    210 } txdescs_t;
    211 
     212 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
     213 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
    214 
    215 /*
    216  * Software state for transmit jobs.
    217  */
    218 struct wm_txsoft {
    219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
    220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
    221 	int txs_firstdesc;		/* first descriptor in packet */
    222 	int txs_lastdesc;		/* last descriptor in packet */
    223 	int txs_ndesc;			/* # of descriptors used */
    224 };
    225 
    226 /*
    227  * Software state for receive buffers.  Each descriptor gets a
    228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
    229  * more than one buffer, we chain them together.
    230  */
    231 struct wm_rxsoft {
    232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
    233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
    234 };
    235 
    236 #define WM_LINKUP_TIMEOUT	50
    237 
    238 static uint16_t swfwphysem[] = {
    239 	SWFW_PHY0_SM,
    240 	SWFW_PHY1_SM,
    241 	SWFW_PHY2_SM,
    242 	SWFW_PHY3_SM
    243 };
    244 
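/*
 * Lookup table for wm_rxpbs_adjust_82580(), presumably indexed by
 * the 82580 RXPBS packet-buffer-size field.
 */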
    245 static const uint32_t wm_82580_rxpbs_table[] = {
    246 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
    247 };
    248 
    249 struct wm_softc;
    250 
    251 struct wm_txqueue {
    252 	kmutex_t *txq_lock;		/* lock for tx operations */
    253 
    254 	struct wm_softc *txq_sc;
    255 
     256 	int txq_id;			/* index of this transmit queue */
     257 	int txq_intr_idx;		/* MSI-X table index */
    258 
    259 	/* Software state for the transmit descriptors. */
    260 	int txq_num;			/* must be a power of two */
    261 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
    262 
    263 	/* TX control data structures. */
    264 	int txq_ndesc;			/* must be a power of two */
    265 	txdescs_t *txq_descs_u;
     266 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
    267 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
     268 	int txq_desc_rseg;		/* real number of control segments */
    269 	size_t txq_desc_size;		/* control data size */
    270 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
    271 #define	txq_descs	txq_descs_u->sctxu_txdescs
    272 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
    273 
    274 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
    275 
    276 	int txq_free;			/* number of free Tx descriptors */
    277 	int txq_next;			/* next ready Tx descriptor */
    278 
    279 	int txq_sfree;			/* number of free Tx jobs */
    280 	int txq_snext;			/* next free Tx job */
    281 	int txq_sdirty;			/* dirty Tx jobs */
    282 
    283 	/* These 4 variables are used only on the 82547. */
    284 	int txq_fifo_size;		/* Tx FIFO size */
    285 	int txq_fifo_head;		/* current head of FIFO */
    286 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
    287 	int txq_fifo_stall;		/* Tx FIFO is stalled */
    288 
    289 	/* XXX which event counter is required? */
    290 };
    291 
    292 struct wm_rxqueue {
    293 	kmutex_t *rxq_lock;		/* lock for rx operations */
    294 
    295 	struct wm_softc *rxq_sc;
    296 
     297 	int rxq_id;			/* index of this receive queue */
     298 	int rxq_intr_idx;		/* MSI-X table index */
    299 
    300 	/* Software state for the receive descriptors. */
    301 	wiseman_rxdesc_t *rxq_descs;
    302 
    303 	/* RX control data structures. */
    304 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
    305 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
    306 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
     307 	int rxq_desc_rseg;		/* real number of control segments */
    308 	size_t rxq_desc_size;		/* control data size */
    309 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
    310 
    311 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
    312 
    313 	int rxq_ptr;			/* next ready Rx descriptor/queue ent */
    314 	int rxq_discard;
    315 	int rxq_len;
    316 	struct mbuf *rxq_head;
    317 	struct mbuf *rxq_tail;
    318 	struct mbuf **rxq_tailp;
    319 
    320 	/* XXX which event counter is required? */
    321 };
    322 
    323 /*
    324  * Software state per device.
    325  */
    326 struct wm_softc {
    327 	device_t sc_dev;		/* generic device information */
    328 	bus_space_tag_t sc_st;		/* bus space tag */
    329 	bus_space_handle_t sc_sh;	/* bus space handle */
    330 	bus_size_t sc_ss;		/* bus space size */
    331 	bus_space_tag_t sc_iot;		/* I/O space tag */
    332 	bus_space_handle_t sc_ioh;	/* I/O space handle */
    333 	bus_size_t sc_ios;		/* I/O space size */
    334 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
    335 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
    336 	bus_size_t sc_flashs;		/* flash registers space size */
    337 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
    338 
    339 	struct ethercom sc_ethercom;	/* ethernet common data */
    340 	struct mii_data sc_mii;		/* MII/media information */
    341 
    342 	pci_chipset_tag_t sc_pc;
    343 	pcitag_t sc_pcitag;
    344 	int sc_bus_speed;		/* PCI/PCIX bus speed */
    345 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
    346 
    347 	uint16_t sc_pcidevid;		/* PCI device ID */
    348 	wm_chip_type sc_type;		/* MAC type */
    349 	int sc_rev;			/* MAC revision */
    350 	wm_phy_type sc_phytype;		/* PHY type */
    351 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
    352 #define	WM_MEDIATYPE_UNKNOWN		0x00
    353 #define	WM_MEDIATYPE_FIBER		0x01
    354 #define	WM_MEDIATYPE_COPPER		0x02
    355 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
    356 	int sc_funcid;			/* unit number of the chip (0 to 3) */
    357 	int sc_flags;			/* flags; see below */
    358 	int sc_if_flags;		/* last if_flags */
    359 	int sc_flowflags;		/* 802.3x flow control flags */
    360 	int sc_align_tweak;
    361 
    362 	void *sc_ihs[WM_MAX_NINTR];	/*
    363 					 * interrupt cookie.
    364 					 * legacy and msi use sc_ihs[0].
    365 					 */
    366 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
    367 	int sc_nintrs;			/* number of interrupts */
    368 
    369 	int sc_link_intr_idx;		/* index of MSI-X tables */
    370 
    371 	callout_t sc_tick_ch;		/* tick callout */
    372 	bool sc_stopping;
    373 
    374 	int sc_nvm_ver_major;
    375 	int sc_nvm_ver_minor;
    376 	int sc_nvm_ver_build;
    377 	int sc_nvm_addrbits;		/* NVM address bits */
    378 	unsigned int sc_nvm_wordsize;	/* NVM word size */
    379 	int sc_ich8_flash_base;
    380 	int sc_ich8_flash_bank_size;
    381 	int sc_nvm_k1_enabled;
    382 
    383 	int sc_ntxqueues;
    384 	struct wm_txqueue *sc_txq;
    385 
    386 	int sc_nrxqueues;
    387 	struct wm_rxqueue *sc_rxq;
    388 
    389 #ifdef WM_EVENT_COUNTERS
    390 	/* Event counters. */
    391 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
    392 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
    393 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
    394 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
    395 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
    396 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
    397 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
    398 
    399 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
    400 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
    401 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
    402 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
    403 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
    404 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
    405 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
    406 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
    407 
    408 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
    409 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
    410 
    411 	struct evcnt sc_ev_tu;		/* Tx underrun */
    412 
    413 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
    414 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
    415 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
    416 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
    417 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
    418 #endif /* WM_EVENT_COUNTERS */
    419 
     420 	/* This variable is used only on the 82547. */
    421 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
    422 
    423 	uint32_t sc_ctrl;		/* prototype CTRL register */
    424 #if 0
    425 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
    426 #endif
    427 	uint32_t sc_icr;		/* prototype interrupt bits */
    428 	uint32_t sc_itr;		/* prototype intr throttling reg */
    429 	uint32_t sc_tctl;		/* prototype TCTL register */
    430 	uint32_t sc_rctl;		/* prototype RCTL register */
    431 	uint32_t sc_txcw;		/* prototype TXCW register */
    432 	uint32_t sc_tipg;		/* prototype TIPG register */
    433 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
    434 	uint32_t sc_pba;		/* prototype PBA register */
    435 
    436 	int sc_tbi_linkup;		/* TBI link status */
    437 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
    438 	int sc_tbi_serdes_ticks;	/* tbi ticks */
    439 
    440 	int sc_mchash_type;		/* multicast filter offset */
    441 
    442 	krndsource_t rnd_source;	/* random source */
    443 
    444 	kmutex_t *sc_core_lock;		/* lock for softc operations */
    445 };
    446 
    447 #define WM_TX_LOCK(_txq)	if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock)
    448 #define WM_TX_UNLOCK(_txq)	if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock)
    449 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
    450 #define WM_RX_LOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock)
    451 #define WM_RX_UNLOCK(_rxq)	if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock)
    452 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
    453 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
    454 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
    455 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
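
/*
 * Minimal usage sketch (illustrative only): each macro degrades to a
 * no-op when the corresponding lock was never created, e.g. when the
 * driver is not built with WM_MPSAFE:
 *
 *	WM_TX_LOCK(txq);
 *	KASSERT(WM_TX_LOCKED(txq));
 *	... modify txq state ...
 *	WM_TX_UNLOCK(txq);
 */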
    456 
    457 #ifdef WM_MPSAFE
    458 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
    459 #else
    460 #define CALLOUT_FLAGS	0
    461 #endif
    462 
    463 #define	WM_RXCHAIN_RESET(rxq)						\
    464 do {									\
    465 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
    466 	*(rxq)->rxq_tailp = NULL;					\
    467 	(rxq)->rxq_len = 0;						\
    468 } while (/*CONSTCOND*/0)
    469 
    470 #define	WM_RXCHAIN_LINK(rxq, m)						\
    471 do {									\
    472 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
    473 	(rxq)->rxq_tailp = &(m)->m_next;				\
    474 } while (/*CONSTCOND*/0)
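
/*
 * Illustrative sketch of the tail-pointer idiom above: rxq_tailp
 * always points at the m_next slot where the next mbuf will be hung,
 * so appends are O(1) and never walk the chain:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_head == NULL, tailp == &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m1);	rxq_head == m1, tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(rxq, m2);	m1->m_next == m2, tailp == &m2->m_next
 */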
    475 
    476 #ifdef WM_EVENT_COUNTERS
    477 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
    478 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
    479 #else
    480 #define	WM_EVCNT_INCR(ev)	/* nothing */
    481 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
    482 #endif
    483 
    484 #define	CSR_READ(sc, reg)						\
    485 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
    486 #define	CSR_WRITE(sc, reg, val)						\
    487 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
    488 #define	CSR_WRITE_FLUSH(sc)						\
    489 	(void) CSR_READ((sc), WMREG_STATUS)
    490 
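/*
 * Typical read-modify-write pattern using the CSR macros (sketch
 * only; WMREG_CTRL and the CTRL_SLU "set link up" bit come from
 * if_wmreg.h):
 *
 *	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);
 *
 *	ctrl |= CTRL_SLU;
 *	CSR_WRITE(sc, WMREG_CTRL, ctrl);
 *	CSR_WRITE_FLUSH(sc);	flushes the posted write by reading
 *				WMREG_STATUS
 */
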
    491 #define ICH8_FLASH_READ32(sc, reg) \
    492 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    493 #define ICH8_FLASH_WRITE32(sc, reg, data) \
    494 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    495 
    496 #define ICH8_FLASH_READ16(sc, reg) \
    497 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
    498 #define ICH8_FLASH_WRITE16(sc, reg, data) \
    499 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
    500 
    501 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
    502 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
    503 
    504 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
    505 #define	WM_CDTXADDR_HI(txq, x)						\
    506 	(sizeof(bus_addr_t) == 8 ?					\
    507 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
    508 
    509 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
    510 #define	WM_CDRXADDR_HI(rxq, x)						\
    511 	(sizeof(bus_addr_t) == 8 ?					\
    512 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
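
/*
 * These _LO/_HI halves feed the 64-bit descriptor base registers
 * (TDBAL/TDBAH and RDBAL/RDBAH); with a 32-bit bus_addr_t the high
 * word is simply 0.  wm_set_dma_addr() applies the same split to
 * buffer addresses stored inside descriptors.
 */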
    513 
    514 /*
    515  * Register read/write functions.
    516  * Other than CSR_{READ|WRITE}().
    517  */
    518 #if 0
    519 static inline uint32_t wm_io_read(struct wm_softc *, int);
    520 #endif
    521 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
    522 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    523 	uint32_t, uint32_t);
    524 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
    525 
    526 /*
    527  * Descriptor sync/init functions.
    528  */
    529 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
    530 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
    531 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
    532 
    533 /*
    534  * Device driver interface functions and commonly used functions.
    535  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
    536  */
    537 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
    538 static int	wm_match(device_t, cfdata_t, void *);
    539 static void	wm_attach(device_t, device_t, void *);
    540 static int	wm_detach(device_t, int);
    541 static bool	wm_suspend(device_t, const pmf_qual_t *);
    542 static bool	wm_resume(device_t, const pmf_qual_t *);
    543 static void	wm_watchdog(struct ifnet *);
    544 static void	wm_tick(void *);
    545 static int	wm_ifflags_cb(struct ethercom *);
    546 static int	wm_ioctl(struct ifnet *, u_long, void *);
    547 /* MAC address related */
    548 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
    549 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
    550 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
    551 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
    552 static void	wm_set_filter(struct wm_softc *);
    553 /* Reset and init related */
    554 static void	wm_set_vlan(struct wm_softc *);
    555 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
    556 static void	wm_get_auto_rd_done(struct wm_softc *);
    557 static void	wm_lan_init_done(struct wm_softc *);
    558 static void	wm_get_cfg_done(struct wm_softc *);
    559 static void	wm_initialize_hardware_bits(struct wm_softc *);
    560 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
    561 static void	wm_reset(struct wm_softc *);
    562 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
    563 static void	wm_rxdrain(struct wm_rxqueue *);
    564 static void	wm_rss_getkey(uint8_t *);
    565 static void	wm_init_rss(struct wm_softc *);
    566 static void	wm_adjust_qnum(struct wm_softc *, int);
    567 static int	wm_setup_legacy(struct wm_softc *);
    568 static int	wm_setup_msix(struct wm_softc *);
    569 static int	wm_init(struct ifnet *);
    570 static int	wm_init_locked(struct ifnet *);
    571 static void	wm_stop(struct ifnet *, int);
    572 static void	wm_stop_locked(struct ifnet *, int);
    573 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
    574 static void	wm_82547_txfifo_stall(void *);
    575 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
    576 /* DMA related */
    577 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
    578 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
    579 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
    580 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
    581 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    582 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
    583 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
    584 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    585 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    586 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
    587 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    588 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    589 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
    590 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
    591 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
    592 static int	wm_alloc_txrx_queues(struct wm_softc *);
    593 static void	wm_free_txrx_queues(struct wm_softc *);
    594 static int	wm_init_txrx_queues(struct wm_softc *);
    595 /* Start */
    596 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    597     uint32_t *, uint8_t *);
    598 static void	wm_start(struct ifnet *);
    599 static void	wm_start_locked(struct ifnet *);
    600 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
    601     uint32_t *, uint32_t *, bool *);
    602 static void	wm_nq_start(struct ifnet *);
    603 static void	wm_nq_start_locked(struct ifnet *);
    604 /* Interrupt */
    605 static int	wm_txeof(struct wm_softc *);
    606 static void	wm_rxeof(struct wm_rxqueue *);
    607 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
    608 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
    609 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
    610 static void	wm_linkintr(struct wm_softc *, uint32_t);
    611 static int	wm_intr_legacy(void *);
    612 static int	wm_txintr_msix(void *);
    613 static int	wm_rxintr_msix(void *);
    614 static int	wm_linkintr_msix(void *);
    615 
    616 /*
    617  * Media related.
    618  * GMII, SGMII, TBI, SERDES and SFP.
    619  */
    620 /* Common */
    621 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
    622 /* GMII related */
    623 static void	wm_gmii_reset(struct wm_softc *);
    624 static int	wm_get_phy_id_82575(struct wm_softc *);
    625 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
    626 static int	wm_gmii_mediachange(struct ifnet *);
    627 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
    628 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
    629 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
    630 static int	wm_gmii_i82543_readreg(device_t, int, int);
    631 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
    632 static int	wm_gmii_i82544_readreg(device_t, int, int);
    633 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
    634 static int	wm_gmii_i80003_readreg(device_t, int, int);
    635 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
    636 static int	wm_gmii_bm_readreg(device_t, int, int);
    637 static void	wm_gmii_bm_writereg(device_t, int, int, int);
    638 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
    639 static int	wm_gmii_hv_readreg(device_t, int, int);
    640 static void	wm_gmii_hv_writereg(device_t, int, int, int);
    641 static int	wm_gmii_82580_readreg(device_t, int, int);
    642 static void	wm_gmii_82580_writereg(device_t, int, int, int);
    643 static int	wm_gmii_gs40g_readreg(device_t, int, int);
    644 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
    645 static void	wm_gmii_statchg(struct ifnet *);
    646 static int	wm_kmrn_readreg(struct wm_softc *, int);
    647 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
    648 /* SGMII */
    649 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
    650 static int	wm_sgmii_readreg(device_t, int, int);
    651 static void	wm_sgmii_writereg(device_t, int, int, int);
    652 /* TBI related */
    653 static void	wm_tbi_mediainit(struct wm_softc *);
    654 static int	wm_tbi_mediachange(struct ifnet *);
    655 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
    656 static int	wm_check_for_link(struct wm_softc *);
    657 static void	wm_tbi_tick(struct wm_softc *);
    658 /* SERDES related */
    659 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
    660 static int	wm_serdes_mediachange(struct ifnet *);
    661 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
    662 static void	wm_serdes_tick(struct wm_softc *);
    663 /* SFP related */
    664 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
    665 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
    666 
    667 /*
    668  * NVM related.
     669  * Microwire, SPI (with or without EERD) and Flash.
    670  */
    671 /* Misc functions */
    672 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
    673 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
    674 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
    675 /* Microwire */
    676 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
    677 /* SPI */
    678 static int	wm_nvm_ready_spi(struct wm_softc *);
    679 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
     680 /* Reading via the EERD register */
    681 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
    682 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
    683 /* Flash */
    684 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    685     unsigned int *);
    686 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
    687 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
    688 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    689 	uint16_t *);
    690 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
    691 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
    692 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
    693 /* iNVM */
    694 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
    695 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
     696 /* Locking, NVM type detection, checksum validation and reading */
    697 static int	wm_nvm_acquire(struct wm_softc *);
    698 static void	wm_nvm_release(struct wm_softc *);
    699 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
    700 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
    701 static int	wm_nvm_validate_checksum(struct wm_softc *);
    702 static void	wm_nvm_version_invm(struct wm_softc *);
    703 static void	wm_nvm_version(struct wm_softc *);
    704 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
    705 
    706 /*
    707  * Hardware semaphores.
     708  * Very complex...
    709  */
    710 static int	wm_get_swsm_semaphore(struct wm_softc *);
    711 static void	wm_put_swsm_semaphore(struct wm_softc *);
    712 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
    713 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
    714 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
    715 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
    716 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
    717 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
    718 
    719 /*
    720  * Management mode and power management related subroutines.
    721  * BMC, AMT, suspend/resume and EEE.
    722  */
    723 #ifdef WM_WOL
    724 static int	wm_check_mng_mode(struct wm_softc *);
    725 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
    726 static int	wm_check_mng_mode_82574(struct wm_softc *);
    727 static int	wm_check_mng_mode_generic(struct wm_softc *);
    728 #endif
    729 static int	wm_enable_mng_pass_thru(struct wm_softc *);
    730 static int	wm_check_reset_block(struct wm_softc *);
    731 static void	wm_get_hw_control(struct wm_softc *);
    732 static void	wm_release_hw_control(struct wm_softc *);
    733 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
    734 static void	wm_smbustopci(struct wm_softc *);
    735 static void	wm_init_manageability(struct wm_softc *);
    736 static void	wm_release_manageability(struct wm_softc *);
    737 static void	wm_get_wakeup(struct wm_softc *);
    738 #ifdef WM_WOL
    739 static void	wm_enable_phy_wakeup(struct wm_softc *);
    740 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
    741 static void	wm_enable_wakeup(struct wm_softc *);
    742 #endif
    743 /* LPLU (Low Power Link Up) */
    744 static void	wm_lplu_d0_disable(struct wm_softc *);
    745 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
    746 /* EEE */
    747 static void	wm_set_eee_i350(struct wm_softc *);
    748 
    749 /*
    750  * Workarounds (mainly PHY related).
     751  * Basically, the PHY workarounds are implemented in the PHY drivers.
    752  */
    753 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
    754 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
    755 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
    756 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
    757 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
    758 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
    759 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
    760 static void	wm_reset_init_script_82575(struct wm_softc *);
    761 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
    762 static void	wm_pll_workaround_i210(struct wm_softc *);
    763 
    764 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    765     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
    766 
    767 /*
    768  * Devices supported by this driver.
    769  */
    770 static const struct wm_product {
    771 	pci_vendor_id_t		wmp_vendor;
    772 	pci_product_id_t	wmp_product;
    773 	const char		*wmp_name;
    774 	wm_chip_type		wmp_type;
    775 	uint32_t		wmp_flags;
    776 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
    777 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
    778 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
    779 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
    780 #define WMP_MEDIATYPE(x)	((x) & 0x03)
    781 } wm_products[] = {
    782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
    783 	  "Intel i82542 1000BASE-X Ethernet",
    784 	  WM_T_82542_2_1,	WMP_F_FIBER },
    785 
    786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
    787 	  "Intel i82543GC 1000BASE-X Ethernet",
    788 	  WM_T_82543,		WMP_F_FIBER },
    789 
    790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
    791 	  "Intel i82543GC 1000BASE-T Ethernet",
    792 	  WM_T_82543,		WMP_F_COPPER },
    793 
    794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
    795 	  "Intel i82544EI 1000BASE-T Ethernet",
    796 	  WM_T_82544,		WMP_F_COPPER },
    797 
    798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
    799 	  "Intel i82544EI 1000BASE-X Ethernet",
    800 	  WM_T_82544,		WMP_F_FIBER },
    801 
    802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
    803 	  "Intel i82544GC 1000BASE-T Ethernet",
    804 	  WM_T_82544,		WMP_F_COPPER },
    805 
    806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
    807 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
    808 	  WM_T_82544,		WMP_F_COPPER },
    809 
    810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
    811 	  "Intel i82540EM 1000BASE-T Ethernet",
    812 	  WM_T_82540,		WMP_F_COPPER },
    813 
    814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
    815 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
    816 	  WM_T_82540,		WMP_F_COPPER },
    817 
    818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
    819 	  "Intel i82540EP 1000BASE-T Ethernet",
    820 	  WM_T_82540,		WMP_F_COPPER },
    821 
    822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
    823 	  "Intel i82540EP 1000BASE-T Ethernet",
    824 	  WM_T_82540,		WMP_F_COPPER },
    825 
    826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
    827 	  "Intel i82540EP 1000BASE-T Ethernet",
    828 	  WM_T_82540,		WMP_F_COPPER },
    829 
    830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
    831 	  "Intel i82545EM 1000BASE-T Ethernet",
    832 	  WM_T_82545,		WMP_F_COPPER },
    833 
    834 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
    835 	  "Intel i82545GM 1000BASE-T Ethernet",
    836 	  WM_T_82545_3,		WMP_F_COPPER },
    837 
    838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
    839 	  "Intel i82545GM 1000BASE-X Ethernet",
    840 	  WM_T_82545_3,		WMP_F_FIBER },
    841 
    842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
    843 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
    844 	  WM_T_82545_3,		WMP_F_SERDES },
    845 
    846 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
    847 	  "Intel i82546EB 1000BASE-T Ethernet",
    848 	  WM_T_82546,		WMP_F_COPPER },
    849 
    850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
    851 	  "Intel i82546EB 1000BASE-T Ethernet",
    852 	  WM_T_82546,		WMP_F_COPPER },
    853 
    854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
    855 	  "Intel i82545EM 1000BASE-X Ethernet",
    856 	  WM_T_82545,		WMP_F_FIBER },
    857 
    858 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
    859 	  "Intel i82546EB 1000BASE-X Ethernet",
    860 	  WM_T_82546,		WMP_F_FIBER },
    861 
    862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
    863 	  "Intel i82546GB 1000BASE-T Ethernet",
    864 	  WM_T_82546_3,		WMP_F_COPPER },
    865 
    866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
    867 	  "Intel i82546GB 1000BASE-X Ethernet",
    868 	  WM_T_82546_3,		WMP_F_FIBER },
    869 
    870 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
    871 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
    872 	  WM_T_82546_3,		WMP_F_SERDES },
    873 
    874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
    875 	  "i82546GB quad-port Gigabit Ethernet",
    876 	  WM_T_82546_3,		WMP_F_COPPER },
    877 
    878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
    879 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
    880 	  WM_T_82546_3,		WMP_F_COPPER },
    881 
    882 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
    883 	  "Intel PRO/1000MT (82546GB)",
    884 	  WM_T_82546_3,		WMP_F_COPPER },
    885 
    886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
    887 	  "Intel i82541EI 1000BASE-T Ethernet",
    888 	  WM_T_82541,		WMP_F_COPPER },
    889 
    890 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
    891 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
    892 	  WM_T_82541,		WMP_F_COPPER },
    893 
    894 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
    895 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
    896 	  WM_T_82541,		WMP_F_COPPER },
    897 
    898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
    899 	  "Intel i82541ER 1000BASE-T Ethernet",
    900 	  WM_T_82541_2,		WMP_F_COPPER },
    901 
    902 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
    903 	  "Intel i82541GI 1000BASE-T Ethernet",
    904 	  WM_T_82541_2,		WMP_F_COPPER },
    905 
    906 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
    907 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
    908 	  WM_T_82541_2,		WMP_F_COPPER },
    909 
    910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
    911 	  "Intel i82541PI 1000BASE-T Ethernet",
    912 	  WM_T_82541_2,		WMP_F_COPPER },
    913 
    914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
    915 	  "Intel i82547EI 1000BASE-T Ethernet",
    916 	  WM_T_82547,		WMP_F_COPPER },
    917 
    918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
    919 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
    920 	  WM_T_82547,		WMP_F_COPPER },
    921 
    922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
    923 	  "Intel i82547GI 1000BASE-T Ethernet",
    924 	  WM_T_82547_2,		WMP_F_COPPER },
    925 
    926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
    927 	  "Intel PRO/1000 PT (82571EB)",
    928 	  WM_T_82571,		WMP_F_COPPER },
    929 
    930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
    931 	  "Intel PRO/1000 PF (82571EB)",
    932 	  WM_T_82571,		WMP_F_FIBER },
    933 
    934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
    935 	  "Intel PRO/1000 PB (82571EB)",
    936 	  WM_T_82571,		WMP_F_SERDES },
    937 
    938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
    939 	  "Intel PRO/1000 QT (82571EB)",
    940 	  WM_T_82571,		WMP_F_COPPER },
    941 
    942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
    943 	  "Intel PRO/1000 PT Quad Port Server Adapter",
    944 	  WM_T_82571,		WMP_F_COPPER, },
    945 
    946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
    947 	  "Intel Gigabit PT Quad Port Server ExpressModule",
    948 	  WM_T_82571,		WMP_F_COPPER, },
    949 
    950 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
    951 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
    952 	  WM_T_82571,		WMP_F_SERDES, },
    953 
    954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
    955 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
    956 	  WM_T_82571,		WMP_F_SERDES, },
    957 
    958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
    959 	  "Intel 82571EB Quad 1000baseX Ethernet",
    960 	  WM_T_82571,		WMP_F_FIBER, },
    961 
    962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
    963 	  "Intel i82572EI 1000baseT Ethernet",
    964 	  WM_T_82572,		WMP_F_COPPER },
    965 
    966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
    967 	  "Intel i82572EI 1000baseX Ethernet",
    968 	  WM_T_82572,		WMP_F_FIBER },
    969 
    970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
    971 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
    972 	  WM_T_82572,		WMP_F_SERDES },
    973 
    974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
    975 	  "Intel i82572EI 1000baseT Ethernet",
    976 	  WM_T_82572,		WMP_F_COPPER },
    977 
    978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
    979 	  "Intel i82573E",
    980 	  WM_T_82573,		WMP_F_COPPER },
    981 
    982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
    983 	  "Intel i82573E IAMT",
    984 	  WM_T_82573,		WMP_F_COPPER },
    985 
    986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
    987 	  "Intel i82573L Gigabit Ethernet",
    988 	  WM_T_82573,		WMP_F_COPPER },
    989 
    990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
    991 	  "Intel i82574L",
    992 	  WM_T_82574,		WMP_F_COPPER },
    993 
    994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
    995 	  "Intel i82574L",
    996 	  WM_T_82574,		WMP_F_COPPER },
    997 
    998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
    999 	  "Intel i82583V",
   1000 	  WM_T_82583,		WMP_F_COPPER },
   1001 
   1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
   1003 	  "i80003 dual 1000baseT Ethernet",
   1004 	  WM_T_80003,		WMP_F_COPPER },
   1005 
   1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
   1007 	  "i80003 dual 1000baseX Ethernet",
   1008 	  WM_T_80003,		WMP_F_COPPER },
   1009 
   1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
   1011 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
   1012 	  WM_T_80003,		WMP_F_SERDES },
   1013 
   1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
   1015 	  "Intel i80003 1000baseT Ethernet",
   1016 	  WM_T_80003,		WMP_F_COPPER },
   1017 
   1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
   1019 	  "Intel i80003 Gigabit Ethernet (SERDES)",
   1020 	  WM_T_80003,		WMP_F_SERDES },
   1021 
   1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
   1023 	  "Intel i82801H (M_AMT) LAN Controller",
   1024 	  WM_T_ICH8,		WMP_F_COPPER },
   1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
   1026 	  "Intel i82801H (AMT) LAN Controller",
   1027 	  WM_T_ICH8,		WMP_F_COPPER },
   1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
   1029 	  "Intel i82801H LAN Controller",
   1030 	  WM_T_ICH8,		WMP_F_COPPER },
   1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
   1032 	  "Intel i82801H (IFE) LAN Controller",
   1033 	  WM_T_ICH8,		WMP_F_COPPER },
   1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
   1035 	  "Intel i82801H (M) LAN Controller",
   1036 	  WM_T_ICH8,		WMP_F_COPPER },
   1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
   1038 	  "Intel i82801H IFE (GT) LAN Controller",
   1039 	  WM_T_ICH8,		WMP_F_COPPER },
   1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
   1041 	  "Intel i82801H IFE (G) LAN Controller",
   1042 	  WM_T_ICH8,		WMP_F_COPPER },
   1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
   1044 	  "82801I (AMT) LAN Controller",
   1045 	  WM_T_ICH9,		WMP_F_COPPER },
   1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
   1047 	  "82801I LAN Controller",
   1048 	  WM_T_ICH9,		WMP_F_COPPER },
   1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
   1050 	  "82801I (G) LAN Controller",
   1051 	  WM_T_ICH9,		WMP_F_COPPER },
   1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
   1053 	  "82801I (GT) LAN Controller",
   1054 	  WM_T_ICH9,		WMP_F_COPPER },
   1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
   1056 	  "82801I (C) LAN Controller",
   1057 	  WM_T_ICH9,		WMP_F_COPPER },
   1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
   1059 	  "82801I mobile LAN Controller",
   1060 	  WM_T_ICH9,		WMP_F_COPPER },
   1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
   1062 	  "82801I mobile (V) LAN Controller",
   1063 	  WM_T_ICH9,		WMP_F_COPPER },
   1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
   1065 	  "82801I mobile (AMT) LAN Controller",
   1066 	  WM_T_ICH9,		WMP_F_COPPER },
   1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
   1068 	  "82567LM-4 LAN Controller",
   1069 	  WM_T_ICH9,		WMP_F_COPPER },
   1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
   1071 	  "82567V-3 LAN Controller",
   1072 	  WM_T_ICH9,		WMP_F_COPPER },
   1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
   1074 	  "82567LM-2 LAN Controller",
   1075 	  WM_T_ICH10,		WMP_F_COPPER },
   1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
   1077 	  "82567LF-2 LAN Controller",
   1078 	  WM_T_ICH10,		WMP_F_COPPER },
   1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
   1080 	  "82567LM-3 LAN Controller",
   1081 	  WM_T_ICH10,		WMP_F_COPPER },
   1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
   1083 	  "82567LF-3 LAN Controller",
   1084 	  WM_T_ICH10,		WMP_F_COPPER },
   1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
   1086 	  "82567V-2 LAN Controller",
   1087 	  WM_T_ICH10,		WMP_F_COPPER },
   1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
   1089 	  "82567V-3? LAN Controller",
   1090 	  WM_T_ICH10,		WMP_F_COPPER },
   1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
   1092 	  "HANKSVILLE LAN Controller",
   1093 	  WM_T_ICH10,		WMP_F_COPPER },
   1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
   1095 	  "PCH LAN (82577LM) Controller",
   1096 	  WM_T_PCH,		WMP_F_COPPER },
   1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
   1098 	  "PCH LAN (82577LC) Controller",
   1099 	  WM_T_PCH,		WMP_F_COPPER },
   1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
   1101 	  "PCH LAN (82578DM) Controller",
   1102 	  WM_T_PCH,		WMP_F_COPPER },
   1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
   1104 	  "PCH LAN (82578DC) Controller",
   1105 	  WM_T_PCH,		WMP_F_COPPER },
   1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
   1107 	  "PCH2 LAN (82579LM) Controller",
   1108 	  WM_T_PCH2,		WMP_F_COPPER },
   1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
   1110 	  "PCH2 LAN (82579V) Controller",
   1111 	  WM_T_PCH2,		WMP_F_COPPER },
   1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
   1113 	  "82575EB dual-1000baseT Ethernet",
   1114 	  WM_T_82575,		WMP_F_COPPER },
   1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
   1116 	  "82575EB dual-1000baseX Ethernet (SERDES)",
   1117 	  WM_T_82575,		WMP_F_SERDES },
   1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
   1119 	  "82575GB quad-1000baseT Ethernet",
   1120 	  WM_T_82575,		WMP_F_COPPER },
   1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
   1122 	  "82575GB quad-1000baseT Ethernet (PM)",
   1123 	  WM_T_82575,		WMP_F_COPPER },
   1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
   1125 	  "82576 1000BaseT Ethernet",
   1126 	  WM_T_82576,		WMP_F_COPPER },
   1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
   1128 	  "82576 1000BaseX Ethernet",
   1129 	  WM_T_82576,		WMP_F_FIBER },
   1130 
   1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
   1132 	  "82576 gigabit Ethernet (SERDES)",
   1133 	  WM_T_82576,		WMP_F_SERDES },
   1134 
   1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
   1136 	  "82576 quad-1000BaseT Ethernet",
   1137 	  WM_T_82576,		WMP_F_COPPER },
   1138 
   1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
   1140 	  "82576 Gigabit ET2 Quad Port Server Adapter",
   1141 	  WM_T_82576,		WMP_F_COPPER },
   1142 
   1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
   1144 	  "82576 gigabit Ethernet",
   1145 	  WM_T_82576,		WMP_F_COPPER },
   1146 
   1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
   1148 	  "82576 gigabit Ethernet (SERDES)",
   1149 	  WM_T_82576,		WMP_F_SERDES },
   1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
   1151 	  "82576 quad-gigabit Ethernet (SERDES)",
   1152 	  WM_T_82576,		WMP_F_SERDES },
   1153 
   1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
   1155 	  "82580 1000BaseT Ethernet",
   1156 	  WM_T_82580,		WMP_F_COPPER },
   1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
   1158 	  "82580 1000BaseX Ethernet",
   1159 	  WM_T_82580,		WMP_F_FIBER },
   1160 
   1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
   1162 	  "82580 1000BaseT Ethernet (SERDES)",
   1163 	  WM_T_82580,		WMP_F_SERDES },
   1164 
   1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
   1166 	  "82580 gigabit Ethernet (SGMII)",
   1167 	  WM_T_82580,		WMP_F_COPPER },
   1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
   1169 	  "82580 dual-1000BaseT Ethernet",
   1170 	  WM_T_82580,		WMP_F_COPPER },
   1171 
   1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
   1173 	  "82580 quad-1000BaseX Ethernet",
   1174 	  WM_T_82580,		WMP_F_FIBER },
   1175 
   1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
   1177 	  "DH89XXCC Gigabit Ethernet (SGMII)",
   1178 	  WM_T_82580,		WMP_F_COPPER },
   1179 
   1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
   1181 	  "DH89XXCC Gigabit Ethernet (SERDES)",
   1182 	  WM_T_82580,		WMP_F_SERDES },
   1183 
   1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
   1185 	  "DH89XXCC 1000BASE-KX Ethernet",
   1186 	  WM_T_82580,		WMP_F_SERDES },
   1187 
   1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
   1189 	  "DH89XXCC Gigabit Ethernet (SFP)",
   1190 	  WM_T_82580,		WMP_F_SERDES },
   1191 
   1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
   1193 	  "I350 Gigabit Network Connection",
   1194 	  WM_T_I350,		WMP_F_COPPER },
   1195 
   1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
   1197 	  "I350 Gigabit Fiber Network Connection",
   1198 	  WM_T_I350,		WMP_F_FIBER },
   1199 
   1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
   1201 	  "I350 Gigabit Backplane Connection",
   1202 	  WM_T_I350,		WMP_F_SERDES },
   1203 
   1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
   1205 	  "I350 Quad Port Gigabit Ethernet",
   1206 	  WM_T_I350,		WMP_F_SERDES },
   1207 
   1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
   1209 	  "I350 Gigabit Connection",
   1210 	  WM_T_I350,		WMP_F_COPPER },
   1211 
   1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
   1213 	  "I354 Gigabit Ethernet (KX)",
   1214 	  WM_T_I354,		WMP_F_SERDES },
   1215 
   1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
   1217 	  "I354 Gigabit Ethernet (SGMII)",
   1218 	  WM_T_I354,		WMP_F_COPPER },
   1219 
   1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
   1221 	  "I354 Gigabit Ethernet (2.5G)",
   1222 	  WM_T_I354,		WMP_F_COPPER },
   1223 
   1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
   1225 	  "I210-T1 Ethernet Server Adapter",
   1226 	  WM_T_I210,		WMP_F_COPPER },
   1227 
   1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
   1229 	  "I210 Ethernet (Copper OEM)",
   1230 	  WM_T_I210,		WMP_F_COPPER },
   1231 
   1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
   1233 	  "I210 Ethernet (Copper IT)",
   1234 	  WM_T_I210,		WMP_F_COPPER },
   1235 
   1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
   1237 	  "I210 Ethernet (FLASH less)",
   1238 	  WM_T_I210,		WMP_F_COPPER },
   1239 
   1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
   1241 	  "I210 Gigabit Ethernet (Fiber)",
   1242 	  WM_T_I210,		WMP_F_FIBER },
   1243 
   1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
   1245 	  "I210 Gigabit Ethernet (SERDES)",
   1246 	  WM_T_I210,		WMP_F_SERDES },
   1247 
   1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
   1249 	  "I210 Gigabit Ethernet (FLASH less)",
   1250 	  WM_T_I210,		WMP_F_SERDES },
   1251 
   1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
   1253 	  "I210 Gigabit Ethernet (SGMII)",
   1254 	  WM_T_I210,		WMP_F_COPPER },
   1255 
   1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
   1257 	  "I211 Ethernet (COPPER)",
   1258 	  WM_T_I211,		WMP_F_COPPER },
   1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
   1260 	  "I217 V Ethernet Connection",
   1261 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
   1263 	  "I217 LM Ethernet Connection",
   1264 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
   1266 	  "I218 V Ethernet Connection",
   1267 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
   1269 	  "I218 V Ethernet Connection",
   1270 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
   1272 	  "I218 V Ethernet Connection",
   1273 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
   1275 	  "I218 LM Ethernet Connection",
   1276 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
   1278 	  "I218 LM Ethernet Connection",
   1279 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
   1281 	  "I218 LM Ethernet Connection",
   1282 	  WM_T_PCH_LPT,		WMP_F_COPPER },
   1283 	{ 0,			0,
   1284 	  NULL,
   1285 	  0,			0 },
   1286 };
   1287 
   1288 #ifdef WM_EVENT_COUNTERS
   1289 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
   1290 #endif /* WM_EVENT_COUNTERS */
    1291 
   1293 /*
   1294  * Register read/write functions.
    1295  * These are the accessors other than CSR_{READ|WRITE}().
   1296  */
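         /*
          * The I/O-mapped variants below use a two-register window:
          * the target register offset is written at I/O offset 0
          * (conventionally called IOADDR) and the data is then read or
          * written at I/O offset 4 (IODATA).
          */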
   1297 
   1298 #if 0 /* Not currently used */
   1299 static inline uint32_t
   1300 wm_io_read(struct wm_softc *sc, int reg)
   1301 {
   1302 
   1303 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1304 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
   1305 }
   1306 #endif
   1307 
   1308 static inline void
   1309 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
   1310 {
   1311 
   1312 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
   1313 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
   1314 }
   1315 
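         /*
          * Write an 8-bit value into an indirect 82575 controller
          * register (e.g. SCTL): the data and target offset are packed
          * into a single register write, then the READY bit is polled,
          * 5us per iteration, up to SCTL_CTL_POLL_TIMEOUT iterations.
          */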
   1316 static inline void
   1317 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
   1318     uint32_t data)
   1319 {
   1320 	uint32_t regval;
   1321 	int i;
   1322 
   1323 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
   1324 
   1325 	CSR_WRITE(sc, reg, regval);
   1326 
   1327 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
   1328 		delay(5);
   1329 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
   1330 			break;
   1331 	}
   1332 	if (i == SCTL_CTL_POLL_TIMEOUT) {
   1333 		aprint_error("%s: WARNING:"
   1334 		    " i82575 reg 0x%08x setup did not indicate ready\n",
   1335 		    device_xname(sc->sc_dev), reg);
   1336 	}
   1337 }
   1338 
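         /*
          * Split a bus address across the two 32-bit halves of a
          * wiseman descriptor address field; when bus_addr_t is only
          * 32 bits wide the high word is always zero.
          */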
   1339 static inline void
   1340 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
   1341 {
   1342 	wa->wa_low = htole32(v & 0xffffffffU);
   1343 	if (sizeof(bus_addr_t) == 8)
   1344 		wa->wa_high = htole32((uint64_t) v >> 32);
   1345 	else
   1346 		wa->wa_high = 0;
   1347 }
   1348 
   1349 /*
   1350  * Descriptor sync/init functions.
   1351  */
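         /*
          * wm_cdtxsync() copes with ring wrap: syncing, say, 8
          * descriptors starting 4 slots before the end of the ring is
          * split into two bus_dmamap_sync() calls, one for the tail of
          * the ring and one for the head.
          */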
   1352 static inline void
   1353 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
   1354 {
   1355 	struct wm_softc *sc = txq->txq_sc;
   1356 
   1357 	/* If it will wrap around, sync to the end of the ring. */
   1358 	if ((start + num) > WM_NTXDESC(txq)) {
   1359 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1360 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
   1361 		    (WM_NTXDESC(txq) - start), ops);
   1362 		num -= (WM_NTXDESC(txq) - start);
   1363 		start = 0;
   1364 	}
   1365 
   1366 	/* Now sync whatever is left. */
   1367 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
   1368 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
   1369 }
   1370 
   1371 static inline void
   1372 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
   1373 {
   1374 	struct wm_softc *sc = rxq->rxq_sc;
   1375 
   1376 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
   1377 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
   1378 }
   1379 
   1380 static inline void
   1381 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
   1382 {
   1383 	struct wm_softc *sc = rxq->rxq_sc;
   1384 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
   1385 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
   1386 	struct mbuf *m = rxs->rxs_mbuf;
   1387 
   1388 	/*
   1389 	 * Note: We scoot the packet forward 2 bytes in the buffer
   1390 	 * so that the payload after the Ethernet header is aligned
   1391 	 * to a 4-byte boundary.
    1392 	 *
   1393 	 * XXX BRAINDAMAGE ALERT!
   1394 	 * The stupid chip uses the same size for every buffer, which
   1395 	 * is set in the Receive Control register.  We are using the 2K
   1396 	 * size option, but what we REALLY want is (2K - 2)!  For this
   1397 	 * reason, we can't "scoot" packets longer than the standard
   1398 	 * Ethernet MTU.  On strict-alignment platforms, if the total
   1399 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
   1400 	 * the upper layer copy the headers.
   1401 	 */
   1402 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
   1403 
   1404 	wm_set_dma_addr(&rxd->wrx_addr,
   1405 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
   1406 	rxd->wrx_len = 0;
   1407 	rxd->wrx_cksum = 0;
   1408 	rxd->wrx_status = 0;
   1409 	rxd->wrx_errors = 0;
   1410 	rxd->wrx_special = 0;
   1411 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   1412 
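         	/*
         	 * Advance the hardware RX descriptor tail so the chip may
         	 * use the newly initialized slot.
         	 */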
   1413 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
   1414 }
   1415 
   1416 /*
   1417  * Device driver interface functions and commonly used functions.
   1418  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
   1419  */
   1420 
    1421 /* Look up a device in the supported-products table */
   1422 static const struct wm_product *
   1423 wm_lookup(const struct pci_attach_args *pa)
   1424 {
   1425 	const struct wm_product *wmp;
   1426 
   1427 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
   1428 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
   1429 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
   1430 			return wmp;
   1431 	}
   1432 	return NULL;
   1433 }
   1434 
   1435 /* The match function (ca_match) */
   1436 static int
   1437 wm_match(device_t parent, cfdata_t cf, void *aux)
   1438 {
   1439 	struct pci_attach_args *pa = aux;
   1440 
   1441 	if (wm_lookup(pa) != NULL)
   1442 		return 1;
   1443 
   1444 	return 0;
   1445 }
   1446 
   1447 /* The attach function (ca_attach) */
   1448 static void
   1449 wm_attach(device_t parent, device_t self, void *aux)
   1450 {
   1451 	struct wm_softc *sc = device_private(self);
   1452 	struct pci_attach_args *pa = aux;
   1453 	prop_dictionary_t dict;
   1454 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   1455 	pci_chipset_tag_t pc = pa->pa_pc;
   1456 	int counts[PCI_INTR_TYPE_SIZE];
   1457 	pci_intr_type_t max_type;
   1458 	const char *eetype, *xname;
   1459 	bus_space_tag_t memt;
   1460 	bus_space_handle_t memh;
   1461 	bus_size_t memsize;
   1462 	int memh_valid;
   1463 	int i, error;
   1464 	const struct wm_product *wmp;
   1465 	prop_data_t ea;
   1466 	prop_number_t pn;
   1467 	uint8_t enaddr[ETHER_ADDR_LEN];
   1468 	uint16_t cfg1, cfg2, swdpin, nvmword;
   1469 	pcireg_t preg, memtype;
   1470 	uint16_t eeprom_data, apme_mask;
   1471 	bool force_clear_smbi;
   1472 	uint32_t link_mode;
   1473 	uint32_t reg;
   1474 
   1475 	sc->sc_dev = self;
   1476 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
   1477 	sc->sc_stopping = false;
   1478 
   1479 	wmp = wm_lookup(pa);
   1480 #ifdef DIAGNOSTIC
   1481 	if (wmp == NULL) {
   1482 		printf("\n");
   1483 		panic("wm_attach: impossible");
   1484 	}
   1485 #endif
   1486 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
   1487 
   1488 	sc->sc_pc = pa->pa_pc;
   1489 	sc->sc_pcitag = pa->pa_tag;
   1490 
   1491 	if (pci_dma64_available(pa))
   1492 		sc->sc_dmat = pa->pa_dmat64;
   1493 	else
   1494 		sc->sc_dmat = pa->pa_dmat;
   1495 
   1496 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
   1497 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
   1498 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
   1499 
   1500 	sc->sc_type = wmp->wmp_type;
   1501 	if (sc->sc_type < WM_T_82543) {
   1502 		if (sc->sc_rev < 2) {
   1503 			aprint_error_dev(sc->sc_dev,
   1504 			    "i82542 must be at least rev. 2\n");
   1505 			return;
   1506 		}
   1507 		if (sc->sc_rev < 3)
   1508 			sc->sc_type = WM_T_82542_2_0;
   1509 	}
   1510 
   1511 	/*
   1512 	 * Disable MSI for Errata:
   1513 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
   1514 	 *
   1515 	 *  82544: Errata 25
   1516 	 *  82540: Errata  6 (easy to reproduce device timeout)
   1517 	 *  82545: Errata  4 (easy to reproduce device timeout)
   1518 	 *  82546: Errata 26 (easy to reproduce device timeout)
   1519 	 *  82541: Errata  7 (easy to reproduce device timeout)
   1520 	 *
   1521 	 * "Byte Enables 2 and 3 are not set on MSI writes"
   1522 	 *
   1523 	 *  82571 & 82572: Errata 63
   1524 	 */
   1525 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
   1526 	    || (sc->sc_type == WM_T_82572))
   1527 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
   1528 
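
         	/*
         	 * 82575 and newer MACs use the new-style queues and
         	 * advanced descriptors; flag them here so wm_nq_start()
         	 * is selected as the if_start routine later in attach.
         	 */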
   1529 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1530 	    || (sc->sc_type == WM_T_82580)
   1531 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   1532 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   1533 		sc->sc_flags |= WM_F_NEWQUEUE;
   1534 
   1535 	/* Set device properties (mactype) */
   1536 	dict = device_properties(sc->sc_dev);
   1537 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
   1538 
   1539 	/*
    1540 	 * Map the device.  All devices support memory-mapped access,
   1541 	 * and it is really required for normal operation.
   1542 	 */
   1543 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
   1544 	switch (memtype) {
   1545 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
   1546 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
   1547 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
   1548 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
   1549 		break;
   1550 	default:
   1551 		memh_valid = 0;
   1552 		break;
   1553 	}
   1554 
   1555 	if (memh_valid) {
   1556 		sc->sc_st = memt;
   1557 		sc->sc_sh = memh;
   1558 		sc->sc_ss = memsize;
   1559 	} else {
   1560 		aprint_error_dev(sc->sc_dev,
   1561 		    "unable to map device registers\n");
   1562 		return;
   1563 	}
   1564 
   1565 	/*
   1566 	 * In addition, i82544 and later support I/O mapped indirect
   1567 	 * register access.  It is not desirable (nor supported in
   1568 	 * this driver) to use it for normal operation, though it is
   1569 	 * required to work around bugs in some chip versions.
   1570 	 */
   1571 	if (sc->sc_type >= WM_T_82544) {
   1572 		/* First we have to find the I/O BAR. */
   1573 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
   1574 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
   1575 			if (memtype == PCI_MAPREG_TYPE_IO)
   1576 				break;
   1577 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
   1578 			    PCI_MAPREG_MEM_TYPE_64BIT)
   1579 				i += 4;	/* skip high bits, too */
   1580 		}
   1581 		if (i < PCI_MAPREG_END) {
   1582 			/*
    1583 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
    1584 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO BAR.
    1585 			 * That is not a problem, because those newer chips
    1586 			 * don't have this bug.
    1587 			 *
    1588 			 * The i8254x apparently doesn't respond when the
    1589 			 * I/O BAR is 0, which looks as if it has never
    1590 			 * been configured.
   1591 			 */
   1592 			preg = pci_conf_read(pc, pa->pa_tag, i);
   1593 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
   1594 				aprint_error_dev(sc->sc_dev,
   1595 				    "WARNING: I/O BAR at zero.\n");
   1596 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
   1597 					0, &sc->sc_iot, &sc->sc_ioh,
   1598 					NULL, &sc->sc_ios) == 0) {
   1599 				sc->sc_flags |= WM_F_IOH_VALID;
   1600 			} else {
   1601 				aprint_error_dev(sc->sc_dev,
   1602 				    "WARNING: unable to map I/O space\n");
   1603 			}
   1604 		}
   1605 
   1606 	}
   1607 
   1608 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
   1609 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
   1610 	preg |= PCI_COMMAND_MASTER_ENABLE;
   1611 	if (sc->sc_type < WM_T_82542_2_1)
   1612 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
   1613 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
   1614 
   1615 	/* power up chip */
   1616 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
   1617 	    NULL)) && error != EOPNOTSUPP) {
   1618 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
   1619 		return;
   1620 	}
   1621 
   1622 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
   1623 
   1624 	/* Allocation settings */
   1625 	max_type = PCI_INTR_TYPE_MSIX;
   1626 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   1627 	counts[PCI_INTR_TYPE_MSI] = 1;
   1628 	counts[PCI_INTR_TYPE_INTX] = 1;
   1629 
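         	/*
         	 * Interrupt allocation falls back step by step: first try
         	 * MSI-X with one vector per TX/RX queue plus one extra,
         	 * then plain MSI, then a legacy INTx line, releasing the
         	 * previous allocation before each retry.
         	 */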
   1630 alloc_retry:
   1631 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
   1632 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
   1633 		return;
   1634 	}
   1635 
   1636 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
   1637 		error = wm_setup_msix(sc);
   1638 		if (error) {
   1639 			pci_intr_release(pc, sc->sc_intrs,
   1640 			    counts[PCI_INTR_TYPE_MSIX]);
   1641 
   1642 			/* Setup for MSI: Disable MSI-X */
   1643 			max_type = PCI_INTR_TYPE_MSI;
   1644 			counts[PCI_INTR_TYPE_MSI] = 1;
   1645 			counts[PCI_INTR_TYPE_INTX] = 1;
   1646 			goto alloc_retry;
   1647 		}
    1648 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
   1649 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1650 		error = wm_setup_legacy(sc);
   1651 		if (error) {
   1652 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1653 			    counts[PCI_INTR_TYPE_MSI]);
   1654 
   1655 			/* The next try is for INTx: Disable MSI */
   1656 			max_type = PCI_INTR_TYPE_INTX;
   1657 			counts[PCI_INTR_TYPE_INTX] = 1;
   1658 			goto alloc_retry;
   1659 		}
   1660 	} else {
   1661 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
   1662 		error = wm_setup_legacy(sc);
   1663 		if (error) {
   1664 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
   1665 			    counts[PCI_INTR_TYPE_INTX]);
   1666 			return;
   1667 		}
   1668 	}
   1669 
   1670 	/*
   1671 	 * Check the function ID (unit number of the chip).
   1672 	 */
   1673 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
   1674 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
   1675 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   1676 	    || (sc->sc_type == WM_T_82580)
   1677 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   1678 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
   1679 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
   1680 	else
   1681 		sc->sc_funcid = 0;
   1682 
   1683 	/*
   1684 	 * Determine a few things about the bus we're connected to.
   1685 	 */
   1686 	if (sc->sc_type < WM_T_82543) {
   1687 		/* We don't really know the bus characteristics here. */
   1688 		sc->sc_bus_speed = 33;
   1689 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
   1690 		/*
   1691 		 * CSA (Communication Streaming Architecture) is about as fast
    1692 		 * as a 32-bit 66MHz PCI bus.
   1693 		 */
   1694 		sc->sc_flags |= WM_F_CSA;
   1695 		sc->sc_bus_speed = 66;
   1696 		aprint_verbose_dev(sc->sc_dev,
   1697 		    "Communication Streaming Architecture\n");
   1698 		if (sc->sc_type == WM_T_82547) {
   1699 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
   1700 			callout_setfunc(&sc->sc_txfifo_ch,
   1701 					wm_82547_txfifo_stall, sc);
   1702 			aprint_verbose_dev(sc->sc_dev,
   1703 			    "using 82547 Tx FIFO stall work-around\n");
   1704 		}
   1705 	} else if (sc->sc_type >= WM_T_82571) {
   1706 		sc->sc_flags |= WM_F_PCIE;
   1707 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   1708 		    && (sc->sc_type != WM_T_ICH10)
   1709 		    && (sc->sc_type != WM_T_PCH)
   1710 		    && (sc->sc_type != WM_T_PCH2)
   1711 		    && (sc->sc_type != WM_T_PCH_LPT)) {
   1712 			/* ICH* and PCH* have no PCIe capability registers */
   1713 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1714 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
   1715 				NULL) == 0)
   1716 				aprint_error_dev(sc->sc_dev,
   1717 				    "unable to find PCIe capability\n");
   1718 		}
   1719 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
   1720 	} else {
   1721 		reg = CSR_READ(sc, WMREG_STATUS);
   1722 		if (reg & STATUS_BUS64)
   1723 			sc->sc_flags |= WM_F_BUS64;
   1724 		if ((reg & STATUS_PCIX_MODE) != 0) {
   1725 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
   1726 
   1727 			sc->sc_flags |= WM_F_PCIX;
   1728 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
   1729 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
   1730 				aprint_error_dev(sc->sc_dev,
   1731 				    "unable to find PCIX capability\n");
   1732 			else if (sc->sc_type != WM_T_82545_3 &&
   1733 				 sc->sc_type != WM_T_82546_3) {
   1734 				/*
   1735 				 * Work around a problem caused by the BIOS
   1736 				 * setting the max memory read byte count
   1737 				 * incorrectly.
   1738 				 */
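         				/*
         				 * MMRBC is encoded as (512 << field),
         				 * e.g. field 3 = 4096 bytes; clamp the
         				 * command register to the maximum the
         				 * device advertises in PCIX_STATUS.
         				 */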
   1739 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1740 				    sc->sc_pcixe_capoff + PCIX_CMD);
   1741 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
   1742 				    sc->sc_pcixe_capoff + PCIX_STATUS);
   1743 
   1744 				bytecnt =
   1745 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
   1746 				    PCIX_CMD_BYTECNT_SHIFT;
   1747 				maxb =
   1748 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
   1749 				    PCIX_STATUS_MAXB_SHIFT;
   1750 				if (bytecnt > maxb) {
   1751 					aprint_verbose_dev(sc->sc_dev,
   1752 					    "resetting PCI-X MMRBC: %d -> %d\n",
   1753 					    512 << bytecnt, 512 << maxb);
   1754 					pcix_cmd = (pcix_cmd &
   1755 					    ~PCIX_CMD_BYTECNT_MASK) |
   1756 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
   1757 					pci_conf_write(pa->pa_pc, pa->pa_tag,
   1758 					    sc->sc_pcixe_capoff + PCIX_CMD,
   1759 					    pcix_cmd);
   1760 				}
   1761 			}
   1762 		}
   1763 		/*
   1764 		 * The quad port adapter is special; it has a PCIX-PCIX
   1765 		 * bridge on the board, and can run the secondary bus at
   1766 		 * a higher speed.
   1767 		 */
   1768 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
   1769 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
   1770 								      : 66;
   1771 		} else if (sc->sc_flags & WM_F_PCIX) {
   1772 			switch (reg & STATUS_PCIXSPD_MASK) {
   1773 			case STATUS_PCIXSPD_50_66:
   1774 				sc->sc_bus_speed = 66;
   1775 				break;
   1776 			case STATUS_PCIXSPD_66_100:
   1777 				sc->sc_bus_speed = 100;
   1778 				break;
   1779 			case STATUS_PCIXSPD_100_133:
   1780 				sc->sc_bus_speed = 133;
   1781 				break;
   1782 			default:
   1783 				aprint_error_dev(sc->sc_dev,
   1784 				    "unknown PCIXSPD %d; assuming 66MHz\n",
   1785 				    reg & STATUS_PCIXSPD_MASK);
   1786 				sc->sc_bus_speed = 66;
   1787 				break;
   1788 			}
   1789 		} else
   1790 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
   1791 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
   1792 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
   1793 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
   1794 	}
   1795 
   1796 	/* clear interesting stat counters */
   1797 	CSR_READ(sc, WMREG_COLC);
   1798 	CSR_READ(sc, WMREG_RXERRC);
   1799 
   1800 	/* get PHY control from SMBus to PCIe */
   1801 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   1802 	    || (sc->sc_type == WM_T_PCH_LPT))
   1803 		wm_smbustopci(sc);
   1804 
   1805 	/* Reset the chip to a known state. */
   1806 	wm_reset(sc);
   1807 
   1808 	/* Get some information about the EEPROM. */
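         	/*
         	 * The NVM comes in several flavours: a Microwire or SPI
         	 * serial EEPROM on older parts, a FLASH part on ICH/PCH
         	 * chips, and the internal iNVM on I210/I211.  Record the
         	 * word size, address bits and access/locking method here.
         	 */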
   1809 	switch (sc->sc_type) {
   1810 	case WM_T_82542_2_0:
   1811 	case WM_T_82542_2_1:
   1812 	case WM_T_82543:
   1813 	case WM_T_82544:
   1814 		/* Microwire */
   1815 		sc->sc_nvm_wordsize = 64;
   1816 		sc->sc_nvm_addrbits = 6;
   1817 		break;
   1818 	case WM_T_82540:
   1819 	case WM_T_82545:
   1820 	case WM_T_82545_3:
   1821 	case WM_T_82546:
   1822 	case WM_T_82546_3:
   1823 		/* Microwire */
   1824 		reg = CSR_READ(sc, WMREG_EECD);
   1825 		if (reg & EECD_EE_SIZE) {
   1826 			sc->sc_nvm_wordsize = 256;
   1827 			sc->sc_nvm_addrbits = 8;
   1828 		} else {
   1829 			sc->sc_nvm_wordsize = 64;
   1830 			sc->sc_nvm_addrbits = 6;
   1831 		}
   1832 		sc->sc_flags |= WM_F_LOCK_EECD;
   1833 		break;
   1834 	case WM_T_82541:
   1835 	case WM_T_82541_2:
   1836 	case WM_T_82547:
   1837 	case WM_T_82547_2:
   1838 		sc->sc_flags |= WM_F_LOCK_EECD;
   1839 		reg = CSR_READ(sc, WMREG_EECD);
   1840 		if (reg & EECD_EE_TYPE) {
   1841 			/* SPI */
   1842 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1843 			wm_nvm_set_addrbits_size_eecd(sc);
   1844 		} else {
   1845 			/* Microwire */
   1846 			if ((reg & EECD_EE_ABITS) != 0) {
   1847 				sc->sc_nvm_wordsize = 256;
   1848 				sc->sc_nvm_addrbits = 8;
   1849 			} else {
   1850 				sc->sc_nvm_wordsize = 64;
   1851 				sc->sc_nvm_addrbits = 6;
   1852 			}
   1853 		}
   1854 		break;
   1855 	case WM_T_82571:
   1856 	case WM_T_82572:
   1857 		/* SPI */
   1858 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1859 		wm_nvm_set_addrbits_size_eecd(sc);
   1860 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
   1861 		break;
   1862 	case WM_T_82573:
   1863 		sc->sc_flags |= WM_F_LOCK_SWSM;
   1864 		/* FALLTHROUGH */
   1865 	case WM_T_82574:
   1866 	case WM_T_82583:
   1867 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
   1868 			sc->sc_flags |= WM_F_EEPROM_FLASH;
   1869 			sc->sc_nvm_wordsize = 2048;
   1870 		} else {
   1871 			/* SPI */
   1872 			sc->sc_flags |= WM_F_EEPROM_SPI;
   1873 			wm_nvm_set_addrbits_size_eecd(sc);
   1874 		}
   1875 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
   1876 		break;
   1877 	case WM_T_82575:
   1878 	case WM_T_82576:
   1879 	case WM_T_82580:
   1880 	case WM_T_I350:
   1881 	case WM_T_I354:
   1882 	case WM_T_80003:
   1883 		/* SPI */
   1884 		sc->sc_flags |= WM_F_EEPROM_SPI;
   1885 		wm_nvm_set_addrbits_size_eecd(sc);
   1886 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
   1887 		    | WM_F_LOCK_SWSM;
   1888 		break;
   1889 	case WM_T_ICH8:
   1890 	case WM_T_ICH9:
   1891 	case WM_T_ICH10:
   1892 	case WM_T_PCH:
   1893 	case WM_T_PCH2:
   1894 	case WM_T_PCH_LPT:
   1895 		/* FLASH */
   1896 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
   1897 		sc->sc_nvm_wordsize = 2048;
   1898 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
   1899 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
   1900 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
   1901 			aprint_error_dev(sc->sc_dev,
   1902 			    "can't map FLASH registers\n");
   1903 			goto out;
   1904 		}
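         		/*
         		 * GFPREG holds the first and last sectors of the
         		 * flash region.  The difference, converted from
         		 * sectors to bytes and then to 16-bit words, is
         		 * halved because the region holds two NVM banks.
         		 */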
   1905 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
   1906 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
   1907 						ICH_FLASH_SECTOR_SIZE;
   1908 		sc->sc_ich8_flash_bank_size =
   1909 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
   1910 		sc->sc_ich8_flash_bank_size -=
   1911 		    (reg & ICH_GFPREG_BASE_MASK);
   1912 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
   1913 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
   1914 		break;
   1915 	case WM_T_I210:
   1916 	case WM_T_I211:
   1917 		if (wm_nvm_get_flash_presence_i210(sc)) {
   1918 			wm_nvm_set_addrbits_size_eecd(sc);
   1919 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
   1920 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
   1921 		} else {
   1922 			sc->sc_nvm_wordsize = INVM_SIZE;
   1923 			sc->sc_flags |= WM_F_EEPROM_INVM;
   1924 			sc->sc_flags |= WM_F_LOCK_SWFW;
   1925 		}
   1926 		break;
   1927 	default:
   1928 		break;
   1929 	}
   1930 
   1931 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
   1932 	switch (sc->sc_type) {
   1933 	case WM_T_82571:
   1934 	case WM_T_82572:
   1935 		reg = CSR_READ(sc, WMREG_SWSM2);
   1936 		if ((reg & SWSM2_LOCK) == 0) {
   1937 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
   1938 			force_clear_smbi = true;
   1939 		} else
   1940 			force_clear_smbi = false;
   1941 		break;
   1942 	case WM_T_82573:
   1943 	case WM_T_82574:
   1944 	case WM_T_82583:
   1945 		force_clear_smbi = true;
   1946 		break;
   1947 	default:
   1948 		force_clear_smbi = false;
   1949 		break;
   1950 	}
   1951 	if (force_clear_smbi) {
   1952 		reg = CSR_READ(sc, WMREG_SWSM);
   1953 		if ((reg & SWSM_SMBI) != 0)
   1954 			aprint_error_dev(sc->sc_dev,
   1955 			    "Please update the Bootagent\n");
   1956 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
   1957 	}
   1958 
   1959 	/*
    1960 	 * Defer printing the EEPROM type until after verifying the checksum.
    1961 	 * This allows the EEPROM type to be printed correctly in the case
   1962 	 * that no EEPROM is attached.
   1963 	 */
   1964 	/*
   1965 	 * Validate the EEPROM checksum. If the checksum fails, flag
   1966 	 * this for later, so we can fail future reads from the EEPROM.
   1967 	 */
   1968 	if (wm_nvm_validate_checksum(sc)) {
   1969 		/*
    1970 		 * Read it again, because some PCIe parts fail the
    1971 		 * first check due to the link being in a sleep state.
   1972 		 */
   1973 		if (wm_nvm_validate_checksum(sc))
   1974 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   1975 	}
   1976 
   1977 	/* Set device properties (macflags) */
   1978 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
   1979 
   1980 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   1981 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
   1982 	else {
   1983 		aprint_verbose_dev(sc->sc_dev, "%u words ",
   1984 		    sc->sc_nvm_wordsize);
   1985 		if (sc->sc_flags & WM_F_EEPROM_INVM)
   1986 			aprint_verbose("iNVM");
   1987 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
   1988 			aprint_verbose("FLASH(HW)");
   1989 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
   1990 			aprint_verbose("FLASH");
   1991 		else {
   1992 			if (sc->sc_flags & WM_F_EEPROM_SPI)
   1993 				eetype = "SPI";
   1994 			else
   1995 				eetype = "MicroWire";
   1996 			aprint_verbose("(%d address bits) %s EEPROM",
   1997 			    sc->sc_nvm_addrbits, eetype);
   1998 		}
   1999 	}
   2000 	wm_nvm_version(sc);
   2001 	aprint_verbose("\n");
   2002 
   2003 	/* Check for I21[01] PLL workaround */
   2004 	if (sc->sc_type == WM_T_I210)
   2005 		sc->sc_flags |= WM_F_PLL_WA_I210;
   2006 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
   2007 		/* NVM image release 3.25 has a workaround */
   2008 		if ((sc->sc_nvm_ver_major < 3)
   2009 		    || ((sc->sc_nvm_ver_major == 3)
   2010 			&& (sc->sc_nvm_ver_minor < 25))) {
   2011 			aprint_verbose_dev(sc->sc_dev,
   2012 			    "ROM image version %d.%d is older than 3.25\n",
   2013 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
   2014 			sc->sc_flags |= WM_F_PLL_WA_I210;
   2015 		}
   2016 	}
   2017 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   2018 		wm_pll_workaround_i210(sc);
   2019 
   2020 	wm_get_wakeup(sc);
   2021 	switch (sc->sc_type) {
   2022 	case WM_T_82571:
   2023 	case WM_T_82572:
   2024 	case WM_T_82573:
   2025 	case WM_T_82574:
   2026 	case WM_T_82583:
   2027 	case WM_T_80003:
   2028 	case WM_T_ICH8:
   2029 	case WM_T_ICH9:
   2030 	case WM_T_ICH10:
   2031 	case WM_T_PCH:
   2032 	case WM_T_PCH2:
   2033 	case WM_T_PCH_LPT:
   2034 		/* Non-AMT based hardware can now take control from firmware */
   2035 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
   2036 			wm_get_hw_control(sc);
   2037 		break;
   2038 	default:
   2039 		break;
   2040 	}
   2041 
   2042 	/*
    2043 	 * Read the Ethernet address from the EEPROM, unless it was
    2044 	 * already found in the device properties.
   2045 	 */
   2046 	ea = prop_dictionary_get(dict, "mac-address");
   2047 	if (ea != NULL) {
   2048 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
   2049 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
   2050 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
   2051 	} else {
   2052 		if (wm_read_mac_addr(sc, enaddr) != 0) {
   2053 			aprint_error_dev(sc->sc_dev,
   2054 			    "unable to read Ethernet address\n");
   2055 			goto out;
   2056 		}
   2057 	}
   2058 
   2059 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
   2060 	    ether_sprintf(enaddr));
   2061 
   2062 	/*
   2063 	 * Read the config info from the EEPROM, and set up various
   2064 	 * bits in the control registers based on their contents.
   2065 	 */
   2066 	pn = prop_dictionary_get(dict, "i82543-cfg1");
   2067 	if (pn != NULL) {
   2068 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2069 		cfg1 = (uint16_t) prop_number_integer_value(pn);
   2070 	} else {
   2071 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
   2072 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
   2073 			goto out;
   2074 		}
   2075 	}
   2076 
   2077 	pn = prop_dictionary_get(dict, "i82543-cfg2");
   2078 	if (pn != NULL) {
   2079 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2080 		cfg2 = (uint16_t) prop_number_integer_value(pn);
   2081 	} else {
   2082 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
   2083 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
   2084 			goto out;
   2085 		}
   2086 	}
   2087 
   2088 	/* check for WM_F_WOL */
   2089 	switch (sc->sc_type) {
   2090 	case WM_T_82542_2_0:
   2091 	case WM_T_82542_2_1:
   2092 	case WM_T_82543:
   2093 		/* dummy? */
   2094 		eeprom_data = 0;
   2095 		apme_mask = NVM_CFG3_APME;
   2096 		break;
   2097 	case WM_T_82544:
   2098 		apme_mask = NVM_CFG2_82544_APM_EN;
   2099 		eeprom_data = cfg2;
   2100 		break;
   2101 	case WM_T_82546:
   2102 	case WM_T_82546_3:
   2103 	case WM_T_82571:
   2104 	case WM_T_82572:
   2105 	case WM_T_82573:
   2106 	case WM_T_82574:
   2107 	case WM_T_82583:
   2108 	case WM_T_80003:
   2109 	default:
   2110 		apme_mask = NVM_CFG3_APME;
   2111 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
   2112 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
   2113 		break;
   2114 	case WM_T_82575:
   2115 	case WM_T_82576:
   2116 	case WM_T_82580:
   2117 	case WM_T_I350:
   2118 	case WM_T_I354: /* XXX ok? */
   2119 	case WM_T_ICH8:
   2120 	case WM_T_ICH9:
   2121 	case WM_T_ICH10:
   2122 	case WM_T_PCH:
   2123 	case WM_T_PCH2:
   2124 	case WM_T_PCH_LPT:
   2125 		/* XXX The funcid should be checked on some devices */
   2126 		apme_mask = WUC_APME;
   2127 		eeprom_data = CSR_READ(sc, WMREG_WUC);
   2128 		break;
   2129 	}
   2130 
    2131 	/* Set the WM_F_WOL flag from the EEPROM/WUC value read above */
   2132 	if ((eeprom_data & apme_mask) != 0)
   2133 		sc->sc_flags |= WM_F_WOL;
   2134 #ifdef WM_DEBUG
   2135 	if ((sc->sc_flags & WM_F_WOL) != 0)
   2136 		printf("WOL\n");
   2137 #endif
   2138 
   2139 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
   2140 		/* Check NVM for autonegotiation */
   2141 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
   2142 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
   2143 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
   2144 		}
   2145 	}
   2146 
   2147 	/*
    2148 	 * XXX need special handling for some multiple-port cards
    2149 	 * to disable a particular port.
   2150 	 */
   2151 
   2152 	if (sc->sc_type >= WM_T_82544) {
   2153 		pn = prop_dictionary_get(dict, "i82543-swdpin");
   2154 		if (pn != NULL) {
   2155 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
   2156 			swdpin = (uint16_t) prop_number_integer_value(pn);
   2157 		} else {
   2158 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
   2159 				aprint_error_dev(sc->sc_dev,
   2160 				    "unable to read SWDPIN\n");
   2161 				goto out;
   2162 			}
   2163 		}
   2164 	}
   2165 
   2166 	if (cfg1 & NVM_CFG1_ILOS)
   2167 		sc->sc_ctrl |= CTRL_ILOS;
   2168 
   2169 	/*
   2170 	 * XXX
    2171 	 * This code isn't correct because pins 2 and 3 are located
    2172 	 * at different positions on newer chips. Check all the datasheets.
    2173 	 *
    2174 	 * Until this problem is resolved, apply it only to chips <= 82580.
   2175 	 */
   2176 	if (sc->sc_type <= WM_T_82580) {
   2177 		if (sc->sc_type >= WM_T_82544) {
   2178 			sc->sc_ctrl |=
   2179 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
   2180 			    CTRL_SWDPIO_SHIFT;
   2181 			sc->sc_ctrl |=
   2182 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
   2183 			    CTRL_SWDPINS_SHIFT;
   2184 		} else {
   2185 			sc->sc_ctrl |=
   2186 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
   2187 			    CTRL_SWDPIO_SHIFT;
   2188 		}
   2189 	}
   2190 
   2191 	/* XXX For other than 82580? */
   2192 	if (sc->sc_type == WM_T_82580) {
   2193 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
   2194 		printf("CFG3 = %08x\n", (uint32_t)nvmword);
   2195 		if (nvmword & __BIT(13)) {
   2196 			printf("SET ILOS\n");
   2197 			sc->sc_ctrl |= CTRL_ILOS;
   2198 		}
   2199 	}
   2200 
   2201 #if 0
   2202 	if (sc->sc_type >= WM_T_82544) {
   2203 		if (cfg1 & NVM_CFG1_IPS0)
   2204 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
   2205 		if (cfg1 & NVM_CFG1_IPS1)
   2206 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
   2207 		sc->sc_ctrl_ext |=
   2208 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
   2209 		    CTRL_EXT_SWDPIO_SHIFT;
   2210 		sc->sc_ctrl_ext |=
   2211 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
   2212 		    CTRL_EXT_SWDPINS_SHIFT;
   2213 	} else {
   2214 		sc->sc_ctrl_ext |=
   2215 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
   2216 		    CTRL_EXT_SWDPIO_SHIFT;
   2217 	}
   2218 #endif
   2219 
   2220 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   2221 #if 0
   2222 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   2223 #endif
   2224 
   2225 	if (sc->sc_type == WM_T_PCH) {
   2226 		uint16_t val;
   2227 
   2228 		/* Save the NVM K1 bit setting */
   2229 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
   2230 
   2231 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
   2232 			sc->sc_nvm_k1_enabled = 1;
   2233 		else
   2234 			sc->sc_nvm_k1_enabled = 0;
   2235 	}
   2236 
   2237 	/*
    2238 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
   2239 	 * media structures accordingly.
   2240 	 */
   2241 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
   2242 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
   2243 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
   2244 	    || sc->sc_type == WM_T_82573
   2245 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
   2246 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
   2247 		wm_gmii_mediainit(sc, wmp->wmp_product);
   2248 	} else if (sc->sc_type < WM_T_82543 ||
   2249 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
   2250 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
   2251 			aprint_error_dev(sc->sc_dev,
   2252 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
   2253 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
   2254 		}
   2255 		wm_tbi_mediainit(sc);
   2256 	} else {
   2257 		switch (sc->sc_type) {
   2258 		case WM_T_82575:
   2259 		case WM_T_82576:
   2260 		case WM_T_82580:
   2261 		case WM_T_I350:
   2262 		case WM_T_I354:
   2263 		case WM_T_I210:
   2264 		case WM_T_I211:
   2265 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   2266 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
   2267 			switch (link_mode) {
   2268 			case CTRL_EXT_LINK_MODE_1000KX:
   2269 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
   2270 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
   2271 				break;
   2272 			case CTRL_EXT_LINK_MODE_SGMII:
   2273 				if (wm_sgmii_uses_mdio(sc)) {
   2274 					aprint_verbose_dev(sc->sc_dev,
   2275 					    "SGMII(MDIO)\n");
   2276 					sc->sc_flags |= WM_F_SGMII;
   2277 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2278 					break;
   2279 				}
   2280 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
   2281 				/*FALLTHROUGH*/
   2282 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
   2283 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
   2284 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
   2285 					if (link_mode
   2286 					    == CTRL_EXT_LINK_MODE_SGMII) {
   2287 						sc->sc_mediatype
   2288 						    = WM_MEDIATYPE_COPPER;
   2289 						sc->sc_flags |= WM_F_SGMII;
   2290 					} else {
   2291 						sc->sc_mediatype
   2292 						    = WM_MEDIATYPE_SERDES;
   2293 						aprint_verbose_dev(sc->sc_dev,
   2294 						    "SERDES\n");
   2295 					}
   2296 					break;
   2297 				}
   2298 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   2299 					aprint_verbose_dev(sc->sc_dev,
   2300 					    "SERDES\n");
   2301 
   2302 				/* Change current link mode setting */
   2303 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
   2304 				switch (sc->sc_mediatype) {
   2305 				case WM_MEDIATYPE_COPPER:
   2306 					reg |= CTRL_EXT_LINK_MODE_SGMII;
   2307 					break;
   2308 				case WM_MEDIATYPE_SERDES:
   2309 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
   2310 					break;
   2311 				default:
   2312 					break;
   2313 				}
   2314 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2315 				break;
   2316 			case CTRL_EXT_LINK_MODE_GMII:
   2317 			default:
   2318 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
   2319 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2320 				break;
   2321 			}
   2322 
    2324 			if ((sc->sc_flags & WM_F_SGMII) != 0)
    2325 				reg |= CTRL_EXT_I2C_ENA;
    2326 			else
    2327 				reg &= ~CTRL_EXT_I2C_ENA;
   2328 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   2329 
   2330 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
   2331 				wm_gmii_mediainit(sc, wmp->wmp_product);
   2332 			else
   2333 				wm_tbi_mediainit(sc);
   2334 			break;
   2335 		default:
   2336 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   2337 				aprint_error_dev(sc->sc_dev,
   2338 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
   2339 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
   2340 			wm_gmii_mediainit(sc, wmp->wmp_product);
   2341 		}
   2342 	}
   2343 
   2344 	ifp = &sc->sc_ethercom.ec_if;
   2345 	xname = device_xname(sc->sc_dev);
   2346 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
   2347 	ifp->if_softc = sc;
   2348 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   2349 	ifp->if_ioctl = wm_ioctl;
   2350 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   2351 		ifp->if_start = wm_nq_start;
   2352 	else
   2353 		ifp->if_start = wm_start;
   2354 	ifp->if_watchdog = wm_watchdog;
   2355 	ifp->if_init = wm_init;
   2356 	ifp->if_stop = wm_stop;
   2357 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
   2358 	IFQ_SET_READY(&ifp->if_snd);
   2359 
   2360 	/* Check for jumbo frame */
   2361 	switch (sc->sc_type) {
   2362 	case WM_T_82573:
   2363 		/* XXX limited to 9234 if ASPM is disabled */
   2364 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
   2365 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
   2366 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2367 		break;
   2368 	case WM_T_82571:
   2369 	case WM_T_82572:
   2370 	case WM_T_82574:
   2371 	case WM_T_82575:
   2372 	case WM_T_82576:
   2373 	case WM_T_82580:
   2374 	case WM_T_I350:
   2375 	case WM_T_I354: /* XXXX ok? */
   2376 	case WM_T_I210:
   2377 	case WM_T_I211:
   2378 	case WM_T_80003:
   2379 	case WM_T_ICH9:
   2380 	case WM_T_ICH10:
   2381 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
   2382 	case WM_T_PCH_LPT:
   2383 		/* XXX limited to 9234 */
   2384 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2385 		break;
   2386 	case WM_T_PCH:
   2387 		/* XXX limited to 4096 */
   2388 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2389 		break;
   2390 	case WM_T_82542_2_0:
   2391 	case WM_T_82542_2_1:
   2392 	case WM_T_82583:
   2393 	case WM_T_ICH8:
   2394 		/* No support for jumbo frame */
   2395 		break;
   2396 	default:
   2397 		/* ETHER_MAX_LEN_JUMBO */
   2398 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
   2399 		break;
   2400 	}
   2401 
    2402 	/* If we're an i82543 or greater, we can support VLANs. */
   2403 	if (sc->sc_type >= WM_T_82543)
   2404 		sc->sc_ethercom.ec_capabilities |=
   2405 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
   2406 
   2407 	/*
    2408 	 * We can perform TCPv4 and UDPv4 checksums inbound.  Only
   2409 	 * on i82543 and later.
   2410 	 */
   2411 	if (sc->sc_type >= WM_T_82543) {
   2412 		ifp->if_capabilities |=
   2413 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
   2414 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
   2415 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
   2416 		    IFCAP_CSUM_TCPv6_Tx |
   2417 		    IFCAP_CSUM_UDPv6_Tx;
   2418 	}
   2419 
   2420 	/*
   2421 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
   2422 	 *
   2423 	 *	82541GI (8086:1076) ... no
   2424 	 *	82572EI (8086:10b9) ... yes
   2425 	 */
   2426 	if (sc->sc_type >= WM_T_82571) {
   2427 		ifp->if_capabilities |=
   2428 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
   2429 	}
   2430 
   2431 	/*
    2432 	 * If we're an i82544 or greater (except the i82547), we can do
   2433 	 * TCP segmentation offload.
   2434 	 */
   2435 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
   2436 		ifp->if_capabilities |= IFCAP_TSOv4;
   2437 	}
   2438 
   2439 	if (sc->sc_type >= WM_T_82571) {
   2440 		ifp->if_capabilities |= IFCAP_TSOv6;
   2441 	}
   2442 
   2443 #ifdef WM_MPSAFE
   2444 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   2445 #else
   2446 	sc->sc_core_lock = NULL;
   2447 #endif
   2448 
   2449 	/* Attach the interface. */
   2450 	if_attach(ifp);
   2451 	ether_ifattach(ifp, enaddr);
   2452 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
   2453 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
   2454 			  RND_FLAG_DEFAULT);
   2455 
   2456 #ifdef WM_EVENT_COUNTERS
   2457 	/* Attach event counters. */
   2458 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
   2459 	    NULL, xname, "txsstall");
   2460 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
   2461 	    NULL, xname, "txdstall");
   2462 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
   2463 	    NULL, xname, "txfifo_stall");
   2464 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
   2465 	    NULL, xname, "txdw");
   2466 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
   2467 	    NULL, xname, "txqe");
   2468 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
   2469 	    NULL, xname, "rxintr");
   2470 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
   2471 	    NULL, xname, "linkintr");
   2472 
   2473 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
   2474 	    NULL, xname, "rxipsum");
   2475 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
   2476 	    NULL, xname, "rxtusum");
   2477 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
   2478 	    NULL, xname, "txipsum");
   2479 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
   2480 	    NULL, xname, "txtusum");
   2481 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
   2482 	    NULL, xname, "txtusum6");
   2483 
   2484 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
   2485 	    NULL, xname, "txtso");
   2486 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
   2487 	    NULL, xname, "txtso6");
   2488 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
   2489 	    NULL, xname, "txtsopain");
   2490 
   2491 	for (i = 0; i < WM_NTXSEGS; i++) {
   2492 		snprintf(wm_txseg_evcnt_names[i],
   2493 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
   2494 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
   2495 		    NULL, xname, wm_txseg_evcnt_names[i]);
   2496 	}
   2497 
   2498 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
   2499 	    NULL, xname, "txdrop");
   2500 
   2501 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
   2502 	    NULL, xname, "tu");
   2503 
   2504 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
   2505 	    NULL, xname, "tx_xoff");
   2506 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
   2507 	    NULL, xname, "tx_xon");
   2508 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
   2509 	    NULL, xname, "rx_xoff");
   2510 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
   2511 	    NULL, xname, "rx_xon");
   2512 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
   2513 	    NULL, xname, "rx_macctl");
   2514 #endif /* WM_EVENT_COUNTERS */
   2515 
   2516 	if (pmf_device_register(self, wm_suspend, wm_resume))
   2517 		pmf_class_network_register(self, ifp);
   2518 	else
   2519 		aprint_error_dev(self, "couldn't establish power handler\n");
   2520 
   2521 	sc->sc_flags |= WM_F_ATTACHED;
   2522  out:
   2523 	return;
   2524 }
   2525 
   2526 /* The detach function (ca_detach) */
   2527 static int
   2528 wm_detach(device_t self, int flags __unused)
   2529 {
   2530 	struct wm_softc *sc = device_private(self);
   2531 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2532 	int i;
   2533 #ifndef WM_MPSAFE
   2534 	int s;
   2535 #endif
   2536 
   2537 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
   2538 		return 0;
   2539 
   2540 #ifndef WM_MPSAFE
   2541 	s = splnet();
   2542 #endif
   2543 	/* Stop the interface. Callouts are stopped in it. */
   2544 	wm_stop(ifp, 1);
   2545 
   2546 #ifndef WM_MPSAFE
   2547 	splx(s);
   2548 #endif
   2549 
   2550 	pmf_device_deregister(self);
   2551 
   2552 	/* Tell the firmware about the release */
   2553 	WM_CORE_LOCK(sc);
   2554 	wm_release_manageability(sc);
   2555 	wm_release_hw_control(sc);
   2556 	WM_CORE_UNLOCK(sc);
   2557 
   2558 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
   2559 
   2560 	/* Delete all remaining media. */
   2561 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
   2562 
   2563 	ether_ifdetach(ifp);
   2564 	if_detach(ifp);
    2565 
   2567 	/* Unload RX dmamaps and free mbufs */
   2568 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   2569 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   2570 		WM_RX_LOCK(rxq);
   2571 		wm_rxdrain(rxq);
   2572 		WM_RX_UNLOCK(rxq);
   2573 	}
    2574 	/* The RX locks must not be held when freeing the queues below */
   2575 
   2576 	wm_free_txrx_queues(sc);
   2577 
   2578 	/* Disestablish the interrupt handler */
   2579 	for (i = 0; i < sc->sc_nintrs; i++) {
   2580 		if (sc->sc_ihs[i] != NULL) {
   2581 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
   2582 			sc->sc_ihs[i] = NULL;
   2583 		}
   2584 	}
   2585 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
   2586 
   2587 	/* Unmap the registers */
   2588 	if (sc->sc_ss) {
   2589 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
   2590 		sc->sc_ss = 0;
   2591 	}
   2592 	if (sc->sc_ios) {
   2593 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
   2594 		sc->sc_ios = 0;
   2595 	}
   2596 	if (sc->sc_flashs) {
   2597 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
   2598 		sc->sc_flashs = 0;
   2599 	}
   2600 
   2601 	if (sc->sc_core_lock)
   2602 		mutex_obj_free(sc->sc_core_lock);
   2603 
   2604 	return 0;
   2605 }
   2606 
   2607 static bool
   2608 wm_suspend(device_t self, const pmf_qual_t *qual)
   2609 {
   2610 	struct wm_softc *sc = device_private(self);
   2611 
   2612 	wm_release_manageability(sc);
   2613 	wm_release_hw_control(sc);
   2614 #ifdef WM_WOL
   2615 	wm_enable_wakeup(sc);
   2616 #endif
   2617 
   2618 	return true;
   2619 }
   2620 
   2621 static bool
   2622 wm_resume(device_t self, const pmf_qual_t *qual)
   2623 {
   2624 	struct wm_softc *sc = device_private(self);
   2625 
   2626 	wm_init_manageability(sc);
   2627 
   2628 	return true;
   2629 }
   2630 
   2631 /*
   2632  * wm_watchdog:		[ifnet interface function]
   2633  *
   2634  *	Watchdog timer handler.
   2635  */
   2636 static void
   2637 wm_watchdog(struct ifnet *ifp)
   2638 {
   2639 	struct wm_softc *sc = ifp->if_softc;
   2640 	struct wm_txqueue *txq = &sc->sc_txq[0];
   2641 
   2642 	/*
   2643 	 * Since we're using delayed interrupts, sweep up
   2644 	 * before we report an error.
   2645 	 */
   2646 	WM_TX_LOCK(txq);
   2647 	wm_txeof(sc);
   2648 	WM_TX_UNLOCK(txq);
   2649 
   2650 	if (txq->txq_free != WM_NTXDESC(txq)) {
   2651 #ifdef WM_DEBUG
   2652 		int i, j;
   2653 		struct wm_txsoft *txs;
   2654 #endif
   2655 		log(LOG_ERR,
   2656 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
   2657 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
   2658 		    txq->txq_next);
   2659 		ifp->if_oerrors++;
   2660 #ifdef WM_DEBUG
   2661 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
   2662 		    i = WM_NEXTTXS(txq, i)) {
   2663 		    txs = &txq->txq_soft[i];
   2664 		    printf("txs %d tx %d -> %d\n",
   2665 			i, txs->txs_firstdesc, txs->txs_lastdesc);
   2666 		    for (j = txs->txs_firstdesc; ;
   2667 			j = WM_NEXTTX(txq, j)) {
   2668 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
   2669 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
   2670 			printf("\t %#08x%08x\n",
   2671 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
   2672 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
   2673 			if (j == txs->txs_lastdesc)
   2674 				break;
   2675 			}
   2676 		}
   2677 #endif
   2678 		/* Reset the interface. */
   2679 		(void) wm_init(ifp);
   2680 	}
   2681 
   2682 	/* Try to get more packets going. */
   2683 	ifp->if_start(ifp);
   2684 }
   2685 
   2686 /*
   2687  * wm_tick:
   2688  *
   2689  *	One second timer, used to check link status, sweep up
   2690  *	completed transmit jobs, etc.
   2691  */
   2692 static void
   2693 wm_tick(void *arg)
   2694 {
   2695 	struct wm_softc *sc = arg;
   2696 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   2697 #ifndef WM_MPSAFE
   2698 	int s;
   2699 
   2700 	s = splnet();
   2701 #endif
   2702 
   2703 	WM_CORE_LOCK(sc);
   2704 
   2705 	if (sc->sc_stopping)
   2706 		goto out;
   2707 
   2708 	if (sc->sc_type >= WM_T_82542_2_1) {
   2709 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
   2710 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
   2711 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
   2712 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
   2713 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
   2714 	}
   2715 
   2716 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
    2717 	ifp->if_ierrors += 0ULL /* ensure quad_t */
   2718 	    + CSR_READ(sc, WMREG_CRCERRS)
   2719 	    + CSR_READ(sc, WMREG_ALGNERRC)
   2720 	    + CSR_READ(sc, WMREG_SYMERRC)
   2721 	    + CSR_READ(sc, WMREG_RXERRC)
   2722 	    + CSR_READ(sc, WMREG_SEC)
   2723 	    + CSR_READ(sc, WMREG_CEXTERR)
   2724 	    + CSR_READ(sc, WMREG_RLEC);
   2725 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
   2726 
   2727 	if (sc->sc_flags & WM_F_HAS_MII)
   2728 		mii_tick(&sc->sc_mii);
   2729 	else if ((sc->sc_type >= WM_T_82575)
   2730 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   2731 		wm_serdes_tick(sc);
   2732 	else
   2733 		wm_tbi_tick(sc);
   2734 
   2735 out:
   2736 	WM_CORE_UNLOCK(sc);
   2737 #ifndef WM_MPSAFE
   2738 	splx(s);
   2739 #endif
   2740 
   2741 	if (!sc->sc_stopping)
   2742 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   2743 }
   2744 
   2745 static int
   2746 wm_ifflags_cb(struct ethercom *ec)
   2747 {
   2748 	struct ifnet *ifp = &ec->ec_if;
   2749 	struct wm_softc *sc = ifp->if_softc;
   2750 	int change = ifp->if_flags ^ sc->sc_if_flags;
   2751 	int rc = 0;
   2752 
   2753 	WM_CORE_LOCK(sc);
   2754 
   2755 	if (change != 0)
   2756 		sc->sc_if_flags = ifp->if_flags;
   2757 
   2758 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) {
   2759 		rc = ENETRESET;
   2760 		goto out;
   2761 	}
   2762 
   2763 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
   2764 		wm_set_filter(sc);
   2765 
   2766 	wm_set_vlan(sc);
   2767 
   2768 out:
   2769 	WM_CORE_UNLOCK(sc);
   2770 
   2771 	return rc;
   2772 }
   2773 
   2774 /*
   2775  * wm_ioctl:		[ifnet interface function]
   2776  *
   2777  *	Handle control requests from the operator.
   2778  */
   2779 static int
   2780 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   2781 {
   2782 	struct wm_softc *sc = ifp->if_softc;
   2783 	struct ifreq *ifr = (struct ifreq *) data;
   2784 	struct ifaddr *ifa = (struct ifaddr *)data;
   2785 	struct sockaddr_dl *sdl;
   2786 	int s, error;
   2787 
   2788 #ifndef WM_MPSAFE
   2789 	s = splnet();
   2790 #endif
   2791 	switch (cmd) {
   2792 	case SIOCSIFMEDIA:
   2793 	case SIOCGIFMEDIA:
   2794 		WM_CORE_LOCK(sc);
   2795 		/* Flow control requires full-duplex mode. */
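         		/*
         		 * Under autonegotiation, or without full duplex, any
         		 * requested pause bits are cleared; for a fixed
         		 * full-duplex media, IFM_FLOW expands to both TXPAUSE
         		 * and RXPAUSE.
         		 */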
   2796 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
   2797 		    (ifr->ifr_media & IFM_FDX) == 0)
   2798 			ifr->ifr_media &= ~IFM_ETH_FMASK;
   2799 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
   2800 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
   2801 				/* We can do both TXPAUSE and RXPAUSE. */
   2802 				ifr->ifr_media |=
   2803 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   2804 			}
   2805 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
   2806 		}
   2807 		WM_CORE_UNLOCK(sc);
   2808 #ifdef WM_MPSAFE
   2809 		s = splnet();
   2810 #endif
   2811 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
   2812 #ifdef WM_MPSAFE
   2813 		splx(s);
   2814 #endif
   2815 		break;
   2816 	case SIOCINITIFADDR:
   2817 		WM_CORE_LOCK(sc);
   2818 		if (ifa->ifa_addr->sa_family == AF_LINK) {
   2819 			sdl = satosdl(ifp->if_dl->ifa_addr);
   2820 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
   2821 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
   2822 			/* unicast address is first multicast entry */
   2823 			wm_set_filter(sc);
   2824 			error = 0;
   2825 			WM_CORE_UNLOCK(sc);
   2826 			break;
   2827 		}
   2828 		WM_CORE_UNLOCK(sc);
   2829 		/*FALLTHROUGH*/
   2830 	default:
   2831 #ifdef WM_MPSAFE
   2832 		s = splnet();
   2833 #endif
    2834 		/* This may call wm_start(), so don't hold the core lock here */
   2835 		error = ether_ioctl(ifp, cmd, data);
   2836 #ifdef WM_MPSAFE
   2837 		splx(s);
   2838 #endif
   2839 		if (error != ENETRESET)
   2840 			break;
   2841 
   2842 		error = 0;
   2843 
   2844 		if (cmd == SIOCSIFCAP) {
   2845 			error = (*ifp->if_init)(ifp);
   2846 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
   2847 			;
   2848 		else if (ifp->if_flags & IFF_RUNNING) {
   2849 			/*
   2850 			 * Multicast list has changed; set the hardware filter
   2851 			 * accordingly.
   2852 			 */
   2853 			WM_CORE_LOCK(sc);
   2854 			wm_set_filter(sc);
   2855 			WM_CORE_UNLOCK(sc);
   2856 		}
   2857 		break;
   2858 	}
   2859 
   2860 #ifndef WM_MPSAFE
   2861 	splx(s);
   2862 #endif
   2863 	return error;
   2864 }
   2865 
   2866 /* MAC address related */
   2867 
   2868 /*
    2869  * Get the offset of the MAC address and return it.
    2870  * If an error occurs, use offset 0.
   2871  */
   2872 static uint16_t
   2873 wm_check_alt_mac_addr(struct wm_softc *sc)
   2874 {
   2875 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2876 	uint16_t offset = NVM_OFF_MACADDR;
   2877 
   2878 	/* Try to read alternative MAC address pointer */
   2879 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
   2880 		return 0;
   2881 
    2882 	/* Check whether the pointer is valid. */
   2883 	if ((offset == 0x0000) || (offset == 0xffff))
   2884 		return 0;
   2885 
   2886 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
   2887 	/*
    2888 	 * Check whether the alternative MAC address is valid.
    2889 	 * Some cards have a non-0xffff pointer but don't actually
    2890 	 * use an alternative MAC address.
   2891 	 *
   2892 	 * Check whether the broadcast bit is set or not.
   2893 	 */
   2894 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
   2895 		if (((myea[0] & 0xff) & 0x01) == 0)
   2896 			return offset; /* Found */
   2897 
   2898 	/* Not found */
   2899 	return 0;
   2900 }
   2901 
   2902 static int
   2903 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
   2904 {
   2905 	uint16_t myea[ETHER_ADDR_LEN / 2];
   2906 	uint16_t offset = NVM_OFF_MACADDR;
   2907 	int do_invert = 0;
   2908 
   2909 	switch (sc->sc_type) {
   2910 	case WM_T_82580:
   2911 	case WM_T_I350:
   2912 	case WM_T_I354:
   2913 		/* EEPROM Top Level Partitioning */
   2914 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
   2915 		break;
   2916 	case WM_T_82571:
   2917 	case WM_T_82575:
   2918 	case WM_T_82576:
   2919 	case WM_T_80003:
   2920 	case WM_T_I210:
   2921 	case WM_T_I211:
   2922 		offset = wm_check_alt_mac_addr(sc);
   2923 		if (offset == 0)
   2924 			if ((sc->sc_funcid & 0x01) == 1)
   2925 				do_invert = 1;
   2926 		break;
   2927 	default:
   2928 		if ((sc->sc_funcid & 0x01) == 1)
   2929 			do_invert = 1;
   2930 		break;
   2931 	}
   2932 
   2933 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
   2934 		myea) != 0)
   2935 		goto bad;
   2936 
   2937 	enaddr[0] = myea[0] & 0xff;
   2938 	enaddr[1] = myea[0] >> 8;
   2939 	enaddr[2] = myea[1] & 0xff;
   2940 	enaddr[3] = myea[1] >> 8;
   2941 	enaddr[4] = myea[2] & 0xff;
   2942 	enaddr[5] = myea[2] >> 8;
   2943 
   2944 	/*
   2945 	 * Toggle the LSB of the MAC address on the second port
   2946 	 * of some dual port cards.
   2947 	 */
   2948 	if (do_invert != 0)
   2949 		enaddr[5] ^= 1;
   2950 
   2951 	return 0;
   2952 
   2953  bad:
   2954 	return -1;
   2955 }
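/*
 * For illustration, with hypothetical NVM contents: the address is
 * stored as three little-endian 16-bit words, so reading back
 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to
 * enaddr = 00:11:22:33:44:55, and on the second function of an
 * affected dual port card the LSB toggle yields 00:11:22:33:44:54.
 */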
   2956 
   2957 /*
   2958  * wm_set_ral:
   2959  *
    2960  *	Set an entry in the receive address list.
   2961  */
   2962 static void
   2963 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
   2964 {
   2965 	uint32_t ral_lo, ral_hi;
   2966 
   2967 	if (enaddr != NULL) {
   2968 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
   2969 		    (enaddr[3] << 24);
   2970 		ral_hi = enaddr[4] | (enaddr[5] << 8);
   2971 		ral_hi |= RAL_AV;
   2972 	} else {
   2973 		ral_lo = 0;
   2974 		ral_hi = 0;
   2975 	}
   2976 
   2977 	if (sc->sc_type >= WM_T_82544) {
   2978 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
   2979 		    ral_lo);
   2980 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
   2981 		    ral_hi);
   2982 	} else {
   2983 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
   2984 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
   2985 	}
   2986 }
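/*
 * For example, a (hypothetical) station address 00:11:22:33:44:55
 * packs into ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV;
 * RAL_AV marks the slot valid so the hardware matches against it.
 */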
   2987 
   2988 /*
   2989  * wm_mchash:
   2990  *
   2991  *	Compute the hash of the multicast address for the 4096-bit
   2992  *	multicast filter.
   2993  */
   2994 static uint32_t
   2995 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
   2996 {
   2997 	static const int lo_shift[4] = { 4, 3, 2, 0 };
   2998 	static const int hi_shift[4] = { 4, 5, 6, 8 };
   2999 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
   3000 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
   3001 	uint32_t hash;
   3002 
   3003 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3004 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3005 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   3006 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
   3007 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
   3008 		return (hash & 0x3ff);
   3009 	}
   3010 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
   3011 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
   3012 
   3013 	return (hash & 0xfff);
   3014 }
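/*
 * A worked example, using the well-known IPv4 all-hosts group
 * 01:00:5e:00:00:01 with mchash_type 0: enaddr[4] = 0x00 and
 * enaddr[5] = 0x01, so on non-ICH/PCH parts
 *
 *	hash = (0x00 >> 4) | ((uint16_t)0x01 << 4) = 0x010
 *
 * and wm_set_filter() below selects MTA word hash >> 5 = 0, bit
 * hash & 0x1f = 16.  ICH/PCH parts use the 10-bit variant the same
 * way, just with a 32-word table.
 */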
   3015 
   3016 /*
   3017  * wm_set_filter:
   3018  *
   3019  *	Set up the receive filter.
   3020  */
   3021 static void
   3022 wm_set_filter(struct wm_softc *sc)
   3023 {
   3024 	struct ethercom *ec = &sc->sc_ethercom;
   3025 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   3026 	struct ether_multi *enm;
   3027 	struct ether_multistep step;
   3028 	bus_addr_t mta_reg;
   3029 	uint32_t hash, reg, bit;
   3030 	int i, size;
   3031 
   3032 	if (sc->sc_type >= WM_T_82544)
   3033 		mta_reg = WMREG_CORDOVA_MTA;
   3034 	else
   3035 		mta_reg = WMREG_MTA;
   3036 
   3037 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
   3038 
   3039 	if (ifp->if_flags & IFF_BROADCAST)
   3040 		sc->sc_rctl |= RCTL_BAM;
   3041 	if (ifp->if_flags & IFF_PROMISC) {
   3042 		sc->sc_rctl |= RCTL_UPE;
   3043 		goto allmulti;
   3044 	}
   3045 
   3046 	/*
   3047 	 * Set the station address in the first RAL slot, and
   3048 	 * clear the remaining slots.
   3049 	 */
   3050 	if (sc->sc_type == WM_T_ICH8)
    3051 		size = WM_RAL_TABSIZE_ICH8 - 1;
   3052 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
   3053 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
   3054 	    || (sc->sc_type == WM_T_PCH_LPT))
   3055 		size = WM_RAL_TABSIZE_ICH8;
   3056 	else if (sc->sc_type == WM_T_82575)
   3057 		size = WM_RAL_TABSIZE_82575;
   3058 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
   3059 		size = WM_RAL_TABSIZE_82576;
   3060 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   3061 		size = WM_RAL_TABSIZE_I350;
   3062 	else
   3063 		size = WM_RAL_TABSIZE;
   3064 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
   3065 	for (i = 1; i < size; i++)
   3066 		wm_set_ral(sc, NULL, i);
   3067 
   3068 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3069 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3070 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   3071 		size = WM_ICH8_MC_TABSIZE;
   3072 	else
   3073 		size = WM_MC_TABSIZE;
   3074 	/* Clear out the multicast table. */
   3075 	for (i = 0; i < size; i++)
   3076 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
   3077 
   3078 	ETHER_FIRST_MULTI(step, ec, enm);
   3079 	while (enm != NULL) {
   3080 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
   3081 			/*
   3082 			 * We must listen to a range of multicast addresses.
   3083 			 * For now, just accept all multicasts, rather than
   3084 			 * trying to set only those filter bits needed to match
   3085 			 * the range.  (At this time, the only use of address
   3086 			 * ranges is for IP multicast routing, for which the
   3087 			 * range is big enough to require all bits set.)
   3088 			 */
   3089 			goto allmulti;
   3090 		}
   3091 
   3092 		hash = wm_mchash(sc, enm->enm_addrlo);
   3093 
   3094 		reg = (hash >> 5);
   3095 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   3096 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   3097 		    || (sc->sc_type == WM_T_PCH2)
   3098 		    || (sc->sc_type == WM_T_PCH_LPT))
   3099 			reg &= 0x1f;
   3100 		else
   3101 			reg &= 0x7f;
   3102 		bit = hash & 0x1f;
   3103 
   3104 		hash = CSR_READ(sc, mta_reg + (reg << 2));
   3105 		hash |= 1U << bit;
   3106 
   3107 		/* XXX Hardware bug?? */
   3108 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
   3109 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
   3110 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3111 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
   3112 		} else
   3113 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
   3114 
   3115 		ETHER_NEXT_MULTI(step, enm);
   3116 	}
   3117 
   3118 	ifp->if_flags &= ~IFF_ALLMULTI;
   3119 	goto setit;
   3120 
   3121  allmulti:
   3122 	ifp->if_flags |= IFF_ALLMULTI;
   3123 	sc->sc_rctl |= RCTL_MPE;
   3124 
   3125  setit:
   3126 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
   3127 }
   3128 
   3129 /* Reset and init related */
   3130 
   3131 static void
   3132 wm_set_vlan(struct wm_softc *sc)
   3133 {
   3134 	/* Deal with VLAN enables. */
   3135 	if (VLAN_ATTACHED(&sc->sc_ethercom))
   3136 		sc->sc_ctrl |= CTRL_VME;
   3137 	else
   3138 		sc->sc_ctrl &= ~CTRL_VME;
   3139 
   3140 	/* Write the control registers. */
   3141 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3142 }
   3143 
   3144 static void
   3145 wm_set_pcie_completion_timeout(struct wm_softc *sc)
   3146 {
   3147 	uint32_t gcr;
   3148 	pcireg_t ctrl2;
   3149 
   3150 	gcr = CSR_READ(sc, WMREG_GCR);
   3151 
   3152 	/* Only take action if timeout value is defaulted to 0 */
   3153 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
   3154 		goto out;
   3155 
   3156 	if ((gcr & GCR_CAP_VER2) == 0) {
   3157 		gcr |= GCR_CMPL_TMOUT_10MS;
   3158 		goto out;
   3159 	}
   3160 
   3161 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
   3162 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
   3163 	ctrl2 |= WM_PCIE_DCSR2_16MS;
   3164 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
   3165 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
   3166 
   3167 out:
   3168 	/* Disable completion timeout resend */
   3169 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
   3170 
   3171 	CSR_WRITE(sc, WMREG_GCR, gcr);
   3172 }
   3173 
   3174 void
   3175 wm_get_auto_rd_done(struct wm_softc *sc)
   3176 {
   3177 	int i;
   3178 
   3179 	/* wait for eeprom to reload */
   3180 	switch (sc->sc_type) {
   3181 	case WM_T_82571:
   3182 	case WM_T_82572:
   3183 	case WM_T_82573:
   3184 	case WM_T_82574:
   3185 	case WM_T_82583:
   3186 	case WM_T_82575:
   3187 	case WM_T_82576:
   3188 	case WM_T_82580:
   3189 	case WM_T_I350:
   3190 	case WM_T_I354:
   3191 	case WM_T_I210:
   3192 	case WM_T_I211:
   3193 	case WM_T_80003:
   3194 	case WM_T_ICH8:
   3195 	case WM_T_ICH9:
   3196 		for (i = 0; i < 10; i++) {
   3197 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
   3198 				break;
   3199 			delay(1000);
   3200 		}
   3201 		if (i == 10) {
   3202 			log(LOG_ERR, "%s: auto read from eeprom failed to "
   3203 			    "complete\n", device_xname(sc->sc_dev));
   3204 		}
   3205 		break;
   3206 	default:
   3207 		break;
   3208 	}
   3209 }
   3210 
   3211 void
   3212 wm_lan_init_done(struct wm_softc *sc)
   3213 {
   3214 	uint32_t reg = 0;
   3215 	int i;
   3216 
   3217 	/* wait for eeprom to reload */
   3218 	switch (sc->sc_type) {
   3219 	case WM_T_ICH10:
   3220 	case WM_T_PCH:
   3221 	case WM_T_PCH2:
   3222 	case WM_T_PCH_LPT:
   3223 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
   3224 			reg = CSR_READ(sc, WMREG_STATUS);
   3225 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
   3226 				break;
   3227 			delay(100);
   3228 		}
   3229 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
   3230 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
   3231 			    "complete\n", device_xname(sc->sc_dev), __func__);
   3232 		}
   3233 		break;
   3234 	default:
   3235 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3236 		    __func__);
   3237 		break;
   3238 	}
   3239 
   3240 	reg &= ~STATUS_LAN_INIT_DONE;
   3241 	CSR_WRITE(sc, WMREG_STATUS, reg);
   3242 }
   3243 
   3244 void
   3245 wm_get_cfg_done(struct wm_softc *sc)
   3246 {
   3247 	int mask;
   3248 	uint32_t reg;
   3249 	int i;
   3250 
   3251 	/* wait for eeprom to reload */
   3252 	switch (sc->sc_type) {
   3253 	case WM_T_82542_2_0:
   3254 	case WM_T_82542_2_1:
   3255 		/* null */
   3256 		break;
   3257 	case WM_T_82543:
   3258 	case WM_T_82544:
   3259 	case WM_T_82540:
   3260 	case WM_T_82545:
   3261 	case WM_T_82545_3:
   3262 	case WM_T_82546:
   3263 	case WM_T_82546_3:
   3264 	case WM_T_82541:
   3265 	case WM_T_82541_2:
   3266 	case WM_T_82547:
   3267 	case WM_T_82547_2:
   3268 	case WM_T_82573:
   3269 	case WM_T_82574:
   3270 	case WM_T_82583:
   3271 		/* generic */
   3272 		delay(10*1000);
   3273 		break;
   3274 	case WM_T_80003:
   3275 	case WM_T_82571:
   3276 	case WM_T_82572:
   3277 	case WM_T_82575:
   3278 	case WM_T_82576:
   3279 	case WM_T_82580:
   3280 	case WM_T_I350:
   3281 	case WM_T_I354:
   3282 	case WM_T_I210:
   3283 	case WM_T_I211:
   3284 		if (sc->sc_type == WM_T_82571) {
   3285 			/* Only 82571 shares port 0 */
   3286 			mask = EEMNGCTL_CFGDONE_0;
   3287 		} else
   3288 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
   3289 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
   3290 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
   3291 				break;
   3292 			delay(1000);
   3293 		}
   3294 		if (i >= WM_PHY_CFG_TIMEOUT) {
   3295 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
   3296 				device_xname(sc->sc_dev), __func__));
   3297 		}
   3298 		break;
   3299 	case WM_T_ICH8:
   3300 	case WM_T_ICH9:
   3301 	case WM_T_ICH10:
   3302 	case WM_T_PCH:
   3303 	case WM_T_PCH2:
   3304 	case WM_T_PCH_LPT:
   3305 		delay(10*1000);
   3306 		if (sc->sc_type >= WM_T_ICH10)
   3307 			wm_lan_init_done(sc);
   3308 		else
   3309 			wm_get_auto_rd_done(sc);
   3310 
   3311 		reg = CSR_READ(sc, WMREG_STATUS);
   3312 		if ((reg & STATUS_PHYRA) != 0)
   3313 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
   3314 		break;
   3315 	default:
   3316 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   3317 		    __func__);
   3318 		break;
   3319 	}
   3320 }
   3321 
   3322 /* Init hardware bits */
   3323 void
   3324 wm_initialize_hardware_bits(struct wm_softc *sc)
   3325 {
   3326 	uint32_t tarc0, tarc1, reg;
   3327 
   3328 	/* For 82571 variant, 80003 and ICHs */
   3329 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
   3330 	    || (sc->sc_type >= WM_T_80003)) {
   3331 
   3332 		/* Transmit Descriptor Control 0 */
   3333 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
   3334 		reg |= TXDCTL_COUNT_DESC;
   3335 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
   3336 
   3337 		/* Transmit Descriptor Control 1 */
   3338 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
   3339 		reg |= TXDCTL_COUNT_DESC;
   3340 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
   3341 
   3342 		/* TARC0 */
   3343 		tarc0 = CSR_READ(sc, WMREG_TARC0);
   3344 		switch (sc->sc_type) {
   3345 		case WM_T_82571:
   3346 		case WM_T_82572:
   3347 		case WM_T_82573:
   3348 		case WM_T_82574:
   3349 		case WM_T_82583:
   3350 		case WM_T_80003:
   3351 			/* Clear bits 30..27 */
   3352 			tarc0 &= ~__BITS(30, 27);
   3353 			break;
   3354 		default:
   3355 			break;
   3356 		}
   3357 
   3358 		switch (sc->sc_type) {
   3359 		case WM_T_82571:
   3360 		case WM_T_82572:
   3361 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
   3362 
   3363 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3364 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
   3365 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
   3366 			/* 8257[12] Errata No.7 */
    3367 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
   3368 
   3369 			/* TARC1 bit 28 */
   3370 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3371 				tarc1 &= ~__BIT(28);
   3372 			else
   3373 				tarc1 |= __BIT(28);
   3374 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3375 
   3376 			/*
   3377 			 * 8257[12] Errata No.13
    3378 			 * Disable Dynamic Clock Gating.
   3379 			 */
   3380 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3381 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
   3382 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3383 			break;
   3384 		case WM_T_82573:
   3385 		case WM_T_82574:
   3386 		case WM_T_82583:
   3387 			if ((sc->sc_type == WM_T_82574)
   3388 			    || (sc->sc_type == WM_T_82583))
   3389 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
   3390 
   3391 			/* Extended Device Control */
   3392 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3393 			reg &= ~__BIT(23);	/* Clear bit 23 */
   3394 			reg |= __BIT(22);	/* Set bit 22 */
   3395 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3396 
   3397 			/* Device Control */
   3398 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
   3399 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3400 
   3401 			/* PCIe Control Register */
   3402 			/*
   3403 			 * 82573 Errata (unknown).
   3404 			 *
   3405 			 * 82574 Errata 25 and 82583 Errata 12
   3406 			 * "Dropped Rx Packets":
    3407 			 *   NVM image version 2.1.4 and newer does not have this bug.
   3408 			 */
   3409 			reg = CSR_READ(sc, WMREG_GCR);
   3410 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
   3411 			CSR_WRITE(sc, WMREG_GCR, reg);
   3412 
   3413 			if ((sc->sc_type == WM_T_82574)
   3414 			    || (sc->sc_type == WM_T_82583)) {
   3415 				/*
   3416 				 * Document says this bit must be set for
   3417 				 * proper operation.
   3418 				 */
   3419 				reg = CSR_READ(sc, WMREG_GCR);
   3420 				reg |= __BIT(22);
   3421 				CSR_WRITE(sc, WMREG_GCR, reg);
   3422 
   3423 				/*
    3424 				 * Apply a workaround for the hardware errata
    3425 				 * documented in the errata docs.  It fixes an
    3426 				 * issue where some error-prone or unreliable
    3427 				 * PCIe completions occur, particularly with
    3428 				 * ASPM enabled.  Without the fix, the issue
    3429 				 * can cause Tx timeouts.
   3430 				 */
   3431 				reg = CSR_READ(sc, WMREG_GCR2);
   3432 				reg |= __BIT(0);
   3433 				CSR_WRITE(sc, WMREG_GCR2, reg);
   3434 			}
   3435 			break;
   3436 		case WM_T_80003:
   3437 			/* TARC0 */
   3438 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   3439 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
    3440 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
   3441 
   3442 			/* TARC1 bit 28 */
   3443 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3444 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3445 				tarc1 &= ~__BIT(28);
   3446 			else
   3447 				tarc1 |= __BIT(28);
   3448 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3449 			break;
   3450 		case WM_T_ICH8:
   3451 		case WM_T_ICH9:
   3452 		case WM_T_ICH10:
   3453 		case WM_T_PCH:
   3454 		case WM_T_PCH2:
   3455 		case WM_T_PCH_LPT:
   3456 			/* TARC 0 */
   3457 			if (sc->sc_type == WM_T_ICH8) {
   3458 				/* Set TARC0 bits 29 and 28 */
   3459 				tarc0 |= __BITS(29, 28);
   3460 			}
    3461 			/* Set TARC0 bits 23, 24, 26 and 27 */
   3462 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
   3463 
   3464 			/* CTRL_EXT */
   3465 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   3466 			reg |= __BIT(22);	/* Set bit 22 */
   3467 			/*
   3468 			 * Enable PHY low-power state when MAC is at D3
   3469 			 * w/o WoL
   3470 			 */
   3471 			if (sc->sc_type >= WM_T_PCH)
   3472 				reg |= CTRL_EXT_PHYPDEN;
   3473 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3474 
   3475 			/* TARC1 */
   3476 			tarc1 = CSR_READ(sc, WMREG_TARC1);
   3477 			/* bit 28 */
   3478 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
   3479 				tarc1 &= ~__BIT(28);
   3480 			else
   3481 				tarc1 |= __BIT(28);
   3482 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
   3483 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
   3484 
   3485 			/* Device Status */
   3486 			if (sc->sc_type == WM_T_ICH8) {
   3487 				reg = CSR_READ(sc, WMREG_STATUS);
   3488 				reg &= ~__BIT(31);
   3489 				CSR_WRITE(sc, WMREG_STATUS, reg);
   3490 
   3491 			}
   3492 
   3493 			/*
    3494 			 * To work around a descriptor data corruption issue
    3495 			 * with NFS v2 UDP traffic, just disable the NFS
    3496 			 * filtering capability.
   3497 			 */
   3498 			reg = CSR_READ(sc, WMREG_RFCTL);
   3499 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
   3500 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3501 			break;
   3502 		default:
   3503 			break;
   3504 		}
   3505 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
   3506 
   3507 		/*
   3508 		 * 8257[12] Errata No.52 and some others.
   3509 		 * Avoid RSS Hash Value bug.
   3510 		 */
   3511 		switch (sc->sc_type) {
   3512 		case WM_T_82571:
   3513 		case WM_T_82572:
   3514 		case WM_T_82573:
   3515 		case WM_T_80003:
   3516 		case WM_T_ICH8:
   3517 			reg = CSR_READ(sc, WMREG_RFCTL);
    3518 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
   3519 			CSR_WRITE(sc, WMREG_RFCTL, reg);
   3520 			break;
   3521 		default:
   3522 			break;
   3523 		}
   3524 	}
   3525 }
   3526 
   3527 static uint32_t
   3528 wm_rxpbs_adjust_82580(uint32_t val)
   3529 {
   3530 	uint32_t rv = 0;
   3531 
   3532 	if (val < __arraycount(wm_82580_rxpbs_table))
   3533 		rv = wm_82580_rxpbs_table[val];
   3534 
   3535 	return rv;
   3536 }
   3537 
   3538 /*
   3539  * wm_reset:
   3540  *
   3541  *	Reset the i82542 chip.
   3542  */
   3543 static void
   3544 wm_reset(struct wm_softc *sc)
   3545 {
   3546 	int phy_reset = 0;
   3547 	int i, error = 0;
   3548 	uint32_t reg, mask;
   3549 
   3550 	/*
   3551 	 * Allocate on-chip memory according to the MTU size.
   3552 	 * The Packet Buffer Allocation register must be written
   3553 	 * before the chip is reset.
   3554 	 */
   3555 	switch (sc->sc_type) {
   3556 	case WM_T_82547:
   3557 	case WM_T_82547_2:
   3558 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3559 		    PBA_22K : PBA_30K;
   3560 		for (i = 0; i < sc->sc_ntxqueues; i++) {
   3561 			struct wm_txqueue *txq = &sc->sc_txq[i];
   3562 			txq->txq_fifo_head = 0;
   3563 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
   3564 			txq->txq_fifo_size =
   3565 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
   3566 			txq->txq_fifo_stall = 0;
   3567 		}
   3568 		break;
   3569 	case WM_T_82571:
   3570 	case WM_T_82572:
    3571 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
   3572 	case WM_T_80003:
   3573 		sc->sc_pba = PBA_32K;
   3574 		break;
   3575 	case WM_T_82573:
   3576 		sc->sc_pba = PBA_12K;
   3577 		break;
   3578 	case WM_T_82574:
   3579 	case WM_T_82583:
   3580 		sc->sc_pba = PBA_20K;
   3581 		break;
   3582 	case WM_T_82576:
   3583 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
   3584 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
   3585 		break;
   3586 	case WM_T_82580:
   3587 	case WM_T_I350:
   3588 	case WM_T_I354:
   3589 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
   3590 		break;
   3591 	case WM_T_I210:
   3592 	case WM_T_I211:
   3593 		sc->sc_pba = PBA_34K;
   3594 		break;
   3595 	case WM_T_ICH8:
   3596 		/* Workaround for a bit corruption issue in FIFO memory */
   3597 		sc->sc_pba = PBA_8K;
   3598 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
   3599 		break;
   3600 	case WM_T_ICH9:
   3601 	case WM_T_ICH10:
   3602 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
   3603 		    PBA_14K : PBA_10K;
   3604 		break;
   3605 	case WM_T_PCH:
   3606 	case WM_T_PCH2:
   3607 	case WM_T_PCH_LPT:
   3608 		sc->sc_pba = PBA_26K;
   3609 		break;
   3610 	default:
   3611 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
   3612 		    PBA_40K : PBA_48K;
   3613 		break;
   3614 	}
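	/*
	 * A concrete reading of the 82547 case above (assuming the PBA_*
	 * constants encode kilobytes): with a normal MTU the 40KB on-chip
	 * packet buffer is split 30KB RX / 10KB TX; with an MTU above 8192
	 * it is split 22KB RX / 18KB TX, presumably so that a full jumbo
	 * frame still fits in the Tx FIFO.
	 */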
   3615 	/*
    3616 	 * Only old or non-multiqueue devices have the PBA register.
   3617 	 * XXX Need special handling for 82575.
   3618 	 */
   3619 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   3620 	    || (sc->sc_type == WM_T_82575))
   3621 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
   3622 
   3623 	/* Prevent the PCI-E bus from sticking */
   3624 	if (sc->sc_flags & WM_F_PCIE) {
   3625 		int timeout = 800;
   3626 
   3627 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
   3628 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   3629 
   3630 		while (timeout--) {
   3631 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
   3632 			    == 0)
   3633 				break;
   3634 			delay(100);
   3635 		}
   3636 	}
   3637 
   3638 	/* Set the completion timeout for interface */
   3639 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   3640 	    || (sc->sc_type == WM_T_82580)
   3641 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   3642 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
   3643 		wm_set_pcie_completion_timeout(sc);
   3644 
   3645 	/* Clear interrupt */
   3646 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3647 	if (sc->sc_nintrs > 1) {
   3648 		if (sc->sc_type != WM_T_82574) {
   3649 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3650 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3651 		} else {
   3652 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3653 		}
   3654 	}
   3655 
   3656 	/* Stop the transmit and receive processes. */
   3657 	CSR_WRITE(sc, WMREG_RCTL, 0);
   3658 	sc->sc_rctl &= ~RCTL_EN;
   3659 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
   3660 	CSR_WRITE_FLUSH(sc);
   3661 
   3662 	/* XXX set_tbi_sbp_82543() */
   3663 
   3664 	delay(10*1000);
   3665 
   3666 	/* Must acquire the MDIO ownership before MAC reset */
   3667 	switch (sc->sc_type) {
   3668 	case WM_T_82573:
   3669 	case WM_T_82574:
   3670 	case WM_T_82583:
   3671 		error = wm_get_hw_semaphore_82573(sc);
   3672 		break;
   3673 	default:
   3674 		break;
   3675 	}
   3676 
   3677 	/*
   3678 	 * 82541 Errata 29? & 82547 Errata 28?
   3679 	 * See also the description about PHY_RST bit in CTRL register
   3680 	 * in 8254x_GBe_SDM.pdf.
   3681 	 */
   3682 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
   3683 		CSR_WRITE(sc, WMREG_CTRL,
   3684 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
   3685 		CSR_WRITE_FLUSH(sc);
   3686 		delay(5000);
   3687 	}
   3688 
   3689 	switch (sc->sc_type) {
   3690 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
   3691 	case WM_T_82541:
   3692 	case WM_T_82541_2:
   3693 	case WM_T_82547:
   3694 	case WM_T_82547_2:
   3695 		/*
   3696 		 * On some chipsets, a reset through a memory-mapped write
   3697 		 * cycle can cause the chip to reset before completing the
    3698 		 * write cycle.  This causes a major headache that can be
   3699 		 * avoided by issuing the reset via indirect register writes
   3700 		 * through I/O space.
   3701 		 *
   3702 		 * So, if we successfully mapped the I/O BAR at attach time,
   3703 		 * use that.  Otherwise, try our luck with a memory-mapped
   3704 		 * reset.
   3705 		 */
   3706 		if (sc->sc_flags & WM_F_IOH_VALID)
   3707 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
   3708 		else
   3709 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
   3710 		break;
   3711 	case WM_T_82545_3:
   3712 	case WM_T_82546_3:
   3713 		/* Use the shadow control register on these chips. */
   3714 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
   3715 		break;
   3716 	case WM_T_80003:
   3717 		mask = swfwphysem[sc->sc_funcid];
   3718 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3719 		wm_get_swfw_semaphore(sc, mask);
   3720 		CSR_WRITE(sc, WMREG_CTRL, reg);
   3721 		wm_put_swfw_semaphore(sc, mask);
   3722 		break;
   3723 	case WM_T_ICH8:
   3724 	case WM_T_ICH9:
   3725 	case WM_T_ICH10:
   3726 	case WM_T_PCH:
   3727 	case WM_T_PCH2:
   3728 	case WM_T_PCH_LPT:
   3729 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
   3730 		if (wm_check_reset_block(sc) == 0) {
   3731 			/*
   3732 			 * Gate automatic PHY configuration by hardware on
   3733 			 * non-managed 82579
   3734 			 */
   3735 			if ((sc->sc_type == WM_T_PCH2)
   3736 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
   3737 				== 0))
   3738 				wm_gate_hw_phy_config_ich8lan(sc, 1);
   3739 
   3740 			reg |= CTRL_PHY_RESET;
   3741 			phy_reset = 1;
   3742 		}
   3743 		wm_get_swfwhw_semaphore(sc);
   3744 		CSR_WRITE(sc, WMREG_CTRL, reg);
    3745 		/* Don't insert a completion barrier during reset */
   3746 		delay(20*1000);
   3747 		wm_put_swfwhw_semaphore(sc);
   3748 		break;
   3749 	case WM_T_82580:
   3750 	case WM_T_I350:
   3751 	case WM_T_I354:
   3752 	case WM_T_I210:
   3753 	case WM_T_I211:
   3754 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3755 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
   3756 			CSR_WRITE_FLUSH(sc);
   3757 		delay(5000);
   3758 		break;
   3759 	case WM_T_82542_2_0:
   3760 	case WM_T_82542_2_1:
   3761 	case WM_T_82543:
   3762 	case WM_T_82540:
   3763 	case WM_T_82545:
   3764 	case WM_T_82546:
   3765 	case WM_T_82571:
   3766 	case WM_T_82572:
   3767 	case WM_T_82573:
   3768 	case WM_T_82574:
   3769 	case WM_T_82575:
   3770 	case WM_T_82576:
   3771 	case WM_T_82583:
   3772 	default:
   3773 		/* Everything else can safely use the documented method. */
   3774 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
   3775 		break;
   3776 	}
   3777 
   3778 	/* Must release the MDIO ownership after MAC reset */
   3779 	switch (sc->sc_type) {
   3780 	case WM_T_82573:
   3781 	case WM_T_82574:
   3782 	case WM_T_82583:
   3783 		if (error == 0)
   3784 			wm_put_hw_semaphore_82573(sc);
   3785 		break;
   3786 	default:
   3787 		break;
   3788 	}
   3789 
   3790 	if (phy_reset != 0)
   3791 		wm_get_cfg_done(sc);
   3792 
   3793 	/* reload EEPROM */
   3794 	switch (sc->sc_type) {
   3795 	case WM_T_82542_2_0:
   3796 	case WM_T_82542_2_1:
   3797 	case WM_T_82543:
   3798 	case WM_T_82544:
   3799 		delay(10);
   3800 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3801 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3802 		CSR_WRITE_FLUSH(sc);
   3803 		delay(2000);
   3804 		break;
   3805 	case WM_T_82540:
   3806 	case WM_T_82545:
   3807 	case WM_T_82545_3:
   3808 	case WM_T_82546:
   3809 	case WM_T_82546_3:
   3810 		delay(5*1000);
   3811 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3812 		break;
   3813 	case WM_T_82541:
   3814 	case WM_T_82541_2:
   3815 	case WM_T_82547:
   3816 	case WM_T_82547_2:
   3817 		delay(20000);
   3818 		/* XXX Disable HW ARPs on ASF enabled adapters */
   3819 		break;
   3820 	case WM_T_82571:
   3821 	case WM_T_82572:
   3822 	case WM_T_82573:
   3823 	case WM_T_82574:
   3824 	case WM_T_82583:
   3825 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
   3826 			delay(10);
   3827 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
   3828 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   3829 			CSR_WRITE_FLUSH(sc);
   3830 		}
   3831 		/* check EECD_EE_AUTORD */
   3832 		wm_get_auto_rd_done(sc);
   3833 		/*
    3834 		 * PHY configuration from the NVM starts just after
    3835 		 * EECD_AUTO_RD is set.
   3836 		 */
   3837 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
   3838 		    || (sc->sc_type == WM_T_82583))
   3839 			delay(25*1000);
   3840 		break;
   3841 	case WM_T_82575:
   3842 	case WM_T_82576:
   3843 	case WM_T_82580:
   3844 	case WM_T_I350:
   3845 	case WM_T_I354:
   3846 	case WM_T_I210:
   3847 	case WM_T_I211:
   3848 	case WM_T_80003:
   3849 		/* check EECD_EE_AUTORD */
   3850 		wm_get_auto_rd_done(sc);
   3851 		break;
   3852 	case WM_T_ICH8:
   3853 	case WM_T_ICH9:
   3854 	case WM_T_ICH10:
   3855 	case WM_T_PCH:
   3856 	case WM_T_PCH2:
   3857 	case WM_T_PCH_LPT:
   3858 		break;
   3859 	default:
   3860 		panic("%s: unknown type\n", __func__);
   3861 	}
   3862 
   3863 	/* Check whether EEPROM is present or not */
   3864 	switch (sc->sc_type) {
   3865 	case WM_T_82575:
   3866 	case WM_T_82576:
   3867 	case WM_T_82580:
   3868 	case WM_T_I350:
   3869 	case WM_T_I354:
   3870 	case WM_T_ICH8:
   3871 	case WM_T_ICH9:
   3872 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
   3873 			/* Not found */
   3874 			sc->sc_flags |= WM_F_EEPROM_INVALID;
   3875 			if (sc->sc_type == WM_T_82575)
   3876 				wm_reset_init_script_82575(sc);
   3877 		}
   3878 		break;
   3879 	default:
   3880 		break;
   3881 	}
   3882 
   3883 	if ((sc->sc_type == WM_T_82580)
   3884 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
   3885 		/* clear global device reset status bit */
   3886 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
   3887 	}
   3888 
   3889 	/* Clear any pending interrupt events. */
   3890 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   3891 	reg = CSR_READ(sc, WMREG_ICR);
   3892 	if (sc->sc_nintrs > 1) {
   3893 		if (sc->sc_type != WM_T_82574) {
   3894 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   3895 			CSR_WRITE(sc, WMREG_EIAC, 0);
   3896 		} else
   3897 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   3898 	}
   3899 
   3900 	/* reload sc_ctrl */
   3901 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   3902 
   3903 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
   3904 		wm_set_eee_i350(sc);
   3905 
   3906 	/* dummy read from WUC */
   3907 	if (sc->sc_type == WM_T_PCH)
   3908 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   3909 	/*
   3910 	 * For PCH, this write will make sure that any noise will be detected
   3911 	 * as a CRC error and be dropped rather than show up as a bad packet
    3912 	 * to the DMA engine.
   3913 	 */
   3914 	if (sc->sc_type == WM_T_PCH)
   3915 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
   3916 
   3917 	if (sc->sc_type >= WM_T_82544)
   3918 		CSR_WRITE(sc, WMREG_WUC, 0);
   3919 
   3920 	wm_reset_mdicnfg_82580(sc);
   3921 
   3922 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
   3923 		wm_pll_workaround_i210(sc);
   3924 }
   3925 
   3926 /*
   3927  * wm_add_rxbuf:
   3928  *
    3929  *	Add a receive buffer to the indicated descriptor.
   3930  */
   3931 static int
   3932 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
   3933 {
   3934 	struct wm_softc *sc = rxq->rxq_sc;
   3935 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
   3936 	struct mbuf *m;
   3937 	int error;
   3938 
   3939 	KASSERT(WM_RX_LOCKED(rxq));
   3940 
   3941 	MGETHDR(m, M_DONTWAIT, MT_DATA);
   3942 	if (m == NULL)
   3943 		return ENOBUFS;
   3944 
   3945 	MCLGET(m, M_DONTWAIT);
   3946 	if ((m->m_flags & M_EXT) == 0) {
   3947 		m_freem(m);
   3948 		return ENOBUFS;
   3949 	}
   3950 
   3951 	if (rxs->rxs_mbuf != NULL)
   3952 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3953 
   3954 	rxs->rxs_mbuf = m;
   3955 
   3956 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3957 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
   3958 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
   3959 	if (error) {
   3960 		/* XXX XXX XXX */
   3961 		aprint_error_dev(sc->sc_dev,
   3962 		    "unable to load rx DMA map %d, error = %d\n",
   3963 		    idx, error);
   3964 		panic("wm_add_rxbuf");
   3965 	}
   3966 
   3967 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   3968 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   3969 
   3970 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   3971 		if ((sc->sc_rctl & RCTL_EN) != 0)
   3972 			wm_init_rxdesc(rxq, idx);
   3973 	} else
   3974 		wm_init_rxdesc(rxq, idx);
   3975 
   3976 	return 0;
   3977 }
   3978 
   3979 /*
   3980  * wm_rxdrain:
   3981  *
   3982  *	Drain the receive queue.
   3983  */
   3984 static void
   3985 wm_rxdrain(struct wm_rxqueue *rxq)
   3986 {
   3987 	struct wm_softc *sc = rxq->rxq_sc;
   3988 	struct wm_rxsoft *rxs;
   3989 	int i;
   3990 
   3991 	KASSERT(WM_RX_LOCKED(rxq));
   3992 
   3993 	for (i = 0; i < WM_NRXDESC; i++) {
   3994 		rxs = &rxq->rxq_soft[i];
   3995 		if (rxs->rxs_mbuf != NULL) {
   3996 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
   3997 			m_freem(rxs->rxs_mbuf);
   3998 			rxs->rxs_mbuf = NULL;
   3999 		}
   4000 	}
   4001 }
   4002 
   4003 
   4004 /*
   4005  * XXX copy from FreeBSD's sys/net/rss_config.c
   4006  */
   4007 /*
   4008  * RSS secret key, intended to prevent attacks on load-balancing.  Its
   4009  * effectiveness may be limited by algorithm choice and available entropy
   4010  * during the boot.
   4011  *
   4012  * XXXRW: And that we don't randomize it yet!
   4013  *
   4014  * This is the default Microsoft RSS specification key which is also
   4015  * the Chelsio T5 firmware default key.
   4016  */
   4017 #define RSS_KEYSIZE 40
   4018 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
   4019 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
   4020 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
   4021 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
   4022 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
   4023 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
   4024 };
   4025 
   4026 /*
   4027  * Caller must pass an array of size sizeof(rss_key).
   4028  *
   4029  * XXX
   4030  * As if_ixgbe may use this function, this function should not be
   4031  * if_wm specific function.
   4032  */
   4033 static void
   4034 wm_rss_getkey(uint8_t *key)
   4035 {
   4036 
   4037 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
   4038 }
   4039 
   4040 /*
   4041  * Setup registers for RSS.
   4042  *
    4043  * XXX VMDq is not supported yet.
   4044  */
   4045 static void
   4046 wm_init_rss(struct wm_softc *sc)
   4047 {
   4048 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
   4049 	int i;
   4050 
   4051 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
   4052 
   4053 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
   4054 		int qid, reta_ent;
   4055 
   4056 		qid  = i % sc->sc_nrxqueues;
    4057 		switch (sc->sc_type) {
   4058 		case WM_T_82574:
   4059 			reta_ent = __SHIFTIN(qid,
   4060 			    RETA_ENT_QINDEX_MASK_82574);
   4061 			break;
   4062 		case WM_T_82575:
   4063 			reta_ent = __SHIFTIN(qid,
   4064 			    RETA_ENT_QINDEX1_MASK_82575);
   4065 			break;
   4066 		default:
   4067 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
   4068 			break;
   4069 		}
   4070 
   4071 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
   4072 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
   4073 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
   4074 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
   4075 	}
   4076 
   4077 	wm_rss_getkey((uint8_t *)rss_key);
   4078 	for (i = 0; i < RSSRK_NUM_REGS; i++)
   4079 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
   4080 
   4081 	if (sc->sc_type == WM_T_82574)
   4082 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
   4083 	else
   4084 		mrqc = MRQC_ENABLE_RSS_MQ;
   4085 
   4086 	/* XXXX
   4087 	 * The same as FreeBSD igb.
    4088 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
   4089 	 */
   4090 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
   4091 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
   4092 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
   4093 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
   4094 
   4095 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
   4096 }
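/*
 * Sketch of the result (assuming the usual 128-entry RETA): with
 * sc_nrxqueues == 4, the loop above programs the table entries as
 * 0,1,2,3,0,1,2,3,...  The low bits of a packet's Toeplitz hash,
 * computed over the fields enabled in MRQC, index this table, so
 * flows are spread evenly across the RX queues.
 */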
   4097 
   4098 /*
    4099  * Adjust the numbers of TX and RX queues which the system actually uses.
    4100  *
    4101  * The numbers are affected by the parameters below.
    4102  *     - The number of hardware queues
   4103  *     - The number of MSI-X vectors (= "nvectors" argument)
   4104  *     - ncpu
   4105  */
   4106 static void
   4107 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
   4108 {
   4109 	int hw_ntxqueues, hw_nrxqueues;
   4110 
   4111 	if (nvectors < 3) {
   4112 		sc->sc_ntxqueues = 1;
   4113 		sc->sc_nrxqueues = 1;
   4114 		return;
   4115 	}
   4116 
    4117 	switch (sc->sc_type) {
   4118 	case WM_T_82572:
   4119 		hw_ntxqueues = 2;
   4120 		hw_nrxqueues = 2;
   4121 		break;
   4122 	case WM_T_82574:
   4123 		hw_ntxqueues = 2;
   4124 		hw_nrxqueues = 2;
   4125 		break;
   4126 	case WM_T_82575:
   4127 		hw_ntxqueues = 4;
   4128 		hw_nrxqueues = 4;
   4129 		break;
   4130 	case WM_T_82576:
   4131 		hw_ntxqueues = 16;
   4132 		hw_nrxqueues = 16;
   4133 		break;
   4134 	case WM_T_82580:
   4135 	case WM_T_I350:
   4136 	case WM_T_I354:
   4137 		hw_ntxqueues = 8;
   4138 		hw_nrxqueues = 8;
   4139 		break;
   4140 	case WM_T_I210:
   4141 		hw_ntxqueues = 4;
   4142 		hw_nrxqueues = 4;
   4143 		break;
   4144 	case WM_T_I211:
   4145 		hw_ntxqueues = 2;
   4146 		hw_nrxqueues = 2;
   4147 		break;
   4148 		/*
    4149 		 * As the Ethernet controllers below do not support MSI-X,
    4150 		 * this driver does not use multiqueue on them.
   4151 		 *     - WM_T_80003
   4152 		 *     - WM_T_ICH8
   4153 		 *     - WM_T_ICH9
   4154 		 *     - WM_T_ICH10
   4155 		 *     - WM_T_PCH
   4156 		 *     - WM_T_PCH2
   4157 		 *     - WM_T_PCH_LPT
   4158 		 */
   4159 	default:
   4160 		hw_ntxqueues = 1;
   4161 		hw_nrxqueues = 1;
   4162 		break;
   4163 	}
   4164 
   4165 	/*
    4166 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
    4167 	 * the number of queues actually used.
    4168 	 *
    4169 	 * XXX
    4170 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
    4171 	 * However, the number of MSI-X vectors on recent controllers (such as
    4172 	 * the I354) expects that drivers bundle a TX queue interrupt and an
    4173 	 * RX queue interrupt into one interrupt; e.g. FreeBSD's igb handles
    4174 	 * interrupts that way.
   4175 	 */
   4176 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
   4177 		sc->sc_ntxqueues = (nvectors - 1) / 2;
   4178 		sc->sc_nrxqueues = (nvectors - 1) / 2;
   4179 	} else {
   4180 		sc->sc_ntxqueues = hw_ntxqueues;
   4181 		sc->sc_nrxqueues = hw_nrxqueues;
   4182 	}
   4183 
   4184 	/*
    4185 	 * As more queues than CPUs cannot improve scaling, we limit
    4186 	 * the number of queues actually used.
   4187 	 */
   4188 	if (ncpu < sc->sc_ntxqueues)
   4189 		sc->sc_ntxqueues = ncpu;
   4190 	if (ncpu < sc->sc_nrxqueues)
   4191 		sc->sc_nrxqueues = ncpu;
   4192 
   4193 	/* XXX Currently, this driver supports RX multiqueue only. */
   4194 	sc->sc_ntxqueues = 1;
   4195 }
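/*
 * A worked example: on an I350 (8 hardware TX and RX queues) with
 * nvectors == 5, the vector limit gives (5 - 1) / 2 = 2 TX and 2 RX
 * queues; a machine with ncpu >= 2 leaves those as-is, and the last
 * statement then forces sc_ntxqueues back to 1 because TX multiqueue
 * is not supported yet.
 */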
   4196 
   4197 /*
    4198  * Both single-interrupt MSI and INTx can use this function.
   4199  */
   4200 static int
   4201 wm_setup_legacy(struct wm_softc *sc)
   4202 {
   4203 	pci_chipset_tag_t pc = sc->sc_pc;
   4204 	const char *intrstr = NULL;
   4205 	char intrbuf[PCI_INTRSTR_LEN];
   4206 	int error;
   4207 
   4208 	error = wm_alloc_txrx_queues(sc);
   4209 	if (error) {
   4210 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4211 		    error);
   4212 		return ENOMEM;
   4213 	}
   4214 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
   4215 	    sizeof(intrbuf));
   4216 #ifdef WM_MPSAFE
   4217 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
   4218 #endif
   4219 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
   4220 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
   4221 	if (sc->sc_ihs[0] == NULL) {
    4222 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
   4223 		    (pci_intr_type(sc->sc_intrs[0])
   4224 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
   4225 		return ENOMEM;
   4226 	}
   4227 
   4228 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
   4229 	sc->sc_nintrs = 1;
   4230 	return 0;
   4231 }
   4232 
   4233 static int
   4234 wm_setup_msix(struct wm_softc *sc)
   4235 {
   4236 	void *vih;
   4237 	kcpuset_t *affinity;
   4238 	int qidx, error, intr_idx, tx_established, rx_established;
   4239 	pci_chipset_tag_t pc = sc->sc_pc;
   4240 	const char *intrstr = NULL;
   4241 	char intrbuf[PCI_INTRSTR_LEN];
   4242 	char intr_xname[INTRDEVNAMEBUF];
   4243 
   4244 	error = wm_alloc_txrx_queues(sc);
   4245 	if (error) {
   4246 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
   4247 		    error);
   4248 		return ENOMEM;
   4249 	}
   4250 
   4251 	kcpuset_create(&affinity, false);
   4252 	intr_idx = 0;
   4253 
   4254 	/*
   4255 	 * TX
   4256 	 */
   4257 	tx_established = 0;
   4258 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4259 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4260 
   4261 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4262 		    sizeof(intrbuf));
   4263 #ifdef WM_MPSAFE
   4264 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4265 		    PCI_INTR_MPSAFE, true);
   4266 #endif
   4267 		memset(intr_xname, 0, sizeof(intr_xname));
   4268 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
   4269 		    device_xname(sc->sc_dev), qidx);
   4270 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4271 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
   4272 		if (vih == NULL) {
   4273 			aprint_error_dev(sc->sc_dev,
   4274 			    "unable to establish MSI-X(for TX)%s%s\n",
   4275 			    intrstr ? " at " : "",
   4276 			    intrstr ? intrstr : "");
   4277 
   4278 			goto fail_0;
   4279 		}
   4280 		kcpuset_zero(affinity);
   4281 		/* Round-robin affinity */
   4282 		kcpuset_set(affinity, intr_idx % ncpu);
   4283 		error = interrupt_distribute(vih, affinity, NULL);
   4284 		if (error == 0) {
   4285 			aprint_normal_dev(sc->sc_dev,
   4286 			    "for TX interrupting at %s affinity to %u\n",
   4287 			    intrstr, intr_idx % ncpu);
   4288 		} else {
   4289 			aprint_normal_dev(sc->sc_dev,
   4290 			    "for TX interrupting at %s\n", intrstr);
   4291 		}
   4292 		sc->sc_ihs[intr_idx] = vih;
   4293 		txq->txq_id = qidx;
   4294 		txq->txq_intr_idx = intr_idx;
   4295 
   4296 		tx_established++;
   4297 		intr_idx++;
   4298 	}
   4299 
   4300 	/*
   4301 	 * RX
   4302 	 */
   4303 	rx_established = 0;
   4304 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4305 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4306 
   4307 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4308 		    sizeof(intrbuf));
   4309 #ifdef WM_MPSAFE
   4310 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4311 		    PCI_INTR_MPSAFE, true);
   4312 #endif
   4313 		memset(intr_xname, 0, sizeof(intr_xname));
   4314 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
   4315 		    device_xname(sc->sc_dev), qidx);
   4316 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4317 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
   4318 		if (vih == NULL) {
   4319 			aprint_error_dev(sc->sc_dev,
   4320 			    "unable to establish MSI-X(for RX)%s%s\n",
   4321 			    intrstr ? " at " : "",
   4322 			    intrstr ? intrstr : "");
   4323 
   4324 			goto fail_1;
   4325 		}
   4326 		kcpuset_zero(affinity);
   4327 		/* Round-robin affinity */
   4328 		kcpuset_set(affinity, intr_idx % ncpu);
   4329 		error = interrupt_distribute(vih, affinity, NULL);
   4330 		if (error == 0) {
   4331 			aprint_normal_dev(sc->sc_dev,
   4332 			    "for RX interrupting at %s affinity to %u\n",
   4333 			    intrstr, intr_idx % ncpu);
   4334 		} else {
   4335 			aprint_normal_dev(sc->sc_dev,
   4336 			    "for RX interrupting at %s\n", intrstr);
   4337 		}
   4338 		sc->sc_ihs[intr_idx] = vih;
   4339 		rxq->rxq_id = qidx;
   4340 		rxq->rxq_intr_idx = intr_idx;
   4341 
   4342 		rx_established++;
   4343 		intr_idx++;
   4344 	}
   4345 
   4346 	/*
   4347 	 * LINK
   4348 	 */
   4349 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
   4350 	    sizeof(intrbuf));
   4351 #ifdef WM_MPSAFE
   4352 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
   4353 	    PCI_INTR_MPSAFE, true);
   4354 #endif
   4355 	memset(intr_xname, 0, sizeof(intr_xname));
   4356 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
   4357 	    device_xname(sc->sc_dev));
   4358 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
   4359 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
   4360 	if (vih == NULL) {
   4361 		aprint_error_dev(sc->sc_dev,
   4362 		    "unable to establish MSI-X(for LINK)%s%s\n",
   4363 		    intrstr ? " at " : "",
   4364 		    intrstr ? intrstr : "");
   4365 
   4366 		goto fail_1;
   4367 	}
    4368 	/* Keep the default affinity for the LINK interrupt */
   4369 	aprint_normal_dev(sc->sc_dev,
   4370 	    "for LINK interrupting at %s\n", intrstr);
   4371 	sc->sc_ihs[intr_idx] = vih;
   4372 	sc->sc_link_intr_idx = intr_idx;
   4373 
   4374 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
   4375 	kcpuset_destroy(affinity);
   4376 	return 0;
   4377 
   4378  fail_1:
   4379 	for (qidx = 0; qidx < rx_established; qidx++) {
   4380 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4381 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[rxq->rxq_intr_idx]);
   4382 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
   4383 	}
   4384  fail_0:
   4385 	for (qidx = 0; qidx < tx_established; qidx++) {
   4386 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4387 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[txq->txq_intr_idx]);
   4388 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
   4389 	}
   4390 
   4391 	kcpuset_destroy(affinity);
   4392 	return ENOMEM;
   4393 }
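/*
 * Resulting vector layout, for illustration: with sc_ntxqueues == 1
 * and sc_nrxqueues == 2, vector 0 is TX0, vectors 1 and 2 are
 * RX0/RX1, and vector 3 is LINK, giving sc_nintrs == 4.  Queue
 * vectors are spread round-robin over CPUs by interrupt_distribute();
 * the LINK vector keeps the default affinity.
 */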
   4394 
   4395 /*
   4396  * wm_init:		[ifnet interface function]
   4397  *
   4398  *	Initialize the interface.
   4399  */
   4400 static int
   4401 wm_init(struct ifnet *ifp)
   4402 {
   4403 	struct wm_softc *sc = ifp->if_softc;
   4404 	int ret;
   4405 
   4406 	WM_CORE_LOCK(sc);
   4407 	ret = wm_init_locked(ifp);
   4408 	WM_CORE_UNLOCK(sc);
   4409 
   4410 	return ret;
   4411 }
   4412 
   4413 static int
   4414 wm_init_locked(struct ifnet *ifp)
   4415 {
   4416 	struct wm_softc *sc = ifp->if_softc;
   4417 	int i, j, trynum, error = 0;
   4418 	uint32_t reg;
   4419 
   4420 	KASSERT(WM_CORE_LOCKED(sc));
   4421 	/*
    4422 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
    4423 	 * There is a small but measurable benefit to avoiding the adjustment
   4424 	 * of the descriptor so that the headers are aligned, for normal mtu,
   4425 	 * on such platforms.  One possibility is that the DMA itself is
   4426 	 * slightly more efficient if the front of the entire packet (instead
   4427 	 * of the front of the headers) is aligned.
   4428 	 *
   4429 	 * Note we must always set align_tweak to 0 if we are using
   4430 	 * jumbo frames.
   4431 	 */
   4432 #ifdef __NO_STRICT_ALIGNMENT
   4433 	sc->sc_align_tweak = 0;
   4434 #else
   4435 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
   4436 		sc->sc_align_tweak = 0;
   4437 	else
   4438 		sc->sc_align_tweak = 2;
   4439 #endif /* __NO_STRICT_ALIGNMENT */
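	/*
	 * For example, at the normal MTU a received frame is placed at
	 * offset 2 in the cluster, so the 14-byte Ethernet header ends
	 * on a 4-byte boundary and the IP header that follows is
	 * properly aligned for strict-alignment CPUs.
	 */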
   4440 
   4441 	/* Cancel any pending I/O. */
   4442 	wm_stop_locked(ifp, 0);
   4443 
   4444 	/* update statistics before reset */
   4445 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
   4446 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
   4447 
   4448 	/* Reset the chip to a known state. */
   4449 	wm_reset(sc);
   4450 
   4451 	switch (sc->sc_type) {
   4452 	case WM_T_82571:
   4453 	case WM_T_82572:
   4454 	case WM_T_82573:
   4455 	case WM_T_82574:
   4456 	case WM_T_82583:
   4457 	case WM_T_80003:
   4458 	case WM_T_ICH8:
   4459 	case WM_T_ICH9:
   4460 	case WM_T_ICH10:
   4461 	case WM_T_PCH:
   4462 	case WM_T_PCH2:
   4463 	case WM_T_PCH_LPT:
   4464 		/* AMT based hardware can now take control from firmware */
   4465 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   4466 			wm_get_hw_control(sc);
   4467 		break;
   4468 	default:
   4469 		break;
   4470 	}
   4471 
   4472 	/* Init hardware bits */
   4473 	wm_initialize_hardware_bits(sc);
   4474 
   4475 	/* Reset the PHY. */
   4476 	if (sc->sc_flags & WM_F_HAS_MII)
   4477 		wm_gmii_reset(sc);
   4478 
   4479 	/* Calculate (E)ITR value */
   4480 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4481 		sc->sc_itr = 450;	/* For EITR */
   4482 	} else if (sc->sc_type >= WM_T_82543) {
   4483 		/*
   4484 		 * Set up the interrupt throttling register (units of 256ns)
   4485 		 * Note that a footnote in Intel's documentation says this
   4486 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
   4487 		 * or 10Mbit mode.  Empirically, it appears to be the case
   4488 		 * that that is also true for the 1024ns units of the other
   4489 		 * interrupt-related timer registers -- so, really, we ought
   4490 		 * to divide this value by 4 when the link speed is low.
   4491 		 *
   4492 		 * XXX implement this division at link speed change!
   4493 		 */
   4494 
   4495 		/*
   4496 		 * For N interrupts/sec, set this value to:
   4497 		 * 1000000000 / (N * 256).  Note that we set the
   4498 		 * absolute and packet timer values to this value
   4499 		 * divided by 4 to get "simple timer" behavior.
   4500 		 */
   4501 
   4502 		sc->sc_itr = 1500;		/* 2604 ints/sec */
   4503 	}
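	/*
	 * Arithmetic check: 1500 ticks * 256 ns = 384 us between
	 * interrupts, i.e. 10^9 / (1500 * 256) ~= 2604 interrupts/sec,
	 * matching the figure in the comment above.
	 */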
   4504 
   4505 	error = wm_init_txrx_queues(sc);
   4506 	if (error)
   4507 		goto out;
   4508 
   4509 	/*
   4510 	 * Clear out the VLAN table -- we don't use it (yet).
   4511 	 */
   4512 	CSR_WRITE(sc, WMREG_VET, 0);
   4513 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
   4514 		trynum = 10; /* Due to hw errata */
   4515 	else
   4516 		trynum = 1;
   4517 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
   4518 		for (j = 0; j < trynum; j++)
   4519 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
   4520 
   4521 	/*
   4522 	 * Set up flow-control parameters.
   4523 	 *
   4524 	 * XXX Values could probably stand some tuning.
   4525 	 */
   4526 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
   4527 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
   4528 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
   4529 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
   4530 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
   4531 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
   4532 	}
   4533 
   4534 	sc->sc_fcrtl = FCRTL_DFLT;
   4535 	if (sc->sc_type < WM_T_82543) {
   4536 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
   4537 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
   4538 	} else {
   4539 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
   4540 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
   4541 	}
   4542 
   4543 	if (sc->sc_type == WM_T_80003)
   4544 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
   4545 	else
   4546 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
   4547 
   4548 	/* Writes the control register. */
   4549 	wm_set_vlan(sc);
   4550 
   4551 	if (sc->sc_flags & WM_F_HAS_MII) {
   4552 		int val;
   4553 
   4554 		switch (sc->sc_type) {
   4555 		case WM_T_80003:
   4556 		case WM_T_ICH8:
   4557 		case WM_T_ICH9:
   4558 		case WM_T_ICH10:
   4559 		case WM_T_PCH:
   4560 		case WM_T_PCH2:
   4561 		case WM_T_PCH_LPT:
   4562 			/*
   4563 			 * Set the mac to wait the maximum time between each
   4564 			 * iteration and increase the max iterations when
   4565 			 * polling the phy; this fixes erroneous timeouts at
   4566 			 * 10Mbps.
   4567 			 */
   4568 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
   4569 			    0xFFFF);
   4570 			val = wm_kmrn_readreg(sc,
   4571 			    KUMCTRLSTA_OFFSET_INB_PARAM);
   4572 			val |= 0x3F;
   4573 			wm_kmrn_writereg(sc,
   4574 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
   4575 			break;
   4576 		default:
   4577 			break;
   4578 		}
   4579 
   4580 		if (sc->sc_type == WM_T_80003) {
   4581 			val = CSR_READ(sc, WMREG_CTRL_EXT);
   4582 			val &= ~CTRL_EXT_LINK_MODE_MASK;
   4583 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
   4584 
   4585 			/* Bypass RX and TX FIFO's */
   4586 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
   4587 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
   4588 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
   4589 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
   4590 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
   4591 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
   4592 		}
   4593 	}
   4594 #if 0
   4595 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
   4596 #endif
   4597 
   4598 	/* Set up checksum offload parameters. */
   4599 	reg = CSR_READ(sc, WMREG_RXCSUM);
   4600 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
   4601 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
   4602 		reg |= RXCSUM_IPOFL;
   4603 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
   4604 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
   4605 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
   4606 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
   4607 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4608 
   4609 	/* Set up MSI-X */
   4610 	if (sc->sc_nintrs > 1) {
   4611 		uint32_t ivar;
   4612 
   4613 		if (sc->sc_type == WM_T_82575) {
   4614 			/* Interrupt control */
   4615 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4616 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
   4617 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4618 
   4619 			/* TX */
   4620 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4621 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4622 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
   4623 				    EITR_TX_QUEUE(txq->txq_id));
   4624 			}
   4625 			/* RX */
   4626 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4627 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4628 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
   4629 				    EITR_RX_QUEUE(rxq->rxq_id));
   4630 			}
   4631 			/* Link status */
   4632 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
   4633 			    EITR_OTHER);
   4634 		} else if (sc->sc_type == WM_T_82574) {
   4635 			/* Interrupt control */
   4636 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
   4637 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
   4638 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   4639 
   4640 			ivar = 0;
   4641 			/* TX */
   4642 			for (i = 0; i < sc->sc_ntxqueues; i++) {
   4643 				struct wm_txqueue *txq = &sc->sc_txq[i];
   4644 				ivar |= __SHIFTIN((IVAR_VALID_82574|txq->txq_intr_idx),
   4645 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
   4646 			}
   4647 			/* RX */
   4648 			for (i = 0; i < sc->sc_nrxqueues; i++) {
   4649 				struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4650 				ivar |= __SHIFTIN((IVAR_VALID_82574|rxq->rxq_intr_idx),
   4651 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
   4652 			}
   4653 			/* Link status */
   4654 			ivar |= __SHIFTIN((IVAR_VALID_82574|sc->sc_link_intr_idx),
   4655 			    IVAR_OTHER_MASK);
   4656 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
   4657 		} else {
   4658 			/* Interrupt control */
   4659 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR
   4660 			    | GPIE_MULTI_MSIX | GPIE_EIAME
   4661 			    | GPIE_PBA);
   4662 
   4663 			switch (sc->sc_type) {
   4664 			case WM_T_82580:
   4665 			case WM_T_I350:
   4666 			case WM_T_I354:
   4667 			case WM_T_I210:
   4668 			case WM_T_I211:
   4669 				/* TX */
   4670 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4671 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4672 					int qid = txq->txq_id;
   4673 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4674 					ivar &= ~IVAR_TX_MASK_Q(qid);
   4675 					ivar |= __SHIFTIN(
   4676 						(txq->txq_intr_idx | IVAR_VALID),
   4677 						IVAR_TX_MASK_Q(qid));
   4678 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4679 				}
   4680 
   4681 				/* RX */
   4682 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4683 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4684 					int qid = rxq->rxq_id;
   4685 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
   4686 					ivar &= ~IVAR_RX_MASK_Q(qid);
   4687 					ivar |= __SHIFTIN(
   4688 						(rxq->rxq_intr_idx | IVAR_VALID),
   4689 						IVAR_RX_MASK_Q(qid));
   4690 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
   4691 				}
   4692 				break;
   4693 			case WM_T_82576:
   4694 				/* TX */
   4695 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4696 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4697 					int qid = txq->txq_id;
   4698 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4699 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
   4700 					ivar |= __SHIFTIN(
   4701 						(txq->txq_intr_idx | IVAR_VALID),
   4702 						IVAR_TX_MASK_Q_82576(qid));
   4703 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4704 				}
   4705 
   4706 				/* RX */
   4707 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4708 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4709 					int qid = rxq->rxq_id;
   4710 					ivar = CSR_READ(sc, WMREG_IVAR_Q_82576(qid));
   4711 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
   4712 					ivar |= __SHIFTIN(
   4713 						(rxq->rxq_intr_idx | IVAR_VALID),
   4714 						IVAR_RX_MASK_Q_82576(qid));
   4715 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid), ivar);
   4716 				}
   4717 				break;
   4718 			default:
   4719 				break;
   4720 			}
   4721 
   4722 			/* Link status */
   4723 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
   4724 			    IVAR_MISC_OTHER);
   4725 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
   4726 		}
   4727 
   4728 		if (sc->sc_nrxqueues > 1) {
   4729 			wm_init_rss(sc);
   4730 
   4731 			/*
    4732 			 * NOTE: Receive full-packet checksum offload
    4733 			 * is mutually exclusive with multiqueue.  This
    4734 			 * does not affect ordinary TCP/IP checksum
    4735 			 * offload, which still works.
    4736 			 */
   4737 			reg = CSR_READ(sc, WMREG_RXCSUM);
   4738 			reg |= RXCSUM_PCSD;
   4739 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
   4740 		}
   4741 	}
   4742 
   4743 	/* Set up the interrupt registers. */
   4744 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   4745 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
   4746 	    ICR_RXO | ICR_RXT0;
   4747 	if (sc->sc_nintrs > 1) {
   4748 		uint32_t mask;
   4749 		switch (sc->sc_type) {
   4750 		case WM_T_82574:
   4751 			CSR_WRITE(sc, WMREG_EIAC_82574,
   4752 			    WMREG_EIAC_82574_MSIX_MASK);
   4753 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
   4754 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4755 			break;
   4756 		default:
   4757 			if (sc->sc_type == WM_T_82575) {
   4758 				mask = 0;
   4759 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4760 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4761 					mask |= EITR_TX_QUEUE(txq->txq_id);
   4762 				}
   4763 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4764 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4765 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
   4766 				}
   4767 				mask |= EITR_OTHER;
   4768 			} else {
   4769 				mask = 0;
   4770 				for (i = 0; i < sc->sc_ntxqueues; i++) {
   4771 					struct wm_txqueue *txq = &sc->sc_txq[i];
   4772 					mask |= 1 << txq->txq_intr_idx;
   4773 				}
   4774 				for (i = 0; i < sc->sc_nrxqueues; i++) {
   4775 					struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   4776 					mask |= 1 << rxq->rxq_intr_idx;
   4777 				}
   4778 				mask |= 1 << sc->sc_link_intr_idx;
   4779 			}
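         			/*
         			 * Enable the per-vector interrupts through EIMS
         			 * and let the hardware auto-clear (EIAC) and
         			 * auto-mask (EIAM) them on MSI-X delivery; only
         			 * the link-status interrupt stays in the legacy
         			 * IMS mask.
         			 */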
   4780 			CSR_WRITE(sc, WMREG_EIAC, mask);
   4781 			CSR_WRITE(sc, WMREG_EIAM, mask);
   4782 			CSR_WRITE(sc, WMREG_EIMS, mask);
   4783 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
   4784 			break;
   4785 		}
   4786 	} else
   4787 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
   4788 
   4789 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   4790 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   4791 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   4792 		reg = CSR_READ(sc, WMREG_KABGTXD);
   4793 		reg |= KABGTXD_BGSQLBIAS;
   4794 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
   4795 	}
   4796 
   4797 	/* Set up the inter-packet gap. */
   4798 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   4799 
   4800 	if (sc->sc_type >= WM_T_82543) {
   4801 		/*
    4802 		 * XXX 82574 has both ITR and EITR.  Set EITR when we use
    4803 		 * the multiqueue function with MSI-X.
   4804 		 */
   4805 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4806 			int qidx;
   4807 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   4808 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
   4809 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
   4810 				    sc->sc_itr);
   4811 			}
   4812 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4813 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4814 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
   4815 				    sc->sc_itr);
   4816 			}
   4817 			/*
    4818 			 * Link interrupts occur much less frequently than
    4819 			 * TX and RX interrupts, so we don't tune the
    4820 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
    4821 			 * FreeBSD's if_igb does.
   4822 			 */
   4823 		} else
   4824 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
   4825 	}
   4826 
   4827 	/* Set the VLAN ethernetype. */
   4828 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
   4829 
   4830 	/*
   4831 	 * Set up the transmit control register; we start out with
    4832 	 * a collision distance suitable for FDX, but update it when
   4833 	 * we resolve the media type.
   4834 	 */
   4835 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
   4836 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
   4837 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   4838 	if (sc->sc_type >= WM_T_82571)
   4839 		sc->sc_tctl |= TCTL_MULR;
   4840 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   4841 
   4842 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
    4843 		/* Write TDT after TCTL.EN is set.  See the documentation. */
   4844 		CSR_WRITE(sc, WMREG_TDT(0), 0);
   4845 	}
   4846 
   4847 	if (sc->sc_type == WM_T_80003) {
   4848 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
   4849 		reg &= ~TCTL_EXT_GCEX_MASK;
   4850 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
   4851 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
   4852 	}
   4853 
   4854 	/* Set the media. */
   4855 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
   4856 		goto out;
   4857 
   4858 	/* Configure for OS presence */
   4859 	wm_init_manageability(sc);
   4860 
   4861 	/*
   4862 	 * Set up the receive control register; we actually program
   4863 	 * the register when we set the receive filter.  Use multicast
   4864 	 * address offset type 0.
   4865 	 *
   4866 	 * Only the i82544 has the ability to strip the incoming
   4867 	 * CRC, so we don't enable that feature.
   4868 	 */
   4869 	sc->sc_mchash_type = 0;
   4870 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
   4871 	    | RCTL_MO(sc->sc_mchash_type);
   4872 
   4873 	/*
    4874 	 * The I350 has a bug where it always strips the CRC whether
    4875 	 * asked to or not, so request CRC stripping here and cope in wm_rxeof().
   4876 	 */
   4877 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   4878 	    || (sc->sc_type == WM_T_I210))
   4879 		sc->sc_rctl |= RCTL_SECRC;
   4880 
   4881 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
   4882 	    && (ifp->if_mtu > ETHERMTU)) {
   4883 		sc->sc_rctl |= RCTL_LPE;
   4884 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   4885 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
   4886 	}
   4887 
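         	/*
         	 * Tell the chip the size of each receive buffer.  RCTL can
         	 * encode 2k directly; larger cluster sizes need the buffer
         	 * size extension (BSEX) encodings, e.g. MCLBYTES == 4096
         	 * selects RCTL_BSEX | RCTL_BSEX_4k below.
         	 */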
   4888 	if (MCLBYTES == 2048) {
   4889 		sc->sc_rctl |= RCTL_2k;
   4890 	} else {
   4891 		if (sc->sc_type >= WM_T_82543) {
   4892 			switch (MCLBYTES) {
   4893 			case 4096:
   4894 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
   4895 				break;
   4896 			case 8192:
   4897 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
   4898 				break;
   4899 			case 16384:
   4900 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
   4901 				break;
   4902 			default:
   4903 				panic("wm_init: MCLBYTES %d unsupported",
   4904 				    MCLBYTES);
   4905 				break;
   4906 			}
   4907 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
   4908 	}
   4909 
   4910 	/* Set the receive filter. */
   4911 	wm_set_filter(sc);
   4912 
   4913 	/* Enable ECC */
   4914 	switch (sc->sc_type) {
   4915 	case WM_T_82571:
   4916 		reg = CSR_READ(sc, WMREG_PBA_ECC);
   4917 		reg |= PBA_ECC_CORR_EN;
   4918 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
   4919 		break;
   4920 	case WM_T_PCH_LPT:
   4921 		reg = CSR_READ(sc, WMREG_PBECCSTS);
   4922 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
   4923 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
   4924 
   4925 		reg = CSR_READ(sc, WMREG_CTRL);
   4926 		reg |= CTRL_MEHE;
   4927 		CSR_WRITE(sc, WMREG_CTRL, reg);
   4928 		break;
   4929 	default:
   4930 		break;
   4931 	}
   4932 
    4933 	/* On 82575 and later, set RDT only when RX is enabled. */
   4934 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
   4935 		int qidx;
   4936 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
   4937 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
   4938 			for (i = 0; i < WM_NRXDESC; i++) {
   4939 				WM_RX_LOCK(rxq);
   4940 				wm_init_rxdesc(rxq, i);
   4941 				WM_RX_UNLOCK(rxq);
   4943 			}
   4944 		}
   4945 	}
   4946 
   4947 	sc->sc_stopping = false;
   4948 
   4949 	/* Start the one second link check clock. */
   4950 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
   4951 
   4952 	/* ...all done! */
   4953 	ifp->if_flags |= IFF_RUNNING;
   4954 	ifp->if_flags &= ~IFF_OACTIVE;
   4955 
   4956  out:
   4957 	sc->sc_if_flags = ifp->if_flags;
   4958 	if (error)
   4959 		log(LOG_ERR, "%s: interface not running\n",
   4960 		    device_xname(sc->sc_dev));
   4961 	return error;
   4962 }
   4963 
   4964 /*
   4965  * wm_stop:		[ifnet interface function]
   4966  *
   4967  *	Stop transmission on the interface.
   4968  */
   4969 static void
   4970 wm_stop(struct ifnet *ifp, int disable)
   4971 {
   4972 	struct wm_softc *sc = ifp->if_softc;
   4973 
   4974 	WM_CORE_LOCK(sc);
   4975 	wm_stop_locked(ifp, disable);
   4976 	WM_CORE_UNLOCK(sc);
   4977 }
   4978 
   4979 static void
   4980 wm_stop_locked(struct ifnet *ifp, int disable)
   4981 {
   4982 	struct wm_softc *sc = ifp->if_softc;
   4983 	struct wm_txsoft *txs;
   4984 	int i, qidx;
   4985 
   4986 	KASSERT(WM_CORE_LOCKED(sc));
   4987 
   4988 	sc->sc_stopping = true;
   4989 
   4990 	/* Stop the one second clock. */
   4991 	callout_stop(&sc->sc_tick_ch);
   4992 
   4993 	/* Stop the 82547 Tx FIFO stall check timer. */
   4994 	if (sc->sc_type == WM_T_82547)
   4995 		callout_stop(&sc->sc_txfifo_ch);
   4996 
   4997 	if (sc->sc_flags & WM_F_HAS_MII) {
   4998 		/* Down the MII. */
   4999 		mii_down(&sc->sc_mii);
   5000 	} else {
   5001 #if 0
   5002 		/* Should we clear PHY's status properly? */
   5003 		wm_reset(sc);
   5004 #endif
   5005 	}
   5006 
   5007 	/* Stop the transmit and receive processes. */
   5008 	CSR_WRITE(sc, WMREG_TCTL, 0);
   5009 	CSR_WRITE(sc, WMREG_RCTL, 0);
   5010 	sc->sc_rctl &= ~RCTL_EN;
   5011 
   5012 	/*
   5013 	 * Clear the interrupt mask to ensure the device cannot assert its
   5014 	 * interrupt line.
   5015 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
   5016 	 * service any currently pending or shared interrupt.
   5017 	 */
   5018 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
   5019 	sc->sc_icr = 0;
   5020 	if (sc->sc_nintrs > 1) {
   5021 		if (sc->sc_type != WM_T_82574) {
   5022 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
   5023 			CSR_WRITE(sc, WMREG_EIAC, 0);
   5024 		} else
   5025 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
   5026 	}
   5027 
   5028 	/* Release any queued transmit buffers. */
   5029 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
   5030 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
   5031 		WM_TX_LOCK(txq);
   5032 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5033 			txs = &txq->txq_soft[i];
   5034 			if (txs->txs_mbuf != NULL) {
   5035 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   5036 				m_freem(txs->txs_mbuf);
   5037 				txs->txs_mbuf = NULL;
   5038 			}
   5039 		}
   5040 		WM_TX_UNLOCK(txq);
   5041 	}
   5042 
   5043 	/* Mark the interface as down and cancel the watchdog timer. */
   5044 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5045 	ifp->if_timer = 0;
   5046 
   5047 	if (disable) {
   5048 		for (i = 0; i < sc->sc_nrxqueues; i++) {
   5049 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5050 			WM_RX_LOCK(rxq);
   5051 			wm_rxdrain(rxq);
   5052 			WM_RX_UNLOCK(rxq);
   5053 		}
   5054 	}
   5055 
   5056 #if 0 /* notyet */
   5057 	if (sc->sc_type >= WM_T_82544)
   5058 		CSR_WRITE(sc, WMREG_WUC, 0);
   5059 #endif
   5060 }
   5061 
   5062 static void
   5063 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
   5064 {
   5065 	struct mbuf *m;
   5066 	int i;
   5067 
   5068 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
   5069 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
   5070 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
   5071 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
   5072 		    m->m_data, m->m_len, m->m_flags);
   5073 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
   5074 	    i, i == 1 ? "" : "s");
   5075 }
   5076 
   5077 /*
   5078  * wm_82547_txfifo_stall:
   5079  *
   5080  *	Callout used to wait for the 82547 Tx FIFO to drain,
   5081  *	reset the FIFO pointers, and restart packet transmission.
   5082  */
   5083 static void
   5084 wm_82547_txfifo_stall(void *arg)
   5085 {
   5086 	struct wm_softc *sc = arg;
   5087 	struct wm_txqueue *txq = sc->sc_txq;
   5088 #ifndef WM_MPSAFE
   5089 	int s;
   5090 
   5091 	s = splnet();
   5092 #endif
   5093 	WM_TX_LOCK(txq);
   5094 
   5095 	if (sc->sc_stopping)
   5096 		goto out;
   5097 
   5098 	if (txq->txq_fifo_stall) {
   5099 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
   5100 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
   5101 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
   5102 			/*
   5103 			 * Packets have drained.  Stop transmitter, reset
   5104 			 * FIFO pointers, restart transmitter, and kick
   5105 			 * the packet queue.
   5106 			 */
   5107 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
   5108 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
   5109 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
   5110 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
   5111 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
   5112 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
   5113 			CSR_WRITE(sc, WMREG_TCTL, tctl);
   5114 			CSR_WRITE_FLUSH(sc);
   5115 
   5116 			txq->txq_fifo_head = 0;
   5117 			txq->txq_fifo_stall = 0;
   5118 			wm_start_locked(&sc->sc_ethercom.ec_if);
   5119 		} else {
   5120 			/*
   5121 			 * Still waiting for packets to drain; try again in
   5122 			 * another tick.
   5123 			 */
   5124 			callout_schedule(&sc->sc_txfifo_ch, 1);
   5125 		}
   5126 	}
   5127 
   5128 out:
   5129 	WM_TX_UNLOCK(txq);
   5130 #ifndef WM_MPSAFE
   5131 	splx(s);
   5132 #endif
   5133 }
   5134 
   5135 /*
   5136  * wm_82547_txfifo_bugchk:
   5137  *
   5138  *	Check for bug condition in the 82547 Tx FIFO.  We need to
   5139  *	prevent enqueueing a packet that would wrap around the end
    5140  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
   5141  *
   5142  *	We do this by checking the amount of space before the end
   5143  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
   5144  *	the Tx FIFO, wait for all remaining packets to drain, reset
   5145  *	the internal FIFO pointers to the beginning, and restart
   5146  *	transmission on the interface.
   5147  */
   5148 #define	WM_FIFO_HDR		0x10
   5149 #define	WM_82547_PAD_LEN	0x3e0
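         /*
          * Example of the accounting below: a full-sized 1514-byte frame
          * consumes roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO, so
          * with the 0x3e0-byte pad it stalls the FIFO whenever
          * 1536 - 0x3e0 = 544 bytes or fewer remain before the wrap point
          * (and, per the check below, only in half-duplex mode).
          */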
   5150 static int
   5151 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
   5152 {
   5153 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5154 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
   5155 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
   5156 
   5157 	/* Just return if already stalled. */
   5158 	if (txq->txq_fifo_stall)
   5159 		return 1;
   5160 
   5161 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   5162 		/* Stall only occurs in half-duplex mode. */
   5163 		goto send_packet;
   5164 	}
   5165 
   5166 	if (len >= WM_82547_PAD_LEN + space) {
   5167 		txq->txq_fifo_stall = 1;
   5168 		callout_schedule(&sc->sc_txfifo_ch, 1);
   5169 		return 1;
   5170 	}
   5171 
   5172  send_packet:
   5173 	txq->txq_fifo_head += len;
   5174 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
   5175 		txq->txq_fifo_head -= txq->txq_fifo_size;
   5176 
   5177 	return 0;
   5178 }
   5179 
   5180 static int
   5181 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5182 {
   5183 	int error;
   5184 
   5185 	/*
   5186 	 * Allocate the control data structures, and create and load the
   5187 	 * DMA map for it.
   5188 	 *
   5189 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5190 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5191 	 * both sets within the same 4G segment.
   5192 	 */
   5193 	if (sc->sc_type < WM_T_82544) {
   5194 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
   5195 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
   5196 	} else {
   5197 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
   5198 		txq->txq_desc_size = sizeof(txdescs_t);
   5199 	}
   5200 
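         	/*
         	 * The (bus_size_t)0x100000000ULL boundary argument to
         	 * bus_dmamem_alloc() below is what enforces the 4G
         	 * constraint: the allocation is guaranteed not to cross
         	 * a 4GB boundary.
         	 */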
   5201 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size, PAGE_SIZE,
   5202 		    (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg, 1,
   5203 		    &txq->txq_desc_rseg, 0)) != 0) {
   5204 		aprint_error_dev(sc->sc_dev,
   5205 		    "unable to allocate TX control data, error = %d\n",
   5206 		    error);
   5207 		goto fail_0;
   5208 	}
   5209 
   5210 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
   5211 		    txq->txq_desc_rseg, txq->txq_desc_size,
   5212 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
   5213 		aprint_error_dev(sc->sc_dev,
   5214 		    "unable to map TX control data, error = %d\n", error);
   5215 		goto fail_1;
   5216 	}
   5217 
   5218 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
   5219 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
   5220 		aprint_error_dev(sc->sc_dev,
   5221 		    "unable to create TX control data DMA map, error = %d\n",
   5222 		    error);
   5223 		goto fail_2;
   5224 	}
   5225 
   5226 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
   5227 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
   5228 		aprint_error_dev(sc->sc_dev,
   5229 		    "unable to load TX control data DMA map, error = %d\n",
   5230 		    error);
   5231 		goto fail_3;
   5232 	}
   5233 
   5234 	return 0;
   5235 
   5236  fail_3:
   5237 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5238  fail_2:
   5239 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5240 	    txq->txq_desc_size);
   5241  fail_1:
   5242 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5243  fail_0:
   5244 	return error;
   5245 }
   5246 
   5247 static void
   5248 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
   5249 {
   5250 
   5251 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
   5252 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
   5253 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
   5254 	    txq->txq_desc_size);
   5255 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
   5256 }
   5257 
   5258 static int
   5259 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5260 {
   5261 	int error;
   5262 
   5263 	/*
   5264 	 * Allocate the control data structures, and create and load the
   5265 	 * DMA map for it.
   5266 	 *
   5267 	 * NOTE: All Tx descriptors must be in the same 4G segment of
   5268 	 * memory.  So must Rx descriptors.  We simplify by allocating
   5269 	 * both sets within the same 4G segment.
   5270 	 */
   5271 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
   5272 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size, PAGE_SIZE,
   5273 		    (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg, 1,
   5274 		    &rxq->rxq_desc_rseg, 0)) != 0) {
   5275 		aprint_error_dev(sc->sc_dev,
   5276 		    "unable to allocate RX control data, error = %d\n",
   5277 		    error);
   5278 		goto fail_0;
   5279 	}
   5280 
   5281 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
   5282 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
   5283 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
   5284 		aprint_error_dev(sc->sc_dev,
   5285 		    "unable to map RX control data, error = %d\n", error);
   5286 		goto fail_1;
   5287 	}
   5288 
   5289 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
   5290 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
   5291 		aprint_error_dev(sc->sc_dev,
   5292 		    "unable to create RX control data DMA map, error = %d\n",
   5293 		    error);
   5294 		goto fail_2;
   5295 	}
   5296 
   5297 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
   5298 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
   5299 		aprint_error_dev(sc->sc_dev,
   5300 		    "unable to load RX control data DMA map, error = %d\n",
   5301 		    error);
   5302 		goto fail_3;
   5303 	}
   5304 
   5305 	return 0;
   5306 
   5307  fail_3:
   5308 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5309  fail_2:
   5310 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5311 	    rxq->rxq_desc_size);
   5312  fail_1:
   5313 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5314  fail_0:
   5315 	return error;
   5316 }
   5317 
   5318 static void
   5319 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5320 {
   5321 
   5322 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5323 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
   5324 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
   5325 	    rxq->rxq_desc_size);
   5326 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
   5327 }
   5328 
   5329 
   5330 static int
   5331 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5332 {
   5333 	int i, error;
   5334 
   5335 	/* Create the transmit buffer DMA maps. */
   5336 	WM_TXQUEUELEN(txq) =
   5337 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
   5338 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
   5339 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5340 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
   5341 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
   5342 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
   5343 			aprint_error_dev(sc->sc_dev,
   5344 			    "unable to create Tx DMA map %d, error = %d\n",
   5345 			    i, error);
   5346 			goto fail;
   5347 		}
   5348 	}
   5349 
   5350 	return 0;
   5351 
   5352  fail:
   5353 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5354 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5355 			bus_dmamap_destroy(sc->sc_dmat,
   5356 			    txq->txq_soft[i].txs_dmamap);
   5357 	}
   5358 	return error;
   5359 }
   5360 
   5361 static void
   5362 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
   5363 {
   5364 	int i;
   5365 
   5366 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
   5367 		if (txq->txq_soft[i].txs_dmamap != NULL)
   5368 			bus_dmamap_destroy(sc->sc_dmat,
   5369 			    txq->txq_soft[i].txs_dmamap);
   5370 	}
   5371 }
   5372 
   5373 static int
   5374 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5375 {
   5376 	int i, error;
   5377 
   5378 	/* Create the receive buffer DMA maps. */
   5379 	for (i = 0; i < WM_NRXDESC; i++) {
   5380 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
   5381 			    MCLBYTES, 0, 0,
   5382 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
   5383 			aprint_error_dev(sc->sc_dev,
   5384 			    "unable to create Rx DMA map %d error = %d\n",
   5385 			    i, error);
   5386 			goto fail;
   5387 		}
   5388 		rxq->rxq_soft[i].rxs_mbuf = NULL;
   5389 	}
   5390 
   5391 	return 0;
   5392 
   5393  fail:
   5394 	for (i = 0; i < WM_NRXDESC; i++) {
   5395 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5396 			bus_dmamap_destroy(sc->sc_dmat,
   5397 			    rxq->rxq_soft[i].rxs_dmamap);
   5398 	}
   5399 	return error;
   5400 }
   5401 
   5402 static void
   5403 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5404 {
   5405 	int i;
   5406 
   5407 	for (i = 0; i < WM_NRXDESC; i++) {
   5408 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
   5409 			bus_dmamap_destroy(sc->sc_dmat,
   5410 			    rxq->rxq_soft[i].rxs_dmamap);
   5411 	}
   5412 }
   5413 
   5414 /*
    5415  * wm_alloc_txrx_queues:
   5416  *	Allocate {tx,rx}descs and {tx,rx} buffers
   5417  */
   5418 static int
   5419 wm_alloc_txrx_queues(struct wm_softc *sc)
   5420 {
   5421 	int i, error, tx_done, rx_done;
   5422 
   5423 	/*
   5424 	 * For transmission
   5425 	 */
   5426 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
   5427 	    KM_SLEEP);
   5428 	if (sc->sc_txq == NULL) {
   5429 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_txqueue\n");
   5430 		error = ENOMEM;
   5431 		goto fail_0;
   5432 	}
   5433 
   5434 	error = 0;
   5435 	tx_done = 0;
   5436 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5437 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5438 		txq->txq_sc = sc;
   5439 #ifdef WM_MPSAFE
   5440 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5441 #else
   5442 		txq->txq_lock = NULL;
   5443 #endif
   5444 		error = wm_alloc_tx_descs(sc, txq);
   5445 		if (error)
   5446 			break;
   5447 		error = wm_alloc_tx_buffer(sc, txq);
   5448 		if (error) {
   5449 			wm_free_tx_descs(sc, txq);
   5450 			break;
   5451 		}
   5452 		tx_done++;
   5453 	}
   5454 	if (error)
   5455 		goto fail_1;
   5456 
   5457 	/*
    5458 	 * For receive
   5459 	 */
   5460 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
   5461 	    KM_SLEEP);
   5462 	if (sc->sc_rxq == NULL) {
   5463 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_rxqueue\n");
   5464 		error = ENOMEM;
   5465 		goto fail_1;
   5466 	}
   5467 
   5468 	error = 0;
   5469 	rx_done = 0;
   5470 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5471 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5472 		rxq->rxq_sc = sc;
   5473 #ifdef WM_MPSAFE
   5474 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
   5475 #else
   5476 		rxq->rxq_lock = NULL;
   5477 #endif
   5478 		error = wm_alloc_rx_descs(sc, rxq);
   5479 		if (error)
   5480 			break;
   5481 
   5482 		error = wm_alloc_rx_buffer(sc, rxq);
   5483 		if (error) {
   5484 			wm_free_rx_descs(sc, rxq);
   5485 			break;
   5486 		}
   5487 
   5488 		rx_done++;
   5489 	}
   5490 	if (error)
   5491 		goto fail_2;
   5492 
   5493 	return 0;
   5494 
   5495  fail_2:
   5496 	for (i = 0; i < rx_done; i++) {
   5497 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5498 		wm_free_rx_buffer(sc, rxq);
   5499 		wm_free_rx_descs(sc, rxq);
   5500 		if (rxq->rxq_lock)
   5501 			mutex_obj_free(rxq->rxq_lock);
   5502 	}
   5503 	kmem_free(sc->sc_rxq,
   5504 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5505  fail_1:
   5506 	for (i = 0; i < tx_done; i++) {
   5507 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5508 		wm_free_tx_buffer(sc, txq);
   5509 		wm_free_tx_descs(sc, txq);
   5510 		if (txq->txq_lock)
   5511 			mutex_obj_free(txq->txq_lock);
   5512 	}
   5513 	kmem_free(sc->sc_txq,
   5514 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5515  fail_0:
   5516 	return error;
   5517 }
   5518 
   5519 /*
    5520  * wm_free_txrx_queues:
   5521  *	Free {tx,rx}descs and {tx,rx} buffers
   5522  */
   5523 static void
   5524 wm_free_txrx_queues(struct wm_softc *sc)
   5525 {
   5526 	int i;
   5527 
   5528 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5529 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5530 		wm_free_rx_buffer(sc, rxq);
   5531 		wm_free_rx_descs(sc, rxq);
   5532 		if (rxq->rxq_lock)
   5533 			mutex_obj_free(rxq->rxq_lock);
   5534 	}
   5535 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
   5536 
   5537 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5538 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5539 		wm_free_tx_buffer(sc, txq);
   5540 		wm_free_tx_descs(sc, txq);
   5541 		if (txq->txq_lock)
   5542 			mutex_obj_free(txq->txq_lock);
   5543 	}
   5544 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
   5545 }
   5546 
   5547 static void
   5548 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5549 {
   5550 
   5551 	KASSERT(WM_TX_LOCKED(txq));
   5552 
   5553 	/* Initialize the transmit descriptor ring. */
   5554 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
   5555 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
   5556 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   5557 	txq->txq_free = WM_NTXDESC(txq);
   5558 	txq->txq_next = 0;
   5559 }
   5560 
   5561 static void
   5562 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
   5563 {
   5564 
   5565 	KASSERT(WM_TX_LOCKED(txq));
   5566 
   5567 	if (sc->sc_type < WM_T_82543) {
   5568 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
   5569 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
   5570 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
   5571 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
   5572 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
   5573 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
   5574 	} else {
   5575 		int qid = txq->txq_id;
   5576 
   5577 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
   5578 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
   5579 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
   5580 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
   5581 
   5582 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
   5583 			/*
   5584 			 * Don't write TDT before TCTL.EN is set.
    5585 			 * See the documentation.
   5586 			 */
   5587 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
   5588 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
   5589 			    | TXDCTL_WTHRESH(0));
   5590 		else {
   5591 			/* ITR / 4 */
   5592 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
   5593 			if (sc->sc_type >= WM_T_82540) {
    5594 				/* Should be the same as TIDV. */
   5595 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
   5596 			}
   5597 
   5598 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
   5599 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
   5600 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
   5601 		}
   5602 	}
   5603 }
   5604 
   5605 static void
   5606 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
   5607 {
   5608 	int i;
   5609 
   5610 	KASSERT(WM_TX_LOCKED(txq));
   5611 
   5612 	/* Initialize the transmit job descriptors. */
   5613 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
   5614 		txq->txq_soft[i].txs_mbuf = NULL;
   5615 	txq->txq_sfree = WM_TXQUEUELEN(txq);
   5616 	txq->txq_snext = 0;
   5617 	txq->txq_sdirty = 0;
   5618 }
   5619 
   5620 static void
   5621 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
   5622 {
   5623 
   5624 	KASSERT(WM_TX_LOCKED(txq));
   5625 
   5626 	/*
   5627 	 * Set up some register offsets that are different between
   5628 	 * the i82542 and the i82543 and later chips.
   5629 	 */
   5630 	if (sc->sc_type < WM_T_82543) {
   5631 		txq->txq_tdt_reg = WMREG_OLD_TDT;
   5632 	} else {
   5633 		txq->txq_tdt_reg = WMREG_TDT(0);
   5634 	}
   5635 
   5636 	wm_init_tx_descs(sc, txq);
   5637 	wm_init_tx_regs(sc, txq);
   5638 	wm_init_tx_buffer(sc, txq);
   5639 }
   5640 
   5641 static void
   5642 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5643 {
   5644 
   5645 	KASSERT(WM_RX_LOCKED(rxq));
   5646 
   5647 	/*
   5648 	 * Initialize the receive descriptor and receive job
   5649 	 * descriptor rings.
   5650 	 */
   5651 	if (sc->sc_type < WM_T_82543) {
   5652 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
   5653 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
   5654 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
   5655 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
   5656 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
   5657 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
   5658 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
   5659 
   5660 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
   5661 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
   5662 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
   5663 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
   5664 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
   5665 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
   5666 	} else {
   5667 		int qid = rxq->rxq_id;
   5668 
   5669 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
   5670 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
   5671 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
   5672 
   5673 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
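         			/*
         			 * The SRRCTL packet buffer size field is in
         			 * units of (1 << SRRCTL_BSIZEPKT_SHIFT) bytes,
         			 * so MCLBYTES must be a multiple of that unit;
         			 * the check below panics otherwise.
         			 */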
   5674 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
    5675 				panic("%s: MCLBYTES %d unsupported for i82575 or higher\n", __func__, MCLBYTES);
   5676 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
   5677 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
   5678 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
   5679 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
   5680 			    | RXDCTL_WTHRESH(1));
   5681 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5682 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5683 		} else {
   5684 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
   5685 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
   5686 			/* ITR / 4 */
   5687 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
    5688 			/* Must be the same as RDTR. */
   5689 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
   5690 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
   5691 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
   5692 		}
   5693 	}
   5694 }
   5695 
   5696 static int
   5697 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5698 {
   5699 	struct wm_rxsoft *rxs;
   5700 	int error, i;
   5701 
   5702 	KASSERT(WM_RX_LOCKED(rxq));
   5703 
   5704 	for (i = 0; i < WM_NRXDESC; i++) {
   5705 		rxs = &rxq->rxq_soft[i];
   5706 		if (rxs->rxs_mbuf == NULL) {
   5707 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
   5708 				log(LOG_ERR, "%s: unable to allocate or map "
   5709 				    "rx buffer %d, error = %d\n",
   5710 				    device_xname(sc->sc_dev), i, error);
   5711 				/*
   5712 				 * XXX Should attempt to run with fewer receive
   5713 				 * XXX buffers instead of just failing.
   5714 				 */
   5715 				wm_rxdrain(rxq);
   5716 				return ENOMEM;
   5717 			}
   5718 		} else {
   5719 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
   5720 				wm_init_rxdesc(rxq, i);
   5721 			/*
    5722 			 * For 82575 and newer devices, the RX descriptors
   5723 			 * must be initialized after the setting of RCTL.EN in
   5724 			 * wm_set_filter()
   5725 			 */
   5726 		}
   5727 	}
   5728 	rxq->rxq_ptr = 0;
   5729 	rxq->rxq_discard = 0;
   5730 	WM_RXCHAIN_RESET(rxq);
   5731 
   5732 	return 0;
   5733 }
   5734 
   5735 static int
   5736 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
   5737 {
   5738 
   5739 	KASSERT(WM_RX_LOCKED(rxq));
   5740 
   5741 	/*
   5742 	 * Set up some register offsets that are different between
   5743 	 * the i82542 and the i82543 and later chips.
   5744 	 */
   5745 	if (sc->sc_type < WM_T_82543) {
   5746 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
   5747 	} else {
   5748 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
   5749 	}
   5750 
   5751 	wm_init_rx_regs(sc, rxq);
   5752 	return wm_init_rx_buffer(sc, rxq);
   5753 }
   5754 
   5755 /*
    5756  * wm_init_txrx_queues:
   5757  *	Initialize {tx,rx}descs and {tx,rx} buffers
   5758  */
   5759 static int
   5760 wm_init_txrx_queues(struct wm_softc *sc)
   5761 {
   5762 	int i, error;
   5763 
   5764 	for (i = 0; i < sc->sc_ntxqueues; i++) {
   5765 		struct wm_txqueue *txq = &sc->sc_txq[i];
   5766 		WM_TX_LOCK(txq);
   5767 		wm_init_tx_queue(sc, txq);
   5768 		WM_TX_UNLOCK(txq);
   5769 	}
   5770 
   5771 	error = 0;
   5772 	for (i = 0; i < sc->sc_nrxqueues; i++) {
   5773 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
   5774 		WM_RX_LOCK(rxq);
   5775 		error = wm_init_rx_queue(sc, rxq);
   5776 		WM_RX_UNLOCK(rxq);
   5777 		if (error)
   5778 			break;
   5779 	}
   5780 
   5781 	return error;
   5782 }
   5783 
   5784 /*
   5785  * wm_tx_offload:
   5786  *
   5787  *	Set up TCP/IP checksumming parameters for the
   5788  *	specified packet.
   5789  */
   5790 static int
   5791 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
   5792     uint8_t *fieldsp)
   5793 {
   5794 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5795 	struct mbuf *m0 = txs->txs_mbuf;
   5796 	struct livengood_tcpip_ctxdesc *t;
   5797 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
   5798 	uint32_t ipcse;
   5799 	struct ether_header *eh;
   5800 	int offset, iphl;
   5801 	uint8_t fields;
   5802 
   5803 	/*
   5804 	 * XXX It would be nice if the mbuf pkthdr had offset
   5805 	 * fields for the protocol headers.
   5806 	 */
   5807 
   5808 	eh = mtod(m0, struct ether_header *);
   5809 	switch (htons(eh->ether_type)) {
   5810 	case ETHERTYPE_IP:
   5811 	case ETHERTYPE_IPV6:
   5812 		offset = ETHER_HDR_LEN;
   5813 		break;
   5814 
   5815 	case ETHERTYPE_VLAN:
   5816 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   5817 		break;
   5818 
   5819 	default:
   5820 		/*
   5821 		 * Don't support this protocol or encapsulation.
   5822 		 */
   5823 		*fieldsp = 0;
   5824 		*cmdp = 0;
   5825 		return 0;
   5826 	}
   5827 
   5828 	if ((m0->m_pkthdr.csum_flags &
   5829 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
   5830 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   5831 	} else {
   5832 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   5833 	}
   5834 	ipcse = offset + iphl - 1;
   5835 
   5836 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
   5837 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
   5838 	seg = 0;
   5839 	fields = 0;
   5840 
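         	/*
         	 * For TSO the chip replicates and patches the headers of
         	 * each segment, but it expects software to pre-zero the IP
         	 * length field and to seed the TCP checksum field with the
         	 * pseudo-header checksum computed without the length, which
         	 * is what the fix-ups below do.
         	 */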
   5841 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   5842 		int hlen = offset + iphl;
   5843 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   5844 
   5845 		if (__predict_false(m0->m_len <
   5846 				    (hlen + sizeof(struct tcphdr)))) {
   5847 			/*
   5848 			 * TCP/IP headers are not in the first mbuf; we need
   5849 			 * to do this the slow and painful way.  Let's just
   5850 			 * hope this doesn't happen very often.
   5851 			 */
   5852 			struct tcphdr th;
   5853 
   5854 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   5855 
   5856 			m_copydata(m0, hlen, sizeof(th), &th);
   5857 			if (v4) {
   5858 				struct ip ip;
   5859 
   5860 				m_copydata(m0, offset, sizeof(ip), &ip);
   5861 				ip.ip_len = 0;
   5862 				m_copyback(m0,
   5863 				    offset + offsetof(struct ip, ip_len),
   5864 				    sizeof(ip.ip_len), &ip.ip_len);
   5865 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   5866 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   5867 			} else {
   5868 				struct ip6_hdr ip6;
   5869 
   5870 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   5871 				ip6.ip6_plen = 0;
   5872 				m_copyback(m0,
   5873 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   5874 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   5875 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   5876 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   5877 			}
   5878 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   5879 			    sizeof(th.th_sum), &th.th_sum);
   5880 
   5881 			hlen += th.th_off << 2;
   5882 		} else {
   5883 			/*
   5884 			 * TCP/IP headers are in the first mbuf; we can do
   5885 			 * this the easy way.
   5886 			 */
   5887 			struct tcphdr *th;
   5888 
   5889 			if (v4) {
   5890 				struct ip *ip =
   5891 				    (void *)(mtod(m0, char *) + offset);
   5892 				th = (void *)(mtod(m0, char *) + hlen);
   5893 
   5894 				ip->ip_len = 0;
   5895 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   5896 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   5897 			} else {
   5898 				struct ip6_hdr *ip6 =
   5899 				    (void *)(mtod(m0, char *) + offset);
   5900 				th = (void *)(mtod(m0, char *) + hlen);
   5901 
   5902 				ip6->ip6_plen = 0;
   5903 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   5904 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   5905 			}
   5906 			hlen += th->th_off << 2;
   5907 		}
   5908 
   5909 		if (v4) {
   5910 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   5911 			cmdlen |= WTX_TCPIP_CMD_IP;
   5912 		} else {
   5913 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   5914 			ipcse = 0;
   5915 		}
   5916 		cmd |= WTX_TCPIP_CMD_TSE;
   5917 		cmdlen |= WTX_TCPIP_CMD_TSE |
   5918 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
   5919 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
   5920 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
   5921 	}
   5922 
   5923 	/*
   5924 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
   5925 	 * offload feature, if we load the context descriptor, we
   5926 	 * MUST provide valid values for IPCSS and TUCSS fields.
   5927 	 */
   5928 
   5929 	ipcs = WTX_TCPIP_IPCSS(offset) |
   5930 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
   5931 	    WTX_TCPIP_IPCSE(ipcse);
   5932 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
   5933 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
   5934 		fields |= WTX_IXSM;
   5935 	}
   5936 
   5937 	offset += iphl;
   5938 
   5939 	if (m0->m_pkthdr.csum_flags &
   5940 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
   5941 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   5942 		fields |= WTX_TXSM;
   5943 		tucs = WTX_TCPIP_TUCSS(offset) |
   5944 		    WTX_TCPIP_TUCSO(offset +
   5945 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
   5946 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5947 	} else if ((m0->m_pkthdr.csum_flags &
   5948 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
   5949 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   5950 		fields |= WTX_TXSM;
   5951 		tucs = WTX_TCPIP_TUCSS(offset) |
   5952 		    WTX_TCPIP_TUCSO(offset +
   5953 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
   5954 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5955 	} else {
   5956 		/* Just initialize it to a valid TCP context. */
   5957 		tucs = WTX_TCPIP_TUCSS(offset) |
   5958 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
   5959 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
   5960 	}
   5961 
   5962 	/* Fill in the context descriptor. */
   5963 	t = (struct livengood_tcpip_ctxdesc *)
   5964 	    &txq->txq_descs[txq->txq_next];
   5965 	t->tcpip_ipcs = htole32(ipcs);
   5966 	t->tcpip_tucs = htole32(tucs);
   5967 	t->tcpip_cmdlen = htole32(cmdlen);
   5968 	t->tcpip_seg = htole32(seg);
   5969 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   5970 
   5971 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   5972 	txs->txs_ndesc++;
   5973 
   5974 	*cmdp = cmd;
   5975 	*fieldsp = fields;
   5976 
   5977 	return 0;
   5978 }
   5979 
   5980 /*
   5981  * wm_start:		[ifnet interface function]
   5982  *
   5983  *	Start packet transmission on the interface.
   5984  */
   5985 static void
   5986 wm_start(struct ifnet *ifp)
   5987 {
   5988 	struct wm_softc *sc = ifp->if_softc;
   5989 	struct wm_txqueue *txq = &sc->sc_txq[0];
   5990 
   5991 	WM_TX_LOCK(txq);
   5992 	if (!sc->sc_stopping)
   5993 		wm_start_locked(ifp);
   5994 	WM_TX_UNLOCK(txq);
   5995 }
   5996 
   5997 static void
   5998 wm_start_locked(struct ifnet *ifp)
   5999 {
   6000 	struct wm_softc *sc = ifp->if_softc;
   6001 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6002 	struct mbuf *m0;
   6003 	struct m_tag *mtag;
   6004 	struct wm_txsoft *txs;
   6005 	bus_dmamap_t dmamap;
   6006 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
   6007 	bus_addr_t curaddr;
   6008 	bus_size_t seglen, curlen;
   6009 	uint32_t cksumcmd;
   6010 	uint8_t cksumfields;
   6011 
   6012 	KASSERT(WM_TX_LOCKED(txq));
   6013 
   6014 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6015 		return;
   6016 
   6017 	/* Remember the previous number of free descriptors. */
   6018 	ofree = txq->txq_free;
   6019 
   6020 	/*
   6021 	 * Loop through the send queue, setting up transmit descriptors
   6022 	 * until we drain the queue, or use up all available transmit
   6023 	 * descriptors.
   6024 	 */
   6025 	for (;;) {
   6026 		m0 = NULL;
   6027 
   6028 		/* Get a work queue entry. */
   6029 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6030 			wm_txeof(sc);
   6031 			if (txq->txq_sfree == 0) {
   6032 				DPRINTF(WM_DEBUG_TX,
   6033 				    ("%s: TX: no free job descriptors\n",
   6034 					device_xname(sc->sc_dev)));
   6035 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6036 				break;
   6037 			}
   6038 		}
   6039 
   6040 		/* Grab a packet off the queue. */
   6041 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6042 		if (m0 == NULL)
   6043 			break;
   6044 
   6045 		DPRINTF(WM_DEBUG_TX,
   6046 		    ("%s: TX: have packet to transmit: %p\n",
   6047 		    device_xname(sc->sc_dev), m0));
   6048 
   6049 		txs = &txq->txq_soft[txq->txq_snext];
   6050 		dmamap = txs->txs_dmamap;
   6051 
   6052 		use_tso = (m0->m_pkthdr.csum_flags &
   6053 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
   6054 
   6055 		/*
   6056 		 * So says the Linux driver:
   6057 		 * The controller does a simple calculation to make sure
   6058 		 * there is enough room in the FIFO before initiating the
   6059 		 * DMA for each buffer.  The calc is:
   6060 		 *	4 = ceil(buffer len / MSS)
   6061 		 * To make sure we don't overrun the FIFO, adjust the max
   6062 		 * buffer len if the MSS drops.
   6063 		 */
   6064 		dmamap->dm_maxsegsz =
   6065 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
   6066 		    ? m0->m_pkthdr.segsz << 2
   6067 		    : WTX_MAX_LEN;
   6068 
   6069 		/*
   6070 		 * Load the DMA map.  If this fails, the packet either
   6071 		 * didn't fit in the allotted number of segments, or we
   6072 		 * were short on resources.  For the too-many-segments
   6073 		 * case, we simply report an error and drop the packet,
   6074 		 * since we can't sanely copy a jumbo packet to a single
   6075 		 * buffer.
   6076 		 */
   6077 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6078 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6079 		if (error) {
   6080 			if (error == EFBIG) {
   6081 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6082 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6083 				    "DMA segments, dropping...\n",
   6084 				    device_xname(sc->sc_dev));
   6085 				wm_dump_mbuf_chain(sc, m0);
   6086 				m_freem(m0);
   6087 				continue;
   6088 			}
   6089 			/*  Short on resources, just stop for now. */
   6090 			DPRINTF(WM_DEBUG_TX,
   6091 			    ("%s: TX: dmamap load failed: %d\n",
   6092 			    device_xname(sc->sc_dev), error));
   6093 			break;
   6094 		}
   6095 
   6096 		segs_needed = dmamap->dm_nsegs;
   6097 		if (use_tso) {
   6098 			/* For sentinel descriptor; see below. */
   6099 			segs_needed++;
   6100 		}
   6101 
   6102 		/*
   6103 		 * Ensure we have enough descriptors free to describe
   6104 		 * the packet.  Note, we always reserve one descriptor
   6105 		 * at the end of the ring due to the semantics of the
   6106 		 * TDT register, plus one more in the event we need
   6107 		 * to load offload context.
   6108 		 */
   6109 		if (segs_needed > txq->txq_free - 2) {
   6110 			/*
   6111 			 * Not enough free descriptors to transmit this
   6112 			 * packet.  We haven't committed anything yet,
   6113 			 * so just unload the DMA map, put the packet
    6114 			 * back on the queue, and punt.  Notify the upper
   6115 			 * layer that there are no more slots left.
   6116 			 */
   6117 			DPRINTF(WM_DEBUG_TX,
   6118 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6119 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6120 			    segs_needed, txq->txq_free - 1));
   6121 			ifp->if_flags |= IFF_OACTIVE;
   6122 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6123 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6124 			break;
   6125 		}
   6126 
   6127 		/*
   6128 		 * Check for 82547 Tx FIFO bug.  We need to do this
   6129 		 * once we know we can transmit the packet, since we
   6130 		 * do some internal FIFO space accounting here.
   6131 		 */
   6132 		if (sc->sc_type == WM_T_82547 &&
   6133 		    wm_82547_txfifo_bugchk(sc, m0)) {
   6134 			DPRINTF(WM_DEBUG_TX,
   6135 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
   6136 			    device_xname(sc->sc_dev)));
   6137 			ifp->if_flags |= IFF_OACTIVE;
   6138 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6139 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
   6140 			break;
   6141 		}
   6142 
   6143 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6144 
   6145 		DPRINTF(WM_DEBUG_TX,
   6146 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6147 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6148 
   6149 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6150 
   6151 		/*
   6152 		 * Store a pointer to the packet so that we can free it
   6153 		 * later.
   6154 		 *
   6155 		 * Initially, we consider the number of descriptors the
    6156 		 * packet uses to be the number of DMA segments.  This may be
   6157 		 * incremented by 1 if we do checksum offload (a descriptor
   6158 		 * is used to set the checksum context).
   6159 		 */
   6160 		txs->txs_mbuf = m0;
   6161 		txs->txs_firstdesc = txq->txq_next;
   6162 		txs->txs_ndesc = segs_needed;
   6163 
   6164 		/* Set up offload parameters for this packet. */
   6165 		if (m0->m_pkthdr.csum_flags &
   6166 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6167 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6168 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6169 			if (wm_tx_offload(sc, txs, &cksumcmd,
   6170 					  &cksumfields) != 0) {
   6171 				/* Error message already displayed. */
   6172 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6173 				continue;
   6174 			}
   6175 		} else {
   6176 			cksumcmd = 0;
   6177 			cksumfields = 0;
   6178 		}
   6179 
   6180 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
   6181 
   6182 		/* Sync the DMA map. */
   6183 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6184 		    BUS_DMASYNC_PREWRITE);
   6185 
   6186 		/* Initialize the transmit descriptor. */
   6187 		for (nexttx = txq->txq_next, seg = 0;
   6188 		     seg < dmamap->dm_nsegs; seg++) {
   6189 			for (seglen = dmamap->dm_segs[seg].ds_len,
   6190 			     curaddr = dmamap->dm_segs[seg].ds_addr;
   6191 			     seglen != 0;
   6192 			     curaddr += curlen, seglen -= curlen,
   6193 			     nexttx = WM_NEXTTX(txq, nexttx)) {
   6194 				curlen = seglen;
   6195 
   6196 				/*
   6197 				 * So says the Linux driver:
   6198 				 * Work around for premature descriptor
   6199 				 * write-backs in TSO mode.  Append a
   6200 				 * 4-byte sentinel descriptor.
   6201 				 */
   6202 				if (use_tso &&
   6203 				    seg == dmamap->dm_nsegs - 1 &&
   6204 				    curlen > 8)
   6205 					curlen -= 4;
   6206 
   6207 				wm_set_dma_addr(
   6208 				    &txq->txq_descs[nexttx].wtx_addr,
   6209 				    curaddr);
   6210 				txq->txq_descs[nexttx].wtx_cmdlen =
   6211 				    htole32(cksumcmd | curlen);
   6212 				txq->txq_descs[nexttx].wtx_fields.wtxu_status =
   6213 				    0;
   6214 				txq->txq_descs[nexttx].wtx_fields.wtxu_options =
   6215 				    cksumfields;
   6216 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6217 				lasttx = nexttx;
   6218 
   6219 				DPRINTF(WM_DEBUG_TX,
   6220 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
   6221 				     "len %#04zx\n",
   6222 				    device_xname(sc->sc_dev), nexttx,
   6223 				    (uint64_t)curaddr, curlen));
   6224 			}
   6225 		}
   6226 
   6227 		KASSERT(lasttx != -1);
   6228 
   6229 		/*
   6230 		 * Set up the command byte on the last descriptor of
   6231 		 * the packet.  If we're in the interrupt delay window,
   6232 		 * delay the interrupt.
   6233 		 */
   6234 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6235 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6236 
   6237 		/*
   6238 		 * If VLANs are enabled and the packet has a VLAN tag, set
   6239 		 * up the descriptor to encapsulate the packet for us.
   6240 		 *
   6241 		 * This is only valid on the last descriptor of the packet.
   6242 		 */
   6243 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6244 			txq->txq_descs[lasttx].wtx_cmdlen |=
   6245 			    htole32(WTX_CMD_VLE);
   6246 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
   6247 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6248 		}
   6249 
   6250 		txs->txs_lastdesc = lasttx;
   6251 
   6252 		DPRINTF(WM_DEBUG_TX,
   6253 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6254 		    device_xname(sc->sc_dev),
   6255 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6256 
   6257 		/* Sync the descriptors we're using. */
   6258 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6259 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6260 
   6261 		/* Give the packet to the chip. */
   6262 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6263 
   6264 		DPRINTF(WM_DEBUG_TX,
   6265 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6266 
   6267 		DPRINTF(WM_DEBUG_TX,
   6268 		    ("%s: TX: finished transmitting packet, job %d\n",
   6269 		    device_xname(sc->sc_dev), txq->txq_snext));
   6270 
   6271 		/* Advance the tx pointer. */
   6272 		txq->txq_free -= txs->txs_ndesc;
   6273 		txq->txq_next = nexttx;
   6274 
   6275 		txq->txq_sfree--;
   6276 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6277 
   6278 		/* Pass the packet to any BPF listeners. */
   6279 		bpf_mtap(ifp, m0);
   6280 	}
   6281 
   6282 	if (m0 != NULL) {
   6283 		ifp->if_flags |= IFF_OACTIVE;
   6284 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6285 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6286 		m_freem(m0);
   6287 	}
   6288 
   6289 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6290 		/* No more slots; notify upper layer. */
   6291 		ifp->if_flags |= IFF_OACTIVE;
   6292 	}
   6293 
   6294 	if (txq->txq_free != ofree) {
   6295 		/* Set a watchdog timer in case the chip flakes out. */
   6296 		ifp->if_timer = 5;
   6297 	}
   6298 }
   6299 
   6300 /*
   6301  * wm_nq_tx_offload:
   6302  *
   6303  *	Set up TCP/IP checksumming parameters for the
   6304  *	specified packet, for NEWQUEUE devices
   6305  */
   6306 static int
   6307 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
   6308     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
   6309 {
   6310 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6311 	struct mbuf *m0 = txs->txs_mbuf;
   6312 	struct m_tag *mtag;
   6313 	uint32_t vl_len, mssidx, cmdc;
   6314 	struct ether_header *eh;
   6315 	int offset, iphl;
   6316 
   6317 	/*
   6318 	 * XXX It would be nice if the mbuf pkthdr had offset
   6319 	 * fields for the protocol headers.
   6320 	 */
   6321 	*cmdlenp = 0;
   6322 	*fieldsp = 0;
   6323 
   6324 	eh = mtod(m0, struct ether_header *);
   6325 	switch (htons(eh->ether_type)) {
   6326 	case ETHERTYPE_IP:
   6327 	case ETHERTYPE_IPV6:
   6328 		offset = ETHER_HDR_LEN;
   6329 		break;
   6330 
   6331 	case ETHERTYPE_VLAN:
   6332 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
   6333 		break;
   6334 
   6335 	default:
   6336 		/* Don't support this protocol or encapsulation. */
   6337 		*do_csum = false;
   6338 		return 0;
   6339 	}
   6340 	*do_csum = true;
   6341 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
   6342 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
   6343 
   6344 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
   6345 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
   6346 
   6347 	if ((m0->m_pkthdr.csum_flags &
   6348 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
   6349 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
   6350 	} else {
   6351 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
   6352 	}
   6353 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
   6354 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
   6355 
   6356 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
   6357 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
   6358 		     << NQTXC_VLLEN_VLAN_SHIFT);
   6359 		*cmdlenp |= NQTX_CMD_VLE;
   6360 	}
   6361 
   6362 	mssidx = 0;
   6363 
   6364 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
   6365 		int hlen = offset + iphl;
   6366 		int tcp_hlen;
   6367 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
   6368 
   6369 		if (__predict_false(m0->m_len <
   6370 				    (hlen + sizeof(struct tcphdr)))) {
   6371 			/*
   6372 			 * TCP/IP headers are not in the first mbuf; we need
   6373 			 * to do this the slow and painful way.  Let's just
   6374 			 * hope this doesn't happen very often.
   6375 			 */
   6376 			struct tcphdr th;
   6377 
   6378 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
   6379 
   6380 			m_copydata(m0, hlen, sizeof(th), &th);
   6381 			if (v4) {
   6382 				struct ip ip;
   6383 
   6384 				m_copydata(m0, offset, sizeof(ip), &ip);
   6385 				ip.ip_len = 0;
   6386 				m_copyback(m0,
   6387 				    offset + offsetof(struct ip, ip_len),
   6388 				    sizeof(ip.ip_len), &ip.ip_len);
   6389 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
   6390 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
   6391 			} else {
   6392 				struct ip6_hdr ip6;
   6393 
   6394 				m_copydata(m0, offset, sizeof(ip6), &ip6);
   6395 				ip6.ip6_plen = 0;
   6396 				m_copyback(m0,
   6397 				    offset + offsetof(struct ip6_hdr, ip6_plen),
   6398 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
   6399 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
   6400 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
   6401 			}
   6402 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
   6403 			    sizeof(th.th_sum), &th.th_sum);
   6404 
   6405 			tcp_hlen = th.th_off << 2;
   6406 		} else {
   6407 			/*
   6408 			 * TCP/IP headers are in the first mbuf; we can do
   6409 			 * this the easy way.
   6410 			 */
   6411 			struct tcphdr *th;
   6412 
   6413 			if (v4) {
   6414 				struct ip *ip =
   6415 				    (void *)(mtod(m0, char *) + offset);
   6416 				th = (void *)(mtod(m0, char *) + hlen);
   6417 
   6418 				ip->ip_len = 0;
   6419 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
   6420 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
   6421 			} else {
   6422 				struct ip6_hdr *ip6 =
   6423 				    (void *)(mtod(m0, char *) + offset);
   6424 				th = (void *)(mtod(m0, char *) + hlen);
   6425 
   6426 				ip6->ip6_plen = 0;
   6427 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
   6428 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
   6429 			}
   6430 			tcp_hlen = th->th_off << 2;
   6431 		}
   6432 		hlen += tcp_hlen;
   6433 		*cmdlenp |= NQTX_CMD_TSE;
   6434 
   6435 		if (v4) {
   6436 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
   6437 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
   6438 		} else {
   6439 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
   6440 			*fieldsp |= NQTXD_FIELDS_TUXSM;
   6441 		}
   6442 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
   6443 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6444 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
   6445 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
   6446 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
   6447 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
   6448 	} else {
   6449 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
   6450 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
   6451 	}
   6452 
   6453 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
   6454 		*fieldsp |= NQTXD_FIELDS_IXSM;
   6455 		cmdc |= NQTXC_CMD_IP4;
   6456 	}
   6457 
   6458 	if (m0->m_pkthdr.csum_flags &
   6459 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6460 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
   6461 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
   6462 			cmdc |= NQTXC_CMD_TCP;
   6463 		} else {
   6464 			cmdc |= NQTXC_CMD_UDP;
   6465 		}
   6466 		cmdc |= NQTXC_CMD_IP4;
   6467 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6468 	}
   6469 	if (m0->m_pkthdr.csum_flags &
   6470 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6471 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
   6472 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
   6473 			cmdc |= NQTXC_CMD_TCP;
   6474 		} else {
   6475 			cmdc |= NQTXC_CMD_UDP;
   6476 		}
   6477 		cmdc |= NQTXC_CMD_IP6;
   6478 		*fieldsp |= NQTXD_FIELDS_TUXSM;
   6479 	}
   6480 
   6481 	/* Fill in the context descriptor. */
   6482 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
   6483 	    htole32(vl_len);
   6484 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
   6485 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
   6486 	    htole32(cmdc);
   6487 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
   6488 	    htole32(mssidx);
   6489 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
   6490 	DPRINTF(WM_DEBUG_TX,
   6491 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
   6492 	    txq->txq_next, 0, vl_len));
   6493 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
   6494 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
   6495 	txs->txs_ndesc++;
   6496 	return 0;
   6497 }
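/*
 * Illustrative sketch (not compiled in): how the context-descriptor
 * fields built by wm_nq_tx_offload() come together for the simple case
 * of an untagged IPv4/TCP frame with a 20-byte IP header and plain
 * checksum offload (no TSO).  The values follow directly from the
 * code above.
 */
#if 0	/* example only */
	uint32_t ex_vl_len, ex_cmdc;

	/* MACLEN = 14 (no VLAN encap), IPLEN = 20, VLAN field zero. */
	ex_vl_len  = ETHER_HDR_LEN << NQTXC_VLLEN_MACLEN_SHIFT;
	ex_vl_len |= 20 << NQTXC_VLLEN_IPLEN_SHIFT;

	/* Context descriptor command: extended TCP/IPv4 checksum. */
	ex_cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT | NQTXC_CMD_IP4 | NQTXC_CMD_TCP;

	/* mssidx stays zero in the non-TSO case, as above. */
#endif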
   6498 
   6499 /*
   6500  * wm_nq_start:		[ifnet interface function]
   6501  *
   6502  *	Start packet transmission on the interface for NEWQUEUE devices
   6503  */
   6504 static void
   6505 wm_nq_start(struct ifnet *ifp)
   6506 {
   6507 	struct wm_softc *sc = ifp->if_softc;
   6508 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6509 
   6510 	WM_TX_LOCK(txq);
   6511 	if (!sc->sc_stopping)
   6512 		wm_nq_start_locked(ifp);
   6513 	WM_TX_UNLOCK(txq);
   6514 }
   6515 
   6516 static void
   6517 wm_nq_start_locked(struct ifnet *ifp)
   6518 {
   6519 	struct wm_softc *sc = ifp->if_softc;
   6520 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6521 	struct mbuf *m0;
   6522 	struct m_tag *mtag;
   6523 	struct wm_txsoft *txs;
   6524 	bus_dmamap_t dmamap;
   6525 	int error, nexttx, lasttx = -1, seg, segs_needed;
   6526 	bool do_csum, sent;
   6527 
   6528 	KASSERT(WM_TX_LOCKED(txq));
   6529 
   6530 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
   6531 		return;
   6532 
   6533 	sent = false;
   6534 
   6535 	/*
   6536 	 * Loop through the send queue, setting up transmit descriptors
   6537 	 * until we drain the queue, or use up all available transmit
   6538 	 * descriptors.
   6539 	 */
   6540 	for (;;) {
   6541 		m0 = NULL;
   6542 
   6543 		/* Get a work queue entry. */
   6544 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
   6545 			wm_txeof(sc);
   6546 			if (txq->txq_sfree == 0) {
   6547 				DPRINTF(WM_DEBUG_TX,
   6548 				    ("%s: TX: no free job descriptors\n",
   6549 					device_xname(sc->sc_dev)));
   6550 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
   6551 				break;
   6552 			}
   6553 		}
   6554 
   6555 		/* Grab a packet off the queue. */
   6556 		IFQ_DEQUEUE(&ifp->if_snd, m0);
   6557 		if (m0 == NULL)
   6558 			break;
   6559 
   6560 		DPRINTF(WM_DEBUG_TX,
   6561 		    ("%s: TX: have packet to transmit: %p\n",
   6562 		    device_xname(sc->sc_dev), m0));
   6563 
   6564 		txs = &txq->txq_soft[txq->txq_snext];
   6565 		dmamap = txs->txs_dmamap;
   6566 
   6567 		/*
   6568 		 * Load the DMA map.  If this fails, the packet either
   6569 		 * didn't fit in the allotted number of segments, or we
   6570 		 * were short on resources.  For the too-many-segments
   6571 		 * case, we simply report an error and drop the packet,
   6572 		 * since we can't sanely copy a jumbo packet to a single
   6573 		 * buffer.
   6574 		 */
   6575 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
   6576 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
   6577 		if (error) {
   6578 			if (error == EFBIG) {
   6579 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6580 				log(LOG_ERR, "%s: Tx packet consumes too many "
   6581 				    "DMA segments, dropping...\n",
   6582 				    device_xname(sc->sc_dev));
   6583 				wm_dump_mbuf_chain(sc, m0);
   6584 				m_freem(m0);
   6585 				continue;
   6586 			}
   6587 			/* Short on resources, just stop for now. */
   6588 			DPRINTF(WM_DEBUG_TX,
   6589 			    ("%s: TX: dmamap load failed: %d\n",
   6590 			    device_xname(sc->sc_dev), error));
   6591 			break;
   6592 		}
   6593 
   6594 		segs_needed = dmamap->dm_nsegs;
   6595 
   6596 		/*
   6597 		 * Ensure we have enough descriptors free to describe
   6598 		 * the packet.  Note, we always reserve one descriptor
   6599 		 * at the end of the ring due to the semantics of the
   6600 		 * TDT register, plus one more in the event we need
   6601 		 * to load offload context.
   6602 		 */
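		/*
		 * (If the tail were allowed to advance all the way to the
		 * head, a completely full ring would look the same as a
		 * completely empty one; leaving one descriptor unused
		 * avoids that ambiguity.)
		 */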
   6603 		if (segs_needed > txq->txq_free - 2) {
   6604 			/*
   6605 			 * Not enough free descriptors to transmit this
   6606 			 * packet.  We haven't committed anything yet,
   6607 			 * so just unload the DMA map, put the packet
    6608 			 * back on the queue, and punt.  Notify the upper
   6609 			 * layer that there are no more slots left.
   6610 			 */
   6611 			DPRINTF(WM_DEBUG_TX,
   6612 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
   6613 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
   6614 			    segs_needed, txq->txq_free - 1));
   6615 			ifp->if_flags |= IFF_OACTIVE;
   6616 			bus_dmamap_unload(sc->sc_dmat, dmamap);
   6617 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
   6618 			break;
   6619 		}
   6620 
   6621 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
   6622 
   6623 		DPRINTF(WM_DEBUG_TX,
   6624 		    ("%s: TX: packet has %d (%d) DMA segments\n",
   6625 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
   6626 
   6627 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
   6628 
   6629 		/*
   6630 		 * Store a pointer to the packet so that we can free it
   6631 		 * later.
   6632 		 *
    6633 		 * Initially, we consider the number of descriptors the
    6634 		 * packet uses to be the number of DMA segments.  This may be
   6635 		 * incremented by 1 if we do checksum offload (a descriptor
   6636 		 * is used to set the checksum context).
   6637 		 */
   6638 		txs->txs_mbuf = m0;
   6639 		txs->txs_firstdesc = txq->txq_next;
   6640 		txs->txs_ndesc = segs_needed;
   6641 
   6642 		/* Set up offload parameters for this packet. */
   6643 		uint32_t cmdlen, fields, dcmdlen;
   6644 		if (m0->m_pkthdr.csum_flags &
   6645 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
   6646 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
   6647 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
   6648 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
   6649 			    &do_csum) != 0) {
   6650 				/* Error message already displayed. */
   6651 				bus_dmamap_unload(sc->sc_dmat, dmamap);
   6652 				continue;
   6653 			}
   6654 		} else {
   6655 			do_csum = false;
   6656 			cmdlen = 0;
   6657 			fields = 0;
   6658 		}
   6659 
   6660 		/* Sync the DMA map. */
   6661 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
   6662 		    BUS_DMASYNC_PREWRITE);
   6663 
   6664 		/* Initialize the first transmit descriptor. */
   6665 		nexttx = txq->txq_next;
   6666 		if (!do_csum) {
   6667 			/* setup a legacy descriptor */
   6668 			wm_set_dma_addr(
   6669 			    &txq->txq_descs[nexttx].wtx_addr,
   6670 			    dmamap->dm_segs[0].ds_addr);
   6671 			txq->txq_descs[nexttx].wtx_cmdlen =
   6672 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
   6673 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
   6674 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
   6675 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
   6676 			    NULL) {
   6677 				txq->txq_descs[nexttx].wtx_cmdlen |=
   6678 				    htole32(WTX_CMD_VLE);
   6679 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
   6680 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
   6681 			} else {
    6682 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
   6683 			}
   6684 			dcmdlen = 0;
   6685 		} else {
   6686 			/* setup an advanced data descriptor */
   6687 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6688 			    htole64(dmamap->dm_segs[0].ds_addr);
   6689 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
   6690 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6691 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
   6692 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
   6693 			    htole32(fields);
   6694 			DPRINTF(WM_DEBUG_TX,
   6695 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
   6696 			    device_xname(sc->sc_dev), nexttx,
   6697 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
   6698 			DPRINTF(WM_DEBUG_TX,
   6699 			    ("\t 0x%08x%08x\n", fields,
   6700 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
   6701 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
   6702 		}
   6703 
   6704 		lasttx = nexttx;
   6705 		nexttx = WM_NEXTTX(txq, nexttx);
   6706 		/*
    6707 		 * Fill in the next descriptors.  Legacy or advanced format
    6708 		 * is the same here.
   6709 		 */
   6710 		for (seg = 1; seg < dmamap->dm_nsegs;
   6711 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
   6712 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
   6713 			    htole64(dmamap->dm_segs[seg].ds_addr);
   6714 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
   6715 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
   6716 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
   6717 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
   6718 			lasttx = nexttx;
   6719 
   6720 			DPRINTF(WM_DEBUG_TX,
   6721 			    ("%s: TX: desc %d: %#" PRIx64 ", "
   6722 			     "len %#04zx\n",
   6723 			    device_xname(sc->sc_dev), nexttx,
   6724 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
   6725 			    dmamap->dm_segs[seg].ds_len));
   6726 		}
   6727 
   6728 		KASSERT(lasttx != -1);
   6729 
   6730 		/*
   6731 		 * Set up the command byte on the last descriptor of
   6732 		 * the packet.  If we're in the interrupt delay window,
   6733 		 * delay the interrupt.
   6734 		 */
   6735 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
   6736 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
   6737 		txq->txq_descs[lasttx].wtx_cmdlen |=
   6738 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
   6739 
   6740 		txs->txs_lastdesc = lasttx;
   6741 
   6742 		DPRINTF(WM_DEBUG_TX,
   6743 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
   6744 		    device_xname(sc->sc_dev),
   6745 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
   6746 
   6747 		/* Sync the descriptors we're using. */
   6748 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
   6749 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
   6750 
   6751 		/* Give the packet to the chip. */
   6752 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
   6753 		sent = true;
   6754 
   6755 		DPRINTF(WM_DEBUG_TX,
   6756 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
   6757 
   6758 		DPRINTF(WM_DEBUG_TX,
   6759 		    ("%s: TX: finished transmitting packet, job %d\n",
   6760 		    device_xname(sc->sc_dev), txq->txq_snext));
   6761 
   6762 		/* Advance the tx pointer. */
   6763 		txq->txq_free -= txs->txs_ndesc;
   6764 		txq->txq_next = nexttx;
   6765 
   6766 		txq->txq_sfree--;
   6767 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
   6768 
   6769 		/* Pass the packet to any BPF listeners. */
   6770 		bpf_mtap(ifp, m0);
   6771 	}
   6772 
   6773 	if (m0 != NULL) {
   6774 		ifp->if_flags |= IFF_OACTIVE;
   6775 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
   6776 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n", __func__));
   6777 		m_freem(m0);
   6778 	}
   6779 
   6780 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
   6781 		/* No more slots; notify upper layer. */
   6782 		ifp->if_flags |= IFF_OACTIVE;
   6783 	}
   6784 
   6785 	if (sent) {
   6786 		/* Set a watchdog timer in case the chip flakes out. */
   6787 		ifp->if_timer = 5;
   6788 	}
   6789 }
   6790 
   6791 /* Interrupt */
   6792 
   6793 /*
   6794  * wm_txeof:
   6795  *
   6796  *	Helper; handle transmit interrupts.
   6797  */
   6798 static int
   6799 wm_txeof(struct wm_softc *sc)
   6800 {
   6801 	struct wm_txqueue *txq = &sc->sc_txq[0];
   6802 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6803 	struct wm_txsoft *txs;
   6804 	bool processed = false;
   6805 	int count = 0;
   6806 	int i;
   6807 	uint8_t status;
   6808 
   6809 	if (sc->sc_stopping)
   6810 		return 0;
   6811 
   6812 	ifp->if_flags &= ~IFF_OACTIVE;
   6813 
   6814 	/*
   6815 	 * Go through the Tx list and free mbufs for those
   6816 	 * frames which have been transmitted.
   6817 	 */
   6818 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
   6819 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
   6820 		txs = &txq->txq_soft[i];
   6821 
   6822 		DPRINTF(WM_DEBUG_TX,
   6823 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
   6824 
   6825 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
   6826 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6827 
   6828 		status =
   6829 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
   6830 		if ((status & WTX_ST_DD) == 0) {
   6831 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
   6832 			    BUS_DMASYNC_PREREAD);
   6833 			break;
   6834 		}
   6835 
   6836 		processed = true;
   6837 		count++;
   6838 		DPRINTF(WM_DEBUG_TX,
   6839 		    ("%s: TX: job %d done: descs %d..%d\n",
   6840 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
   6841 		    txs->txs_lastdesc));
   6842 
   6843 		/*
   6844 		 * XXX We should probably be using the statistics
   6845 		 * XXX registers, but I don't know if they exist
   6846 		 * XXX on chips before the i82544.
   6847 		 */
   6848 
   6849 #ifdef WM_EVENT_COUNTERS
   6850 		if (status & WTX_ST_TU)
   6851 			WM_EVCNT_INCR(&sc->sc_ev_tu);
   6852 #endif /* WM_EVENT_COUNTERS */
   6853 
   6854 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
   6855 			ifp->if_oerrors++;
   6856 			if (status & WTX_ST_LC)
   6857 				log(LOG_WARNING, "%s: late collision\n",
   6858 				    device_xname(sc->sc_dev));
   6859 			else if (status & WTX_ST_EC) {
   6860 				ifp->if_collisions += 16;
   6861 				log(LOG_WARNING, "%s: excessive collisions\n",
   6862 				    device_xname(sc->sc_dev));
   6863 			}
   6864 		} else
   6865 			ifp->if_opackets++;
   6866 
   6867 		txq->txq_free += txs->txs_ndesc;
   6868 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
   6869 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   6870 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
   6871 		m_freem(txs->txs_mbuf);
   6872 		txs->txs_mbuf = NULL;
   6873 	}
   6874 
   6875 	/* Update the dirty transmit buffer pointer. */
   6876 	txq->txq_sdirty = i;
   6877 	DPRINTF(WM_DEBUG_TX,
   6878 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
   6879 
   6880 	if (count != 0)
   6881 		rnd_add_uint32(&sc->rnd_source, count);
   6882 
   6883 	/*
   6884 	 * If there are no more pending transmissions, cancel the watchdog
   6885 	 * timer.
   6886 	 */
   6887 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
   6888 		ifp->if_timer = 0;
   6889 
   6890 	return processed;
   6891 }
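/*
 * Note on the Tx job bookkeeping shared by wm_txeof() and the start
 * routines above:
 *
 *	txq_snext  - next wm_txsoft slot the start routine will fill
 *	txq_sdirty - oldest in-flight job, where wm_txeof() resumes
 *	txq_sfree  - number of free job slots
 *
 * so the in-flight region is [txq_sdirty, txq_snext) and holds
 * WM_TXQUEUELEN(txq) - txq_sfree entries.
 */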
   6892 
   6893 /*
   6894  * wm_rxeof:
   6895  *
   6896  *	Helper; handle receive interrupts.
   6897  */
   6898 static void
   6899 wm_rxeof(struct wm_rxqueue *rxq)
   6900 {
   6901 	struct wm_softc *sc = rxq->rxq_sc;
   6902 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   6903 	struct wm_rxsoft *rxs;
   6904 	struct mbuf *m;
   6905 	int i, len;
   6906 	int count = 0;
   6907 	uint8_t status, errors;
   6908 	uint16_t vlantag;
   6909 
   6910 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
   6911 		rxs = &rxq->rxq_soft[i];
   6912 
   6913 		DPRINTF(WM_DEBUG_RX,
   6914 		    ("%s: RX: checking descriptor %d\n",
   6915 		    device_xname(sc->sc_dev), i));
   6916 
   6917 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
   6918 
   6919 		status = rxq->rxq_descs[i].wrx_status;
   6920 		errors = rxq->rxq_descs[i].wrx_errors;
   6921 		len = le16toh(rxq->rxq_descs[i].wrx_len);
   6922 		vlantag = rxq->rxq_descs[i].wrx_special;
   6923 
   6924 		if ((status & WRX_ST_DD) == 0) {
   6925 			/* We have processed all of the receive descriptors. */
   6926 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
   6927 			break;
   6928 		}
   6929 
   6930 		count++;
   6931 		if (__predict_false(rxq->rxq_discard)) {
   6932 			DPRINTF(WM_DEBUG_RX,
   6933 			    ("%s: RX: discarding contents of descriptor %d\n",
   6934 			    device_xname(sc->sc_dev), i));
   6935 			wm_init_rxdesc(rxq, i);
   6936 			if (status & WRX_ST_EOP) {
   6937 				/* Reset our state. */
   6938 				DPRINTF(WM_DEBUG_RX,
   6939 				    ("%s: RX: resetting rxdiscard -> 0\n",
   6940 				    device_xname(sc->sc_dev)));
   6941 				rxq->rxq_discard = 0;
   6942 			}
   6943 			continue;
   6944 		}
   6945 
   6946 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6947 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
   6948 
   6949 		m = rxs->rxs_mbuf;
   6950 
   6951 		/*
   6952 		 * Add a new receive buffer to the ring, unless of
   6953 		 * course the length is zero. Treat the latter as a
   6954 		 * failed mapping.
   6955 		 */
   6956 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
   6957 			/*
   6958 			 * Failed, throw away what we've done so
   6959 			 * far, and discard the rest of the packet.
   6960 			 */
   6961 			ifp->if_ierrors++;
   6962 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
   6963 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
   6964 			wm_init_rxdesc(rxq, i);
   6965 			if ((status & WRX_ST_EOP) == 0)
   6966 				rxq->rxq_discard = 1;
   6967 			if (rxq->rxq_head != NULL)
   6968 				m_freem(rxq->rxq_head);
   6969 			WM_RXCHAIN_RESET(rxq);
   6970 			DPRINTF(WM_DEBUG_RX,
   6971 			    ("%s: RX: Rx buffer allocation failed, "
   6972 			    "dropping packet%s\n", device_xname(sc->sc_dev),
   6973 			    rxq->rxq_discard ? " (discard)" : ""));
   6974 			continue;
   6975 		}
   6976 
   6977 		m->m_len = len;
   6978 		rxq->rxq_len += len;
   6979 		DPRINTF(WM_DEBUG_RX,
   6980 		    ("%s: RX: buffer at %p len %d\n",
   6981 		    device_xname(sc->sc_dev), m->m_data, len));
   6982 
   6983 		/* If this is not the end of the packet, keep looking. */
   6984 		if ((status & WRX_ST_EOP) == 0) {
   6985 			WM_RXCHAIN_LINK(rxq, m);
   6986 			DPRINTF(WM_DEBUG_RX,
   6987 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
   6988 			    device_xname(sc->sc_dev), rxq->rxq_len));
   6989 			continue;
   6990 		}
   6991 
   6992 		/*
    6993 		 * Okay, we have the entire packet now.  The chip is
    6994 		 * configured to include the FCS except on the I350 and
    6995 		 * I21[01] (not all chips can be configured to strip it),
    6996 		 * so we need to trim it.  We may need to adjust the
    6997 		 * length of the previous mbuf in the chain if the
    6998 		 * current mbuf is too short.
    6999 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
    7000 		 * is always set on the I350, so we don't trim it there.
   7001 		 */
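		/*
		 * For example, with ETHER_CRC_LEN == 4 and a final mbuf
		 * holding just 1 byte, the other 3 FCS bytes sit at the
		 * end of the previous mbuf: the code below zeroes the
		 * final mbuf's length and shortens the previous one by 3.
		 */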
   7002 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
   7003 		    && (sc->sc_type != WM_T_I210)
   7004 		    && (sc->sc_type != WM_T_I211)) {
   7005 			if (m->m_len < ETHER_CRC_LEN) {
   7006 				rxq->rxq_tail->m_len
   7007 				    -= (ETHER_CRC_LEN - m->m_len);
   7008 				m->m_len = 0;
   7009 			} else
   7010 				m->m_len -= ETHER_CRC_LEN;
   7011 			len = rxq->rxq_len - ETHER_CRC_LEN;
   7012 		} else
   7013 			len = rxq->rxq_len;
   7014 
   7015 		WM_RXCHAIN_LINK(rxq, m);
   7016 
   7017 		*rxq->rxq_tailp = NULL;
   7018 		m = rxq->rxq_head;
   7019 
   7020 		WM_RXCHAIN_RESET(rxq);
   7021 
   7022 		DPRINTF(WM_DEBUG_RX,
   7023 		    ("%s: RX: have entire packet, len -> %d\n",
   7024 		    device_xname(sc->sc_dev), len));
   7025 
   7026 		/* If an error occurred, update stats and drop the packet. */
   7027 		if (errors &
   7028 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
   7029 			if (errors & WRX_ER_SE)
   7030 				log(LOG_WARNING, "%s: symbol error\n",
   7031 				    device_xname(sc->sc_dev));
   7032 			else if (errors & WRX_ER_SEQ)
   7033 				log(LOG_WARNING, "%s: receive sequence error\n",
   7034 				    device_xname(sc->sc_dev));
   7035 			else if (errors & WRX_ER_CE)
   7036 				log(LOG_WARNING, "%s: CRC error\n",
   7037 				    device_xname(sc->sc_dev));
   7038 			m_freem(m);
   7039 			continue;
   7040 		}
   7041 
   7042 		/* No errors.  Receive the packet. */
   7043 		m->m_pkthdr.rcvif = ifp;
   7044 		m->m_pkthdr.len = len;
   7045 
   7046 		/*
   7047 		 * If VLANs are enabled, VLAN packets have been unwrapped
   7048 		 * for us.  Associate the tag with the packet.
   7049 		 */
    7050 		/* XXX should check for I350 and I354 */
   7051 		if ((status & WRX_ST_VP) != 0) {
   7052 			VLAN_INPUT_TAG(ifp, m,
   7053 			    le16toh(vlantag),
   7054 			    continue);
   7055 		}
   7056 
   7057 		/* Set up checksum info for this packet. */
   7058 		if ((status & WRX_ST_IXSM) == 0) {
   7059 			if (status & WRX_ST_IPCS) {
   7060 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
   7061 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
   7062 				if (errors & WRX_ER_IPE)
   7063 					m->m_pkthdr.csum_flags |=
   7064 					    M_CSUM_IPv4_BAD;
   7065 			}
   7066 			if (status & WRX_ST_TCPCS) {
   7067 				/*
   7068 				 * Note: we don't know if this was TCP or UDP,
   7069 				 * so we just set both bits, and expect the
   7070 				 * upper layers to deal.
   7071 				 */
   7072 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
   7073 				m->m_pkthdr.csum_flags |=
   7074 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
   7075 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
   7076 				if (errors & WRX_ER_TCPE)
   7077 					m->m_pkthdr.csum_flags |=
   7078 					    M_CSUM_TCP_UDP_BAD;
   7079 			}
   7080 		}
   7081 
   7082 		ifp->if_ipackets++;
   7083 
   7084 		WM_RX_UNLOCK(rxq);
   7085 
   7086 		/* Pass this up to any BPF listeners. */
   7087 		bpf_mtap(ifp, m);
   7088 
   7089 		/* Pass it on. */
   7090 		(*ifp->if_input)(ifp, m);
   7091 
   7092 		WM_RX_LOCK(rxq);
   7093 
   7094 		if (sc->sc_stopping)
   7095 			break;
   7096 	}
   7097 
   7098 	/* Update the receive pointer. */
   7099 	rxq->rxq_ptr = i;
   7100 	if (count != 0)
   7101 		rnd_add_uint32(&sc->rnd_source, count);
   7102 
   7103 	DPRINTF(WM_DEBUG_RX,
   7104 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
   7105 }
   7106 
   7107 /*
   7108  * wm_linkintr_gmii:
   7109  *
   7110  *	Helper; handle link interrupts for GMII.
   7111  */
   7112 static void
   7113 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
   7114 {
   7115 
   7116 	KASSERT(WM_CORE_LOCKED(sc));
   7117 
   7118 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7119 		__func__));
   7120 
   7121 	if (icr & ICR_LSC) {
   7122 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
   7123 
   7124 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
   7125 			wm_gig_downshift_workaround_ich8lan(sc);
   7126 
   7127 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
   7128 			device_xname(sc->sc_dev)));
   7129 		mii_pollstat(&sc->sc_mii);
   7130 		if (sc->sc_type == WM_T_82543) {
   7131 			int miistatus, active;
   7132 
   7133 			/*
   7134 			 * With 82543, we need to force speed and
   7135 			 * duplex on the MAC equal to what the PHY
   7136 			 * speed and duplex configuration is.
   7137 			 */
   7138 			miistatus = sc->sc_mii.mii_media_status;
   7139 
   7140 			if (miistatus & IFM_ACTIVE) {
   7141 				active = sc->sc_mii.mii_media_active;
   7142 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   7143 				switch (IFM_SUBTYPE(active)) {
   7144 				case IFM_10_T:
   7145 					sc->sc_ctrl |= CTRL_SPEED_10;
   7146 					break;
   7147 				case IFM_100_TX:
   7148 					sc->sc_ctrl |= CTRL_SPEED_100;
   7149 					break;
   7150 				case IFM_1000_T:
   7151 					sc->sc_ctrl |= CTRL_SPEED_1000;
   7152 					break;
   7153 				default:
   7154 					/*
   7155 					 * fiber?
    7156 					 * Should not enter here.
   7157 					 */
   7158 					printf("unknown media (%x)\n",
   7159 					    active);
   7160 					break;
   7161 				}
   7162 				if (active & IFM_FDX)
   7163 					sc->sc_ctrl |= CTRL_FD;
   7164 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7165 			}
   7166 		} else if ((sc->sc_type == WM_T_ICH8)
   7167 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
   7168 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
   7169 		} else if (sc->sc_type == WM_T_PCH) {
   7170 			wm_k1_gig_workaround_hv(sc,
   7171 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
   7172 		}
   7173 
   7174 		if ((sc->sc_phytype == WMPHY_82578)
   7175 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
   7176 			== IFM_1000_T)) {
   7177 
   7178 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
   7179 				delay(200*1000); /* XXX too big */
   7180 
   7181 				/* Link stall fix for link up */
   7182 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7183 				    HV_MUX_DATA_CTRL,
   7184 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
   7185 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
   7186 				wm_gmii_hv_writereg(sc->sc_dev, 1,
   7187 				    HV_MUX_DATA_CTRL,
   7188 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
   7189 			}
   7190 		}
   7191 	} else if (icr & ICR_RXSEQ) {
   7192 		DPRINTF(WM_DEBUG_LINK,
    7193 		    ("%s: LINK: Receive sequence error\n",
   7194 			device_xname(sc->sc_dev)));
   7195 	}
   7196 }
   7197 
   7198 /*
   7199  * wm_linkintr_tbi:
   7200  *
   7201  *	Helper; handle link interrupts for TBI mode.
   7202  */
   7203 static void
   7204 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
   7205 {
   7206 	uint32_t status;
   7207 
   7208 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7209 		__func__));
   7210 
   7211 	status = CSR_READ(sc, WMREG_STATUS);
   7212 	if (icr & ICR_LSC) {
   7213 		if (status & STATUS_LU) {
   7214 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
   7215 			    device_xname(sc->sc_dev),
   7216 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   7217 			/*
   7218 			 * NOTE: CTRL will update TFCE and RFCE automatically,
   7219 			 * so we should update sc->sc_ctrl
   7220 			 */
   7221 
   7222 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   7223 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   7224 			sc->sc_fcrtl &= ~FCRTL_XONE;
   7225 			if (status & STATUS_FD)
   7226 				sc->sc_tctl |=
   7227 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   7228 			else
   7229 				sc->sc_tctl |=
   7230 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   7231 			if (sc->sc_ctrl & CTRL_TFCE)
   7232 				sc->sc_fcrtl |= FCRTL_XONE;
   7233 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   7234 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   7235 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   7236 				      sc->sc_fcrtl);
   7237 			sc->sc_tbi_linkup = 1;
   7238 		} else {
   7239 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
   7240 			    device_xname(sc->sc_dev)));
   7241 			sc->sc_tbi_linkup = 0;
   7242 		}
   7243 		/* Update LED */
   7244 		wm_tbi_serdes_set_linkled(sc);
   7245 	} else if (icr & ICR_RXSEQ) {
   7246 		DPRINTF(WM_DEBUG_LINK,
   7247 		    ("%s: LINK: Receive sequence error\n",
   7248 		    device_xname(sc->sc_dev)));
   7249 	}
   7250 }
   7251 
   7252 /*
   7253  * wm_linkintr_serdes:
   7254  *
    7255  *	Helper; handle link interrupts for SERDES mode.
   7256  */
   7257 static void
   7258 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
   7259 {
   7260 	struct mii_data *mii = &sc->sc_mii;
   7261 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   7262 	uint32_t pcs_adv, pcs_lpab, reg;
   7263 
   7264 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
   7265 		__func__));
   7266 
   7267 	if (icr & ICR_LSC) {
   7268 		/* Check PCS */
   7269 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7270 		if ((reg & PCS_LSTS_LINKOK) != 0) {
   7271 			mii->mii_media_status |= IFM_ACTIVE;
   7272 			sc->sc_tbi_linkup = 1;
   7273 		} else {
   7274 			mii->mii_media_status |= IFM_NONE;
   7275 			sc->sc_tbi_linkup = 0;
   7276 			wm_tbi_serdes_set_linkled(sc);
   7277 			return;
   7278 		}
   7279 		mii->mii_media_active |= IFM_1000_SX;
   7280 		if ((reg & PCS_LSTS_FDX) != 0)
   7281 			mii->mii_media_active |= IFM_FDX;
   7282 		else
   7283 			mii->mii_media_active |= IFM_HDX;
   7284 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   7285 			/* Check flow */
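			/*
			 * Pause resolution implemented by the checks
			 * below (sym = TXCW_SYM_PAUSE, asym =
			 * TXCW_ASYM_PAUSE):
			 *
			 *	local sym, partner sym		-> TX+RX pause
			 *	local asym only, partner both	-> TX pause
			 *	local both, partner asym only	-> RX pause
			 */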
   7286 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
   7287 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
   7288 				DPRINTF(WM_DEBUG_LINK,
   7289 				    ("XXX LINKOK but not ACOMP\n"));
   7290 				return;
   7291 			}
   7292 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   7293 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
   7294 			DPRINTF(WM_DEBUG_LINK,
   7295 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
   7296 			if ((pcs_adv & TXCW_SYM_PAUSE)
   7297 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   7298 				mii->mii_media_active |= IFM_FLOW
   7299 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   7300 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   7301 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7302 			    && (pcs_lpab & TXCW_SYM_PAUSE)
   7303 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7304 				mii->mii_media_active |= IFM_FLOW
   7305 				    | IFM_ETH_TXPAUSE;
   7306 			else if ((pcs_adv & TXCW_SYM_PAUSE)
   7307 			    && (pcs_adv & TXCW_ASYM_PAUSE)
   7308 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   7309 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
   7310 				mii->mii_media_active |= IFM_FLOW
   7311 				    | IFM_ETH_RXPAUSE;
   7312 		}
   7313 		/* Update LED */
   7314 		wm_tbi_serdes_set_linkled(sc);
   7315 	} else {
   7316 		DPRINTF(WM_DEBUG_LINK,
   7317 		    ("%s: LINK: Receive sequence error\n",
   7318 		    device_xname(sc->sc_dev)));
   7319 	}
   7320 }
   7321 
   7322 /*
   7323  * wm_linkintr:
   7324  *
   7325  *	Helper; handle link interrupts.
   7326  */
   7327 static void
   7328 wm_linkintr(struct wm_softc *sc, uint32_t icr)
   7329 {
   7330 
   7331 	KASSERT(WM_CORE_LOCKED(sc));
   7332 
   7333 	if (sc->sc_flags & WM_F_HAS_MII)
   7334 		wm_linkintr_gmii(sc, icr);
   7335 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   7336 	    && (sc->sc_type >= WM_T_82575))
   7337 		wm_linkintr_serdes(sc, icr);
   7338 	else
   7339 		wm_linkintr_tbi(sc, icr);
   7340 }
   7341 
   7342 /*
   7343  * wm_intr_legacy:
   7344  *
   7345  *	Interrupt service routine for INTx and MSI.
   7346  */
   7347 static int
   7348 wm_intr_legacy(void *arg)
   7349 {
   7350 	struct wm_softc *sc = arg;
   7351 	struct wm_txqueue *txq = &sc->sc_txq[0];
   7352 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
   7353 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7354 	uint32_t icr, rndval = 0;
   7355 	int handled = 0;
   7356 
   7357 	DPRINTF(WM_DEBUG_TX,
   7358 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
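	/*
	 * Reading ICR clears the bits it reports, so the loop below
	 * re-reads the register until no interrupt causes we care
	 * about remain asserted.
	 */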
   7359 	while (1 /* CONSTCOND */) {
   7360 		icr = CSR_READ(sc, WMREG_ICR);
   7361 		if ((icr & sc->sc_icr) == 0)
   7362 			break;
   7363 		if (rndval == 0)
   7364 			rndval = icr;
   7365 
   7366 		WM_RX_LOCK(rxq);
   7367 
   7368 		if (sc->sc_stopping) {
   7369 			WM_RX_UNLOCK(rxq);
   7370 			break;
   7371 		}
   7372 
   7373 		handled = 1;
   7374 
   7375 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7376 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
   7377 			DPRINTF(WM_DEBUG_RX,
   7378 			    ("%s: RX: got Rx intr 0x%08x\n",
   7379 			    device_xname(sc->sc_dev),
   7380 			    icr & (ICR_RXDMT0|ICR_RXT0)));
   7381 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7382 		}
   7383 #endif
   7384 		wm_rxeof(rxq);
   7385 
   7386 		WM_RX_UNLOCK(rxq);
   7387 		WM_TX_LOCK(txq);
   7388 
   7389 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
   7390 		if (icr & ICR_TXDW) {
   7391 			DPRINTF(WM_DEBUG_TX,
   7392 			    ("%s: TX: got TXDW interrupt\n",
   7393 			    device_xname(sc->sc_dev)));
   7394 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7395 		}
   7396 #endif
   7397 		wm_txeof(sc);
   7398 
   7399 		WM_TX_UNLOCK(txq);
   7400 		WM_CORE_LOCK(sc);
   7401 
   7402 		if (icr & (ICR_LSC|ICR_RXSEQ)) {
   7403 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7404 			wm_linkintr(sc, icr);
   7405 		}
   7406 
   7407 		WM_CORE_UNLOCK(sc);
   7408 
   7409 		if (icr & ICR_RXO) {
   7410 #if defined(WM_DEBUG)
   7411 			log(LOG_WARNING, "%s: Receive overrun\n",
   7412 			    device_xname(sc->sc_dev));
   7413 #endif /* defined(WM_DEBUG) */
   7414 		}
   7415 	}
   7416 
   7417 	rnd_add_uint32(&sc->rnd_source, rndval);
   7418 
   7419 	if (handled) {
   7420 		/* Try to get more packets going. */
   7421 		ifp->if_start(ifp);
   7422 	}
   7423 
   7424 	return handled;
   7425 }
   7426 
   7427 /*
   7428  * wm_txintr_msix:
   7429  *
   7430  *	Interrupt service routine for TX complete interrupt for MSI-X.
   7431  */
   7432 static int
   7433 wm_txintr_msix(void *arg)
   7434 {
   7435 	struct wm_txqueue *txq = arg;
   7436 	struct wm_softc *sc = txq->txq_sc;
   7437 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7438 	int handled = 0;
   7439 
   7440 	DPRINTF(WM_DEBUG_TX,
   7441 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
   7442 
   7443 	if (sc->sc_type == WM_T_82574)
   7444 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7445 	else if (sc->sc_type == WM_T_82575)
   7446 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
   7447 	else
   7448 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
   7449 
   7450 	WM_TX_LOCK(txq);
   7451 
   7452 	if (sc->sc_stopping)
   7453 		goto out;
   7454 
   7455 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
   7456 	handled = wm_txeof(sc);
   7457 
   7458 out:
   7459 	WM_TX_UNLOCK(txq);
   7460 
   7461 	if (sc->sc_type == WM_T_82574)
   7462 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id)); /* 82574 only */
   7463 	else if (sc->sc_type == WM_T_82575)
   7464 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
   7465 	else
   7466 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
   7467 
   7468 	if (handled) {
   7469 		/* Try to get more packets going. */
   7470 		ifp->if_start(ifp);
   7471 	}
   7472 
   7473 	return handled;
   7474 }
   7475 
   7476 /*
   7477  * wm_rxintr_msix:
   7478  *
   7479  *	Interrupt service routine for RX interrupt for MSI-X.
   7480  */
   7481 static int
   7482 wm_rxintr_msix(void *arg)
   7483 {
   7484 	struct wm_rxqueue *rxq = arg;
   7485 	struct wm_softc *sc = rxq->rxq_sc;
   7486 
   7487 	DPRINTF(WM_DEBUG_RX,
   7488 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
   7489 
   7490 	if (sc->sc_type == WM_T_82574)
   7491 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id)); /* 82574 only */
   7492 	else if (sc->sc_type == WM_T_82575)
   7493 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
   7494 	else
   7495 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
   7496 
   7497 	WM_RX_LOCK(rxq);
   7498 
   7499 	if (sc->sc_stopping)
   7500 		goto out;
   7501 
   7502 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
   7503 	wm_rxeof(rxq);
   7504 
   7505 out:
   7506 	WM_RX_UNLOCK(rxq);
   7507 
   7508 	if (sc->sc_type == WM_T_82574)
   7509 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
   7510 	else if (sc->sc_type == WM_T_82575)
   7511 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
   7512 	else
   7513 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
   7514 
   7515 	return 1;
   7516 }
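/*
 * The MSI-X handlers above and below share one pattern: mask the
 * vector's interrupt (IMC/EIMC) on entry, service the queue under its
 * lock, then re-enable the interrupt (IMS/EIMS) on the way out so the
 * vector cannot re-fire while its queue is being processed.
 */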
   7517 
   7518 /*
   7519  * wm_linkintr_msix:
   7520  *
   7521  *	Interrupt service routine for link status change for MSI-X.
   7522  */
   7523 static int
   7524 wm_linkintr_msix(void *arg)
   7525 {
   7526 	struct wm_softc *sc = arg;
   7527 	uint32_t reg;
   7528 
   7529 	DPRINTF(WM_DEBUG_LINK,
   7530 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
   7531 
   7532 	reg = CSR_READ(sc, WMREG_ICR);
   7533 	WM_CORE_LOCK(sc);
   7534 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
   7535 		goto out;
   7536 
   7537 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
   7538 	wm_linkintr(sc, ICR_LSC);
   7539 
   7540 out:
   7541 	WM_CORE_UNLOCK(sc);
   7542 
   7543 	if (sc->sc_type == WM_T_82574)
   7544 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC); /* 82574 only */
   7545 	else if (sc->sc_type == WM_T_82575)
   7546 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
   7547 	else
   7548 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
   7549 
   7550 	return 1;
   7551 }
   7552 
   7553 /*
   7554  * Media related.
   7555  * GMII, SGMII, TBI (and SERDES)
   7556  */
   7557 
   7558 /* Common */
   7559 
   7560 /*
   7561  * wm_tbi_serdes_set_linkled:
   7562  *
   7563  *	Update the link LED on TBI and SERDES devices.
   7564  */
   7565 static void
   7566 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
   7567 {
   7568 
   7569 	if (sc->sc_tbi_linkup)
   7570 		sc->sc_ctrl |= CTRL_SWDPIN(0);
   7571 	else
   7572 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
   7573 
   7574 	/* 82540 or newer devices are active low */
   7575 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
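	/*
	 * The XOR inverts the pin on active-low parts: with link up the
	 * bit was just set above, so the pin ends up driven low, which
	 * lights the LED.
	 */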
   7576 
   7577 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7578 }
   7579 
   7580 /* GMII related */
   7581 
   7582 /*
   7583  * wm_gmii_reset:
   7584  *
   7585  *	Reset the PHY.
   7586  */
   7587 static void
   7588 wm_gmii_reset(struct wm_softc *sc)
   7589 {
   7590 	uint32_t reg;
   7591 	int rv;
   7592 
   7593 	/* get phy semaphore */
   7594 	switch (sc->sc_type) {
   7595 	case WM_T_82571:
   7596 	case WM_T_82572:
   7597 	case WM_T_82573:
   7598 	case WM_T_82574:
   7599 	case WM_T_82583:
   7600 		 /* XXX should get sw semaphore, too */
   7601 		rv = wm_get_swsm_semaphore(sc);
   7602 		break;
   7603 	case WM_T_82575:
   7604 	case WM_T_82576:
   7605 	case WM_T_82580:
   7606 	case WM_T_I350:
   7607 	case WM_T_I354:
   7608 	case WM_T_I210:
   7609 	case WM_T_I211:
   7610 	case WM_T_80003:
   7611 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7612 		break;
   7613 	case WM_T_ICH8:
   7614 	case WM_T_ICH9:
   7615 	case WM_T_ICH10:
   7616 	case WM_T_PCH:
   7617 	case WM_T_PCH2:
   7618 	case WM_T_PCH_LPT:
   7619 		rv = wm_get_swfwhw_semaphore(sc);
   7620 		break;
   7621 	default:
    7622 		/* nothing to do */
   7623 		rv = 0;
   7624 		break;
   7625 	}
   7626 	if (rv != 0) {
   7627 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   7628 		    __func__);
   7629 		return;
   7630 	}
   7631 
   7632 	switch (sc->sc_type) {
   7633 	case WM_T_82542_2_0:
   7634 	case WM_T_82542_2_1:
   7635 		/* null */
   7636 		break;
   7637 	case WM_T_82543:
   7638 		/*
   7639 		 * With 82543, we need to force speed and duplex on the MAC
   7640 		 * equal to what the PHY speed and duplex configuration is.
   7641 		 * In addition, we need to perform a hardware reset on the PHY
   7642 		 * to take it out of reset.
   7643 		 */
   7644 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   7645 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7646 
   7647 		/* The PHY reset pin is active-low. */
   7648 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   7649 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
   7650 		    CTRL_EXT_SWDPIN(4));
   7651 		reg |= CTRL_EXT_SWDPIO(4);
   7652 
   7653 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   7654 		CSR_WRITE_FLUSH(sc);
   7655 		delay(10*1000);
   7656 
   7657 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
   7658 		CSR_WRITE_FLUSH(sc);
   7659 		delay(150);
   7660 #if 0
   7661 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
   7662 #endif
   7663 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
   7664 		break;
   7665 	case WM_T_82544:	/* reset 10000us */
   7666 	case WM_T_82540:
   7667 	case WM_T_82545:
   7668 	case WM_T_82545_3:
   7669 	case WM_T_82546:
   7670 	case WM_T_82546_3:
   7671 	case WM_T_82541:
   7672 	case WM_T_82541_2:
   7673 	case WM_T_82547:
   7674 	case WM_T_82547_2:
   7675 	case WM_T_82571:	/* reset 100us */
   7676 	case WM_T_82572:
   7677 	case WM_T_82573:
   7678 	case WM_T_82574:
   7679 	case WM_T_82575:
   7680 	case WM_T_82576:
   7681 	case WM_T_82580:
   7682 	case WM_T_I350:
   7683 	case WM_T_I354:
   7684 	case WM_T_I210:
   7685 	case WM_T_I211:
   7686 	case WM_T_82583:
   7687 	case WM_T_80003:
   7688 		/* generic reset */
   7689 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7690 		CSR_WRITE_FLUSH(sc);
   7691 		delay(20000);
   7692 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7693 		CSR_WRITE_FLUSH(sc);
   7694 		delay(20000);
   7695 
   7696 		if ((sc->sc_type == WM_T_82541)
   7697 		    || (sc->sc_type == WM_T_82541_2)
   7698 		    || (sc->sc_type == WM_T_82547)
   7699 		    || (sc->sc_type == WM_T_82547_2)) {
    7700 			/* workarounds for IGP are done in igp_reset() */
   7701 			/* XXX add code to set LED after phy reset */
   7702 		}
   7703 		break;
   7704 	case WM_T_ICH8:
   7705 	case WM_T_ICH9:
   7706 	case WM_T_ICH10:
   7707 	case WM_T_PCH:
   7708 	case WM_T_PCH2:
   7709 	case WM_T_PCH_LPT:
   7710 		/* generic reset */
   7711 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   7712 		CSR_WRITE_FLUSH(sc);
   7713 		delay(100);
   7714 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7715 		CSR_WRITE_FLUSH(sc);
   7716 		delay(150);
   7717 		break;
   7718 	default:
   7719 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
   7720 		    __func__);
   7721 		break;
   7722 	}
   7723 
   7724 	/* release PHY semaphore */
   7725 	switch (sc->sc_type) {
   7726 	case WM_T_82571:
   7727 	case WM_T_82572:
   7728 	case WM_T_82573:
   7729 	case WM_T_82574:
   7730 	case WM_T_82583:
   7731 		 /* XXX should put sw semaphore, too */
   7732 		wm_put_swsm_semaphore(sc);
   7733 		break;
   7734 	case WM_T_82575:
   7735 	case WM_T_82576:
   7736 	case WM_T_82580:
   7737 	case WM_T_I350:
   7738 	case WM_T_I354:
   7739 	case WM_T_I210:
   7740 	case WM_T_I211:
   7741 	case WM_T_80003:
   7742 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   7743 		break;
   7744 	case WM_T_ICH8:
   7745 	case WM_T_ICH9:
   7746 	case WM_T_ICH10:
   7747 	case WM_T_PCH:
   7748 	case WM_T_PCH2:
   7749 	case WM_T_PCH_LPT:
   7750 		wm_put_swfwhw_semaphore(sc);
   7751 		break;
   7752 	default:
    7753 		/* nothing to do */
   7754 		rv = 0;
   7755 		break;
   7756 	}
   7757 
   7758 	/* get_cfg_done */
   7759 	wm_get_cfg_done(sc);
   7760 
   7761 	/* extra setup */
   7762 	switch (sc->sc_type) {
   7763 	case WM_T_82542_2_0:
   7764 	case WM_T_82542_2_1:
   7765 	case WM_T_82543:
   7766 	case WM_T_82544:
   7767 	case WM_T_82540:
   7768 	case WM_T_82545:
   7769 	case WM_T_82545_3:
   7770 	case WM_T_82546:
   7771 	case WM_T_82546_3:
   7772 	case WM_T_82541_2:
   7773 	case WM_T_82547_2:
   7774 	case WM_T_82571:
   7775 	case WM_T_82572:
   7776 	case WM_T_82573:
   7777 	case WM_T_82575:
   7778 	case WM_T_82576:
   7779 	case WM_T_82580:
   7780 	case WM_T_I350:
   7781 	case WM_T_I354:
   7782 	case WM_T_I210:
   7783 	case WM_T_I211:
   7784 	case WM_T_80003:
   7785 		/* null */
   7786 		break;
   7787 	case WM_T_82574:
   7788 	case WM_T_82583:
   7789 		wm_lplu_d0_disable(sc);
   7790 		break;
   7791 	case WM_T_82541:
   7792 	case WM_T_82547:
   7793 		/* XXX Configure actively LED after PHY reset */
   7794 		break;
   7795 	case WM_T_ICH8:
   7796 	case WM_T_ICH9:
   7797 	case WM_T_ICH10:
   7798 	case WM_T_PCH:
   7799 	case WM_T_PCH2:
   7800 	case WM_T_PCH_LPT:
    7801 		/* Allow time for h/w to get to a quiescent state after reset */
   7802 		delay(10*1000);
   7803 
   7804 		if (sc->sc_type == WM_T_PCH)
   7805 			wm_hv_phy_workaround_ich8lan(sc);
   7806 
   7807 		if (sc->sc_type == WM_T_PCH2)
   7808 			wm_lv_phy_workaround_ich8lan(sc);
   7809 
   7810 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
   7811 			/*
   7812 			 * dummy read to clear the phy wakeup bit after lcd
   7813 			 * reset
   7814 			 */
   7815 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
   7816 		}
   7817 
   7818 		/*
    7819 		 * XXX Configure the LCD with the extended configuration region
    7820 		 * in NVM.
   7821 		 */
   7822 
   7823 		/* Disable D0 LPLU. */
   7824 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
   7825 			wm_lplu_d0_disable_pch(sc);
   7826 		else
   7827 			wm_lplu_d0_disable(sc);	/* ICH* */
   7828 		break;
   7829 	default:
   7830 		panic("%s: unknown type\n", __func__);
   7831 		break;
   7832 	}
   7833 }
   7834 
   7835 /*
   7836  * wm_get_phy_id_82575:
   7837  *
   7838  * Return PHY ID. Return -1 if it failed.
    7839  *	Return the PHY ID, or -1 on failure.
   7840 static int
   7841 wm_get_phy_id_82575(struct wm_softc *sc)
   7842 {
   7843 	uint32_t reg;
   7844 	int phyid = -1;
   7845 
   7846 	/* XXX */
   7847 	if ((sc->sc_flags & WM_F_SGMII) == 0)
   7848 		return -1;
   7849 
   7850 	if (wm_sgmii_uses_mdio(sc)) {
   7851 		switch (sc->sc_type) {
   7852 		case WM_T_82575:
   7853 		case WM_T_82576:
   7854 			reg = CSR_READ(sc, WMREG_MDIC);
   7855 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
   7856 			break;
   7857 		case WM_T_82580:
   7858 		case WM_T_I350:
   7859 		case WM_T_I354:
   7860 		case WM_T_I210:
   7861 		case WM_T_I211:
   7862 			reg = CSR_READ(sc, WMREG_MDICNFG);
   7863 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
   7864 			break;
   7865 		default:
   7866 			return -1;
   7867 		}
   7868 	}
   7869 
   7870 	return phyid;
   7871 }
   7872 
   7873 
   7874 /*
   7875  * wm_gmii_mediainit:
   7876  *
   7877  *	Initialize media for use on 1000BASE-T devices.
   7878  */
   7879 static void
   7880 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
   7881 {
   7882 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   7883 	struct mii_data *mii = &sc->sc_mii;
   7884 	uint32_t reg;
   7885 
   7886 	/* We have GMII. */
   7887 	sc->sc_flags |= WM_F_HAS_MII;
   7888 
   7889 	if (sc->sc_type == WM_T_80003)
   7890 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   7891 	else
   7892 		sc->sc_tipg = TIPG_1000T_DFLT;
   7893 
   7894 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
   7895 	if ((sc->sc_type == WM_T_82580)
   7896 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
   7897 	    || (sc->sc_type == WM_T_I211)) {
   7898 		reg = CSR_READ(sc, WMREG_PHPM);
   7899 		reg &= ~PHPM_GO_LINK_D;
   7900 		CSR_WRITE(sc, WMREG_PHPM, reg);
   7901 	}
   7902 
   7903 	/*
   7904 	 * Let the chip set speed/duplex on its own based on
   7905 	 * signals from the PHY.
   7906 	 * XXXbouyer - I'm not sure this is right for the 80003,
   7907 	 * the em driver only sets CTRL_SLU here - but it seems to work.
   7908 	 */
   7909 	sc->sc_ctrl |= CTRL_SLU;
   7910 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   7911 
   7912 	/* Initialize our media structures and probe the GMII. */
   7913 	mii->mii_ifp = ifp;
   7914 
   7915 	/*
   7916 	 * Determine the PHY access method.
   7917 	 *
   7918 	 *  For SGMII, use SGMII specific method.
   7919 	 *
   7920 	 *  For some devices, we can determine the PHY access method
   7921 	 * from sc_type.
   7922 	 *
   7923 	 *  For ICH and PCH variants, it's difficult to determine the PHY
    7924 	 * access method by sc_type, so use the PCI product ID for some
    7925 	 * devices.
    7926 	 * For other ICH8 variants, try to use igp's method.  If the PHY
    7927 	 * can't be detected, fall back to bm's method.
   7928 	 */
   7929 	switch (prodid) {
   7930 	case PCI_PRODUCT_INTEL_PCH_M_LM:
   7931 	case PCI_PRODUCT_INTEL_PCH_M_LC:
   7932 		/* 82577 */
   7933 		sc->sc_phytype = WMPHY_82577;
   7934 		break;
   7935 	case PCI_PRODUCT_INTEL_PCH_D_DM:
   7936 	case PCI_PRODUCT_INTEL_PCH_D_DC:
   7937 		/* 82578 */
   7938 		sc->sc_phytype = WMPHY_82578;
   7939 		break;
   7940 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
   7941 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
   7942 		/* 82579 */
   7943 		sc->sc_phytype = WMPHY_82579;
   7944 		break;
   7945 	case PCI_PRODUCT_INTEL_82801I_BM:
   7946 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
   7947 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
   7948 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
   7949 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
   7950 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
   7951 		/* 82567 */
   7952 		sc->sc_phytype = WMPHY_BM;
   7953 		mii->mii_readreg = wm_gmii_bm_readreg;
   7954 		mii->mii_writereg = wm_gmii_bm_writereg;
   7955 		break;
   7956 	default:
   7957 		if (((sc->sc_flags & WM_F_SGMII) != 0)
   7958 		    && !wm_sgmii_uses_mdio(sc)){
   7959 			/* SGMII */
   7960 			mii->mii_readreg = wm_sgmii_readreg;
   7961 			mii->mii_writereg = wm_sgmii_writereg;
   7962 		} else if (sc->sc_type >= WM_T_80003) {
   7963 			/* 80003 */
   7964 			mii->mii_readreg = wm_gmii_i80003_readreg;
   7965 			mii->mii_writereg = wm_gmii_i80003_writereg;
   7966 		} else if (sc->sc_type >= WM_T_I210) {
   7967 			/* I210 and I211 */
   7968 			mii->mii_readreg = wm_gmii_gs40g_readreg;
   7969 			mii->mii_writereg = wm_gmii_gs40g_writereg;
   7970 		} else if (sc->sc_type >= WM_T_82580) {
   7971 			/* 82580, I350 and I354 */
   7972 			sc->sc_phytype = WMPHY_82580;
   7973 			mii->mii_readreg = wm_gmii_82580_readreg;
   7974 			mii->mii_writereg = wm_gmii_82580_writereg;
   7975 		} else if (sc->sc_type >= WM_T_82544) {
    7976 			/* 82544, 82540, 8254[56], 8254[17], 8257[1234] and 82583 */
   7977 			mii->mii_readreg = wm_gmii_i82544_readreg;
   7978 			mii->mii_writereg = wm_gmii_i82544_writereg;
   7979 		} else {
   7980 			mii->mii_readreg = wm_gmii_i82543_readreg;
   7981 			mii->mii_writereg = wm_gmii_i82543_writereg;
   7982 		}
   7983 		break;
   7984 	}
   7985 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
   7986 		/* All PCH* use _hv_ */
   7987 		mii->mii_readreg = wm_gmii_hv_readreg;
   7988 		mii->mii_writereg = wm_gmii_hv_writereg;
   7989 	}
   7990 	mii->mii_statchg = wm_gmii_statchg;
   7991 
   7992 	wm_gmii_reset(sc);
   7993 
   7994 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   7995 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
   7996 	    wm_gmii_mediastatus);
   7997 
   7998 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
   7999 	    || (sc->sc_type == WM_T_82580)
   8000 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
   8001 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
   8002 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
   8003 			/* Attach only one port */
   8004 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
   8005 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8006 		} else {
   8007 			int i, id;
   8008 			uint32_t ctrl_ext;
   8009 
   8010 			id = wm_get_phy_id_82575(sc);
   8011 			if (id != -1) {
   8012 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
   8013 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
   8014 			}
   8015 			if ((id == -1)
   8016 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8017 				/* Power on sgmii phy if it is disabled */
   8018 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   8019 				CSR_WRITE(sc, WMREG_CTRL_EXT,
   8020 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
   8021 				CSR_WRITE_FLUSH(sc);
   8022 				delay(300*1000); /* XXX too long */
   8023 
   8024 				/* from 1 to 8 */
   8025 				for (i = 1; i < 8; i++)
   8026 					mii_attach(sc->sc_dev, &sc->sc_mii,
   8027 					    0xffffffff, i, MII_OFFSET_ANY,
   8028 					    MIIF_DOPAUSE);
   8029 
   8030 				/* restore previous sfp cage power state */
   8031 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   8032 			}
   8033 		}
   8034 	} else {
   8035 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8036 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8037 	}
   8038 
   8039 	/*
   8040 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
   8041 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
   8042 	 */
   8043 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
   8044 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
   8045 		wm_set_mdio_slow_mode_hv(sc);
   8046 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8047 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8048 	}
   8049 
   8050 	/*
   8051 	 * (For ICH8 variants)
   8052 	 * If PHY detection failed, use BM's r/w function and retry.
   8053 	 */
   8054 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
   8055 		/* if failed, retry with *_bm_* */
   8056 		mii->mii_readreg = wm_gmii_bm_readreg;
   8057 		mii->mii_writereg = wm_gmii_bm_writereg;
   8058 
   8059 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
   8060 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
   8061 	}
   8062 
   8063 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
    8064 		/* No PHY was found */
   8065 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
   8066 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
   8067 		sc->sc_phytype = WMPHY_NONE;
   8068 	} else {
   8069 		/*
   8070 		 * PHY Found!
   8071 		 * Check PHY type.
   8072 		 */
   8073 		uint32_t model;
   8074 		struct mii_softc *child;
   8075 
   8076 		child = LIST_FIRST(&mii->mii_phys);
   8077 		model = child->mii_mpd_model;
   8078 		if (model == MII_MODEL_yyINTEL_I82566)
   8079 			sc->sc_phytype = WMPHY_IGP_3;
   8080 
   8081 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
   8082 	}
   8083 }
   8084 
   8085 /*
   8086  * wm_gmii_mediachange:	[ifmedia interface function]
   8087  *
   8088  *	Set hardware to newly-selected media on a 1000BASE-T device.
   8089  */
   8090 static int
   8091 wm_gmii_mediachange(struct ifnet *ifp)
   8092 {
   8093 	struct wm_softc *sc = ifp->if_softc;
   8094 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   8095 	int rc;
   8096 
   8097 	if ((ifp->if_flags & IFF_UP) == 0)
   8098 		return 0;
   8099 
   8100 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
   8101 	sc->sc_ctrl |= CTRL_SLU;
   8102 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   8103 	    || (sc->sc_type > WM_T_82543)) {
   8104 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
   8105 	} else {
   8106 		sc->sc_ctrl &= ~CTRL_ASDE;
   8107 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
   8108 		if (ife->ifm_media & IFM_FDX)
   8109 			sc->sc_ctrl |= CTRL_FD;
   8110 		switch (IFM_SUBTYPE(ife->ifm_media)) {
   8111 		case IFM_10_T:
   8112 			sc->sc_ctrl |= CTRL_SPEED_10;
   8113 			break;
   8114 		case IFM_100_TX:
   8115 			sc->sc_ctrl |= CTRL_SPEED_100;
   8116 			break;
   8117 		case IFM_1000_T:
   8118 			sc->sc_ctrl |= CTRL_SPEED_1000;
   8119 			break;
   8120 		default:
   8121 			panic("wm_gmii_mediachange: bad media 0x%x",
   8122 			    ife->ifm_media);
   8123 		}
   8124 	}
   8125 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8126 	if (sc->sc_type <= WM_T_82543)
   8127 		wm_gmii_reset(sc);
   8128 
   8129 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
   8130 		return 0;
   8131 	return rc;
   8132 }
   8133 
   8134 /*
   8135  * wm_gmii_mediastatus:	[ifmedia interface function]
   8136  *
   8137  *	Get the current interface media status on a 1000BASE-T device.
   8138  */
   8139 static void
   8140 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   8141 {
   8142 	struct wm_softc *sc = ifp->if_softc;
   8143 
   8144 	ether_mediastatus(ifp, ifmr);
   8145 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   8146 	    | sc->sc_flowflags;
   8147 }
   8148 
   8149 #define	MDI_IO		CTRL_SWDPIN(2)
   8150 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
   8151 #define	MDI_CLK		CTRL_SWDPIN(3)
   8152 
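/*
 * Bit-bang MDIO helpers for the i82543: MDI_IO carries the data bit,
 * MDI_CLK the clock and MDI_DIR the direction.  Each bit is placed on
 * MDI_IO and latched by pulsing MDI_CLK high and low with ~10us
 * settling delays, forming a standard IEEE 802.3 clause 22 management
 * frame (32-bit preamble, start and opcode bits, PHY and register
 * addresses, turnaround, 16 data bits).
 */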
   8153 static void
   8154 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
   8155 {
   8156 	uint32_t i, v;
   8157 
   8158 	v = CSR_READ(sc, WMREG_CTRL);
   8159 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8160 	v |= MDI_DIR | CTRL_SWDPIO(3);
   8161 
   8162 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
   8163 		if (data & i)
   8164 			v |= MDI_IO;
   8165 		else
   8166 			v &= ~MDI_IO;
   8167 		CSR_WRITE(sc, WMREG_CTRL, v);
   8168 		CSR_WRITE_FLUSH(sc);
   8169 		delay(10);
   8170 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8171 		CSR_WRITE_FLUSH(sc);
   8172 		delay(10);
   8173 		CSR_WRITE(sc, WMREG_CTRL, v);
   8174 		CSR_WRITE_FLUSH(sc);
   8175 		delay(10);
   8176 	}
   8177 }
   8178 
   8179 static uint32_t
   8180 wm_i82543_mii_recvbits(struct wm_softc *sc)
   8181 {
   8182 	uint32_t v, i, data = 0;
   8183 
   8184 	v = CSR_READ(sc, WMREG_CTRL);
   8185 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
   8186 	v |= CTRL_SWDPIO(3);
   8187 
   8188 	CSR_WRITE(sc, WMREG_CTRL, v);
   8189 	CSR_WRITE_FLUSH(sc);
   8190 	delay(10);
   8191 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8192 	CSR_WRITE_FLUSH(sc);
   8193 	delay(10);
   8194 	CSR_WRITE(sc, WMREG_CTRL, v);
   8195 	CSR_WRITE_FLUSH(sc);
   8196 	delay(10);
   8197 
   8198 	for (i = 0; i < 16; i++) {
   8199 		data <<= 1;
   8200 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8201 		CSR_WRITE_FLUSH(sc);
   8202 		delay(10);
   8203 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
   8204 			data |= 1;
   8205 		CSR_WRITE(sc, WMREG_CTRL, v);
   8206 		CSR_WRITE_FLUSH(sc);
   8207 		delay(10);
   8208 	}
   8209 
   8210 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
   8211 	CSR_WRITE_FLUSH(sc);
   8212 	delay(10);
   8213 	CSR_WRITE(sc, WMREG_CTRL, v);
   8214 	CSR_WRITE_FLUSH(sc);
   8215 	delay(10);
   8216 
   8217 	return data;
   8218 }
   8219 
   8220 #undef MDI_IO
   8221 #undef MDI_DIR
   8222 #undef MDI_CLK
   8223 
   8224 /*
   8225  * wm_gmii_i82543_readreg:	[mii interface function]
   8226  *
   8227  *	Read a PHY register on the GMII (i82543 version).
   8228  */
   8229 static int
   8230 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
   8231 {
   8232 	struct wm_softc *sc = device_private(self);
   8233 	int rv;
   8234 
   8235 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8236 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
   8237 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
   8238 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
   8239 
   8240 	DPRINTF(WM_DEBUG_GMII,
   8241 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
   8242 	    device_xname(sc->sc_dev), phy, reg, rv));
   8243 
   8244 	return rv;
   8245 }
   8246 
   8247 /*
   8248  * wm_gmii_i82543_writereg:	[mii interface function]
   8249  *
   8250  *	Write a PHY register on the GMII (i82543 version).
   8251  */
   8252 static void
   8253 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
   8254 {
   8255 	struct wm_softc *sc = device_private(self);
   8256 
   8257 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
   8258 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
   8259 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
   8260 	    (MII_COMMAND_START << 30), 32);
   8261 }
   8262 
   8263 /*
   8264  * wm_gmii_i82544_readreg:	[mii interface function]
   8265  *
   8266  *	Read a PHY register on the GMII.
   8267  */
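/*
 * On the i82544 and later the MDIO frame is driven by hardware:
 * software writes the opcode, PHY address and register address to
 * MDIC, then polls MDIC_READY.  MDIC_E flags an access error (normal
 * when no PHY answers at that address), in which case 0 is returned.
 */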
   8268 static int
   8269 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
   8270 {
   8271 	struct wm_softc *sc = device_private(self);
   8272 	uint32_t mdic = 0;
   8273 	int i, rv;
   8274 
   8275 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
   8276 	    MDIC_REGADD(reg));
   8277 
   8278 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8279 		mdic = CSR_READ(sc, WMREG_MDIC);
   8280 		if (mdic & MDIC_READY)
   8281 			break;
   8282 		delay(50);
   8283 	}
   8284 
   8285 	if ((mdic & MDIC_READY) == 0) {
   8286 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
   8287 		    device_xname(sc->sc_dev), phy, reg);
   8288 		rv = 0;
   8289 	} else if (mdic & MDIC_E) {
   8290 #if 0 /* This is normal if no PHY is present. */
   8291 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
   8292 		    device_xname(sc->sc_dev), phy, reg);
   8293 #endif
   8294 		rv = 0;
   8295 	} else {
   8296 		rv = MDIC_DATA(mdic);
   8297 		if (rv == 0xffff)
   8298 			rv = 0;
   8299 	}
   8300 
   8301 	return rv;
   8302 }
   8303 
   8304 /*
   8305  * wm_gmii_i82544_writereg:	[mii interface function]
   8306  *
   8307  *	Write a PHY register on the GMII.
   8308  */
   8309 static void
   8310 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
   8311 {
   8312 	struct wm_softc *sc = device_private(self);
   8313 	uint32_t mdic = 0;
   8314 	int i;
   8315 
   8316 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
   8317 	    MDIC_REGADD(reg) | MDIC_DATA(val));
   8318 
   8319 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
   8320 		mdic = CSR_READ(sc, WMREG_MDIC);
   8321 		if (mdic & MDIC_READY)
   8322 			break;
   8323 		delay(50);
   8324 	}
   8325 
   8326 	if ((mdic & MDIC_READY) == 0)
   8327 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
   8328 		    device_xname(sc->sc_dev), phy, reg);
   8329 	else if (mdic & MDIC_E)
   8330 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
   8331 		    device_xname(sc->sc_dev), phy, reg);
   8332 }
   8333 
   8334 /*
   8335  * wm_gmii_i80003_readreg:	[mii interface function]
   8336  *
    8337  *	Read a PHY register on the kumeran bus.
    8338  * This could be handled by the PHY layer if we didn't have to lock the
    8339  * resource ...
   8340  */
   8341 static int
   8342 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
   8343 {
   8344 	struct wm_softc *sc = device_private(self);
   8345 	int sem;
   8346 	int rv;
   8347 
   8348 	if (phy != 1) /* only one PHY on kumeran bus */
   8349 		return 0;
   8350 
   8351 	sem = swfwphysem[sc->sc_funcid];
   8352 	if (wm_get_swfw_semaphore(sc, sem)) {
   8353 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8354 		    __func__);
   8355 		return 0;
   8356 	}
   8357 
   8358 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8359 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8360 		    reg >> GG82563_PAGE_SHIFT);
   8361 	} else {
   8362 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8363 		    reg >> GG82563_PAGE_SHIFT);
   8364 	}
    8365 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8366 	delay(200);
   8367 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8368 	delay(200);
   8369 
   8370 	wm_put_swfw_semaphore(sc, sem);
   8371 	return rv;
   8372 }
   8373 
   8374 /*
   8375  * wm_gmii_i80003_writereg:	[mii interface function]
   8376  *
    8377  *	Write a PHY register on the kumeran bus.
    8378  * This could be handled by the PHY layer if we didn't have to lock the
    8379  * resource ...
   8380  */
   8381 static void
   8382 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
   8383 {
   8384 	struct wm_softc *sc = device_private(self);
   8385 	int sem;
   8386 
   8387 	if (phy != 1) /* only one PHY on kumeran bus */
   8388 		return;
   8389 
   8390 	sem = swfwphysem[sc->sc_funcid];
   8391 	if (wm_get_swfw_semaphore(sc, sem)) {
   8392 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8393 		    __func__);
   8394 		return;
   8395 	}
   8396 
   8397 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
   8398 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
   8399 		    reg >> GG82563_PAGE_SHIFT);
   8400 	} else {
   8401 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
   8402 		    reg >> GG82563_PAGE_SHIFT);
   8403 	}
    8404 	/* Wait another 200us to work around a bug in the MDIC ready bit */
   8405 	delay(200);
   8406 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8407 	delay(200);
   8408 
   8409 	wm_put_swfw_semaphore(sc, sem);
   8410 }
   8411 
   8412 /*
   8413  * wm_gmii_bm_readreg:	[mii interface function]
   8414  *
    8415  *	Read a PHY register on the BM PHY.
    8416  * This could be handled by the PHY layer if we didn't have to lock the
    8417  * resource ...
   8418  */
   8419 static int
   8420 wm_gmii_bm_readreg(device_t self, int phy, int reg)
   8421 {
   8422 	struct wm_softc *sc = device_private(self);
   8423 	int sem;
   8424 	int rv;
   8425 
   8426 	sem = swfwphysem[sc->sc_funcid];
   8427 	if (wm_get_swfw_semaphore(sc, sem)) {
   8428 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8429 		    __func__);
   8430 		return 0;
   8431 	}
   8432 
   8433 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8434 		if (phy == 1)
   8435 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8436 			    reg);
   8437 		else
   8438 			wm_gmii_i82544_writereg(self, phy,
   8439 			    GG82563_PHY_PAGE_SELECT,
   8440 			    reg >> GG82563_PAGE_SHIFT);
   8441 	}
   8442 
   8443 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
   8444 	wm_put_swfw_semaphore(sc, sem);
   8445 	return rv;
   8446 }
   8447 
   8448 /*
   8449  * wm_gmii_bm_writereg:	[mii interface function]
   8450  *
    8451  *	Write a PHY register on the BM PHY.
    8452  * This could be handled by the PHY layer if we didn't have to lock the
    8453  * resource ...
   8454  */
   8455 static void
   8456 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
   8457 {
   8458 	struct wm_softc *sc = device_private(self);
   8459 	int sem;
   8460 
   8461 	sem = swfwphysem[sc->sc_funcid];
   8462 	if (wm_get_swfw_semaphore(sc, sem)) {
   8463 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8464 		    __func__);
   8465 		return;
   8466 	}
   8467 
   8468 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
   8469 		if (phy == 1)
   8470 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
   8471 			    reg);
   8472 		else
   8473 			wm_gmii_i82544_writereg(self, phy,
   8474 			    GG82563_PHY_PAGE_SELECT,
   8475 			    reg >> GG82563_PAGE_SHIFT);
   8476 	}
   8477 
   8478 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
   8479 	wm_put_swfw_semaphore(sc, sem);
   8480 }
   8481 
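/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Page 800 is
 *	not reachable directly: select page 769 and set the enable bit
 *	in BM_WUC_ENABLE_REG, then select page 800, write the register
 *	number through the address opcode, move the data through the
 *	data opcode ('rd' non-zero means read), and finally restore the
 *	page 769 enable register.
 */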
   8482 static void
   8483 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
   8484 {
   8485 	struct wm_softc *sc = device_private(self);
   8486 	uint16_t regnum = BM_PHY_REG_NUM(offset);
   8487 	uint16_t wuce;
   8488 
   8489 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
   8490 	if (sc->sc_type == WM_T_PCH) {
    8491 		/* XXX The e1000 driver does nothing here... why? */
   8492 	}
   8493 
   8494 	/* Set page 769 */
   8495 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8496 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8497 
   8498 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
   8499 
   8500 	wuce &= ~BM_WUC_HOST_WU_BIT;
   8501 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
   8502 	    wuce | BM_WUC_ENABLE_BIT);
   8503 
   8504 	/* Select page 800 */
   8505 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8506 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
   8507 
   8508 	/* Write page 800 */
   8509 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
   8510 
   8511 	if (rd)
   8512 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
   8513 	else
   8514 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
   8515 
   8516 	/* Set page 769 */
   8517 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8518 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
   8519 
   8520 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
   8521 }
   8522 
   8523 /*
   8524  * wm_gmii_hv_readreg:	[mii interface function]
   8525  *
    8526  *	Read a PHY register on the HV (PCH family) PHY.
    8527  * This could be handled by the PHY layer if we didn't have to lock the
    8528  * resource ...
   8529  */
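/*
 * HV/BM register arguments encode both a page and an offset
 * (BM_PHY_REG_PAGE/BM_PHY_REG_NUM).  Page 800 (wakeup control) and
 * pages below HV_INTC_FC_PAGE_START need special handling; all other
 * pages are reached by writing the page to MII_IGPHY_PAGE_SELECT and
 * issuing a normal MDIC access.
 */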
   8530 static int
   8531 wm_gmii_hv_readreg(device_t self, int phy, int reg)
   8532 {
   8533 	struct wm_softc *sc = device_private(self);
   8534 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8535 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8536 	uint16_t val;
   8537 	int rv;
   8538 
   8539 	if (wm_get_swfwhw_semaphore(sc)) {
   8540 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8541 		    __func__);
   8542 		return 0;
   8543 	}
   8544 
   8545 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8546 	if (sc->sc_phytype == WMPHY_82577) {
   8547 		/* XXX must write */
   8548 	}
   8549 
   8550 	/* Page 800 works differently than the rest so it has its own func */
   8551 	if (page == BM_WUC_PAGE) {
   8552 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
   8553 		return val;
   8554 	}
   8555 
   8556 	/*
    8557 	 * Pages lower than 768 work differently than the rest, so they
    8558 	 * would need their own access function (not implemented here)
   8559 	 */
   8560 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8561 		printf("gmii_hv_readreg!!!\n");
   8562 		return 0;
   8563 	}
   8564 
   8565 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8566 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8567 		    page << BME1000_PAGE_SHIFT);
   8568 	}
   8569 
   8570 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
   8571 	wm_put_swfwhw_semaphore(sc);
   8572 	return rv;
   8573 }
   8574 
   8575 /*
   8576  * wm_gmii_hv_writereg:	[mii interface function]
   8577  *
    8578  *	Write a PHY register on the HV (PCH family) PHY.
    8579  * This could be handled by the PHY layer if we didn't have to lock the
    8580  * resource ...
   8581  */
   8582 static void
   8583 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
   8584 {
   8585 	struct wm_softc *sc = device_private(self);
   8586 	uint16_t page = BM_PHY_REG_PAGE(reg);
   8587 	uint16_t regnum = BM_PHY_REG_NUM(reg);
   8588 
   8589 	if (wm_get_swfwhw_semaphore(sc)) {
   8590 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8591 		    __func__);
   8592 		return;
   8593 	}
   8594 
   8595 	/* XXX Workaround failure in MDIO access while cable is disconnected */
   8596 
   8597 	/* Page 800 works differently than the rest so it has its own func */
   8598 	if (page == BM_WUC_PAGE) {
   8599 		uint16_t tmp;
   8600 
   8601 		tmp = val;
   8602 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
   8603 		return;
   8604 	}
   8605 
   8606 	/*
    8607 	 * Pages lower than 768 work differently than the rest, so they
    8608 	 * would need their own access function (not implemented here)
   8609 	 */
   8610 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
   8611 		printf("gmii_hv_writereg!!!\n");
   8612 		return;
   8613 	}
   8614 
   8615 	/*
   8616 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
   8617 	 * Power Down (whenever bit 11 of the PHY control register is set)
   8618 	 */
   8619 
   8620 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
   8621 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
   8622 		    page << BME1000_PAGE_SHIFT);
   8623 	}
   8624 
   8625 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
   8626 	wm_put_swfwhw_semaphore(sc);
   8627 }
   8628 
   8629 /*
   8630  * wm_gmii_82580_readreg:	[mii interface function]
   8631  *
   8632  *	Read a PHY register on the 82580 and I350.
   8633  * This could be handled by the PHY layer if we didn't have to lock the
    8634  * resource ...
   8635  */
   8636 static int
   8637 wm_gmii_82580_readreg(device_t self, int phy, int reg)
   8638 {
   8639 	struct wm_softc *sc = device_private(self);
   8640 	int sem;
   8641 	int rv;
   8642 
   8643 	sem = swfwphysem[sc->sc_funcid];
   8644 	if (wm_get_swfw_semaphore(sc, sem)) {
   8645 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8646 		    __func__);
   8647 		return 0;
   8648 	}
   8649 
   8650 	rv = wm_gmii_i82544_readreg(self, phy, reg);
   8651 
   8652 	wm_put_swfw_semaphore(sc, sem);
   8653 	return rv;
   8654 }
   8655 
   8656 /*
   8657  * wm_gmii_82580_writereg:	[mii interface function]
   8658  *
   8659  *	Write a PHY register on the 82580 and I350.
   8660  * This could be handled by the PHY layer if we didn't have to lock the
    8661  * resource ...
   8662  */
   8663 static void
   8664 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
   8665 {
   8666 	struct wm_softc *sc = device_private(self);
   8667 	int sem;
   8668 
   8669 	sem = swfwphysem[sc->sc_funcid];
   8670 	if (wm_get_swfw_semaphore(sc, sem)) {
   8671 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8672 		    __func__);
   8673 		return;
   8674 	}
   8675 
   8676 	wm_gmii_i82544_writereg(self, phy, reg, val);
   8677 
   8678 	wm_put_swfw_semaphore(sc, sem);
   8679 }
   8680 
   8681 /*
   8682  * wm_gmii_gs40g_readreg:	[mii interface function]
   8683  *
    8684  *	Read a PHY register on the I210 and I211.
    8685  * This could be handled by the PHY layer if we didn't have to lock the
    8686  * resource ...
   8687  */
   8688 static int
   8689 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
   8690 {
   8691 	struct wm_softc *sc = device_private(self);
   8692 	int sem;
   8693 	int page, offset;
   8694 	int rv;
   8695 
   8696 	/* Acquire semaphore */
   8697 	sem = swfwphysem[sc->sc_funcid];
   8698 	if (wm_get_swfw_semaphore(sc, sem)) {
   8699 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8700 		    __func__);
   8701 		return 0;
   8702 	}
   8703 
   8704 	/* Page select */
   8705 	page = reg >> GS40G_PAGE_SHIFT;
   8706 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8707 
   8708 	/* Read reg */
   8709 	offset = reg & GS40G_OFFSET_MASK;
   8710 	rv = wm_gmii_i82544_readreg(self, phy, offset);
   8711 
   8712 	wm_put_swfw_semaphore(sc, sem);
   8713 	return rv;
   8714 }
   8715 
   8716 /*
   8717  * wm_gmii_gs40g_writereg:	[mii interface function]
   8718  *
   8719  *	Write a PHY register on the I210 and I211.
   8720  * This could be handled by the PHY layer if we didn't have to lock the
    8721  * resource ...
   8722  */
   8723 static void
   8724 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
   8725 {
   8726 	struct wm_softc *sc = device_private(self);
   8727 	int sem;
   8728 	int page, offset;
   8729 
   8730 	/* Acquire semaphore */
   8731 	sem = swfwphysem[sc->sc_funcid];
   8732 	if (wm_get_swfw_semaphore(sc, sem)) {
   8733 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8734 		    __func__);
   8735 		return;
   8736 	}
   8737 
   8738 	/* Page select */
   8739 	page = reg >> GS40G_PAGE_SHIFT;
   8740 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
   8741 
   8742 	/* Write reg */
   8743 	offset = reg & GS40G_OFFSET_MASK;
   8744 	wm_gmii_i82544_writereg(self, phy, offset, val);
   8745 
   8746 	/* Release semaphore */
   8747 	wm_put_swfw_semaphore(sc, sem);
   8748 }
   8749 
   8750 /*
   8751  * wm_gmii_statchg:	[mii interface function]
   8752  *
   8753  *	Callback from MII layer when media changes.
   8754  */
   8755 static void
   8756 wm_gmii_statchg(struct ifnet *ifp)
   8757 {
   8758 	struct wm_softc *sc = ifp->if_softc;
   8759 	struct mii_data *mii = &sc->sc_mii;
   8760 
   8761 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
   8762 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   8763 	sc->sc_fcrtl &= ~FCRTL_XONE;
   8764 
   8765 	/*
   8766 	 * Get flow control negotiation result.
   8767 	 */
   8768 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
   8769 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
   8770 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
   8771 		mii->mii_media_active &= ~IFM_ETH_FMASK;
   8772 	}
   8773 
   8774 	if (sc->sc_flowflags & IFM_FLOW) {
   8775 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
   8776 			sc->sc_ctrl |= CTRL_TFCE;
   8777 			sc->sc_fcrtl |= FCRTL_XONE;
   8778 		}
   8779 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
   8780 			sc->sc_ctrl |= CTRL_RFCE;
   8781 	}
   8782 
   8783 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
   8784 		DPRINTF(WM_DEBUG_LINK,
   8785 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
   8786 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   8787 	} else {
   8788 		DPRINTF(WM_DEBUG_LINK,
   8789 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
   8790 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   8791 	}
   8792 
   8793 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   8794 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   8795 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
   8796 						 : WMREG_FCRTL, sc->sc_fcrtl);
   8797 	if (sc->sc_type == WM_T_80003) {
   8798 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
   8799 		case IFM_1000_T:
   8800 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8801 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
   8802 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
   8803 			break;
   8804 		default:
   8805 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
   8806 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
   8807 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
   8808 			break;
   8809 		}
   8810 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
   8811 	}
   8812 }
   8813 
   8814 /*
   8815  * wm_kmrn_readreg:
   8816  *
   8817  *	Read a kumeran register
   8818  */
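/*
 * Kumeran registers live behind KUMCTRLSTA: the register offset is
 * shifted into the offset field and, for reads, KUMCTRLSTA_REN is set;
 * after a short delay the value appears in the low bits.  Depending on
 * sc_flags, the SWFW or EXTCNF semaphore serializes the access, e.g.:
 *
 *	val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
 */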
   8819 static int
   8820 wm_kmrn_readreg(struct wm_softc *sc, int reg)
   8821 {
   8822 	int rv;
   8823 
   8824 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8825 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8826 			aprint_error_dev(sc->sc_dev,
   8827 			    "%s: failed to get semaphore\n", __func__);
   8828 			return 0;
   8829 		}
   8830 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8831 		if (wm_get_swfwhw_semaphore(sc)) {
   8832 			aprint_error_dev(sc->sc_dev,
   8833 			    "%s: failed to get semaphore\n", __func__);
   8834 			return 0;
   8835 		}
   8836 	}
   8837 
   8838 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8839 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8840 	    KUMCTRLSTA_REN);
   8841 	CSR_WRITE_FLUSH(sc);
   8842 	delay(2);
   8843 
   8844 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
   8845 
   8846 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8847 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8848 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8849 		wm_put_swfwhw_semaphore(sc);
   8850 
   8851 	return rv;
   8852 }
   8853 
   8854 /*
   8855  * wm_kmrn_writereg:
   8856  *
   8857  *	Write a kumeran register
   8858  */
   8859 static void
   8860 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
   8861 {
   8862 
   8863 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
   8864 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
   8865 			aprint_error_dev(sc->sc_dev,
   8866 			    "%s: failed to get semaphore\n", __func__);
   8867 			return;
   8868 		}
   8869 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   8870 		if (wm_get_swfwhw_semaphore(sc)) {
   8871 			aprint_error_dev(sc->sc_dev,
   8872 			    "%s: failed to get semaphore\n", __func__);
   8873 			return;
   8874 		}
   8875 	}
   8876 
   8877 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
   8878 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
   8879 	    (val & KUMCTRLSTA_MASK));
   8880 
   8881 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   8882 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
   8883 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   8884 		wm_put_swfwhw_semaphore(sc);
   8885 }
   8886 
   8887 /* SGMII related */
   8888 
   8889 /*
   8890  * wm_sgmii_uses_mdio
   8891  *
   8892  * Check whether the transaction is to the internal PHY or the external
   8893  * MDIO interface. Return true if it's MDIO.
   8894  */
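/*
 * On the 82575/82576 the destination bit lives in MDIC (MDIC_DEST);
 * on the 82580 and later it moved to MDICNFG (MDICNFG_DEST).
 */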
   8895 static bool
   8896 wm_sgmii_uses_mdio(struct wm_softc *sc)
   8897 {
   8898 	uint32_t reg;
   8899 	bool ismdio = false;
   8900 
   8901 	switch (sc->sc_type) {
   8902 	case WM_T_82575:
   8903 	case WM_T_82576:
   8904 		reg = CSR_READ(sc, WMREG_MDIC);
   8905 		ismdio = ((reg & MDIC_DEST) != 0);
   8906 		break;
   8907 	case WM_T_82580:
   8908 	case WM_T_I350:
   8909 	case WM_T_I354:
   8910 	case WM_T_I210:
   8911 	case WM_T_I211:
   8912 		reg = CSR_READ(sc, WMREG_MDICNFG);
   8913 		ismdio = ((reg & MDICNFG_DEST) != 0);
   8914 		break;
   8915 	default:
   8916 		break;
   8917 	}
   8918 
   8919 	return ismdio;
   8920 }
   8921 
   8922 /*
   8923  * wm_sgmii_readreg:	[mii interface function]
   8924  *
    8925  *	Read a PHY register on the SGMII.
    8926  * This could be handled by the PHY layer if we didn't have to lock the
    8927  * resource ...
   8928  */
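/*
 * External SGMII PHYs are reached through the I2CCMD register: the
 * register and PHY addresses plus a read/write opcode are written to
 * I2CCMD and I2CCMD_READY is polled.  Data crosses the I2C interface
 * byte-swapped, so both the read and write paths swap the two data
 * bytes.
 */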
   8929 static int
   8930 wm_sgmii_readreg(device_t self, int phy, int reg)
   8931 {
   8932 	struct wm_softc *sc = device_private(self);
   8933 	uint32_t i2ccmd;
   8934 	int i, rv;
   8935 
   8936 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8937 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8938 		    __func__);
   8939 		return 0;
   8940 	}
   8941 
   8942 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8943 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8944 	    | I2CCMD_OPCODE_READ;
   8945 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8946 
   8947 	/* Poll the ready bit */
   8948 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8949 		delay(50);
   8950 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8951 		if (i2ccmd & I2CCMD_READY)
   8952 			break;
   8953 	}
   8954 	if ((i2ccmd & I2CCMD_READY) == 0)
   8955 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
   8956 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   8957 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   8958 
   8959 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
   8960 
   8961 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   8962 	return rv;
   8963 }
   8964 
   8965 /*
   8966  * wm_sgmii_writereg:	[mii interface function]
   8967  *
   8968  *	Write a PHY register on the SGMII.
   8969  * This could be handled by the PHY layer if we didn't have to lock the
    8970  * resource ...
   8971  */
   8972 static void
   8973 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
   8974 {
   8975 	struct wm_softc *sc = device_private(self);
   8976 	uint32_t i2ccmd;
   8977 	int i;
   8978 	int val_swapped;
   8979 
   8980 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
   8981 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   8982 		    __func__);
   8983 		return;
   8984 	}
   8985 	/* Swap the data bytes for the I2C interface */
   8986 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
   8987 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
   8988 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
   8989 	    | I2CCMD_OPCODE_WRITE | val_swapped;
   8990 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   8991 
   8992 	/* Poll the ready bit */
   8993 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   8994 		delay(50);
   8995 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   8996 		if (i2ccmd & I2CCMD_READY)
   8997 			break;
   8998 	}
   8999 	if ((i2ccmd & I2CCMD_READY) == 0)
   9000 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
   9001 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9002 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
   9003 
    9004 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
   9005 }
   9006 
   9007 /* TBI related */
   9008 
   9009 /*
   9010  * wm_tbi_mediainit:
   9011  *
   9012  *	Initialize media for use on 1000BASE-X devices.
   9013  */
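/*
 * Classic TBI (older fiber adapters) and the internal SERDES of the
 * 82575 and later share this init path; they differ only in the
 * ifmedia change/status callbacks installed below.
 */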
   9014 static void
   9015 wm_tbi_mediainit(struct wm_softc *sc)
   9016 {
   9017 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9018 	const char *sep = "";
   9019 
   9020 	if (sc->sc_type < WM_T_82543)
   9021 		sc->sc_tipg = TIPG_WM_DFLT;
   9022 	else
   9023 		sc->sc_tipg = TIPG_LG_DFLT;
   9024 
   9025 	sc->sc_tbi_serdes_anegticks = 5;
   9026 
   9027 	/* Initialize our media structures */
   9028 	sc->sc_mii.mii_ifp = ifp;
   9029 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
   9030 
   9031 	if ((sc->sc_type >= WM_T_82575)
   9032 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
   9033 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9034 		    wm_serdes_mediachange, wm_serdes_mediastatus);
   9035 	else
   9036 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
   9037 		    wm_tbi_mediachange, wm_tbi_mediastatus);
   9038 
   9039 	/*
   9040 	 * SWD Pins:
   9041 	 *
   9042 	 *	0 = Link LED (output)
   9043 	 *	1 = Loss Of Signal (input)
   9044 	 */
   9045 	sc->sc_ctrl |= CTRL_SWDPIO(0);
   9046 
   9047 	/* XXX Perhaps this is only for TBI */
   9048 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9049 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
   9050 
   9051 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
   9052 		sc->sc_ctrl &= ~CTRL_LRST;
   9053 
   9054 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9055 
   9056 #define	ADD(ss, mm, dd)							\
   9057 do {									\
   9058 	aprint_normal("%s%s", sep, ss);					\
   9059 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
   9060 	sep = ", ";							\
   9061 } while (/*CONSTCOND*/0)
   9062 
   9063 	aprint_normal_dev(sc->sc_dev, "");
   9064 
   9065 	/* Only 82545 is LX */
   9066 	if (sc->sc_type == WM_T_82545) {
   9067 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
   9068 		ADD("1000baseLX-FDX", IFM_1000_LX|IFM_FDX, ANAR_X_FD);
   9069 	} else {
   9070 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
   9071 		ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
   9072 	}
   9073 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
   9074 	aprint_normal("\n");
   9075 
   9076 #undef ADD
   9077 
   9078 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
   9079 }
   9080 
   9081 /*
   9082  * wm_tbi_mediachange:	[ifmedia interface function]
   9083  *
   9084  *	Set hardware to newly-selected media on a 1000BASE-X device.
   9085  */
   9086 static int
   9087 wm_tbi_mediachange(struct ifnet *ifp)
   9088 {
   9089 	struct wm_softc *sc = ifp->if_softc;
   9090 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9091 	uint32_t status;
   9092 	int i;
   9093 
   9094 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9095 		/* XXX need some work for >= 82571 and < 82575 */
   9096 		if (sc->sc_type < WM_T_82575)
   9097 			return 0;
   9098 	}
   9099 
   9100 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9101 	    || (sc->sc_type >= WM_T_82575))
   9102 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9103 
   9104 	sc->sc_ctrl &= ~CTRL_LRST;
   9105 	sc->sc_txcw = TXCW_ANE;
   9106 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9107 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
   9108 	else if (ife->ifm_media & IFM_FDX)
   9109 		sc->sc_txcw |= TXCW_FD;
   9110 	else
   9111 		sc->sc_txcw |= TXCW_HD;
   9112 
   9113 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
   9114 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
   9115 
   9116 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
   9117 		    device_xname(sc->sc_dev), sc->sc_txcw));
   9118 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9119 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9120 	CSR_WRITE_FLUSH(sc);
   9121 	delay(1000);
   9122 
   9123 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
   9124 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
   9125 
   9126 	/*
    9127 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
    9128 	 * if the optics detect a signal and clear if they don't.
   9129 	 */
   9130 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
   9131 		/* Have signal; wait for the link to come up. */
   9132 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
   9133 			delay(10000);
   9134 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
   9135 				break;
   9136 		}
   9137 
   9138 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
   9139 			    device_xname(sc->sc_dev),i));
   9140 
   9141 		status = CSR_READ(sc, WMREG_STATUS);
   9142 		DPRINTF(WM_DEBUG_LINK,
   9143 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
   9144 			device_xname(sc->sc_dev),status, STATUS_LU));
   9145 		if (status & STATUS_LU) {
   9146 			/* Link is up. */
   9147 			DPRINTF(WM_DEBUG_LINK,
   9148 			    ("%s: LINK: set media -> link up %s\n",
   9149 			    device_xname(sc->sc_dev),
   9150 			    (status & STATUS_FD) ? "FDX" : "HDX"));
   9151 
   9152 			/*
    9153 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
    9154 			 * automatically, so keep sc->sc_ctrl in sync
   9155 			 */
   9156 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
   9157 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
   9158 			sc->sc_fcrtl &= ~FCRTL_XONE;
   9159 			if (status & STATUS_FD)
   9160 				sc->sc_tctl |=
   9161 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
   9162 			else
   9163 				sc->sc_tctl |=
   9164 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
   9165 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
   9166 				sc->sc_fcrtl |= FCRTL_XONE;
   9167 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
   9168 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
   9169 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
   9170 				      sc->sc_fcrtl);
   9171 			sc->sc_tbi_linkup = 1;
   9172 		} else {
   9173 			if (i == WM_LINKUP_TIMEOUT)
   9174 				wm_check_for_link(sc);
   9175 			/* Link is down. */
   9176 			DPRINTF(WM_DEBUG_LINK,
   9177 			    ("%s: LINK: set media -> link down\n",
   9178 			    device_xname(sc->sc_dev)));
   9179 			sc->sc_tbi_linkup = 0;
   9180 		}
   9181 	} else {
   9182 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
   9183 		    device_xname(sc->sc_dev)));
   9184 		sc->sc_tbi_linkup = 0;
   9185 	}
   9186 
   9187 	wm_tbi_serdes_set_linkled(sc);
   9188 
   9189 	return 0;
   9190 }
   9191 
   9192 /*
   9193  * wm_tbi_mediastatus:	[ifmedia interface function]
   9194  *
   9195  *	Get the current interface media status on a 1000BASE-X device.
   9196  */
   9197 static void
   9198 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9199 {
   9200 	struct wm_softc *sc = ifp->if_softc;
   9201 	uint32_t ctrl, status;
   9202 
   9203 	ifmr->ifm_status = IFM_AVALID;
   9204 	ifmr->ifm_active = IFM_ETHER;
   9205 
   9206 	status = CSR_READ(sc, WMREG_STATUS);
   9207 	if ((status & STATUS_LU) == 0) {
   9208 		ifmr->ifm_active |= IFM_NONE;
   9209 		return;
   9210 	}
   9211 
   9212 	ifmr->ifm_status |= IFM_ACTIVE;
   9213 	/* Only 82545 is LX */
   9214 	if (sc->sc_type == WM_T_82545)
   9215 		ifmr->ifm_active |= IFM_1000_LX;
   9216 	else
   9217 		ifmr->ifm_active |= IFM_1000_SX;
   9218 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
   9219 		ifmr->ifm_active |= IFM_FDX;
   9220 	else
   9221 		ifmr->ifm_active |= IFM_HDX;
   9222 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9223 	if (ctrl & CTRL_RFCE)
   9224 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
   9225 	if (ctrl & CTRL_TFCE)
   9226 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
   9227 }
   9228 
   9229 /* XXX TBI only */
   9230 static int
   9231 wm_check_for_link(struct wm_softc *sc)
   9232 {
   9233 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9234 	uint32_t rxcw;
   9235 	uint32_t ctrl;
   9236 	uint32_t status;
   9237 	uint32_t sig;
   9238 
   9239 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
   9240 		/* XXX need some work for >= 82571 */
   9241 		if (sc->sc_type >= WM_T_82571) {
   9242 			sc->sc_tbi_linkup = 1;
   9243 			return 0;
   9244 		}
   9245 	}
   9246 
   9247 	rxcw = CSR_READ(sc, WMREG_RXCW);
   9248 	ctrl = CSR_READ(sc, WMREG_CTRL);
   9249 	status = CSR_READ(sc, WMREG_STATUS);
   9250 
   9251 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
   9252 
   9253 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
   9254 		device_xname(sc->sc_dev), __func__,
   9255 		((ctrl & CTRL_SWDPIN(1)) == sig),
   9256 		((status & STATUS_LU) != 0),
   9257 		((rxcw & RXCW_C) != 0)
   9258 		    ));
   9259 
   9260 	/*
   9261 	 * SWDPIN   LU RXCW
   9262 	 *      0    0    0
   9263 	 *      0    0    1	(should not happen)
   9264 	 *      0    1    0	(should not happen)
   9265 	 *      0    1    1	(should not happen)
   9266 	 *      1    0    0	Disable autonego and force linkup
   9267 	 *      1    0    1	got /C/ but not linkup yet
   9268 	 *      1    1    0	(linkup)
   9269 	 *      1    1    1	If IFM_AUTO, back to autonego
   9270 	 *
   9271 	 */
   9272 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9273 	    && ((status & STATUS_LU) == 0)
   9274 	    && ((rxcw & RXCW_C) == 0)) {
   9275 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
   9276 			__func__));
   9277 		sc->sc_tbi_linkup = 0;
   9278 		/* Disable auto-negotiation in the TXCW register */
   9279 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
   9280 
   9281 		/*
   9282 		 * Force link-up and also force full-duplex.
   9283 		 *
    9284 		 * NOTE: the hardware may have updated TFCE and RFCE in
    9285 		 * CTRL automatically, so keep sc->sc_ctrl in sync
   9286 		 */
   9287 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
   9288 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9289 	} else if (((status & STATUS_LU) != 0)
   9290 	    && ((rxcw & RXCW_C) != 0)
   9291 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
   9292 		sc->sc_tbi_linkup = 1;
   9293 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
   9294 			__func__));
   9295 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9296 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
   9297 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
   9298 	    && ((rxcw & RXCW_C) != 0)) {
   9299 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
   9300 	} else {
   9301 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
   9302 			status));
   9303 	}
   9304 
   9305 	return 0;
   9306 }
   9307 
   9308 /*
   9309  * wm_tbi_tick:
   9310  *
   9311  *	Check the link on TBI devices.
   9312  *	This function acts as mii_tick().
   9313  */
   9314 static void
   9315 wm_tbi_tick(struct wm_softc *sc)
   9316 {
   9317 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
   9318 	struct mii_data *mii = &sc->sc_mii;
   9319 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9320 	uint32_t status;
   9321 
   9322 	KASSERT(WM_TX_LOCKED(txq));
   9323 
   9324 	status = CSR_READ(sc, WMREG_STATUS);
   9325 
   9326 	/* XXX is this needed? */
   9327 	(void)CSR_READ(sc, WMREG_RXCW);
   9328 	(void)CSR_READ(sc, WMREG_CTRL);
   9329 
   9330 	/* set link status */
   9331 	if ((status & STATUS_LU) == 0) {
   9332 		DPRINTF(WM_DEBUG_LINK,
   9333 		    ("%s: LINK: checklink -> down\n",
   9334 			device_xname(sc->sc_dev)));
   9335 		sc->sc_tbi_linkup = 0;
   9336 	} else if (sc->sc_tbi_linkup == 0) {
   9337 		DPRINTF(WM_DEBUG_LINK,
   9338 		    ("%s: LINK: checklink -> up %s\n",
   9339 			device_xname(sc->sc_dev),
   9340 			(status & STATUS_FD) ? "FDX" : "HDX"));
   9341 		sc->sc_tbi_linkup = 1;
   9342 		sc->sc_tbi_serdes_ticks = 0;
   9343 	}
   9344 
   9345 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
   9346 		goto setled;
   9347 
   9348 	if ((status & STATUS_LU) == 0) {
   9349 		sc->sc_tbi_linkup = 0;
   9350 		/* If the timer expired, retry autonegotiation */
   9351 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9352 		    && (++sc->sc_tbi_serdes_ticks
   9353 			>= sc->sc_tbi_serdes_anegticks)) {
   9354 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9355 			sc->sc_tbi_serdes_ticks = 0;
   9356 			/*
   9357 			 * Reset the link, and let autonegotiation do
   9358 			 * its thing
   9359 			 */
   9360 			sc->sc_ctrl |= CTRL_LRST;
   9361 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9362 			CSR_WRITE_FLUSH(sc);
   9363 			delay(1000);
   9364 			sc->sc_ctrl &= ~CTRL_LRST;
   9365 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9366 			CSR_WRITE_FLUSH(sc);
   9367 			delay(1000);
   9368 			CSR_WRITE(sc, WMREG_TXCW,
   9369 			    sc->sc_txcw & ~TXCW_ANE);
   9370 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
   9371 		}
   9372 	}
   9373 
   9374 setled:
   9375 	wm_tbi_serdes_set_linkled(sc);
   9376 }
   9377 
   9378 /* SERDES related */
   9379 static void
   9380 wm_serdes_power_up_link_82575(struct wm_softc *sc)
   9381 {
   9382 	uint32_t reg;
   9383 
   9384 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
   9385 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
   9386 		return;
   9387 
   9388 	reg = CSR_READ(sc, WMREG_PCS_CFG);
   9389 	reg |= PCS_CFG_PCS_EN;
   9390 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
   9391 
   9392 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
   9393 	reg &= ~CTRL_EXT_SWDPIN(3);
   9394 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   9395 	CSR_WRITE_FLUSH(sc);
   9396 }
   9397 
   9398 static int
   9399 wm_serdes_mediachange(struct ifnet *ifp)
   9400 {
   9401 	struct wm_softc *sc = ifp->if_softc;
   9402 	bool pcs_autoneg = true; /* XXX */
   9403 	uint32_t ctrl_ext, pcs_lctl, reg;
   9404 
   9405 	/* XXX Currently, this function is not called on 8257[12] */
   9406 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
   9407 	    || (sc->sc_type >= WM_T_82575))
   9408 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
   9409 
   9410 	wm_serdes_power_up_link_82575(sc);
   9411 
   9412 	sc->sc_ctrl |= CTRL_SLU;
   9413 
   9414 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
   9415 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
   9416 
   9417 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9418 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
   9419 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
   9420 	case CTRL_EXT_LINK_MODE_SGMII:
   9421 		pcs_autoneg = true;
   9422 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
   9423 		break;
   9424 	case CTRL_EXT_LINK_MODE_1000KX:
   9425 		pcs_autoneg = false;
   9426 		/* FALLTHROUGH */
   9427 	default:
   9428 		if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)){
   9429 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
   9430 				pcs_autoneg = false;
   9431 		}
   9432 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
   9433 		    | CTRL_FRCFDX;
   9434 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
   9435 	}
   9436 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   9437 
   9438 	if (pcs_autoneg) {
   9439 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
   9440 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
   9441 
   9442 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
   9443 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
   9444 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
   9445 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
   9446 	} else
   9447 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
   9448 
   9449 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
    9450 
   9452 	return 0;
   9453 }
   9454 
   9455 static void
   9456 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
   9457 {
   9458 	struct wm_softc *sc = ifp->if_softc;
   9459 	struct mii_data *mii = &sc->sc_mii;
   9460 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
   9461 	uint32_t pcs_adv, pcs_lpab, reg;
   9462 
   9463 	ifmr->ifm_status = IFM_AVALID;
   9464 	ifmr->ifm_active = IFM_ETHER;
   9465 
   9466 	/* Check PCS */
   9467 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9468 	if ((reg & PCS_LSTS_LINKOK) == 0) {
   9469 		ifmr->ifm_active |= IFM_NONE;
   9470 		sc->sc_tbi_linkup = 0;
   9471 		goto setled;
   9472 	}
   9473 
   9474 	sc->sc_tbi_linkup = 1;
   9475 	ifmr->ifm_status |= IFM_ACTIVE;
   9476 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
   9477 	if ((reg & PCS_LSTS_FDX) != 0)
   9478 		ifmr->ifm_active |= IFM_FDX;
   9479 	else
   9480 		ifmr->ifm_active |= IFM_HDX;
   9481 	mii->mii_media_active &= ~IFM_ETH_FMASK;
   9482 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
   9483 		/* Check flow */
   9484 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9485 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
   9486 			printf("XXX LINKOK but not ACOMP\n");
   9487 			goto setled;
   9488 		}
   9489 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
   9490 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
    9491 		printf("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab);
   9492 		if ((pcs_adv & TXCW_SYM_PAUSE)
   9493 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
   9494 			mii->mii_media_active |= IFM_FLOW
   9495 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
   9496 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
   9497 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9498 		    && (pcs_lpab & TXCW_SYM_PAUSE)
   9499 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9500 			mii->mii_media_active |= IFM_FLOW
   9501 			    | IFM_ETH_TXPAUSE;
   9502 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
   9503 		    && (pcs_adv & TXCW_ASYM_PAUSE)
   9504 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
   9505 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
   9506 			mii->mii_media_active |= IFM_FLOW
   9507 			    | IFM_ETH_RXPAUSE;
   9508 		} else {
   9509 		}
   9510 	}
   9511 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
   9512 	    | (mii->mii_media_active & IFM_ETH_FMASK);
   9513 setled:
   9514 	wm_tbi_serdes_set_linkled(sc);
   9515 }
   9516 
   9517 /*
   9518  * wm_serdes_tick:
   9519  *
   9520  *	Check the link on serdes devices.
   9521  */
   9522 static void
   9523 wm_serdes_tick(struct wm_softc *sc)
   9524 {
   9525 	struct wm_txqueue *txq __diagused = &sc->sc_txq[0];
   9526 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
   9527 	struct mii_data *mii = &sc->sc_mii;
   9528 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
   9529 	uint32_t reg;
   9530 
   9531 	KASSERT(WM_TX_LOCKED(txq));
   9532 
   9533 	mii->mii_media_status = IFM_AVALID;
   9534 	mii->mii_media_active = IFM_ETHER;
   9535 
   9536 	/* Check PCS */
   9537 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
   9538 	if ((reg & PCS_LSTS_LINKOK) != 0) {
   9539 		mii->mii_media_status |= IFM_ACTIVE;
   9540 		sc->sc_tbi_linkup = 1;
   9541 		sc->sc_tbi_serdes_ticks = 0;
   9542 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
   9543 		if ((reg & PCS_LSTS_FDX) != 0)
   9544 			mii->mii_media_active |= IFM_FDX;
   9545 		else
   9546 			mii->mii_media_active |= IFM_HDX;
   9547 	} else {
    9548 		mii->mii_media_active |= IFM_NONE;
    9549 		sc->sc_tbi_linkup = 0;
    9550 		/* If the timer expired, retry autonegotiation */
   9551 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
   9552 		    && (++sc->sc_tbi_serdes_ticks
   9553 			>= sc->sc_tbi_serdes_anegticks)) {
   9554 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
   9555 			sc->sc_tbi_serdes_ticks = 0;
   9556 			/* XXX */
   9557 			wm_serdes_mediachange(ifp);
   9558 		}
   9559 	}
   9560 
   9561 	wm_tbi_serdes_set_linkled(sc);
   9562 }
   9563 
   9564 /* SFP related */
   9565 
   9566 static int
   9567 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
   9568 {
   9569 	uint32_t i2ccmd;
   9570 	int i;
   9571 
   9572 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
   9573 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
   9574 
   9575 	/* Poll the ready bit */
   9576 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
   9577 		delay(50);
   9578 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
   9579 		if (i2ccmd & I2CCMD_READY)
   9580 			break;
   9581 	}
   9582 	if ((i2ccmd & I2CCMD_READY) == 0)
   9583 		return -1;
   9584 	if ((i2ccmd & I2CCMD_ERROR) != 0)
   9585 		return -1;
   9586 
   9587 	*data = i2ccmd & 0x00ff;
   9588 
   9589 	return 0;
   9590 }
   9591 
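/*
 * wm_sfp_get_media_type:
 *
 *	Identify the media type from the SFP module's SFF EEPROM: the
 *	identifier byte distinguishes soldered-down modules from
 *	pluggable SFPs, and the Ethernet compliance flags pick the
 *	media: 1000BASE-SX/LX maps to SERDES, 1000BASE-T to SGMII
 *	copper, and 100BASE-FX to SERDES with the SGMII flag set.
 */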
   9592 static uint32_t
   9593 wm_sfp_get_media_type(struct wm_softc *sc)
   9594 {
   9595 	uint32_t ctrl_ext;
   9596 	uint8_t val = 0;
   9597 	int timeout = 3;
   9598 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
   9599 	int rv = -1;
   9600 
   9601 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
   9602 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
   9603 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
   9604 	CSR_WRITE_FLUSH(sc);
   9605 
   9606 	/* Read SFP module data */
   9607 	while (timeout) {
   9608 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
   9609 		if (rv == 0)
   9610 			break;
   9611 		delay(100*1000); /* XXX too big */
   9612 		timeout--;
   9613 	}
   9614 	if (rv != 0)
   9615 		goto out;
   9616 	switch (val) {
   9617 	case SFF_SFP_ID_SFF:
   9618 		aprint_normal_dev(sc->sc_dev,
   9619 		    "Module/Connector soldered to board\n");
   9620 		break;
   9621 	case SFF_SFP_ID_SFP:
   9622 		aprint_normal_dev(sc->sc_dev, "SFP\n");
   9623 		break;
   9624 	case SFF_SFP_ID_UNKNOWN:
   9625 		goto out;
   9626 	default:
   9627 		break;
   9628 	}
   9629 
   9630 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
   9631 	if (rv != 0) {
   9632 		goto out;
   9633 	}
   9634 
   9635 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
   9636 		mediatype = WM_MEDIATYPE_SERDES;
   9637 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
   9638 		sc->sc_flags |= WM_F_SGMII;
   9639 		mediatype = WM_MEDIATYPE_COPPER;
   9640 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
   9641 		sc->sc_flags |= WM_F_SGMII;
   9642 		mediatype = WM_MEDIATYPE_SERDES;
   9643 	}
   9644 
   9645 out:
   9646 	/* Restore I2C interface setting */
   9647 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
   9648 
   9649 	return mediatype;
   9650 }
   9651 /*
   9652  * NVM related.
   9653  * Microwire, SPI (w/wo EERD) and Flash.
   9654  */
   9655 
   9656 /* Both spi and uwire */
   9657 
   9658 /*
   9659  * wm_eeprom_sendbits:
   9660  *
   9661  *	Send a series of bits to the EEPROM.
   9662  */
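/*
 * Bits are shifted out MSB first on EECD_DI; each bit is latched by
 * raising and lowering EECD_SK with 2us settling delays.
 */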
   9663 static void
   9664 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
   9665 {
   9666 	uint32_t reg;
   9667 	int x;
   9668 
   9669 	reg = CSR_READ(sc, WMREG_EECD);
   9670 
   9671 	for (x = nbits; x > 0; x--) {
   9672 		if (bits & (1U << (x - 1)))
   9673 			reg |= EECD_DI;
   9674 		else
   9675 			reg &= ~EECD_DI;
   9676 		CSR_WRITE(sc, WMREG_EECD, reg);
   9677 		CSR_WRITE_FLUSH(sc);
   9678 		delay(2);
   9679 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9680 		CSR_WRITE_FLUSH(sc);
   9681 		delay(2);
   9682 		CSR_WRITE(sc, WMREG_EECD, reg);
   9683 		CSR_WRITE_FLUSH(sc);
   9684 		delay(2);
   9685 	}
   9686 }
   9687 
   9688 /*
   9689  * wm_eeprom_recvbits:
   9690  *
   9691  *	Receive a series of bits from the EEPROM.
   9692  */
   9693 static void
   9694 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
   9695 {
   9696 	uint32_t reg, val;
   9697 	int x;
   9698 
   9699 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
   9700 
   9701 	val = 0;
   9702 	for (x = nbits; x > 0; x--) {
   9703 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
   9704 		CSR_WRITE_FLUSH(sc);
   9705 		delay(2);
   9706 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
   9707 			val |= (1U << (x - 1));
   9708 		CSR_WRITE(sc, WMREG_EECD, reg);
   9709 		CSR_WRITE_FLUSH(sc);
   9710 		delay(2);
   9711 	}
   9712 	*valp = val;
   9713 }
   9714 
   9715 /* Microwire */
   9716 
   9717 /*
   9718  * wm_nvm_read_uwire:
   9719  *
   9720  *	Read a word from the EEPROM using the MicroWire protocol.
   9721  */
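/*
 * Each word is read with a full chip-select cycle: raise EECD_CS,
 * shift out the 3-bit READ opcode and the address (sc_nvm_addrbits
 * wide), shift in 16 data bits, then drop EECD_CS again.
 */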
   9722 static int
   9723 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9724 {
   9725 	uint32_t reg, val;
   9726 	int i;
   9727 
   9728 	for (i = 0; i < wordcnt; i++) {
   9729 		/* Clear SK and DI. */
   9730 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
   9731 		CSR_WRITE(sc, WMREG_EECD, reg);
   9732 
   9733 		/*
   9734 		 * XXX: workaround for a bug in qemu-0.12.x and prior
   9735 		 * and Xen.
   9736 		 *
   9737 		 * We use this workaround only for 82540 because qemu's
   9738 		 * e1000 act as 82540.
   9739 		 */
   9740 		if (sc->sc_type == WM_T_82540) {
   9741 			reg |= EECD_SK;
   9742 			CSR_WRITE(sc, WMREG_EECD, reg);
   9743 			reg &= ~EECD_SK;
   9744 			CSR_WRITE(sc, WMREG_EECD, reg);
   9745 			CSR_WRITE_FLUSH(sc);
   9746 			delay(2);
   9747 		}
   9748 		/* XXX: end of workaround */
   9749 
   9750 		/* Set CHIP SELECT. */
   9751 		reg |= EECD_CS;
   9752 		CSR_WRITE(sc, WMREG_EECD, reg);
   9753 		CSR_WRITE_FLUSH(sc);
   9754 		delay(2);
   9755 
   9756 		/* Shift in the READ command. */
   9757 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
   9758 
   9759 		/* Shift in address. */
   9760 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
   9761 
   9762 		/* Shift out the data. */
   9763 		wm_eeprom_recvbits(sc, &val, 16);
   9764 		data[i] = val & 0xffff;
   9765 
   9766 		/* Clear CHIP SELECT. */
   9767 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
   9768 		CSR_WRITE(sc, WMREG_EECD, reg);
   9769 		CSR_WRITE_FLUSH(sc);
   9770 		delay(2);
   9771 	}
   9772 
   9773 	return 0;
   9774 }
   9775 
   9776 /* SPI */
   9777 
   9778 /*
   9779  * Set SPI and FLASH related information from the EECD register.
   9780  * For 82541 and 82547, the word size is taken from EEPROM.
   9781  */
   9782 static int
   9783 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
   9784 {
   9785 	int size;
   9786 	uint32_t reg;
   9787 	uint16_t data;
   9788 
   9789 	reg = CSR_READ(sc, WMREG_EECD);
   9790 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
   9791 
   9792 	/* Read the size of NVM from EECD by default */
   9793 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9794 	switch (sc->sc_type) {
   9795 	case WM_T_82541:
   9796 	case WM_T_82541_2:
   9797 	case WM_T_82547:
   9798 	case WM_T_82547_2:
    9799 		/* Set a dummy wordsize so we can read the EEPROM size word */
   9800 		sc->sc_nvm_wordsize = 64;
   9801 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
   9802 		reg = data;
   9803 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
   9804 		if (size == 0)
    9805 			size = 6; /* 64 words */
   9806 		else
   9807 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
   9808 		break;
   9809 	case WM_T_80003:
   9810 	case WM_T_82571:
   9811 	case WM_T_82572:
   9812 	case WM_T_82573: /* SPI case */
   9813 	case WM_T_82574: /* SPI case */
   9814 	case WM_T_82583: /* SPI case */
   9815 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9816 		if (size > 14)
   9817 			size = 14;
   9818 		break;
   9819 	case WM_T_82575:
   9820 	case WM_T_82576:
   9821 	case WM_T_82580:
   9822 	case WM_T_I350:
   9823 	case WM_T_I354:
   9824 	case WM_T_I210:
   9825 	case WM_T_I211:
   9826 		size += NVM_WORD_SIZE_BASE_SHIFT;
   9827 		if (size > 15)
   9828 			size = 15;
   9829 		break;
   9830 	default:
   9831 		aprint_error_dev(sc->sc_dev,
   9832 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
   9833 		return -1;
   9835 	}
   9836 
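	/*
	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
	 * the Intel documentation): an EECD size field of 2 on an 82571
	 * gives size = 2 + 6 = 8, i.e. a wordsize of 1 << 8 = 256 words.
	 */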
   9837 	sc->sc_nvm_wordsize = 1 << size;
   9838 
   9839 	return 0;
   9840 }
   9841 
   9842 /*
   9843  * wm_nvm_ready_spi:
   9844  *
   9845  *	Wait for a SPI EEPROM to be ready for commands.
   9846  */
   9847 static int
   9848 wm_nvm_ready_spi(struct wm_softc *sc)
   9849 {
   9850 	uint32_t val;
   9851 	int usec;
   9852 
   9853 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
   9854 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
   9855 		wm_eeprom_recvbits(sc, &val, 8);
   9856 		if ((val & SPI_SR_RDY) == 0)
   9857 			break;
   9858 	}
   9859 	if (usec >= SPI_MAX_RETRIES) {
   9860 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
   9861 		return 1;
   9862 	}
   9863 	return 0;
   9864 }
   9865 
   9866 /*
   9867  * wm_nvm_read_spi:
   9868  *
    9869  *	Read a word from the EEPROM using the SPI protocol.
   9870  */
   9871 static int
   9872 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   9873 {
   9874 	uint32_t reg, val;
   9875 	int i;
   9876 	uint8_t opc;
   9877 
   9878 	/* Clear SK and CS. */
   9879 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
   9880 	CSR_WRITE(sc, WMREG_EECD, reg);
   9881 	CSR_WRITE_FLUSH(sc);
   9882 	delay(2);
   9883 
   9884 	if (wm_nvm_ready_spi(sc))
   9885 		return 1;
   9886 
   9887 	/* Toggle CS to flush commands. */
   9888 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
   9889 	CSR_WRITE_FLUSH(sc);
   9890 	delay(2);
   9891 	CSR_WRITE(sc, WMREG_EECD, reg);
   9892 	CSR_WRITE_FLUSH(sc);
   9893 	delay(2);
   9894 
   9895 	opc = SPI_OPC_READ;
   9896 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
   9897 		opc |= SPI_OPC_A8;
   9898 
   9899 	wm_eeprom_sendbits(sc, opc, 8);
   9900 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
   9901 
   9902 	for (i = 0; i < wordcnt; i++) {
   9903 		wm_eeprom_recvbits(sc, &val, 16);
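		/*
		 * The EEPROM shifts the word out MSB first, while NVM
		 * words are stored LSB first, so swap the two bytes.
		 */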
   9904 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
   9905 	}
   9906 
   9907 	/* Raise CS and clear SK. */
   9908 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
   9909 	CSR_WRITE(sc, WMREG_EECD, reg);
   9910 	CSR_WRITE_FLUSH(sc);
   9911 	delay(2);
   9912 
   9913 	return 0;
   9914 }
   9915 
    9916 /* Word access using the EERD register */
   9917 
   9918 static int
   9919 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
   9920 {
   9921 	uint32_t attempts = 100000;
   9922 	uint32_t i, reg = 0;
   9923 	int32_t done = -1;
   9924 
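	/* 100000 polls with a 5us delay each bound the wait at ~0.5s. */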
   9925 	for (i = 0; i < attempts; i++) {
   9926 		reg = CSR_READ(sc, rw);
   9927 
   9928 		if (reg & EERD_DONE) {
   9929 			done = 0;
   9930 			break;
   9931 		}
   9932 		delay(5);
   9933 	}
   9934 
   9935 	return done;
   9936 }
   9937 
   9938 static int
   9939 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
   9940     uint16_t *data)
   9941 {
   9942 	int i, eerd = 0;
   9943 	int error = 0;
   9944 
   9945 	for (i = 0; i < wordcnt; i++) {
   9946 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
   9947 
   9948 		CSR_WRITE(sc, WMREG_EERD, eerd);
   9949 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
   9950 		if (error != 0)
   9951 			break;
   9952 
   9953 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
   9954 	}
   9955 
   9956 	return error;
   9957 }
   9958 
   9959 /* Flash */
   9960 
   9961 static int
   9962 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
   9963 {
   9964 	uint32_t eecd;
   9965 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
   9966 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
   9967 	uint8_t sig_byte = 0;
   9968 
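	/*
	 * The bank signature lives in the high byte of flash word
	 * ICH_NVM_SIG_WORD, hence the "* 2 + 1" byte offset; the same
	 * byte in bank 1 sits one full bank (in bytes) further on.
	 */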
   9969 	switch (sc->sc_type) {
   9970 	case WM_T_ICH8:
   9971 	case WM_T_ICH9:
   9972 		eecd = CSR_READ(sc, WMREG_EECD);
   9973 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
   9974 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
   9975 			return 0;
   9976 		}
   9977 		/* FALLTHROUGH */
   9978 	default:
   9979 		/* Default to 0 */
   9980 		*bank = 0;
   9981 
   9982 		/* Check bank 0 */
   9983 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
   9984 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9985 			*bank = 0;
   9986 			return 0;
   9987 		}
   9988 
   9989 		/* Check bank 1 */
   9990 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
   9991 		    &sig_byte);
   9992 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
   9993 			*bank = 1;
   9994 			return 0;
   9995 		}
   9996 	}
   9997 
   9998 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
   9999 		device_xname(sc->sc_dev)));
   10000 	return -1;
   10001 }
   10002 
   10003 /******************************************************************************
   10004  * This function does initial flash setup so that a new read/write/erase cycle
   10005  * can be started.
   10006  *
   10007  * sc - The pointer to the hw structure
   10008  ****************************************************************************/
   10009 static int32_t
   10010 wm_ich8_cycle_init(struct wm_softc *sc)
   10011 {
   10012 	uint16_t hsfsts;
   10013 	int32_t error = 1;
   10014 	int32_t i     = 0;
   10015 
   10016 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10017 
   10018 	/* Check the Flash Descriptor Valid bit in the HW status */
   10019 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
   10020 		return error;
   10021 	}
   10022 
   10023 	/* Clear FCERR and DAEL in the HW status by writing 1s */
   10025 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
   10026 
   10027 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10028 
   10029 	/*
   10030 	 * Either we should have a hardware SPI cycle-in-progress bit to
   10031 	 * check against before starting a new cycle, or the FDONE bit
   10032 	 * should be changed in the hardware so that it reads 1 after a
   10033 	 * hardware reset, which could then indicate whether a cycle is in
   10034 	 * progress or has completed.  We should also have some software
   10035 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
   10036 	 * so that accesses by two threads are serialized, or some other
   10037 	 * way to keep two threads from starting a cycle at the same time.
   10038 	 */
   10039 
   10040 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10041 		/*
   10042 		 * There is no cycle running at present, so we can start a
   10043 		 * cycle
   10044 		 */
   10045 
   10046 		/* Begin by setting Flash Cycle Done. */
   10047 		hsfsts |= HSFSTS_DONE;
   10048 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10049 		error = 0;
   10050 	} else {
   10051 		/*
   10052 		 * Otherwise poll for some time so the current cycle has a
   10053 		 * chance to end before giving up.
   10054 		 */
   10055 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
   10056 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10057 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
   10058 				error = 0;
   10059 				break;
   10060 			}
   10061 			delay(1);
   10062 		}
   10063 		if (error == 0) {
   10064 			/*
   10065 			 * The previous cycle completed before we gave up;
   10066 			 * now set the Flash Cycle Done bit.
   10067 			 */
   10068 			hsfsts |= HSFSTS_DONE;
   10069 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
   10070 		}
   10071 	}
   10072 	return error;
   10073 }
   10074 
   10075 /******************************************************************************
   10076  * This function starts a flash cycle and waits for its completion
   10077  *
   10078  * sc - The pointer to the hw structure
   10079  ****************************************************************************/
   10080 static int32_t
   10081 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
   10082 {
   10083 	uint16_t hsflctl;
   10084 	uint16_t hsfsts;
   10085 	int32_t error = 1;
   10086 	uint32_t i = 0;
   10087 
   10088 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
   10089 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10090 	hsflctl |= HSFCTL_GO;
   10091 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10092 
   10093 	/* Wait till FDONE bit is set to 1 */
   10094 	do {
   10095 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10096 		if (hsfsts & HSFSTS_DONE)
   10097 			break;
   10098 		delay(1);
   10099 		i++;
   10100 	} while (i < timeout);
   10101 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
   10102 		error = 0;
   10103 
   10104 	return error;
   10105 }
   10106 
   10107 /******************************************************************************
   10108  * Reads a byte or word from the NVM using the ICH8 flash access registers.
   10109  *
   10110  * sc - The pointer to the hw structure
   10111  * index - The index of the byte or word to read.
   10112  * size - Size of data to read, 1=byte 2=word
   10113  * data - Pointer to the word to store the value read.
   10114  *****************************************************************************/
   10115 static int32_t
   10116 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
   10117     uint32_t size, uint16_t *data)
   10118 {
   10119 	uint16_t hsfsts;
   10120 	uint16_t hsflctl;
   10121 	uint32_t flash_linear_address;
   10122 	uint32_t flash_data = 0;
   10123 	int32_t error = 1;
   10124 	int32_t count = 0;
   10125 
   10126 	if (size < 1 || size > 2 || data == NULL ||
   10127 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
   10128 		return error;
   10129 
   10130 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
   10131 	    sc->sc_ich8_flash_base;
   10132 
   10133 	do {
   10134 		delay(1);
   10135 		/* Steps */
   10136 		error = wm_ich8_cycle_init(sc);
   10137 		if (error)
   10138 			break;
   10139 
   10140 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
   10141 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
   10142 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
   10143 		    & HSFCTL_BCOUNT_MASK;
   10144 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
   10145 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
   10146 
   10147 		/*
   10148 		 * Write the last 24 bits of index into Flash Linear address
   10149 		 * field in Flash Address
   10150 		 */
   10151 		/* TODO: maybe check the index against the size of the flash */
   10152 
   10153 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
   10154 
   10155 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
   10156 
   10157 		/*
   10158 		 * Check if FCERR is set to 1; if so, clear it and retry
   10159 		 * the whole sequence a few more times.  Otherwise read in
   10160 		 * (shift in) the Flash Data0, least significant byte first.
   10162 		 */
   10163 		if (error == 0) {
   10164 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
   10165 			if (size == 1)
   10166 				*data = (uint8_t)(flash_data & 0x000000FF);
   10167 			else if (size == 2)
   10168 				*data = (uint16_t)(flash_data & 0x0000FFFF);
   10169 			break;
   10170 		} else {
   10171 			/*
   10172 			 * If we've gotten here, then things are probably
   10173 			 * completely hosed, but if the error condition is
   10174 			 * detected, it won't hurt to give it another try...
   10175 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
   10176 			 */
   10177 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
   10178 			if (hsfsts & HSFSTS_ERR) {
   10179 				/* Repeat for some time before giving up. */
   10180 				continue;
   10181 			} else if ((hsfsts & HSFSTS_DONE) == 0)
   10182 				break;
   10183 		}
   10184 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
   10185 
   10186 	return error;
   10187 }
   10188 
   10189 /******************************************************************************
   10190  * Reads a single byte from the NVM using the ICH8 flash access registers.
   10191  *
   10192  * sc - pointer to wm_hw structure
   10193  * index - The index of the byte to read.
   10194  * data - Pointer to a byte to store the value read.
   10195  *****************************************************************************/
   10196 static int32_t
   10197 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
   10198 {
   10199 	int32_t status;
   10200 	uint16_t word = 0;
   10201 
   10202 	status = wm_read_ich8_data(sc, index, 1, &word);
   10203 	if (status == 0)
   10204 		*data = (uint8_t)word;
   10205 	else
   10206 		*data = 0;
   10207 
   10208 	return status;
   10209 }
   10210 
   10211 /******************************************************************************
   10212  * Reads a word from the NVM using the ICH8 flash access registers.
   10213  *
   10214  * sc - pointer to wm_hw structure
   10215  * index - The starting byte index of the word to read.
   10216  * data - Pointer to a word to store the value read.
   10217  *****************************************************************************/
   10218 static int32_t
   10219 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
   10220 {
   10221 	int32_t status;
   10222 
   10223 	status = wm_read_ich8_data(sc, index, 2, data);
   10224 	return status;
   10225 }
   10226 
   10227 /******************************************************************************
   10228  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
   10229  * register.
   10230  *
   10231  * sc - Struct containing variables accessed by shared code
   10232  * offset - offset of word in the EEPROM to read
   10233  * data - word read from the EEPROM
   10234  * words - number of words to read
   10235  *****************************************************************************/
   10236 static int
   10237 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10238 {
   10239 	int32_t  error = 0;
   10240 	uint32_t flash_bank = 0;
   10241 	uint32_t act_offset = 0;
   10242 	uint32_t bank_offset = 0;
   10243 	uint16_t word = 0;
   10244 	uint16_t i = 0;
   10245 
   10246 	/*
   10247 	 * We need to know which is the valid flash bank.  In the event
   10248 	 * that we didn't allocate eeprom_shadow_ram, we may not be
   10249 	 * managing flash_bank.  So it cannot be trusted and needs
   10250 	 * to be updated with each read.
   10251 	 */
   10252 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
   10253 	if (error) {
   10254 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
   10255 			device_xname(sc->sc_dev)));
   10256 		flash_bank = 0;
   10257 	}
   10258 
   10259 	/*
   10260 	 * If we're on bank 1, compute the byte offset of that bank;
   10261 	 * the bank size is counted in words, hence the "* 2".
   10262 	 */
   10263 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
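	/*
	 * E.g. with a 4K-word bank, reading word 0x10 from bank 1 starts
	 * at byte offset 4096 * 2 + 0x10 * 2 = 0x2020 in the flash.
	 */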
   10264 
   10265 	error = wm_get_swfwhw_semaphore(sc);
   10266 	if (error) {
   10267 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10268 		    __func__);
   10269 		return error;
   10270 	}
   10271 
   10272 	for (i = 0; i < words; i++) {
   10273 		/* The NVM part needs a byte offset, hence * 2 */
   10274 		act_offset = bank_offset + ((offset + i) * 2);
   10275 		error = wm_read_ich8_word(sc, act_offset, &word);
   10276 		if (error) {
   10277 			aprint_error_dev(sc->sc_dev,
   10278 			    "%s: failed to read NVM\n", __func__);
   10279 			break;
   10280 		}
   10281 		data[i] = word;
   10282 	}
   10283 
   10284 	wm_put_swfwhw_semaphore(sc);
   10285 	return error;
   10286 }
   10287 
   10288 /* iNVM */
   10289 
   10290 static int
   10291 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
   10292 {
   10293 	int32_t rv = -1;	/* Assume "not found" until the word is seen */
   10294 	uint32_t invm_dword;
   10295 	uint16_t i;
   10296 	uint8_t record_type, word_address;
   10297 
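	/*
	 * Walk the iNVM dwords: a word-autoload record carries both a
	 * word address and its data, while CSR-autoload and RSA-key
	 * records are skipped over by their published sizes.
	 */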
   10298 	for (i = 0; i < INVM_SIZE; i++) {
   10299 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
   10300 		/* Get record type */
   10301 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
   10302 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
   10303 			break;
   10304 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
   10305 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
   10306 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
   10307 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
   10308 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
   10309 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
   10310 			if (word_address == address) {
   10311 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
   10312 				rv = 0;
   10313 				break;
   10314 			}
   10315 		}
   10316 	}
   10317 
   10318 	return rv;
   10319 }
   10320 
   10321 static int
   10322 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
   10323 {
   10324 	int rv = 0;
   10325 	int i;
   10326 
   10327 	for (i = 0; i < words; i++) {
   10328 		switch (offset + i) {
   10329 		case NVM_OFF_MACADDR:
   10330 		case NVM_OFF_MACADDR1:
   10331 		case NVM_OFF_MACADDR2:
   10332 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
   10333 			if (rv != 0) {
   10334 				data[i] = 0xffff;
   10335 				rv = -1;
   10336 			}
   10337 			break;
   10338 		case NVM_OFF_CFG2:
   10339 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10340 			if (rv != 0) {
   10341 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
   10342 				rv = 0;
   10343 			}
   10344 			break;
   10345 		case NVM_OFF_CFG4:
   10346 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10347 			if (rv != 0) {
   10348 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
   10349 				rv = 0;
   10350 			}
   10351 			break;
   10352 		case NVM_OFF_LED_1_CFG:
   10353 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10354 			if (rv != 0) {
   10355 				*data = NVM_LED_1_CFG_DEFAULT_I211;
   10356 				rv = 0;
   10357 			}
   10358 			break;
   10359 		case NVM_OFF_LED_0_2_CFG:
   10360 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10361 			if (rv != 0) {
   10362 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
   10363 				rv = 0;
   10364 			}
   10365 			break;
   10366 		case NVM_OFF_ID_LED_SETTINGS:
   10367 			rv = wm_nvm_read_word_invm(sc, offset, data);
   10368 			if (rv != 0) {
   10369 				*data = ID_LED_RESERVED_FFFF;
   10370 				rv = 0;
   10371 			}
   10372 			break;
   10373 		default:
   10374 			DPRINTF(WM_DEBUG_NVM,
   10375 			    ("NVM word 0x%02x is not mapped.\n", offset));
   10376 			*data = NVM_RESERVED_WORD;
   10377 			break;
   10378 		}
   10379 	}
   10380 
   10381 	return rv;
   10382 }
   10383 
   10384 /* Lock, detecting NVM type, validate checksum, version and read */
   10385 
   10386 /*
   10387  * wm_nvm_acquire:
   10388  *
   10389  *	Perform the EEPROM handshake required on some chips.
   10390  */
   10391 static int
   10392 wm_nvm_acquire(struct wm_softc *sc)
   10393 {
   10394 	uint32_t reg;
   10395 	int x;
   10396 	int ret = 0;
   10397 
   10398 	/* Flash-based NVM needs no handshake; always succeed */
   10399 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10400 		return 0;
   10401 
   10402 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
   10403 		ret = wm_get_swfwhw_semaphore(sc);
   10404 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
   10405 		/* This will also do wm_get_swsm_semaphore() if needed */
   10406 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
   10407 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10408 		ret = wm_get_swsm_semaphore(sc);
   10409 	}
   10410 
   10411 	if (ret) {
   10412 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
   10413 			__func__);
   10414 		return 1;
   10415 	}
   10416 
   10417 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10418 		reg = CSR_READ(sc, WMREG_EECD);
   10419 
   10420 		/* Request EEPROM access. */
   10421 		reg |= EECD_EE_REQ;
   10422 		CSR_WRITE(sc, WMREG_EECD, reg);
   10423 
   10424 		/* ..and wait for it to be granted. */
   10425 		for (x = 0; x < 1000; x++) {
   10426 			reg = CSR_READ(sc, WMREG_EECD);
   10427 			if (reg & EECD_EE_GNT)
   10428 				break;
   10429 			delay(5);
   10430 		}
   10431 		if ((reg & EECD_EE_GNT) == 0) {
   10432 			aprint_error_dev(sc->sc_dev,
   10433 			    "could not acquire EEPROM GNT\n");
   10434 			reg &= ~EECD_EE_REQ;
   10435 			CSR_WRITE(sc, WMREG_EECD, reg);
   10436 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10437 				wm_put_swfwhw_semaphore(sc);
   10438 			if (sc->sc_flags & WM_F_LOCK_SWFW)
   10439 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10440 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10441 				wm_put_swsm_semaphore(sc);
   10442 			return 1;
   10443 		}
   10444 	}
   10445 
   10446 	return 0;
   10447 }
   10448 
   10449 /*
   10450  * wm_nvm_release:
   10451  *
   10452  *	Release the EEPROM mutex.
   10453  */
   10454 static void
   10455 wm_nvm_release(struct wm_softc *sc)
   10456 {
   10457 	uint32_t reg;
   10458 
   10459 	/* Flash-based NVM needs no handshake; nothing to release */
   10460 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
   10461 		return;
   10462 
   10463 	if (sc->sc_flags & WM_F_LOCK_EECD) {
   10464 		reg = CSR_READ(sc, WMREG_EECD);
   10465 		reg &= ~EECD_EE_REQ;
   10466 		CSR_WRITE(sc, WMREG_EECD, reg);
   10467 	}
   10468 
   10469 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
   10470 		wm_put_swfwhw_semaphore(sc);
   10471 	if (sc->sc_flags & WM_F_LOCK_SWFW)
   10472 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
   10473 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
   10474 		wm_put_swsm_semaphore(sc);
   10475 }
   10476 
   10477 static int
   10478 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
   10479 {
   10480 	uint32_t eecd = 0;
   10481 
   10482 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
   10483 	    || sc->sc_type == WM_T_82583) {
   10484 		eecd = CSR_READ(sc, WMREG_EECD);
   10485 
   10486 		/* Isolate bits 15 & 16 */
   10487 		eecd = ((eecd >> 15) & 0x03);
   10488 
   10489 		/* If both bits are set, device is Flash type */
   10490 		if (eecd == 0x03)
   10491 			return 0;
   10492 	}
   10493 	return 1;
   10494 }
   10495 
   10496 static int
   10497 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
   10498 {
   10499 	uint32_t eec;
   10500 
   10501 	eec = CSR_READ(sc, WMREG_EEC);
   10502 	if ((eec & EEC_FLASH_DETECTED) != 0)
   10503 		return 1;
   10504 
   10505 	return 0;
   10506 }
   10507 
   10508 /*
   10509  * wm_nvm_validate_checksum
   10510  *
   10511  * The checksum is defined as the sum of the first 64 (16 bit) words.
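 * The image is considered valid when that sum equals NVM_CHECKSUM.
 * Note that a mismatch is currently only reported under WM_DEBUG and
 * is not treated as fatal.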
   10512  */
   10513 static int
   10514 wm_nvm_validate_checksum(struct wm_softc *sc)
   10515 {
   10516 	uint16_t checksum;
   10517 	uint16_t eeprom_data;
   10518 #ifdef WM_DEBUG
   10519 	uint16_t csum_wordaddr, valid_checksum;
   10520 #endif
   10521 	int i;
   10522 
   10523 	checksum = 0;
   10524 
   10525 	/* Don't check for I211 */
   10526 	if (sc->sc_type == WM_T_I211)
   10527 		return 0;
   10528 
   10529 #ifdef WM_DEBUG
   10530 	if (sc->sc_type == WM_T_PCH_LPT) {
   10531 		csum_wordaddr = NVM_OFF_COMPAT;
   10532 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
   10533 	} else {
   10534 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
   10535 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
   10536 	}
   10537 
   10538 	/* Dump EEPROM image for debug */
   10539 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10540 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10541 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
   10542 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
   10543 		if ((eeprom_data & valid_checksum) == 0) {
   10544 			DPRINTF(WM_DEBUG_NVM,
   10545 			    ("%s: NVM need to be updated (%04x != %04x)\n",
   10546 				device_xname(sc->sc_dev), eeprom_data,
   10547 				    valid_checksum));
   10548 		}
   10549 	}
   10550 
   10551 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
   10552 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
   10553 		for (i = 0; i < NVM_SIZE; i++) {
   10554 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10555 				printf("XXXX ");
   10556 			else
   10557 				printf("%04hx ", eeprom_data);
   10558 			if (i % 8 == 7)
   10559 				printf("\n");
   10560 		}
   10561 	}
   10562 
   10563 #endif /* WM_DEBUG */
   10564 
   10565 	for (i = 0; i < NVM_SIZE; i++) {
   10566 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
   10567 			return 1;
   10568 		checksum += eeprom_data;
   10569 	}
   10570 
   10571 	if (checksum != (uint16_t) NVM_CHECKSUM) {
   10572 #ifdef WM_DEBUG
   10573 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
   10574 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
   10575 #endif
   10576 	}
   10577 
   10578 	return 0;
   10579 }
   10580 
   10581 static void
   10582 wm_nvm_version_invm(struct wm_softc *sc)
   10583 {
   10584 	uint32_t dword;
   10585 
   10586 	/*
   10587 	 * Linux's code to decode the version is very strange, so we don't
   10588 	 * follow that algorithm and just use word 61 as the documentation
   10589 	 * describes.  Perhaps it's not perfect, though...
   10590 	 *
   10591 	 * Example:
   10592 	 *
   10593 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
   10594 	 */
   10595 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
   10596 	dword = __SHIFTOUT(dword, INVM_VER_1);
   10597 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
   10598 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
   10599 }
   10600 
   10601 static void
   10602 wm_nvm_version(struct wm_softc *sc)
   10603 {
   10604 	uint16_t major, minor, build, patch;
   10605 	uint16_t uid0, uid1;
   10606 	uint16_t nvm_data;
   10607 	uint16_t off;
   10608 	bool check_version = false;
   10609 	bool check_optionrom = false;
   10610 	bool have_build = false;
   10611 
   10612 	/*
   10613 	 * Version format:
   10614 	 *
   10615 	 * XYYZ
   10616 	 * X0YZ
   10617 	 * X0YY
   10618 	 *
   10619 	 * Example:
   10620 	 *
   10621 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
   10622 	 *	82571	0x50a6	5.10.6?
   10623 	 *	82572	0x506a	5.6.10?
   10624 	 *	82572EI	0x5069	5.6.9?
   10625 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
   10626 	 *		0x2013	2.1.3?
   10627 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
   10628 	 */
   10629 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
   10630 	switch (sc->sc_type) {
   10631 	case WM_T_82571:
   10632 	case WM_T_82572:
   10633 	case WM_T_82574:
   10634 	case WM_T_82583:
   10635 		check_version = true;
   10636 		check_optionrom = true;
   10637 		have_build = true;
   10638 		break;
   10639 	case WM_T_82575:
   10640 	case WM_T_82576:
   10641 	case WM_T_82580:
   10642 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
   10643 			check_version = true;
   10644 		break;
   10645 	case WM_T_I211:
   10646 		wm_nvm_version_invm(sc);
   10647 		goto printver;
   10648 	case WM_T_I210:
   10649 		if (!wm_nvm_get_flash_presence_i210(sc)) {
   10650 			wm_nvm_version_invm(sc);
   10651 			goto printver;
   10652 		}
   10653 		/* FALLTHROUGH */
   10654 	case WM_T_I350:
   10655 	case WM_T_I354:
   10656 		check_version = true;
   10657 		check_optionrom = true;
   10658 		break;
   10659 	default:
   10660 		return;
   10661 	}
   10662 	if (check_version) {
   10663 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
   10664 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
   10665 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
   10666 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
   10667 			build = nvm_data & NVM_BUILD_MASK;
   10668 			have_build = true;
   10669 		} else
   10670 			minor = nvm_data & 0x00ff;
   10671 
   10672 		/* Decimal */
   10673 		minor = (minor / 16) * 10 + (minor % 16);
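		/*
		 * E.g. nvm_data 0x50a2 decodes to major 5, minor 0x0a
		 * -> 10, build 2: the "5.10.2" example above.
		 */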
   10674 		sc->sc_nvm_ver_major = major;
   10675 		sc->sc_nvm_ver_minor = minor;
   10676 
   10677 printver:
   10678 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
   10679 		    sc->sc_nvm_ver_minor);
   10680 		if (have_build) {
   10681 			sc->sc_nvm_ver_build = build;
   10682 			aprint_verbose(".%d", build);
   10683 		}
   10684 	}
   10685 	if (check_optionrom) {
   10686 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
   10687 		/* Option ROM Version */
   10688 		if ((off != 0x0000) && (off != 0xffff)) {
   10689 			off += NVM_COMBO_VER_OFF;
   10690 			wm_nvm_read(sc, off + 1, 1, &uid1);
   10691 			wm_nvm_read(sc, off, 1, &uid0);
   10692 			if ((uid0 != 0) && (uid0 != 0xffff)
   10693 			    && (uid1 != 0) && (uid1 != 0xffff)) {
   10694 				/* 16bits */
   10695 				major = uid0 >> 8;
   10696 				build = (uid0 << 8) | (uid1 >> 8);
   10697 				patch = uid1 & 0x00ff;
   10698 				aprint_verbose(", option ROM Version %d.%d.%d",
   10699 				    major, build, patch);
   10700 			}
   10701 		}
   10702 	}
   10703 
   10704 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
   10705 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
   10706 }
   10707 
   10708 /*
   10709  * wm_nvm_read:
   10710  *
   10711  *	Read data from the serial EEPROM.
   10712  */
   10713 static int
   10714 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
   10715 {
   10716 	int rv;
   10717 
   10718 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
   10719 		return 1;
   10720 
   10721 	if (wm_nvm_acquire(sc))
   10722 		return 1;
   10723 
   10724 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   10725 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   10726 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
   10727 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
   10728 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
   10729 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
   10730 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
   10731 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
   10732 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
   10733 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
   10734 	else
   10735 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
   10736 
   10737 	wm_nvm_release(sc);
   10738 	return rv;
   10739 }
   10740 
   10741 /*
   10742  * Hardware semaphores.
   10743  * Very complex...
   10744  */
   10745 
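/*
 * wm_get_swsm_semaphore:
 *
 *	Take the SWSM semaphore: first SMBI (the software semaphore),
 *	then SWESMBI (software/firmware arbitration).  SWESMBI only
 *	sticks when the firmware isn't holding it, hence the read-back
 *	test after setting it.
 */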
   10746 static int
   10747 wm_get_swsm_semaphore(struct wm_softc *sc)
   10748 {
   10749 	int32_t timeout;
   10750 	uint32_t swsm;
   10751 
   10752 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10753 		/* Get the SW semaphore. */
   10754 		timeout = sc->sc_nvm_wordsize + 1;
   10755 		while (timeout) {
   10756 			swsm = CSR_READ(sc, WMREG_SWSM);
   10757 
   10758 			if ((swsm & SWSM_SMBI) == 0)
   10759 				break;
   10760 
   10761 			delay(50);
   10762 			timeout--;
   10763 		}
   10764 
   10765 		if (timeout == 0) {
   10766 			aprint_error_dev(sc->sc_dev,
   10767 			    "could not acquire SWSM SMBI\n");
   10768 			return 1;
   10769 		}
   10770 	}
   10771 
   10772 	/* Get the FW semaphore. */
   10773 	timeout = sc->sc_nvm_wordsize + 1;
   10774 	while (timeout) {
   10775 		swsm = CSR_READ(sc, WMREG_SWSM);
   10776 		swsm |= SWSM_SWESMBI;
   10777 		CSR_WRITE(sc, WMREG_SWSM, swsm);
   10778 		/* If we managed to set the bit we got the semaphore. */
   10779 		swsm = CSR_READ(sc, WMREG_SWSM);
   10780 		if (swsm & SWSM_SWESMBI)
   10781 			break;
   10782 
   10783 		delay(50);
   10784 		timeout--;
   10785 	}
   10786 
   10787 	if (timeout == 0) {
   10788 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM SWESMBI\n");
   10789 		/* Release semaphores */
   10790 		wm_put_swsm_semaphore(sc);
   10791 		return 1;
   10792 	}
   10793 	return 0;
   10794 }
   10795 
   10796 static void
   10797 wm_put_swsm_semaphore(struct wm_softc *sc)
   10798 {
   10799 	uint32_t swsm;
   10800 
   10801 	swsm = CSR_READ(sc, WMREG_SWSM);
   10802 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
   10803 	CSR_WRITE(sc, WMREG_SWSM, swsm);
   10804 }
   10805 
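/*
 * wm_get_swfw_semaphore:
 *
 *	Grab one of the per-resource software/firmware locks in
 *	SW_FW_SYNC.  The register itself is protected by the SWSM
 *	semaphore; the lock is ours once neither the software nor the
 *	firmware bit for `mask' is set.
 */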
   10806 static int
   10807 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10808 {
   10809 	uint32_t swfw_sync;
   10810 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
   10811 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
   10812 	int timeout;
   10813 
   10814 	for (timeout = 0; timeout < 200; timeout++) {
   10815 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10816 			if (wm_get_swsm_semaphore(sc)) {
   10817 				aprint_error_dev(sc->sc_dev,
   10818 				    "%s: failed to get semaphore\n",
   10819 				    __func__);
   10820 				return 1;
   10821 			}
   10822 		}
   10823 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10824 		if ((swfw_sync & (swmask | fwmask)) == 0) {
   10825 			swfw_sync |= swmask;
   10826 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10827 			if (sc->sc_flags & WM_F_LOCK_SWSM)
   10828 				wm_put_swsm_semaphore(sc);
   10829 			return 0;
   10830 		}
   10831 		if (sc->sc_flags & WM_F_LOCK_SWSM)
   10832 			wm_put_swsm_semaphore(sc);
   10833 		delay(5000);
   10834 	}
   10835 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
   10836 	    device_xname(sc->sc_dev), mask, swfw_sync);
   10837 	return 1;
   10838 }
   10839 
   10840 static void
   10841 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
   10842 {
   10843 	uint32_t swfw_sync;
   10844 
   10845 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
   10846 		while (wm_get_swsm_semaphore(sc) != 0)
   10847 			continue;
   10848 	}
   10849 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
   10850 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
   10851 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
   10852 	if (sc->sc_flags & WM_F_LOCK_SWSM)
   10853 		wm_put_swsm_semaphore(sc);
   10854 }
   10855 
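/*
 * wm_get_swfwhw_semaphore:
 *
 *	Claim software ownership of the extended configuration
 *	resources by setting EXTCNFCTR_MDIO_SW_OWNERSHIP; the bit only
 *	reads back as set once hardware/firmware has granted it.
 */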
   10856 static int
   10857 wm_get_swfwhw_semaphore(struct wm_softc *sc)
   10858 {
   10859 	uint32_t ext_ctrl;
   10860 	int timeout;
   10861 
   10862 	for (timeout = 0; timeout < 200; timeout++) {
   10863 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10864 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10865 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10866 
   10867 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10868 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
   10869 			return 0;
   10870 		delay(5000);
   10871 	}
   10872 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
   10873 	    device_xname(sc->sc_dev), ext_ctrl);
   10874 	return 1;
   10875 }
   10876 
   10877 static void
   10878 wm_put_swfwhw_semaphore(struct wm_softc *sc)
   10879 {
   10880 	uint32_t ext_ctrl;

   10881 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
   10882 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10883 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
   10884 }
   10885 
   10886 static int
   10887 wm_get_hw_semaphore_82573(struct wm_softc *sc)
   10888 {
   10889 	int i = 0;
   10890 	uint32_t reg;
   10891 
   10892 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10893 	do {
   10894 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
   10895 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
   10896 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10897 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
   10898 			break;
   10899 		delay(2*1000);
   10900 		i++;
   10901 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
   10902 
   10903 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
   10904 		wm_put_hw_semaphore_82573(sc);
   10905 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
   10906 		    device_xname(sc->sc_dev));
   10907 		return -1;
   10908 	}
   10909 
   10910 	return 0;
   10911 }
   10912 
   10913 static void
   10914 wm_put_hw_semaphore_82573(struct wm_softc *sc)
   10915 {
   10916 	uint32_t reg;
   10917 
   10918 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   10919 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
   10920 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   10921 }
   10922 
   10923 /*
   10924  * Management mode and power management related subroutines.
   10925  * BMC, AMT, suspend/resume and EEE.
   10926  */
   10927 
   10928 #ifdef WM_WOL
   10929 static int
   10930 wm_check_mng_mode(struct wm_softc *sc)
   10931 {
   10932 	int rv;
   10933 
   10934 	switch (sc->sc_type) {
   10935 	case WM_T_ICH8:
   10936 	case WM_T_ICH9:
   10937 	case WM_T_ICH10:
   10938 	case WM_T_PCH:
   10939 	case WM_T_PCH2:
   10940 	case WM_T_PCH_LPT:
   10941 		rv = wm_check_mng_mode_ich8lan(sc);
   10942 		break;
   10943 	case WM_T_82574:
   10944 	case WM_T_82583:
   10945 		rv = wm_check_mng_mode_82574(sc);
   10946 		break;
   10947 	case WM_T_82571:
   10948 	case WM_T_82572:
   10949 	case WM_T_82573:
   10950 	case WM_T_80003:
   10951 		rv = wm_check_mng_mode_generic(sc);
   10952 		break;
   10953 	default:
   10954 		/* nothing to do */
   10955 		rv = 0;
   10956 		break;
   10957 	}
   10958 
   10959 	return rv;
   10960 }
   10961 
   10962 static int
   10963 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
   10964 {
   10965 	uint32_t fwsm;
   10966 
   10967 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10968 
   10969 	if (((fwsm & FWSM_FW_VALID) != 0) &&
   10970 	    (fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
   10971 		return 1;
   10972 
   10973 	return 0;
   10974 }
   10975 
   10976 static int
   10977 wm_check_mng_mode_82574(struct wm_softc *sc)
   10978 {
   10979 	uint16_t data;
   10980 
   10981 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   10982 
   10983 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
   10984 		return 1;
   10985 
   10986 	return 0;
   10987 }
   10988 
   10989 static int
   10990 wm_check_mng_mode_generic(struct wm_softc *sc)
   10991 {
   10992 	uint32_t fwsm;
   10993 
   10994 	fwsm = CSR_READ(sc, WMREG_FWSM);
   10995 
   10996 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
   10997 		return 1;
   10998 
   10999 	return 0;
   11000 }
   11001 #endif /* WM_WOL */
   11002 
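/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management (BMC/AMT) packets received by the
 *	firmware should also be passed through to the host, else 0.
 */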
   11003 static int
   11004 wm_enable_mng_pass_thru(struct wm_softc *sc)
   11005 {
   11006 	uint32_t manc, fwsm, factps;
   11007 
   11008 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
   11009 		return 0;
   11010 
   11011 	manc = CSR_READ(sc, WMREG_MANC);
   11012 
   11013 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
   11014 		device_xname(sc->sc_dev), manc));
   11015 	if ((manc & MANC_RECV_TCO_EN) == 0)
   11016 		return 0;
   11017 
   11018 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
   11019 		fwsm = CSR_READ(sc, WMREG_FWSM);
   11020 		factps = CSR_READ(sc, WMREG_FACTPS);
   11021 		if (((factps & FACTPS_MNGCG) == 0)
   11022 		    && ((fwsm & FWSM_MODE_MASK)
   11023 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
   11024 			return 1;
   11025 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
   11026 		uint16_t data;
   11027 
   11028 		factps = CSR_READ(sc, WMREG_FACTPS);
   11029 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
   11030 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
   11031 			device_xname(sc->sc_dev), factps, data));
   11032 		if (((factps & FACTPS_MNGCG) == 0)
   11033 		    && ((data & NVM_CFG2_MNGM_MASK)
   11034 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
   11035 			return 1;
   11036 	} else if (((manc & MANC_SMBUS_EN) != 0)
   11037 	    && ((manc & MANC_ASF_EN) == 0))
   11038 		return 1;
   11039 
   11040 	return 0;
   11041 }
   11042 
   11043 static int
   11044 wm_check_reset_block(struct wm_softc *sc)
   11045 {
   11046 	bool blocked = false;
   11047 	uint32_t reg;
   11048 	int i = 0;
   11049 
   11050 	switch (sc->sc_type) {
   11051 	case WM_T_ICH8:
   11052 	case WM_T_ICH9:
   11053 	case WM_T_ICH10:
   11054 	case WM_T_PCH:
   11055 	case WM_T_PCH2:
   11056 	case WM_T_PCH_LPT:
   11057 		do {
   11058 			reg = CSR_READ(sc, WMREG_FWSM);
   11059 			if ((reg & FWSM_RSPCIPHY) == 0) {
   11060 				blocked = true;
   11061 				delay(10*1000);
   11062 				continue;
   11063 			}
   11064 			blocked = false;
   11065 		} while (blocked && (i++ < 10));
   11066 		return blocked ? 1 : 0;
   11068 	case WM_T_82571:
   11069 	case WM_T_82572:
   11070 	case WM_T_82573:
   11071 	case WM_T_82574:
   11072 	case WM_T_82583:
   11073 	case WM_T_80003:
   11074 		reg = CSR_READ(sc, WMREG_MANC);
   11075 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
   11076 			return -1;
   11077 		else
   11078 			return 0;
   11080 	default:
   11081 		/* no problem */
   11082 		break;
   11083 	}
   11084 
   11085 	return 0;
   11086 }
   11087 
   11088 static void
   11089 wm_get_hw_control(struct wm_softc *sc)
   11090 {
   11091 	uint32_t reg;
   11092 
   11093 	switch (sc->sc_type) {
   11094 	case WM_T_82573:
   11095 		reg = CSR_READ(sc, WMREG_SWSM);
   11096 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
   11097 		break;
   11098 	case WM_T_82571:
   11099 	case WM_T_82572:
   11100 	case WM_T_82574:
   11101 	case WM_T_82583:
   11102 	case WM_T_80003:
   11103 	case WM_T_ICH8:
   11104 	case WM_T_ICH9:
   11105 	case WM_T_ICH10:
   11106 	case WM_T_PCH:
   11107 	case WM_T_PCH2:
   11108 	case WM_T_PCH_LPT:
   11109 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11110 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
   11111 		break;
   11112 	default:
   11113 		break;
   11114 	}
   11115 }
   11116 
   11117 static void
   11118 wm_release_hw_control(struct wm_softc *sc)
   11119 {
   11120 	uint32_t reg;
   11121 
   11122 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
   11123 		return;
   11124 
   11125 	if (sc->sc_type == WM_T_82573) {
   11126 		reg = CSR_READ(sc, WMREG_SWSM);
   11127 		reg &= ~SWSM_DRV_LOAD;
   11128 		CSR_WRITE(sc, WMREG_SWSM, reg);
   11129 	} else {
   11130 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11131 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
   11132 	}
   11133 }
   11134 
   11135 static void
   11136 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
   11137 {
   11138 	uint32_t reg;
   11139 
   11140 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
   11141 
   11142 	if (on != 0)
   11143 		reg |= EXTCNFCTR_GATE_PHY_CFG;
   11144 	else
   11145 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
   11146 
   11147 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
   11148 }
   11149 
   11150 static void
   11151 wm_smbustopci(struct wm_softc *sc)
   11152 {
   11153 	uint32_t fwsm;
   11154 
   11155 	fwsm = CSR_READ(sc, WMREG_FWSM);
   11156 	if (((fwsm & FWSM_FW_VALID) == 0)
   11157 	    && ((wm_check_reset_block(sc) == 0))) {
   11158 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
   11159 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
   11160 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11161 		CSR_WRITE_FLUSH(sc);
   11162 		delay(10);
   11163 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
   11164 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
   11165 		CSR_WRITE_FLUSH(sc);
   11166 		delay(50*1000);
   11167 
   11168 		/*
   11169 		 * Gate automatic PHY configuration by hardware on non-managed
   11170 		 * 82579
   11171 		 */
   11172 		if (sc->sc_type == WM_T_PCH2)
   11173 			wm_gate_hw_phy_config_ich8lan(sc, 1);
   11174 	}
   11175 }
   11176 
   11177 static void
   11178 wm_init_manageability(struct wm_softc *sc)
   11179 {
   11180 
   11181 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11182 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
   11183 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11184 
   11185 		/* Disable hardware interception of ARP */
   11186 		manc &= ~MANC_ARP_EN;
   11187 
   11188 		/* Enable receiving management packets to the host */
   11189 		if (sc->sc_type >= WM_T_82571) {
   11190 			manc |= MANC_EN_MNG2HOST;
   11191 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
   11192 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
   11193 		}
   11194 
   11195 		CSR_WRITE(sc, WMREG_MANC, manc);
   11196 	}
   11197 }
   11198 
   11199 static void
   11200 wm_release_manageability(struct wm_softc *sc)
   11201 {
   11202 
   11203 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
   11204 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
   11205 
   11206 		manc |= MANC_ARP_EN;
   11207 		if (sc->sc_type >= WM_T_82571)
   11208 			manc &= ~MANC_EN_MNG2HOST;
   11209 
   11210 		CSR_WRITE(sc, WMREG_MANC, manc);
   11211 	}
   11212 }
   11213 
   11214 static void
   11215 wm_get_wakeup(struct wm_softc *sc)
   11216 {
   11217 
   11218 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
   11219 	switch (sc->sc_type) {
   11220 	case WM_T_82573:
   11221 	case WM_T_82583:
   11222 		sc->sc_flags |= WM_F_HAS_AMT;
   11223 		/* FALLTHROUGH */
   11224 	case WM_T_80003:
   11225 	case WM_T_82541:
   11226 	case WM_T_82547:
   11227 	case WM_T_82571:
   11228 	case WM_T_82572:
   11229 	case WM_T_82574:
   11230 	case WM_T_82575:
   11231 	case WM_T_82576:
   11232 	case WM_T_82580:
   11233 	case WM_T_I350:
   11234 	case WM_T_I354:
   11235 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
   11236 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
   11237 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11238 		break;
   11239 	case WM_T_ICH8:
   11240 	case WM_T_ICH9:
   11241 	case WM_T_ICH10:
   11242 	case WM_T_PCH:
   11243 	case WM_T_PCH2:
   11244 	case WM_T_PCH_LPT:
   11245 		sc->sc_flags |= WM_F_HAS_AMT;
   11246 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
   11247 		break;
   11248 	default:
   11249 		break;
   11250 	}
   11251 
   11252 	/* 1: HAS_MANAGE */
   11253 	if (wm_enable_mng_pass_thru(sc) != 0)
   11254 		sc->sc_flags |= WM_F_HAS_MANAGE;
   11255 
   11256 #ifdef WM_DEBUG
   11257 	printf("\n");
   11258 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
   11259 		printf("HAS_AMT,");
   11260 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
   11261 		printf("ARC_SUBSYS_VALID,");
   11262 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
   11263 		printf("ASF_FIRMWARE_PRES,");
   11264 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
   11265 		printf("HAS_MANAGE,");
   11266 	printf("\n");
   11267 #endif
   11268 	/*
   11269 	 * Note that the WOL flag is set later, after the EEPROM-related
   11270 	 * reset is done.
   11271 	 */
   11272 }
   11273 
   11274 #ifdef WM_WOL
   11275 /* WOL in the newer chipset interfaces (pchlan) */
   11276 static void
   11277 wm_enable_phy_wakeup(struct wm_softc *sc)
   11278 {
   11279 #if 0
   11280 	uint16_t preg;
   11281 
   11282 	/* Copy MAC RARs to PHY RARs */
   11283 
   11284 	/* Copy MAC MTA to PHY MTA */
   11285 
   11286 	/* Configure PHY Rx Control register */
   11287 
   11288 	/* Enable PHY wakeup in MAC register */
   11289 
   11290 	/* Configure and enable PHY wakeup in PHY registers */
   11291 
   11292 	/* Activate PHY wakeup */
   11293 
   11294 	/* XXX */
   11295 #endif
   11296 }
   11297 
   11298 /* Power down workaround on D3 */
   11299 static void
   11300 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
   11301 {
   11302 	uint32_t reg;
   11303 	int i;
   11304 
   11305 	for (i = 0; i < 2; i++) {
   11306 		/* Disable link */
   11307 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11308 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11309 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11310 
   11311 		/*
   11312 		 * Call gig speed drop workaround on Gig disable before
   11313 		 * accessing any PHY registers
   11314 		 */
   11315 		if (sc->sc_type == WM_T_ICH8)
   11316 			wm_gig_downshift_workaround_ich8lan(sc);
   11317 
   11318 		/* Write VR power-down enable */
   11319 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11320 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11321 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
   11322 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
   11323 
   11324 		/* Read it back and test */
   11325 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
   11326 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
   11327 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
   11328 			break;
   11329 
   11330 		/* Issue PHY reset and repeat at most one more time */
   11331 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
   11332 	}
   11333 }
   11334 
   11335 static void
   11336 wm_enable_wakeup(struct wm_softc *sc)
   11337 {
   11338 	uint32_t reg, pmreg;
   11339 	pcireg_t pmode;
   11340 
   11341 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
   11342 		&pmreg, NULL) == 0)
   11343 		return;
   11344 
   11345 	/* Advertise the wakeup capability */
   11346 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
   11347 	    | CTRL_SWDPIN(3));
   11348 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
   11349 
   11350 	/* ICH workaround */
   11351 	switch (sc->sc_type) {
   11352 	case WM_T_ICH8:
   11353 	case WM_T_ICH9:
   11354 	case WM_T_ICH10:
   11355 	case WM_T_PCH:
   11356 	case WM_T_PCH2:
   11357 	case WM_T_PCH_LPT:
   11358 		/* Disable gig during WOL */
   11359 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11360 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
   11361 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11362 		if (sc->sc_type == WM_T_PCH)
   11363 			wm_gmii_reset(sc);
   11364 
   11365 		/* Power down workaround */
   11366 		if (sc->sc_phytype == WMPHY_82577) {
   11367 			struct mii_softc *child;
   11368 
   11369 			/* Assume that the PHY is copper */
   11370 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
   11371 			if (child->mii_mpd_rev <= 2)
   11372 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
   11373 				    (768 << 5) | 25, 0x0444); /* magic num */
   11374 		}
   11375 		break;
   11376 	default:
   11377 		break;
   11378 	}
   11379 
   11380 	/* Keep the laser running on fiber adapters */
   11381 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
   11382 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
   11383 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
   11384 		reg |= CTRL_EXT_SWDPIN(3);
   11385 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
   11386 	}
   11387 
   11388 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
   11389 #if 0	/* for the multicast packet */
   11390 	reg |= WUFC_MC;
   11391 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
   11392 #endif
   11393 
   11394 	if (sc->sc_type == WM_T_PCH) {
   11395 		wm_enable_phy_wakeup(sc);
   11396 	} else {
   11397 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
   11398 		CSR_WRITE(sc, WMREG_WUFC, reg);
   11399 	}
   11400 
   11401 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
   11402 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
   11403 		|| (sc->sc_type == WM_T_PCH2))
   11404 		    && (sc->sc_phytype == WMPHY_IGP_3))
   11405 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
   11406 
   11407 	/* Request PME */
   11408 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
   11409 #if 0
   11410 	/* Disable WOL */
   11411 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
   11412 #else
   11413 	/* For WOL */
   11414 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
   11415 #endif
   11416 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
   11417 }
   11418 #endif /* WM_WOL */
   11419 
   11420 /* LPLU */
   11421 
   11422 static void
   11423 wm_lplu_d0_disable(struct wm_softc *sc)
   11424 {
   11425 	uint32_t reg;
   11426 
   11427 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11428 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
   11429 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11430 }
   11431 
   11432 static void
   11433 wm_lplu_d0_disable_pch(struct wm_softc *sc)
   11434 {
   11435 	uint32_t reg;
   11436 
   11437 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
   11438 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
   11439 	reg |= HV_OEM_BITS_ANEGNOW;
   11440 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
   11441 }
   11442 
   11443 /* EEE */
   11444 
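/*
 * wm_set_eee_i350:
 *
 *	Enable or disable Energy Efficient Ethernet (advertisement and
 *	LPI generation) on I350 family devices, based on WM_F_EEE.
 */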
   11445 static void
   11446 wm_set_eee_i350(struct wm_softc *sc)
   11447 {
   11448 	uint32_t ipcnfg, eeer;
   11449 
   11450 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
   11451 	eeer = CSR_READ(sc, WMREG_EEER);
   11452 
   11453 	if ((sc->sc_flags & WM_F_EEE) != 0) {
   11454 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11455 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11456 		    | EEER_LPI_FC);
   11457 	} else {
   11458 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
   11459 		ipcnfg &= ~IPCNFG_10BASE_TE;
   11460 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
   11461 		    | EEER_LPI_FC);
   11462 	}
   11463 
   11464 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
   11465 	CSR_WRITE(sc, WMREG_EEER, eeer);
   11466 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
   11467 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
   11468 }
   11469 
   11470 /*
   11471  * Workarounds (mainly PHY related).
   11472  * Basically, the PHY workarounds live in the PHY drivers.
   11473  */
   11474 
   11475 /* Work-around for 82566 Kumeran PCS lock loss */
   11476 static void
   11477 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
   11478 {
   11479 #if 0
   11480 	int miistatus, active, i;
   11481 	int reg;
   11482 
   11483 	miistatus = sc->sc_mii.mii_media_status;
   11484 
   11485 	/* If the link is not up, do nothing */
   11486 	if ((miistatus & IFM_ACTIVE) == 0)
   11487 		return;
   11488 
   11489 	active = sc->sc_mii.mii_media_active;
   11490 
   11491 	/* Nothing to do if the link is other than 1Gbps */
   11492 	if (IFM_SUBTYPE(active) != IFM_1000_T)
   11493 		return;
   11494 
   11495 	for (i = 0; i < 10; i++) {
   11496 		/* read twice */
   11497 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11498 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
   11499 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
   11500 			goto out;	/* GOOD! */
   11501 
   11502 		/* Reset the PHY */
   11503 		wm_gmii_reset(sc);
   11504 		delay(5*1000);
   11505 	}
   11506 
   11507 	/* Disable GigE link negotiation */
   11508 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
   11509 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
   11510 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
   11511 
   11512 	/*
   11513 	 * Call gig speed drop workaround on Gig disable before accessing
   11514 	 * any PHY registers.
   11515 	 */
   11516 	wm_gig_downshift_workaround_ich8lan(sc);
   11517 
   11518 out:
   11519 	return;
   11520 #endif
   11521 }
   11522 
   11523 /* WOL from S5 stops working */
   11524 static void
   11525 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
   11526 {
   11527 	uint16_t kmrn_reg;
   11528 
   11529 	/* Only for igp3 */
   11530 	if (sc->sc_phytype == WMPHY_IGP_3) {
   11531 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
   11532 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
   11533 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11534 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
   11535 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
   11536 	}
   11537 }
   11538 
   11539 /*
   11540  * Workaround for pch's PHYs
   11541  * XXX should be moved to new PHY driver?
   11542  */
   11543 static void
   11544 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
   11545 {
   11546 	if (sc->sc_phytype == WMPHY_82577)
   11547 		wm_set_mdio_slow_mode_hv(sc);
   11548 
	/* XXX Unimplemented: (PCH rev. 2) && (82577 && (phy rev 2 or 3)) */

	/* XXX Unimplemented: (82577 && (phy rev 1 or 2)) || (82578 && (phy rev 1)) */

	/* 82578 */
	if (sc->sc_phytype == WMPHY_82578) {
		/* PCH rev. < 3 */
		if (sc->sc_rev < 3) {
			/* XXX 6 bit shift? Why? Is it page2? */
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
			    0x66c0);
			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
			    0xffff);
		}

		/* XXX phy rev. < 2 */
	}

	/* Select page 0 */

	/* XXX acquire semaphore */
	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
	/* XXX release semaphore */

	/*
	 * Configure the K1 Si workaround during PHY reset, assuming the
	 * link is up, so that K1 is disabled when the link runs at 1Gbps.
	 */
	wm_k1_gig_workaround_hv(sc, 1);
}

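/* Workaround for PCH2 PHYs: just switch MDIO accesses to slow mode. */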
static void
wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
{

	wm_set_mdio_slow_mode_hv(sc);
}

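/*
 * K1 is a power-save state of the Kumeran MAC/PHY interconnect.  It
 * apparently must not stay enabled while the link runs at 1Gbps, so
 * force it off whenever a link is assumed to be up.
 */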
static void
wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
{
	int k1_enable = sc->sc_nvm_k1_enabled;

	/* XXX acquire semaphore */

	if (link) {
		k1_enable = 0;

		/* Link stall fix for link up */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
	} else {
		/* Link stall fix for link down */
		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
	}

	wm_configure_k1_ich8lan(sc, k1_enable);

	/* XXX release semaphore */
}

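/*
 * Put the PHY's MDIO interface into slow mode.  Used by the workarounds
 * for the HV (82577/82578) and LV PHYs above.
 */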
static void
wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
{
	uint32_t reg;

	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
	    reg | HV_KMRN_MDIO_SLOW);
}

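/*
 * Set or clear the K1 enable bit via the Kumeran control/status
 * interface, then briefly force the MAC speed configuration (FRCSPD
 * plus the speed-bypass bit), apparently so the new K1 setting takes
 * effect, and finally restore the original CTRL/CTRL_EXT values.
 */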
static void
wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
{
	uint32_t ctrl, ctrl_ext, tmp;
	uint16_t kmrn_reg;

	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);

	if (k1_enable)
		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;

	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);

	delay(20);

	ctrl = CSR_READ(sc, WMREG_CTRL);
	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);

	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
	tmp |= CTRL_FRCSPD;

	CSR_WRITE(sc, WMREG_CTRL, tmp);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
	CSR_WRITE_FLUSH(sc);
	delay(20);

	CSR_WRITE(sc, WMREG_CTRL, ctrl);
	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
	CSR_WRITE_FLUSH(sc);
	delay(20);
}

/* Special case: the 82575 needs manual init ... */
static void
wm_reset_init_script_82575(struct wm_softc *sc)
{
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  It is the same setup as mentioned in the FreeBSD
	 * driver for the i82575.
	 */

	/* SerDes configuration via SERDESCTRL */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);

	/* CCM configuration via CCMCTL register */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);

	/* PCIe lanes configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);

	/* PCIe PLL Configuration */
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
}

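/*
 * Rewrite MDICNFG according to the per-port CFG3 word in the NVM:
 * select the external MDIO interface and/or the shared MDIO interface
 * as configured there.  Only needed for SGMII setups.
 */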
static void
wm_reset_mdicnfg_82580(struct wm_softc *sc)
{
	uint32_t reg;
	uint16_t nvmword;
	int rv;

	if ((sc->sc_flags & WM_F_SGMII) == 0)
		return;

	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
		    __func__);
		return;
	}

	reg = CSR_READ(sc, WMREG_MDICNFG);
	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
		reg |= MDICNFG_DEST;
	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
		reg |= MDICNFG_COM_MDIO;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);
}

/*
 * I210 Errata 25 and I211 Errata 10: Slow System Clock.
 * If the internal PHY PLL comes up unconfigured, reset the PHY and
 * bounce the function through D3hot with a patched iNVM autoload
 * word until the PLL frequency register reads back as configured.
 */
static void
wm_pll_workaround_i210(struct wm_softc *sc)
{
	uint32_t mdicnfg, wuc;
	uint32_t reg;
	pcireg_t pcireg;
	uint32_t pmreg;
	uint16_t nvmword, tmp_nvmword;
	int phyval;
	bool wa_done = false;
	int i;

	/* Save WUC and MDICNFG registers */
	wuc = CSR_READ(sc, WMREG_WUC);
	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);

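	/* Direct MDIO accesses to the internal PHY while we work on it */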
	reg = mdicnfg & ~MDICNFG_DEST;
	CSR_WRITE(sc, WMREG_MDICNFG, reg);

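	/*
	 * Get the iNVM autoload word and prepare a copy patched with
	 * the PLL workaround value.
	 */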
	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
		nvmword = INVM_DEFAULT_AL;
	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;

	/* Get Power Management cap offset */
	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
		&pmreg, NULL) == 0)
		return;
	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);

		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
			break; /* OK */
		}

		wa_done = true;
		/* Directly reset the internal PHY */
		reg = CSR_READ(sc, WMREG_CTRL);
		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);

		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);

		CSR_WRITE(sc, WMREG_WUC, 0);
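		/* Stage the patched autoload word via the EEARBC register */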
		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

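		/*
		 * Bounce the function through D3hot and back to D0 so
		 * the PLL restarts.
		 */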
		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR);
		pcireg |= PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);
		delay(1000);
		pcireg &= ~PCI_PMCSR_STATE_D3;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    pmreg + PCI_PMCSR, pcireg);

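		/* Write back the original autoload word */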
		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);

		/* Restore WUC register */
		CSR_WRITE(sc, WMREG_WUC, wuc);
	}

	/* Restore MDICNFG setting */
	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
	if (wa_done)
		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
}